repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/sagemaker/test_sagemaker_stateful_sessions.py | tests/entrypoints/sagemaker/test_sagemaker_stateful_sessions.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import openai # use the official client for correctness check
import pytest
import requests
from ...utils import RemoteOpenAIServer
from .conftest import (
HEADER_SAGEMAKER_CLOSED_SESSION_ID,
HEADER_SAGEMAKER_NEW_SESSION_ID,
HEADER_SAGEMAKER_SESSION_ID,
MODEL_NAME_SMOLLM,
)
# Parametrized bad-request cases for closing a session. Each tuple is
# (test_name, session_id_change, request_body_change, expected_error):
#   - session_id_change: optional {"session_id": ...} override used instead of
#     the freshly created (valid) session id
#   - request_body_change: extra fields merged into the CLOSE request body
#   - expected_error: substring expected in the error message, or None to skip
#     the message check
CLOSE_BADREQUEST_CASES = [
    (
        "nonexistent_session_id",
        {"session_id": "nonexistent-session-id"},
        {},
        "session not found",
    ),
    ("malformed_close_request", {}, {"extra-field": "extra-field-data"}, None),
]
@pytest.mark.asyncio
async def test_create_session_badrequest(basic_server_with_lora: RemoteOpenAIServer):
    """A NEW_SESSION request carrying an unexpected extra field is rejected."""
    payload = {"requestType": "NEW_SESSION", "extra-field": "extra-field-data"}
    response = requests.post(
        basic_server_with_lora.url_for("invocations"),
        json=payload,
    )
    assert response.status_code == 400
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "test_name,session_id_change,request_body_change,expected_error",
    CLOSE_BADREQUEST_CASES,
)
async def test_close_session_badrequest(
    basic_server_with_lora: RemoteOpenAIServer,
    test_name: str,
    session_id_change: dict[str, str],
    request_body_change: dict[str, str],
    expected_error: str | None,
):
    """Closing with a bad session id or a malformed body returns HTTP 400."""
    url = basic_server_with_lora.url_for("invocations")

    # Open a real session so there is something valid to clean up afterwards.
    create_response = requests.post(url, json={"requestType": "NEW_SESSION"})
    create_response.raise_for_status()
    session_header = create_response.headers.get(HEADER_SAGEMAKER_NEW_SESSION_ID, "")
    valid_session_id, expiration = session_header.split(";")
    assert valid_session_id

    # Build the (possibly malformed) CLOSE request for this parametrized case.
    close_request_json = {"requestType": "CLOSE", **request_body_change}
    session_id_to_send = session_id_change.get("session_id") or valid_session_id
    bad_close_response = requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: session_id_to_send},
        json=close_request_json,
    )

    # Clean up the real session before asserting; this close must succeed.
    clean_up_response = requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: valid_session_id},
        json={"requestType": "CLOSE"},
    )
    clean_up_response.raise_for_status()

    assert bad_close_response.status_code == 400
    if expected_error:
        assert expected_error in bad_close_response.json()["error"]["message"]
@pytest.mark.asyncio
async def test_close_session_invalidrequest(
    basic_server_with_lora: RemoteOpenAIServer,
):
    """A CLOSE request without the session-id header fails with HTTP 424.

    Fix: the previously declared ``async_client`` fixture argument was never
    used by this test but forced an async OpenAI client to be constructed for
    every run; it has been removed. The test uses raw HTTP requests only.
    """
    # First create a session so the failure below is clearly about the missing
    # header, not about the server having no sessions at all.
    url = basic_server_with_lora.url_for("invocations")
    create_response = requests.post(url, json={"requestType": "NEW_SESSION"})
    create_response.raise_for_status()
    # Header format is "<session-id>;<expiration>"; expiration is not needed.
    valid_session_id, _expiration = create_response.headers.get(
        HEADER_SAGEMAKER_NEW_SESSION_ID, ""
    ).split(";")
    assert valid_session_id

    # CLOSE without the session-id header: the server cannot resolve a session.
    invalid_close_response = requests.post(
        url,
        # no headers to specify session_id
        json={"requestType": "CLOSE"},
    )

    # Clean up the created session before asserting; this close should succeed.
    clean_up_response = requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: valid_session_id},
        json={"requestType": "CLOSE"},
    )
    clean_up_response.raise_for_status()

    assert invalid_close_response.status_code == 424
    assert "invalid session_id" in invalid_close_response.json()["error"]["message"]
@pytest.mark.asyncio
async def test_session(basic_server_with_lora: RemoteOpenAIServer):
    """Happy path: create a session, invoke with it, then close it."""
    url = basic_server_with_lora.url_for("invocations")

    # Open a new stateful session.
    create_response = requests.post(url, json={"requestType": "NEW_SESSION"})
    create_response.raise_for_status()
    new_session_header = create_response.headers.get(
        HEADER_SAGEMAKER_NEW_SESSION_ID, ""
    )
    valid_session_id, expiration = new_session_header.split(";")
    assert valid_session_id

    # Run a deterministic completion request bound to the session.
    invocation_response = requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: valid_session_id},
        json={
            "model": MODEL_NAME_SMOLLM,
            "prompt": "what is 1+1?",
            "max_completion_tokens": 5,
            "temperature": 0.0,
            "logprobs": False,
        },
    )
    invocation_response.raise_for_status()

    # Close the session and confirm the server echoes the closed id back.
    close_response = requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: valid_session_id},
        json={"requestType": "CLOSE"},
    )
    close_response.raise_for_status()
    closed_session_id = close_response.headers.get(HEADER_SAGEMAKER_CLOSED_SESSION_ID)
    assert closed_session_id == valid_session_id
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/sagemaker/conftest.py | tests/entrypoints/sagemaker/conftest.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Shared fixtures and utilities for SageMaker tests."""
import pytest
import pytest_asyncio
from ...utils import RemoteOpenAIServer
# Model name constants used across tests
MODEL_NAME_SMOLLM = "HuggingFaceTB/SmolLM2-135M-Instruct"
LORA_ADAPTER_NAME_SMOLLM = "jekunz/smollm-135m-lora-fineweb-faroese"

# SageMaker stateful-session header constants. NEW_SESSION_ID is returned on
# session creation, SESSION_ID is sent by the client on subsequent requests,
# and CLOSED_SESSION_ID is echoed back when a session is closed.
HEADER_SAGEMAKER_CLOSED_SESSION_ID = "X-Amzn-SageMaker-Closed-Session-Id"
HEADER_SAGEMAKER_SESSION_ID = "X-Amzn-SageMaker-Session-Id"
HEADER_SAGEMAKER_NEW_SESSION_ID = "X-Amzn-SageMaker-New-Session-Id"
@pytest.fixture(scope="session")
def smollm2_lora_files():
    """Download the SmolLM LoRA adapter snapshot once per test session."""
    # Imported lazily so merely collecting tests does not require the hub
    # client to be importable.
    from huggingface_hub import snapshot_download

    lora_path = snapshot_download(repo_id=LORA_ADAPTER_NAME_SMOLLM)
    return lora_path
@pytest.fixture(scope="module")
def basic_server_with_lora(smollm2_lora_files):
    """Basic server fixture with standard configuration.

    Depends on ``smollm2_lora_files`` so the LoRA adapter is downloaded
    before the server process starts.
    """
    args = [
        "--dtype",
        "bfloat16",
        "--max-model-len",
        "8192",
        "--enforce-eager",
        # lora config below
        "--enable-lora",
        "--max-lora-rank",
        "256",
        "--max-cpu-loras",
        "2",
        "--max-num-seqs",
        "64",
    ]
    envs = {
        # Permit loading/updating LoRA adapters at runtime.
        "VLLM_ALLOW_RUNTIME_LORA_UPDATING": "True",
        # Enable the SageMaker stateful-session endpoints under test.
        "SAGEMAKER_ENABLE_STATEFUL_SESSIONS": "True",
    }
    with RemoteOpenAIServer(MODEL_NAME_SMOLLM, args, env_dict=envs) as remote_server:
        yield remote_server
@pytest_asyncio.fixture
async def async_client(basic_server_with_lora: RemoteOpenAIServer):
    """Async OpenAI client fixture bound to ``basic_server_with_lora``."""
    async with basic_server_with_lora.get_async_client() as async_client:
        yield async_client
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/sagemaker/__init__.py | tests/entrypoints/sagemaker/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/sagemaker/test_sagemaker_handler_overrides.py | tests/entrypoints/sagemaker/test_sagemaker_handler_overrides.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Integration tests for handler override functionality.
Tests real customer usage scenarios:
- Using @custom_ping_handler and @custom_invocation_handler decorators
to override handlers
- Setting environment variables for handler specifications
- Writing customer scripts with custom_sagemaker_ping_handler() and
custom_sagemaker_invocation_handler() functions
- Priority: env vars > decorators > customer script files > framework
defaults
Note: These tests focus on validating server responses rather than directly calling
get_ping_handler() and get_invoke_handler() to ensure full integration testing.
"""
import os
import tempfile
import pytest
import requests
from ...utils import RemoteOpenAIServer
from .conftest import (
MODEL_NAME_SMOLLM,
)
class TestHandlerOverrideIntegration:
    """Integration tests simulating real customer usage scenarios.

    Each test simulates a fresh server startup where customers:
    - Use @custom_ping_handler and @custom_invocation_handler decorators
    - Set environment variables (CUSTOM_FASTAPI_PING_HANDLER, etc.)
    - Write customer scripts with custom_sagemaker_ping_handler() and
      custom_sagemaker_invocation_handler() functions
    """

    def setup_method(self):
        """Setup for each test - simulate fresh server startup."""
        self._clear_caches()
        self._clear_env_vars()

    def teardown_method(self):
        """Cleanup after each test."""
        self._clear_env_vars()

    def _clear_caches(self):
        """Clear handler registry and function loader cache."""
        try:
            from model_hosting_container_standards.common.handler import (
                handler_registry,
            )
            from model_hosting_container_standards.sagemaker.sagemaker_loader import (
                SageMakerFunctionLoader,
            )

            handler_registry.clear()
            # Reset the cached loader so the next server start re-reads env vars.
            SageMakerFunctionLoader._default_function_loader = None
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")

    def _clear_env_vars(self):
        """Clear SageMaker environment variables."""
        try:
            from model_hosting_container_standards.common.fastapi.config import (
                FastAPIEnvVars,
            )
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )

            # Clear SageMaker env vars
            for var in [
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME,
            ]:
                os.environ.pop(var, None)
            # Clear FastAPI env vars
            for var in [
                FastAPIEnvVars.CUSTOM_FASTAPI_PING_HANDLER,
                FastAPIEnvVars.CUSTOM_FASTAPI_INVOCATION_HANDLER,
            ]:
                os.environ.pop(var, None)
        except ImportError:
            # Nothing to clear if the standards package is absent.
            pass

    @pytest.mark.asyncio
    async def test_customer_script_functions_auto_loaded(self):
        """Test customer scenario: script functions automatically override
        framework defaults."""
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")

        # Customer writes a script file with ping() and invoke() functions.
        # delete=False is required: the file must survive past this `with`
        # block so the server subprocess can import it; removed in `finally`.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
from fastapi import Request

async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "customer_override",
        "message": "Custom ping from customer script"
    }

async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Custom response from customer script"],
        "source": "customer_override"
    }
"""
            )
            script_path = f.name

        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            # Customer sets SageMaker environment variables to point to their script
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                # Customer tests their server and sees their overrides work
                # automatically
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()

                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()

                # Customer sees their functions are used
                assert ping_data["source"] == "customer_override"
                assert ping_data["message"] == "Custom ping from customer script"
                assert invoke_data["source"] == "customer_override"
                assert invoke_data["predictions"] == [
                    "Custom response from customer script"
                ]
        finally:
            os.unlink(script_path)

    @pytest.mark.asyncio
    async def test_customer_decorator_usage(self):
        """Test customer scenario: using @custom_ping_handler and
        @custom_invocation_handler decorators."""
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")

        # Customer writes a script file with decorators
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
import model_hosting_container_standards.sagemaker as sagemaker_standards
from fastapi import Request

@sagemaker_standards.custom_ping_handler
async def my_ping():
    return {
        "type": "ping",
        "source": "customer_decorator"
    }

@sagemaker_standards.custom_invocation_handler
async def my_invoke(request: Request):
    return {
        "type": "invoke",
        "source": "customer_decorator"
    }
"""
            )
            script_path = f.name

        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()

                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()

                # Customer sees their handlers are used by the server
                assert ping_data["source"] == "customer_decorator"
                assert invoke_data["source"] == "customer_decorator"
        finally:
            os.unlink(script_path)

    @pytest.mark.asyncio
    async def test_handler_priority_order(self):
        """Test priority: @custom_ping_handler/@custom_invocation_handler
        decorators vs script functions."""
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")

        # Customer writes a script with both decorator and regular functions
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
import model_hosting_container_standards.sagemaker as sagemaker_standards
from fastapi import Request

# Customer uses @custom_ping_handler decorator (higher priority than script functions)
@sagemaker_standards.custom_ping_handler
async def decorated_ping():
    return {
        "status": "healthy",
        "source": "ping_decorator_in_script",
        "priority": "decorator"
    }

# Customer also has a regular function (lower priority than
# @custom_ping_handler decorator)
async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "script_function",
        "priority": "function"
    }

# Customer has a regular invoke function
async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Script function response"],
        "source": "script_invoke_function",
        "priority": "function"
    }
"""
            )
            script_path = f.name

        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()

                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()

                # @custom_ping_handler decorator has higher priority than
                # script function
                assert ping_data["source"] == "ping_decorator_in_script"
                assert ping_data["priority"] == "decorator"

                # Script function is used for invoke
                assert invoke_data["source"] == "script_invoke_function"
                assert invoke_data["priority"] == "function"
        finally:
            os.unlink(script_path)

    @pytest.mark.asyncio
    async def test_environment_variable_script_loading(self):
        """Test that environment variables correctly specify script location
        and loading."""
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")

        # Customer writes a script in a specific directory
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
from fastapi import Request

async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "env_loaded_script",
        "method": "environment_variable_loading"
    }

async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Loaded via environment variables"],
        "source": "env_loaded_script",
        "method": "environment_variable_loading"
    }
"""
            )
            script_path = f.name

        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            # Test environment variable script loading
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()

                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()

                # Verify that the script was loaded via environment variables
                assert ping_data["source"] == "env_loaded_script"
                assert ping_data["method"] == "environment_variable_loading"
                assert invoke_data["source"] == "env_loaded_script"
                assert invoke_data["method"] == "environment_variable_loading"
        finally:
            os.unlink(script_path)

    @pytest.mark.asyncio
    async def test_framework_default_handlers(self):
        """Test that framework default handlers work when no customer
        overrides exist."""
        args = [
            "--dtype",
            "bfloat16",
            "--max-model-len",
            "2048",
            "--enforce-eager",
            "--max-num-seqs",
            "32",
        ]
        # Explicitly pass empty env_dict to ensure no SageMaker env vars are set
        # This prevents pollution from previous tests
        try:
            from model_hosting_container_standards.common.fastapi.config import (
                FastAPIEnvVars,
            )
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )

            env_dict = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: "",
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: "",
                FastAPIEnvVars.CUSTOM_FASTAPI_PING_HANDLER: "",
                FastAPIEnvVars.CUSTOM_FASTAPI_INVOCATION_HANDLER: "",
            }
        except ImportError:
            env_dict = {}

        with RemoteOpenAIServer(MODEL_NAME_SMOLLM, args, env_dict=env_dict) as server:
            # Test that default ping works
            ping_response = requests.get(server.url_for("ping"))
            assert ping_response.status_code == 200

            # Test that default invocations work
            invoke_response = requests.post(
                server.url_for("invocations"),
                json={
                    "model": MODEL_NAME_SMOLLM,
                    "messages": [{"role": "user", "content": "Hello"}],
                    "max_tokens": 5,
                },
            )
            assert invoke_response.status_code == 200

    @pytest.mark.asyncio
    async def test_handler_env_var_override(self):
        """Test CUSTOM_FASTAPI_PING_HANDLER and CUSTOM_FASTAPI_INVOCATION_HANDLER
        environment variable overrides."""
        try:
            from model_hosting_container_standards.common.fastapi.config import (
                FastAPIEnvVars,
            )
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")

        # Create a script with both env var handlers and script functions
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
from fastapi import Request, Response
import json

async def env_var_ping_handler(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "status": "healthy",
            "source": "env_var_ping",
            "method": "environment_variable"
        }),
        media_type="application/json"
    )

async def env_var_invoke_handler(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "predictions": ["Environment variable response"],
            "source": "env_var_invoke",
            "method": "environment_variable"
        }),
        media_type="application/json"
    )

async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "script_ping",
        "method": "script_function"
    }

async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Script function response"],
        "source": "script_invoke",
        "method": "script_function"
    }
"""
            )
            script_path = f.name

        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            # Set environment variables to override both handlers
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
                FastAPIEnvVars.CUSTOM_FASTAPI_PING_HANDLER: (
                    f"{script_name}:env_var_ping_handler"
                ),
                FastAPIEnvVars.CUSTOM_FASTAPI_INVOCATION_HANDLER: (
                    f"{script_name}:env_var_invoke_handler"
                ),
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                # Test ping handler override
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()

                # Environment variable should override script function
                assert ping_data["method"] == "environment_variable"
                assert ping_data["source"] == "env_var_ping"

                # Test invocation handler override
                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()

                # Environment variable should override script function
                assert invoke_data["method"] == "environment_variable"
                assert invoke_data["source"] == "env_var_invoke"
        finally:
            os.unlink(script_path)

    @pytest.mark.asyncio
    async def test_env_var_priority_over_decorator_and_script(self):
        """Test that environment variables have highest priority over decorators
        and script functions for both ping and invocation handlers."""
        try:
            from model_hosting_container_standards.common.fastapi.config import (
                FastAPIEnvVars,
            )
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")

        # Create a script with all three handler types for both ping and invocation
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
import model_hosting_container_standards.sagemaker as sagemaker_standards
from fastapi import Request, Response
import json

# Environment variable handlers (highest priority)
async def env_priority_ping(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "status": "healthy",
            "source": "env_var",
            "priority": "environment_variable"
        }),
        media_type="application/json"
    )

async def env_priority_invoke(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "predictions": ["Environment variable response"],
            "source": "env_var",
            "priority": "environment_variable"
        }),
        media_type="application/json"
    )

# Decorator handlers (medium priority)
@sagemaker_standards.custom_ping_handler
async def decorator_ping(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "status": "healthy",
            "source": "decorator",
            "priority": "decorator"
        }),
        media_type="application/json"
    )

@sagemaker_standards.custom_invocation_handler
async def decorator_invoke(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "predictions": ["Decorator response"],
            "source": "decorator",
            "priority": "decorator"
        }),
        media_type="application/json"
    )

# Script functions (lowest priority)
async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "script",
        "priority": "script_function"
    }

async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Script function response"],
        "source": "script",
        "priority": "script_function"
    }
"""
            )
            script_path = f.name

        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            # Set environment variables to specify highest priority handlers
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
                FastAPIEnvVars.CUSTOM_FASTAPI_PING_HANDLER: (
                    f"{script_name}:env_priority_ping"
                ),
                FastAPIEnvVars.CUSTOM_FASTAPI_INVOCATION_HANDLER: (
                    f"{script_name}:env_priority_invoke"
                ),
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                # Test ping handler priority
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()

                # Environment variable has highest priority and should be used
                assert ping_data["priority"] == "environment_variable"
                assert ping_data["source"] == "env_var"

                # Test invocation handler priority
                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()

                # Environment variable has highest priority and should be used
                assert invoke_data["priority"] == "environment_variable"
                assert invoke_data["source"] == "env_var"
        finally:
            os.unlink(script_path)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/offline_mode/test_offline_mode.py | tests/entrypoints/offline_mode/test_offline_mode.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for HF_HUB_OFFLINE mode"""
import dataclasses
import importlib
import sys
import pytest
import urllib3
from vllm import LLM
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.engine.arg_utils import EngineArgs
# Engine configurations exercised by the offline-mode tests. Each entry is
# passed verbatim to LLM(**config); memory/sequence limits are kept small so
# several models can be cached and constructed within one test session.
MODEL_CONFIGS = [
    {
        "model": "facebook/opt-125m",
        "enforce_eager": True,
        "gpu_memory_utilization": 0.20,
        "max_model_len": 64,
        "max_num_batched_tokens": 64,
        "max_num_seqs": 64,
        "tensor_parallel_size": 1,
    },
    {
        "model": "Qwen/Qwen3-0.6B",
        "enforce_eager": True,
        "gpu_memory_utilization": 0.50,
        "max_model_len": 64,
        "max_num_batched_tokens": 64,
        "max_num_seqs": 64,
        "tensor_parallel_size": 1,
        # Deliberately uses a different repo's tokenizer to exercise offline
        # resolution of a tokenizer distinct from the model.
        "tokenizer": "Qwen/Qwen3-4B",
    },
    {
        "model": "mistralai/Mistral-7B-Instruct-v0.1",
        "enforce_eager": True,
        "gpu_memory_utilization": 0.95,
        "max_model_len": 64,
        "max_num_batched_tokens": 64,
        "max_num_seqs": 64,
        "tensor_parallel_size": 1,
        "tokenizer_mode": "mistral",
    },
    # TODO: re-enable once these tests are run with V1
    # {
    #     "model": "sentence-transformers/all-MiniLM-L12-v2",
    #     "enforce_eager": True,
    #     "gpu_memory_utilization": 0.20,
    #     "max_model_len": 64,
    #     "max_num_batched_tokens": 64,
    #     "max_num_seqs": 64,
    #     "tensor_parallel_size": 1,
    # },
]
@pytest.fixture(scope="module")
def cache_models():
    """Populate the local HF cache by constructing each model while online."""
    # Cache model files first
    for model_config in MODEL_CONFIGS:
        LLM(**model_config)
    cleanup_dist_env_and_memory()

    yield
@pytest.mark.skip_global_cleanup
@pytest.mark.usefixtures("cache_models")
def test_offline_mode(monkeypatch: pytest.MonkeyPatch):
    # Set HF to offline mode and ensure we can still construct an LLM
    with monkeypatch.context() as m:
        try:
            m.setenv("HF_HUB_OFFLINE", "1")
            m.setenv("VLLM_NO_USAGE_STATS", "1")

            def disable_connect(*args, **kwargs):
                # Any attempted HTTP(S) connection fails loudly, proving
                # that model construction below never touched the network.
                raise RuntimeError("No http calls allowed")

            m.setattr(
                urllib3.connection.HTTPConnection,
                "connect",
                disable_connect,
            )
            m.setattr(
                urllib3.connection.HTTPSConnection,
                "connect",
                disable_connect,
            )

            # Need to re-import huggingface_hub
            # and friends to set up offline mode
            _re_import_modules()
            # Cached model files should be used in offline mode
            for model_config in MODEL_CONFIGS:
                LLM(**model_config)
        finally:
            # Reset the environment after the test
            # NB: Assuming tests are run in online mode
            _re_import_modules()
def _re_import_modules():
hf_hub_module_names = [k for k in sys.modules if k.startswith("huggingface_hub")]
transformers_module_names = [
k
for k in sys.modules
if k.startswith("transformers") and not k.startswith("transformers_modules")
]
reload_exception = None
for module_name in hf_hub_module_names + transformers_module_names:
try:
importlib.reload(sys.modules[module_name])
except Exception as e:
reload_exception = e
# Try to continue clean up so that other tests are less likely to
# be affected
# Error this test if reloading a module failed
if reload_exception is not None:
raise reload_exception
@pytest.mark.skip_global_cleanup
@pytest.mark.usefixtures("cache_models")
def test_model_from_huggingface_offline(monkeypatch: pytest.MonkeyPatch):
    # Set HF to offline mode and ensure we can still construct an LLM
    # (this variant goes through EngineArgs rather than a raw config dict).
    with monkeypatch.context() as m:
        try:
            m.setenv("HF_HUB_OFFLINE", "1")
            m.setenv("VLLM_NO_USAGE_STATS", "1")

            def disable_connect(*args, **kwargs):
                # Fail fast on any attempted network connection.
                raise RuntimeError("No http calls allowed")

            m.setattr(
                urllib3.connection.HTTPConnection,
                "connect",
                disable_connect,
            )
            m.setattr(
                urllib3.connection.HTTPSConnection,
                "connect",
                disable_connect,
            )

            # Need to re-import huggingface_hub
            # and friends to set up offline mode
            _re_import_modules()
            engine_args = EngineArgs(model="facebook/opt-125m")
            LLM(**dataclasses.asdict(engine_args))
        finally:
            # Reset the environment after the test
            # NB: Assuming tests are run in online mode
            _re_import_modules()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/offline_mode/__init__.py | tests/entrypoints/offline_mode/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/rpc/__init__.py | tests/entrypoints/rpc/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/rpc/test_collective_rpc.py | tests/entrypoints/rpc/test_collective_rpc.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
import pytest
import requests
from tests.utils import RemoteOpenAIServer
# Model served by the fixture below; echoed back by get_model_name over RPC.
MODEL_NAME = "Qwen/Qwen3-0.6B"
class TestWorkerExtension:
    """Worker extension whose methods are invoked via the collective_rpc endpoint."""

    def get_model_name(self) -> str:
        """Test non-pydantic return type."""
        return MODEL_NAME

    def echo_args_kwargs(self, *args, **kwargs) -> dict[str, Any]:
        """Echo back both args and kwargs."""
        return {
            "args": list(args),
            "kwargs": kwargs,
            "total_items": len(args) + len(kwargs),
        }

    def return_none(self, *args, **kwargs) -> None:
        """Test method that does not return anything."""
        return None
@pytest.fixture(scope="module")
def server():
    """Start a vLLM server with the worker extension and dev-mode RPC enabled."""
    args = [
        "--max-model-len",
        "8192",
        "--max-num-seqs",
        "128",
        # Load TestWorkerExtension (above) into each worker process.
        "--worker-extension-cls",
        "tests.entrypoints.rpc.test_collective_rpc.TestWorkerExtension",
    ]
    with RemoteOpenAIServer(
        MODEL_NAME,
        args,
        # VLLM_SERVER_DEV_MODE exposes the collective_rpc endpoint used below.
        env_dict={"VLLM_SERVER_DEV_MODE": "1", "CUDA_VISIBLE_DEVICES": "0"},
    ) as remote_server:
        yield remote_server
def test_get_model_name(server):
"""Test basic response"""
response = requests.post(
server.url_for("collective_rpc"), json={"method": "get_model_name"}
)
assert response.status_code == 200
results = response.json()
assert "results" in results
assert results["results"] == [MODEL_NAME]
def test_return_none(server):
"""Test return none"""
response = requests.post(
server.url_for("collective_rpc"), json={"method": "return_none"}
)
assert response.status_code == 200
results = response.json()
assert results["results"] == [None]
def test_echo_args_kwargs(server):
"""Test args, kwargs, and dict response"""
args = ["arg1", "arg2"]
kwargs = {"key1": "value1", "key2": "value2"}
response = requests.post(
server.url_for("collective_rpc"),
json={"method": "echo_args_kwargs", "args": args, "kwargs": kwargs},
)
assert response.status_code == 200
results = response.json()
result = results["results"][0]
assert result["args"] == args
assert result["kwargs"] == kwargs
assert result["total_items"] == len(args) + len(kwargs)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/__init__.py | tests/entrypoints/pooling/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/reward/test_offline.py | tests/entrypoints/pooling/reward/test_offline.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import weakref
import pytest
import torch
from tests.models.utils import softmax
from vllm import LLM, PoolingParams
from vllm.distributed import cleanup_dist_env_and_memory
MODEL_NAME = "internlm/internlm2-1_8b-reward"
prompts = ["The chef prepared a delicious meal."]
@pytest.fixture(scope="module")
def llm():
# pytest caches the fixture so we use weakref.proxy to
# enable garbage collection
llm = LLM(
model=MODEL_NAME,
max_num_batched_tokens=32768,
tensor_parallel_size=1,
gpu_memory_utilization=0.75,
enforce_eager=True,
trust_remote_code=True,
seed=0,
)
yield weakref.proxy(llm)
del llm
cleanup_dist_env_and_memory()
@pytest.mark.skip_global_cleanup
def test_config(llm: LLM):
vllm_config = llm.llm_engine.vllm_config
assert vllm_config.cache_config.enable_prefix_caching
assert vllm_config.scheduler_config.enable_chunked_prefill
def test_pooling_params(llm: LLM):
def get_outputs(use_activation):
outputs = llm.reward(
prompts,
pooling_params=PoolingParams(use_activation=use_activation),
use_tqdm=False,
)
return torch.cat([x.outputs.data for x in outputs])
default = get_outputs(use_activation=None)
w_activation = get_outputs(use_activation=True)
wo_activation = get_outputs(use_activation=False)
assert torch.allclose(default, w_activation, atol=1e-2), (
"Default should use activation."
)
assert not torch.allclose(w_activation, wo_activation, atol=1e-2), (
"wo_activation should not use activation."
)
assert torch.allclose(softmax(wo_activation), w_activation, atol=1e-2), (
"w_activation should be close to activation(wo_activation)."
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/reward/__init__.py | tests/entrypoints/pooling/reward/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/basic/test_truncation.py | tests/entrypoints/pooling/basic/test_truncation.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
import openai
import pytest
import pytest_asyncio
from tests.utils import RemoteOpenAIServer
from vllm.platforms import current_platform
MODEL_NAME = "sentence-transformers/all-MiniLM-L12-v2"
max_model_len = 128
input = """Immerse yourself in the enchanting chronicle of calculus, a
mathematical domain that has radically transformed our comprehension of
change and motion. Despite its roots in ancient civilizations, the
formal birth of calculus predominantly occurred in the 17th century,
primarily under the influential guidance of Sir Isaac Newton and Gottfried
Wilhelm Leibniz. The earliest traces of calculus concepts are found in
ancient Greek mathematics,most notably in the works of Eudoxus and
Archimedes, around 300 BCE. They utilized the 'method of exhaustion'—a
technique for computing areas and volumes through the use of finite sums.
This methodology laid crucial foundational work for integral calculus.
In the 17th century, both Newton and Leibniz independently pioneered
calculus, each contributing unique perspectives that would shape this new
field."""
@pytest.fixture(scope="module")
def server():
args = [
"--runner",
"pooling",
"--dtype",
"bfloat16",
"--enforce-eager",
"--max-model-len",
str(max_model_len),
]
# ROCm: Use Flex Attention to support encoder-only self-attention.
if current_platform.is_rocm():
args.extend(["--attention-backend", "FLEX_ATTENTION"])
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
yield remote_server
@pytest_asyncio.fixture
async def client(server):
async with server.get_async_client() as async_client:
yield async_client
@pytest.mark.asyncio
async def test_smaller_truncation_size(client: openai.AsyncOpenAI):
truncation_size = 10
kwargs: dict[str, Any] = {
"model": MODEL_NAME,
"input": input,
"truncate_prompt_tokens": truncation_size,
}
response = await client.post(path="embeddings", cast_to=object, body={**kwargs})
assert response["usage"]["prompt_tokens"] == truncation_size
@pytest.mark.asyncio
async def test_zero_truncation_size(client: openai.AsyncOpenAI):
truncation_size = 0
kwargs: dict[str, Any] = {
"model": MODEL_NAME,
"input": input,
"truncate_prompt_tokens": truncation_size,
}
response = await client.post(path="embeddings", cast_to=object, body={**kwargs})
assert response["usage"]["prompt_tokens"] == truncation_size
@pytest.mark.asyncio
async def test_bigger_truncation_size(client: openai.AsyncOpenAI):
truncation_size = max_model_len + 1
kwargs: dict[str, Any] = {
"model": MODEL_NAME,
"input": input,
"truncate_prompt_tokens": truncation_size,
}
with pytest.raises(openai.BadRequestError) as err:
await client.post(path="embeddings", cast_to=object, body={**kwargs})
assert err.value.status_code == 400
error_details = err.value.response.json()["error"]
assert error_details["type"] == "BadRequestError"
expected_message = (
"truncate_prompt_tokens value is "
"greater than max_model_len."
" Please, select a smaller truncation size."
)
assert error_details["message"] == expected_message
@pytest.mark.asyncio
async def test_max_truncation_size(client: openai.AsyncOpenAI):
truncation_size = -1
kwargs: dict[str, Any] = {
"model": MODEL_NAME,
"input": input,
"truncate_prompt_tokens": truncation_size,
}
response = await client.post(path="embeddings", cast_to=object, body={**kwargs})
assert response["usage"]["prompt_tokens"] == max_model_len
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/basic/test_encode.py | tests/entrypoints/pooling/basic/test_encode.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import weakref
import pytest
from vllm import LLM, PoolingParams
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.platforms import current_platform
MODEL_NAME = "intfloat/multilingual-e5-small"
PROMPTS = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
TOKEN_IDS = [
# Using ID={0, 1, 2, 3} results in NaN values,
# so we add this offset of 1000
[1000],
[1000, 1001],
[1000, 1002, 1001],
[1000, 1003, 1001, 1002],
]
@pytest.fixture(scope="module")
def llm():
# ROCm: Use FLEX_ATTENTION backend as it's the only attention backend
# that supports encoder-only models on ROCm.
attention_config = None
if current_platform.is_rocm():
attention_config = {"backend": "FLEX_ATTENTION"}
# pytest caches the fixture so we use weakref.proxy to
# enable garbage collection
llm = LLM(
model=MODEL_NAME,
max_num_batched_tokens=32768,
tensor_parallel_size=1,
gpu_memory_utilization=0.75,
enforce_eager=True,
seed=0,
attention_config=attention_config,
)
yield weakref.proxy(llm)
del llm
cleanup_dist_env_and_memory()
@pytest.mark.skip_global_cleanup
def test_multiple_pooling_params(llm: LLM):
pooling_params = [
PoolingParams(),
PoolingParams(),
PoolingParams(),
PoolingParams(),
]
# Multiple PoolingParams should be matched with each prompt
outputs = llm.encode(PROMPTS, pooling_params=pooling_params, pooling_task="embed")
assert len(PROMPTS) == len(outputs)
# Exception raised, if the size of params does not match the size of prompts
with pytest.raises(ValueError):
outputs = llm.encode(
PROMPTS, pooling_params=pooling_params[:3], pooling_task="embed"
)
# Single PoolingParams should be applied to every prompt
single_pooling_params = PoolingParams()
outputs = llm.encode(
PROMPTS, pooling_params=single_pooling_params, pooling_task="embed"
)
assert len(PROMPTS) == len(outputs)
# pooling_params is None, default params should be applied
outputs = llm.encode(PROMPTS, pooling_params=None, pooling_task="embed")
assert len(PROMPTS) == len(outputs)
def test_right_side_truncation(llm: LLM):
# Embeddings models should truncate the end of the prompt
tokenizer = llm.get_tokenizer()
assert tokenizer.truncation_side == "right"
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/basic/__init__.py | tests/entrypoints/pooling/basic/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/score/test_utils.py | tests/entrypoints/pooling/score/test_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest.mock import patch
import pytest
from vllm.config import ModelConfig
from vllm.entrypoints.chat_utils import ChatTemplateResolutionError
from vllm.entrypoints.score_utils import get_score_prompt
from vllm.inputs import TokensPrompt
from vllm.tokenizers import get_tokenizer
# A cross-encoder model for testing
CROSS_ENCODER_MODEL_ID = "cross-encoder/ms-marco-MiniLM-L-6-v2"
def assert_prompt_tokenization_consistent(
tokenizer, full_prompt, engine_prompt, add_special_tokens=True
):
"""Verify that engine_prompt token_ids match tokenizing full_prompt."""
expected_ids = tokenizer(full_prompt, add_special_tokens=add_special_tokens)[
"input_ids"
]
actual_ids = engine_prompt["prompt_token_ids"]
assert actual_ids == expected_ids, (
f"Token IDs don't match.\nExpected: {expected_ids}\nActual: {actual_ids}"
)
@pytest.fixture(scope="module")
def cross_encoder_model_config():
return ModelConfig(
CROSS_ENCODER_MODEL_ID,
runner="pooling",
)
@pytest.fixture(scope="module")
def cross_encoder_tokenizer(cross_encoder_model_config):
return get_tokenizer(
CROSS_ENCODER_MODEL_ID,
trust_remote_code=cross_encoder_model_config.trust_remote_code,
)
@pytest.fixture(scope="module")
def llm_reranker_model_config():
"""Model config for LLM-as-reranker style (no pad token)."""
config = ModelConfig(
CROSS_ENCODER_MODEL_ID,
runner="pooling",
)
# use_pad_token is a property that reads from hf_config,
# so we set it there to override the default (True)
config.hf_config.use_pad_token = False
return config
@pytest.fixture
def tokenization_kwargs():
"""Common tokenization kwargs used across tests."""
return {"add_special_tokens": True, "return_tensors": None}
@pytest.fixture
def mock_model_with_score_template():
"""Mock model class that supports score template and tracks post_process calls."""
class MockModelWithScoreTemplate:
supports_score_template = True
post_process_called: list[TokensPrompt] = []
@staticmethod
def get_score_template(p1: str, p2: str) -> str:
return f"[QUERY]{p1}[SEP][DOC]{p2}"
@staticmethod
def post_process_tokens(prompt: TokensPrompt) -> None:
MockModelWithScoreTemplate.post_process_called.append(prompt)
return MockModelWithScoreTemplate
@pytest.fixture
def mock_model_no_score_template():
"""Mock model class that does not support score template."""
class MockModelNoScoreTemplate:
supports_score_template = False
return MockModelNoScoreTemplate
class TestGetScorePrompt:
"""Tests for the get_score_prompt function."""
def test_tokenization_kwargs_passed_through(
self,
llm_reranker_model_config,
cross_encoder_tokenizer,
):
"""Test that tokenization kwargs are properly passed through."""
data_1 = "Query text"
data_2 = "Document text"
# Test with truncation - custom kwargs for this test
custom_tokenization_kwargs = {
"add_special_tokens": True,
"return_tensors": None,
"truncation": True,
"max_length": 20,
}
full_prompt, engine_prompt = get_score_prompt(
llm_reranker_model_config,
cross_encoder_tokenizer,
custom_tokenization_kwargs,
data_1,
data_2,
)
assert isinstance(full_prompt, str)
assert "prompt_token_ids" in engine_prompt
# With max_length=20 and truncation, should not exceed this
assert len(engine_prompt["prompt_token_ids"]) <= 20
# Since truncation was applied, token_ids should be a prefix of full encoding
full_ids = cross_encoder_tokenizer(full_prompt, add_special_tokens=True)[
"input_ids"
]
actual_ids = engine_prompt["prompt_token_ids"]
assert full_ids[: len(actual_ids)] == actual_ids, (
f"Token IDs are not a prefix of full encoding.\n"
f"Full IDs: {full_ids}\n"
f"Actual IDs: {actual_ids}"
)
def test_model_supports_score_template(
self,
cross_encoder_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
mock_model_with_score_template,
):
"""Test when model supports score template (no score_template arg)."""
with patch(
"vllm.model_executor.model_loader.get_model_cls",
return_value=mock_model_with_score_template,
):
full_prompt, engine_prompt = get_score_prompt(
cross_encoder_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
"query text",
"document text",
)
assert full_prompt == "[QUERY]query text[SEP][DOC]document text"
assert "prompt_token_ids" in engine_prompt
assert len(engine_prompt["prompt_token_ids"]) > 0
assert_prompt_tokenization_consistent(
cross_encoder_tokenizer, full_prompt, engine_prompt
)
def test_model_supports_score_template_but_custom_template_provided(
self,
cross_encoder_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
mock_model_with_score_template,
):
"""Test when model supports score template but custom template is provided."""
template = (
'TEMPLATE_USED {{ messages[0]["content"] }} {{ messages[1]["content"] }}'
)
with (
patch(
"vllm.model_executor.model_loader.get_model_cls",
return_value=mock_model_with_score_template,
),
):
full_prompt, engine_prompt = get_score_prompt(
cross_encoder_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
"query",
"doc",
score_template=template, # Providing a template
)
assert "prompt_token_ids" in engine_prompt
assert full_prompt == "TEMPLATE_USED query doc"
assert_prompt_tokenization_consistent(
cross_encoder_tokenizer, full_prompt, engine_prompt
)
def test_not_using_default_template(
self,
llm_reranker_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
mock_model_no_score_template,
):
# FIXME: For now, we only apply a template when one is explicitly provided.
# We cannot rely on the tokenizer's chat template because many models
# inherit junk templates from their base LLM, which breaks both the models
# and the tests that use them.
with (
patch(
"vllm.model_executor.model_loader.get_model_cls",
return_value=mock_model_no_score_template,
),
patch(
"vllm.entrypoints.score_utils.apply_hf_chat_template",
return_value="test querytest doc",
),
):
full_prompt, engine_prompt = get_score_prompt(
llm_reranker_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
"test query",
"test doc",
)
assert full_prompt == "test querytest doc"
assert "prompt_token_ids" in engine_prompt
assert_prompt_tokenization_consistent(
cross_encoder_tokenizer, full_prompt, engine_prompt
)
def test_fallback_with_pad_token(
self,
cross_encoder_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
mock_model_no_score_template,
):
"""Test fallback path when ChatTemplateResolutionError
and use_pad_token=True."""
with (
patch(
"vllm.model_executor.model_loader.get_model_cls",
return_value=mock_model_no_score_template,
),
patch(
"vllm.entrypoints.score_utils.apply_hf_chat_template",
side_effect=ChatTemplateResolutionError("No template"),
),
):
full_prompt, engine_prompt = get_score_prompt(
cross_encoder_model_config, # use_pad_token=True
cross_encoder_tokenizer,
tokenization_kwargs,
"query",
"document",
)
assert "prompt_token_ids" in engine_prompt
# Should have token_type_ids from text_pair encoding
assert "token_type_ids" in engine_prompt
assert "query" in full_prompt
assert "document" in full_prompt
assert full_prompt != "querydocument"
assert (
engine_prompt["prompt_token_ids"]
== cross_encoder_tokenizer(
"query", text_pair="document", add_special_tokens=True
)["input_ids"]
)
# FIXME(?): add_special_tokens=False is needed because in this case
# full_prompt is obtained by decoding the tokenized prompt, which includes
# special tokens and we would get duplicated special tokens otherwise.
# This is inconsistent with other cases.
assert_prompt_tokenization_consistent(
cross_encoder_tokenizer,
full_prompt,
engine_prompt,
add_special_tokens=False,
)
def test_fallback_without_pad_token(
self,
llm_reranker_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
mock_model_no_score_template,
):
"""Test fallback path when ChatTemplateResolutionError
and use_pad_token=False."""
with (
patch(
"vllm.model_executor.model_loader.get_model_cls",
return_value=mock_model_no_score_template,
),
patch(
"vllm.entrypoints.score_utils.apply_hf_chat_template",
side_effect=ChatTemplateResolutionError("No template"),
),
):
full_prompt, engine_prompt = get_score_prompt(
llm_reranker_model_config, # use_pad_token=False
cross_encoder_tokenizer,
tokenization_kwargs,
"query",
"document",
)
assert full_prompt == "querydocument"
assert "prompt_token_ids" in engine_prompt
assert_prompt_tokenization_consistent(
cross_encoder_tokenizer, full_prompt, engine_prompt
)
def test_post_process_tokens_called(
self,
cross_encoder_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
mock_model_with_score_template,
):
"""Test that post_process_tokens is called on the engine prompt."""
# Reset the call tracker
mock_model_with_score_template.post_process_called.clear()
with (
patch(
"vllm.model_executor.model_loader.get_model_cls",
return_value=mock_model_with_score_template,
),
patch(
"vllm.entrypoints.score_utils.apply_hf_chat_template",
side_effect=ChatTemplateResolutionError("No template"),
),
):
full_prompt, engine_prompt = get_score_prompt(
cross_encoder_model_config,
cross_encoder_tokenizer,
tokenization_kwargs,
"query",
"doc",
)
# post_process_tokens should have been called once
assert len(mock_model_with_score_template.post_process_called) == 1
assert mock_model_with_score_template.post_process_called[0] is engine_prompt
assert_prompt_tokenization_consistent(
cross_encoder_tokenizer, full_prompt, engine_prompt
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/score/test_offline.py | tests/entrypoints/pooling/score/test_offline.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import weakref
import pytest
import torch
from tests.models.utils import softmax
from vllm import LLM, PoolingParams
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.platforms import current_platform
MODEL_NAME = "tomaarsen/Qwen3-Reranker-0.6B-seq-cls"
@pytest.fixture(scope="module")
def llm():
# ROCm: Use FLEX_ATTENTION backend as it's the only attention backend
# that supports encoder-only models on ROCm.
attention_config = None
if current_platform.is_rocm():
attention_config = {"backend": "FLEX_ATTENTION"}
# pytest caches the fixture so we use weakref.proxy to
# enable garbage collection
llm = LLM(
model=MODEL_NAME,
max_num_batched_tokens=32768,
tensor_parallel_size=1,
gpu_memory_utilization=0.75,
enforce_eager=True,
seed=0,
attention_config=attention_config,
)
yield weakref.proxy(llm)
del llm
cleanup_dist_env_and_memory()
def test_pooling_params(llm: LLM):
def get_outputs(use_activation):
text_1 = "What is the capital of France?"
text_2 = "The capital of France is Paris."
outputs = llm.score(
text_1,
text_2,
pooling_params=PoolingParams(use_activation=use_activation),
use_tqdm=False,
)
return torch.tensor([x.outputs.score for x in outputs])
default = get_outputs(use_activation=None)
w_activation = get_outputs(use_activation=True)
wo_activation = get_outputs(use_activation=False)
assert torch.allclose(default, w_activation, atol=1e-2), (
"Default should use activation."
)
assert not torch.allclose(w_activation, wo_activation, atol=1e-2), (
"wo_activation should not use activation."
)
assert torch.allclose(softmax(wo_activation), w_activation, atol=1e-2), (
"w_activation should be close to activation(wo_activation)."
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/score/test_online_score.py | tests/entrypoints/pooling/score/test_online_score.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
import pytest
import requests
import torch
import torch.nn.functional as F
from torch import tensor
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.pooling.score.protocol import ScoreResponse
from vllm.platforms import current_platform
MODELS = [
{"name": "BAAI/bge-reranker-v2-m3", "is_cross_encoder": True},
{"name": "BAAI/bge-base-en-v1.5", "is_cross_encoder": False},
]
DTYPE = "half"
def run_transformers(hf_model, model, text_pairs):
if model["is_cross_encoder"]:
return hf_model.predict(text_pairs).tolist()
else:
hf_embeddings = [hf_model.encode(text_pair) for text_pair in text_pairs]
return [
F.cosine_similarity(tensor(pair[0]), tensor(pair[1]), dim=0)
for pair in hf_embeddings
]
@pytest.fixture(scope="class", params=MODELS)
def model(request):
yield request.param
@pytest.fixture(scope="class")
def server(model: dict[str, Any]):
args = ["--enforce-eager", "--max-model-len", "100", "--dtype", DTYPE]
# ROCm: Use Flex Attention to support encoder-only self-attention.
if current_platform.is_rocm():
args.extend(["--attention-backend", "FLEX_ATTENTION"])
with RemoteOpenAIServer(model["name"], args) as remote_server:
yield remote_server
@pytest.fixture(scope="class")
def runner(model: dict[str, Any], hf_runner):
kwargs = {
"dtype": DTYPE,
"is_cross_encoder"
if model["is_cross_encoder"]
else "is_sentence_transformer": True,
}
with hf_runner(model["name"], **kwargs) as hf_model:
yield hf_model
class TestModel:
def test_text_1_str_text_2_list(
self, server: RemoteOpenAIServer, model: dict[str, Any], runner
):
text_1 = "What is the capital of France?"
text_2 = [
"The capital of Brazil is Brasilia.",
"The capital of France is Paris.",
]
score_response = requests.post(
server.url_for("score"),
json={
"model": model["name"],
"text_1": text_1,
"text_2": text_2,
},
)
score_response.raise_for_status()
score = ScoreResponse.model_validate(score_response.json())
assert score.id is not None
assert score.data is not None
assert len(score.data) == 2
vllm_outputs = [d.score for d in score.data]
text_pairs = [[text_1, text_2[0]], [text_1, text_2[1]]]
hf_outputs = run_transformers(runner, model, text_pairs)
for i in range(len(vllm_outputs)):
assert hf_outputs[i] == pytest.approx(vllm_outputs[i], rel=0.01)
def test_text_1_list_text_2_list(
self, server: RemoteOpenAIServer, model: dict[str, Any], runner
):
text_1 = [
"What is the capital of the United States?",
"What is the capital of France?",
]
text_2 = [
"The capital of Brazil is Brasilia.",
"The capital of France is Paris.",
]
score_response = requests.post(
server.url_for("score"),
json={
"model": model["name"],
"text_1": text_1,
"text_2": text_2,
},
)
score_response.raise_for_status()
score = ScoreResponse.model_validate(score_response.json())
assert score.id is not None
assert score.data is not None
assert len(score.data) == 2
vllm_outputs = [d.score for d in score.data]
text_pairs = [[text_1[0], text_2[0]], [text_1[1], text_2[1]]]
hf_outputs = run_transformers(runner, model, text_pairs)
for i in range(len(vllm_outputs)):
assert hf_outputs[i] == pytest.approx(vllm_outputs[i], rel=0.01)
def test_text_1_str_text_2_str(
self, server: RemoteOpenAIServer, model: dict[str, Any], runner
):
text_1 = "What is the capital of France?"
text_2 = "The capital of France is Paris."
score_response = requests.post(
server.url_for("score"),
json={
"model": model["name"],
"text_1": text_1,
"text_2": text_2,
},
)
score_response.raise_for_status()
score = ScoreResponse.model_validate(score_response.json())
assert score.id is not None
assert score.data is not None
assert len(score.data) == 1
vllm_outputs = [d.score for d in score.data]
text_pairs = [[text_1, text_2]]
hf_outputs = run_transformers(runner, model, text_pairs)
for i in range(len(vllm_outputs)):
assert hf_outputs[i] == pytest.approx(vllm_outputs[i], rel=0.01)
def test_score_max_model_len(
self, server: RemoteOpenAIServer, model: dict[str, Any]
):
text_1 = "What is the capital of France?" * 20
text_2 = [
"The capital of Brazil is Brasilia.",
"The capital of France is Paris.",
]
score_response = requests.post(
server.url_for("score"),
json={
"model": model["name"],
"text_1": text_1,
"text_2": text_2,
},
)
assert score_response.status_code == 400
# Assert just a small fragments of the response
assert "Please reduce the length of the input." in score_response.text
# Test truncation
score_response = requests.post(
server.url_for("score"),
json={
"model": model["name"],
"text_1": text_1,
"text_2": text_2,
"truncate_prompt_tokens": 101,
},
)
assert score_response.status_code == 400
assert "Please, select a smaller truncation size." in score_response.text
def test_invocations(self, server: RemoteOpenAIServer, model: dict[str, Any]):
text_1 = "What is the capital of France?"
text_2 = "The capital of France is Paris."
request_args = {
"model": model["name"],
"text_1": text_1,
"text_2": text_2,
}
score_response = requests.post(server.url_for("score"), json=request_args)
score_response.raise_for_status()
invocation_response = requests.post(
server.url_for("invocations"), json=request_args
)
invocation_response.raise_for_status()
score_output = score_response.json()
invocation_output = invocation_response.json()
assert score_output.keys() == invocation_output.keys()
for score_data, invocation_data in zip(
score_output["data"], invocation_output["data"]
):
assert score_data.keys() == invocation_data.keys()
assert score_data["score"] == pytest.approx(
invocation_data["score"], rel=0.05
)
# TODO: reset this tolerance to 0.01 once we find
# an alternative to flash_attn with bfloat16
def test_use_activation(self, server: RemoteOpenAIServer, model: dict[str, Any]):
def get_outputs(use_activation):
text_1 = "What is the capital of France?"
text_2 = "The capital of France is Paris."
response = requests.post(
server.url_for("score"),
json={
"model": model["name"],
"text_1": text_1,
"text_2": text_2,
"use_activation": use_activation,
},
)
if response.status_code != 200:
return response
outputs = response.json()
return torch.tensor([x["score"] for x in outputs["data"]])
if model["is_cross_encoder"]:
default = get_outputs(use_activation=None)
w_activation = get_outputs(use_activation=True)
wo_activation = get_outputs(use_activation=False)
assert torch.allclose(default, w_activation, atol=1e-2), (
"Default should use activation."
)
assert not torch.allclose(w_activation, wo_activation, atol=1e-2), (
"wo_activation should not use activation."
)
assert torch.allclose(F.sigmoid(wo_activation), w_activation, atol=1e-2), (
"w_activation should be close to activation(wo_activation)."
)
else:
get_outputs(use_activation=None)
# The activation parameter only works for the is_cross_encoder model
response = get_outputs(use_activation=True)
assert response.status_code == 400
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/score/test_online_rerank.py | tests/entrypoints/pooling/score/test_online_rerank.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import requests
import torch
import torch.nn.functional as F
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.pooling.pooling.protocol import PoolingResponse
from vllm.entrypoints.pooling.score.protocol import RerankResponse
from vllm.platforms import current_platform
MODEL_NAME = "BAAI/bge-reranker-base"
DTYPE = "bfloat16"
@pytest.fixture(scope="module")
def server():
args = ["--enforce-eager", "--max-model-len", "100", "--dtype", DTYPE]
# ROCm: Use Flex Attention to support encoder-only self-attention.
if current_platform.is_rocm():
args.extend(["--attention-backend", "FLEX_ATTENTION"])
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
yield remote_server
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_rerank_texts(server: RemoteOpenAIServer, model_name: str):
query = "What is the capital of France?"
documents = [
"The capital of Brazil is Brasilia.",
"The capital of France is Paris.",
]
rerank_response = requests.post(
server.url_for("rerank"),
json={
"model": model_name,
"query": query,
"documents": documents,
},
)
rerank_response.raise_for_status()
rerank = RerankResponse.model_validate(rerank_response.json())
assert rerank.id is not None
assert rerank.results is not None
assert len(rerank.results) == 2
assert rerank.results[0].relevance_score >= 0.9
assert rerank.results[1].relevance_score <= 0.01
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_top_n(server: RemoteOpenAIServer, model_name: str):
query = "What is the capital of France?"
documents = [
"The capital of Brazil is Brasilia.",
"The capital of France is Paris.",
"Cross-encoder models are neat",
]
rerank_response = requests.post(
server.url_for("rerank"),
json={"model": model_name, "query": query, "documents": documents, "top_n": 2},
)
rerank_response.raise_for_status()
rerank = RerankResponse.model_validate(rerank_response.json())
assert rerank.id is not None
assert rerank.results is not None
assert len(rerank.results) == 2
assert rerank.results[0].relevance_score >= 0.9
assert rerank.results[1].relevance_score <= 0.01
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_rerank_max_model_len(server: RemoteOpenAIServer, model_name: str):
query = "What is the capital of France?" * 100
documents = [
"The capital of Brazil is Brasilia.",
"The capital of France is Paris.",
]
rerank_response = requests.post(
server.url_for("rerank"),
json={"model": model_name, "query": query, "documents": documents},
)
assert rerank_response.status_code == 400
# Assert just a small fragments of the response
assert "Please reduce the length of the input." in rerank_response.text
def test_invocations(server: RemoteOpenAIServer):
query = "What is the capital of France?"
documents = [
"The capital of Brazil is Brasilia.",
"The capital of France is Paris.",
]
request_args = {
"model": MODEL_NAME,
"query": query,
"documents": documents,
}
rerank_response = requests.post(server.url_for("rerank"), json=request_args)
rerank_response.raise_for_status()
invocation_response = requests.post(
server.url_for("invocations"), json=request_args
)
invocation_response.raise_for_status()
rerank_output = rerank_response.json()
invocation_output = invocation_response.json()
assert rerank_output.keys() == invocation_output.keys()
for rerank_result, invocations_result in zip(
rerank_output["results"], invocation_output["results"]
):
assert rerank_result.keys() == invocations_result.keys()
assert rerank_result["relevance_score"] == pytest.approx(
invocations_result["relevance_score"], rel=0.05
)
# TODO: reset this tolerance to 0.01 once we find
# an alternative to flash_attn with bfloat16
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_use_activation(server: RemoteOpenAIServer, model_name: str):
async def get_outputs(use_activation):
query = "What is the capital of France?"
documents = [
"The capital of Brazil is Brasilia.",
"The capital of France is Paris.",
]
response = requests.post(
server.url_for("rerank"),
json={
"model": model_name,
"query": query,
"documents": documents,
"use_activation": use_activation,
},
)
outputs = response.json()
return torch.tensor([x["relevance_score"] for x in outputs["results"]])
default = await get_outputs(use_activation=None)
w_activation = await get_outputs(use_activation=True)
wo_activation = await get_outputs(use_activation=False)
assert torch.allclose(default, w_activation, atol=1e-2), (
"Default should use activation."
)
assert not torch.allclose(w_activation, wo_activation, atol=1e-2), (
"wo_activation should not use activation."
)
assert torch.allclose(F.sigmoid(wo_activation), w_activation, atol=1e-2), (
"w_activation should be close to activation(wo_activation)."
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_pooling_classify(server: RemoteOpenAIServer, model_name: str):
input_text = "This product was excellent and exceeded my expectations"
response = requests.post(
server.url_for("pooling"),
json={
"model": model_name,
"input": input_text,
"encoding_format": "float",
"task": "classify",
},
)
poolings = PoolingResponse.model_validate(response.json())
assert len(poolings.data) == 1
assert len(poolings.data[0].data) == 1
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_pooling_token_classify(server: RemoteOpenAIServer, model_name: str):
input_text = ["The chef prepared a delicious meal."]
response = requests.post(
server.url_for("pooling"),
json={"model": model_name, "input": input_text, "encoding_format": "float"},
)
poolings = PoolingResponse.model_validate(response.json())
assert len(poolings.data) == 1
assert len(poolings.data[0].data) == 11
assert len(poolings.data[0].data[0]) == 1
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("task", ["embed", "token_embed", "plugin"])
async def test_pooling_not_supported(
server: RemoteOpenAIServer, model_name: str, task: str
):
response = requests.post(
server.url_for("pooling"),
json={
"model": model_name,
"input": "test",
"encoding_format": "float",
"task": task,
},
)
assert response.json()["error"]["type"] == "BadRequestError"
assert response.json()["error"]["message"].startswith(
f"Task {task} is not supported"
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/score/__init__.py | tests/entrypoints/pooling/score/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/score/test_correctness_mteb.py | tests/entrypoints/pooling/score/test_correctness_mteb.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import pytest
from tests.models.language.pooling_mteb_test.mteb_score_utils import (
MTEB_RERANK_LANGS,
MTEB_RERANK_TASKS,
MTEB_RERANK_TOL,
RerankClientMtebEncoder,
ScoreClientMtebEncoder,
run_mteb_rerank,
)
from tests.utils import RemoteOpenAIServer
from vllm.platforms import current_platform
os.environ["VLLM_LOGGING_LEVEL"] = "WARNING"
MODEL_NAME = "cross-encoder/ms-marco-MiniLM-L-6-v2"
st_main_score = 0.33457
@pytest.fixture(scope="module")
def server():
args = ["--runner", "pooling", "--enforce-eager", "--disable-uvicorn-access-log"]
# ROCm: Use Flex Attention to support encoder-only self-attention.
if current_platform.is_rocm():
args.extend(["--attention-backend", "FLEX_ATTENTION"])
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
yield remote_server
def test_mteb_score(server):
url = server.url_for("score")
encoder = ScoreClientMtebEncoder(MODEL_NAME, url)
vllm_main_score = run_mteb_rerank(encoder, MTEB_RERANK_TASKS, MTEB_RERANK_LANGS)
print("VLLM main score: ", vllm_main_score)
print("SentenceTransformer main score: ", st_main_score)
print("Difference: ", st_main_score - vllm_main_score)
# We are not concerned that the vllm mteb results are better
# than SentenceTransformers, so we only perform one-sided testing.
assert st_main_score - vllm_main_score < MTEB_RERANK_TOL
def test_mteb_rerank(server):
url = server.url_for("rerank")
encoder = RerankClientMtebEncoder(MODEL_NAME, url)
vllm_main_score = run_mteb_rerank(encoder, MTEB_RERANK_TASKS, MTEB_RERANK_LANGS)
print("VLLM main score: ", vllm_main_score)
print("SentenceTransformer main score: ", st_main_score)
print("Difference: ", st_main_score - vllm_main_score)
# We are not concerned that the vllm mteb results are better
# than SentenceTransformers, so we only perform one-sided testing.
assert st_main_score - vllm_main_score < MTEB_RERANK_TOL
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/pooling/test_online.py | tests/entrypoints/pooling/pooling/test_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
import json
import numpy as np
import pytest
import requests
import torch
from tests.models.utils import check_embeddings_close
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.pooling.pooling.protocol import PoolingResponse
from vllm.tokenizers import get_tokenizer
from vllm.utils.serial_utils import (
EMBED_DTYPE_TO_TORCH_DTYPE,
ENDIANNESS,
MetadataItem,
binary2tensor,
build_metadata_items,
decode_pooling_output,
)
MODEL_NAME = "internlm/internlm2-1_8b-reward"
DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\n'}}{% endfor %}""" # noqa: E501
@pytest.fixture(scope="module")
def server():
args = [
"--runner",
"pooling",
# use half precision for speed and memory savings in CI environment
"--dtype",
"bfloat16",
"--enforce-eager",
"--max-model-len",
"512",
"--chat-template",
DUMMY_CHAT_TEMPLATE,
"--trust-remote-code",
]
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
yield remote_server
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_single_pooling(server: RemoteOpenAIServer, model_name: str):
input_texts = [
"The chef prepared a delicious meal.",
]
# test single pooling
response = requests.post(
server.url_for("pooling"),
json={"model": model_name, "input": input_texts, "encoding_format": "float"},
)
response.raise_for_status()
poolings = PoolingResponse.model_validate(response.json())
assert poolings.id is not None
assert len(poolings.data) == 1
assert len(poolings.data[0].data) == 8
assert poolings.usage.completion_tokens == 0
assert poolings.usage.prompt_tokens == 8
assert poolings.usage.total_tokens == 8
# test using token IDs
input_tokens = [1, 1, 1, 1, 1]
response = requests.post(
server.url_for("pooling"),
json={"model": model_name, "input": input_tokens, "encoding_format": "float"},
)
response.raise_for_status()
poolings = PoolingResponse.model_validate(response.json())
assert poolings.id is not None
assert len(poolings.data) == 1
assert len(poolings.data[0].data) == 5
assert poolings.usage.completion_tokens == 0
assert poolings.usage.prompt_tokens == 5
assert poolings.usage.total_tokens == 5
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_batch_pooling(server: RemoteOpenAIServer, model_name: str):
# test list[str]
input_texts = [
"The cat sat on the mat.",
"A feline was resting on a rug.",
"Stars twinkle brightly in the night sky.",
]
response = requests.post(
server.url_for("pooling"),
json={"model": model_name, "input": input_texts, "encoding_format": "float"},
)
response.raise_for_status()
poolings = PoolingResponse.model_validate(response.json())
assert poolings.id is not None
assert len(poolings.data) == 3
assert len(poolings.data[0].data) == 8
assert poolings.usage.completion_tokens == 0
assert poolings.usage.prompt_tokens == 29
assert poolings.usage.total_tokens == 29
# test list[list[int]]
input_tokens = [
[4, 5, 7, 9, 20],
[15, 29, 499],
[24, 24, 24, 24, 24],
[25, 32, 64, 77],
]
response = requests.post(
server.url_for("pooling"),
json={"model": model_name, "input": input_tokens, "encoding_format": "float"},
)
response.raise_for_status()
poolings = PoolingResponse.model_validate(response.json())
assert poolings.id is not None
assert len(poolings.data) == 4
assert len(poolings.data[0].data) == 5
assert poolings.usage.completion_tokens == 0
assert poolings.usage.prompt_tokens == 17
assert poolings.usage.total_tokens == 17
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_conversation_pooling(server: RemoteOpenAIServer, model_name: str):
messages = [
{
"role": "user",
"content": "The cat sat on the mat.",
},
{
"role": "assistant",
"content": "A feline was resting on a rug.",
},
{
"role": "user",
"content": "Stars twinkle brightly in the night sky.",
},
]
chat_response = requests.post(
server.url_for("pooling"),
json={
"model": model_name,
"messages": messages,
"encoding_format": "float",
},
)
chat_response.raise_for_status()
chat_poolings = PoolingResponse.model_validate(chat_response.json())
tokenizer = get_tokenizer(tokenizer_name=model_name, trust_remote_code=True)
prompt = tokenizer.apply_chat_template(
messages,
chat_template=DUMMY_CHAT_TEMPLATE,
add_generation_prompt=True,
continue_final_message=False,
tokenize=False,
)
completions_response = requests.post(
server.url_for("pooling"),
json={
"model": model_name,
"input": prompt,
"encoding_format": "float",
# To be consistent with chat
"add_special_tokens": False,
},
)
completions_response.raise_for_status()
completion_poolings = PoolingResponse.model_validate(completions_response.json())
assert chat_poolings.id is not None
assert completion_poolings.id is not None
assert chat_poolings.created <= completion_poolings.created
assert chat_poolings.model_dump(exclude={"id", "created"}) == (
completion_poolings.model_dump(exclude={"id", "created"})
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_batch_base64_pooling(server: RemoteOpenAIServer, model_name: str):
input_texts = [
"Hello my name is",
"The best thing about vLLM is that it supports many different models",
]
float_response = requests.post(
server.url_for("pooling"),
json={
"input": input_texts,
"model": model_name,
"encoding_format": "float",
},
)
float_response.raise_for_status()
responses_float = PoolingResponse.model_validate(float_response.json())
float_data = [np.array(d.data).squeeze(-1).tolist() for d in responses_float.data]
base64_response = requests.post(
server.url_for("pooling"),
json={
"input": input_texts,
"model": model_name,
"encoding_format": "base64",
},
)
base64_response.raise_for_status()
responses_base64 = PoolingResponse.model_validate(base64_response.json())
decoded_responses_base64_data = []
for data in responses_base64.data:
decoded_responses_base64_data.append(
np.frombuffer(base64.b64decode(data.data), dtype="float32").tolist()
)
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=decoded_responses_base64_data,
name_0="float32",
name_1="base64",
)
# Default response is float32 decoded from base64 by OpenAI Client
default_response = requests.post(
server.url_for("pooling"),
json={
"input": input_texts,
"model": model_name,
},
)
default_response.raise_for_status()
responses_default = PoolingResponse.model_validate(default_response.json())
default_data = [
np.array(d.data).squeeze(-1).tolist() for d in responses_default.data
]
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=default_data,
name_0="float32",
name_1="default",
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_base64_embed_dtype_and_endianness(
server: RemoteOpenAIServer, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
url = server.url_for("pooling")
float_response = requests.post(
url,
json={
"model": model_name,
"input": input_texts,
"encoding_format": "float",
},
)
responses_float = PoolingResponse.model_validate(float_response.json())
float_data = [np.array(d.data).squeeze(-1).tolist() for d in responses_float.data]
for embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE:
for endianness in ENDIANNESS:
responses_base64 = requests.post(
url,
json={
"model": model_name,
"input": input_texts,
"encoding_format": "base64",
"embed_dtype": embed_dtype,
"endianness": endianness,
},
)
base64_data = []
for data in responses_base64.json()["data"]:
binary = base64.b64decode(data["data"])
tensor = binary2tensor(binary, (-1,), embed_dtype, endianness)
base64_data.append(tensor.to(torch.float32).tolist())
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=base64_data,
name_0="float_data",
name_1="base64_data",
tol=1e-2,
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_bytes_embed_dtype_and_endianness(
server: RemoteOpenAIServer, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
url = server.url_for("pooling")
float_response = requests.post(
url,
json={
"model": model_name,
"input": input_texts,
"encoding_format": "float",
},
)
responses_float = PoolingResponse.model_validate(float_response.json())
float_data = [np.array(d.data).squeeze(-1).tolist() for d in responses_float.data]
for embed_dtype in list(EMBED_DTYPE_TO_TORCH_DTYPE.keys()):
for endianness in ENDIANNESS:
responses_bytes = requests.post(
url,
json={
"model": model_name,
"input": input_texts,
"encoding_format": "bytes",
"embed_dtype": embed_dtype,
"endianness": endianness,
},
)
metadata = json.loads(responses_bytes.headers["metadata"])
body = responses_bytes.content
items = [MetadataItem(**x) for x in metadata["data"]]
bytes_data = decode_pooling_output(items=items, body=body)
bytes_data = [x.to(torch.float32).view(-1).tolist() for x in bytes_data]
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=bytes_data,
name_0="float_data",
name_1="bytes_data",
tol=1e-2,
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_bytes_only_embed_dtype_and_endianness(
server: RemoteOpenAIServer, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
] * 2
url = server.url_for("pooling")
float_response = requests.post(
url,
json={
"model": model_name,
"input": input_texts,
"encoding_format": "float",
},
)
responses_float = PoolingResponse.model_validate(float_response.json())
float_data = [np.array(d.data).squeeze(-1).tolist() for d in responses_float.data]
n_tokens = responses_float.usage.prompt_tokens // len(input_texts)
for embed_dtype in list(EMBED_DTYPE_TO_TORCH_DTYPE.keys()):
for endianness in ENDIANNESS:
responses_bytes = requests.post(
url,
json={
"model": model_name,
"input": input_texts,
"encoding_format": "bytes_only",
"embed_dtype": embed_dtype,
"endianness": endianness,
},
)
assert "metadata" not in responses_bytes.headers
body = responses_bytes.content
items = build_metadata_items(
embed_dtype=embed_dtype,
endianness=endianness,
shape=(n_tokens, 1),
n_request=len(input_texts),
)
bytes_data = decode_pooling_output(items=items, body=body)
bytes_data = [x.to(torch.float32).view(-1).tolist() for x in bytes_data]
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=bytes_data,
name_0="float_data",
name_1="bytes_data",
tol=1e-2,
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("param_name", ["encoding_format", "embed_dtype", "endianness"])
async def test_params_not_supported(
server: RemoteOpenAIServer, model_name: str, param_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
responses_base64 = requests.post(
server.url_for("pooling"),
json={
"model": model_name,
"input": input_texts,
"encoding_format": "base64",
param_name: f"bad_{param_name}",
},
)
assert responses_base64.status_code == 400
assert "literal_error" in responses_base64.json()["error"]["message"]
assert f"bad_{param_name}" in responses_base64.json()["error"]["message"]
@pytest.mark.asyncio
async def test_invocations(server: RemoteOpenAIServer):
input_texts = [
"The chef prepared a delicious meal.",
]
request_args = {
"model": MODEL_NAME,
"input": input_texts,
"encoding_format": "float",
}
completion_response = requests.post(server.url_for("pooling"), json=request_args)
completion_response.raise_for_status()
invocation_response = requests.post(
server.url_for("invocations"), json=request_args
)
invocation_response.raise_for_status()
completion_output = completion_response.json()
invocation_output = invocation_response.json()
assert completion_output.keys() == invocation_output.keys()
for completion_data, invocation_data in zip(
completion_output["data"], invocation_output["data"]
):
assert completion_data.keys() == invocation_data.keys()
check_embeddings_close(
embeddings_0_lst=completion_data["data"],
embeddings_1_lst=invocation_data["data"],
name_0="completion",
name_1="invocation",
)
@pytest.mark.asyncio
async def test_invocations_conversation(server: RemoteOpenAIServer):
messages = [
{
"role": "user",
"content": "The cat sat on the mat.",
},
{
"role": "assistant",
"content": "A feline was resting on a rug.",
},
{
"role": "user",
"content": "Stars twinkle brightly in the night sky.",
},
]
request_args = {
"model": MODEL_NAME,
"messages": messages,
"encoding_format": "float",
}
chat_response = requests.post(server.url_for("pooling"), json=request_args)
chat_response.raise_for_status()
invocation_response = requests.post(
server.url_for("invocations"), json=request_args
)
invocation_response.raise_for_status()
chat_output = chat_response.json()
invocation_output = invocation_response.json()
assert chat_output.keys() == invocation_output.keys()
for chat_data, invocation_data in zip(
chat_output["data"], invocation_output["data"]
):
assert chat_data.keys() == invocation_data.keys()
check_embeddings_close(
embeddings_0_lst=chat_data["data"],
embeddings_1_lst=invocation_data["data"],
name_0="chat",
name_1="invocation",
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/pooling/__init__.py | tests/entrypoints/pooling/pooling/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/embed/test_online_long_text.py | tests/entrypoints/pooling/embed/test_online_long_text.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Test cases for long text embedding with automatic chunking mechanism.
This test suite validates vLLM's automatic chunking functionality for handling
text inputs that exceed the model's maximum token length, specifically targeting
the intfloat/multilingual-e5-small model (max token length: 512).
"""
import random
import openai
import pytest
import pytest_asyncio
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.pooling.embed.protocol import EmbeddingResponse
from vllm.platforms import current_platform
def _generate_random_text(word_count: int) -> str:
"""Generate random text with approximately the specified word count."""
# Common English words with focus on verbs and nouns for realistic text
common_words = [
# Essential articles and pronouns (minimal)
"the",
"and",
"you",
"they",
"this",
"that",
"these",
"those",
# Action verbs
"create",
"build",
"develop",
"design",
"implement",
"execute",
"analyze",
"process",
"generate",
"calculate",
"evaluate",
"optimize",
"transform",
"integrate",
"configure",
"deploy",
"monitor",
"manage",
"discover",
"explore",
"investigate",
"research",
"study",
"examine",
"improve",
"enhance",
"upgrade",
"modify",
"update",
"maintain",
"solve",
"resolve",
"handle",
"address",
"tackle",
"overcome",
"communicate",
"collaborate",
"coordinate",
"organize",
"plan",
"achieve",
"accomplish",
"complete",
"finish",
"deliver",
"provide",
# Technology and science nouns
"system",
"application",
"software",
"hardware",
"network",
"database",
"algorithm",
"model",
"framework",
"platform",
"interface",
"protocol",
"architecture",
"infrastructure",
"component",
"module",
"service",
"technology",
"innovation",
"solution",
"methodology",
"approach",
"artificial",
"intelligence",
"machine",
"learning",
"neural",
"network",
"computer",
"processor",
"memory",
"storage",
"computation",
"data",
"information",
"knowledge",
"insight",
"pattern",
"trend",
"analysis",
"research",
"development",
"engineering",
"science",
"mathematics",
"statistics",
"probability",
"optimization",
"performance",
"efficiency",
# General nouns
"project",
"team",
"organization",
"company",
"business",
"industry",
"market",
"customer",
"user",
"client",
"product",
"feature",
"function",
"requirement",
"specification",
"documentation",
"report",
"result",
"outcome",
"impact",
"benefit",
"advantage",
"challenge",
"problem",
"opportunity",
"strategy",
"goal",
"objective",
"target",
"milestone",
"process",
"procedure",
"workflow",
"pipeline",
"operation",
"task",
"activity",
"event",
"session",
"meeting",
"discussion",
"decision",
]
words = []
for _ in range(word_count):
words.append(random.choice(common_words))
# Add some punctuation for more realistic text
text = " ".join(words)
# Add periods every 10-20 words
words_list = text.split()
result = []
for i, word in enumerate(words_list):
result.append(word)
if (i + 1) % random.randint(10, 20) == 0 and i < len(words_list) - 1:
result[-1] += "."
return " ".join(result)
MODEL_NAME = "intfloat/multilingual-e5-small"
DTYPE = "bfloat16"
# Test text: Generate text with approximately 1500 words to exceed 1024 tokens
LONG_TEXT_1500_WORDS = _generate_random_text(1500)
# Test text: Generate text with approximately 2500 words to exceed 2048 tokens
LONG_TEXT_2500_WORDS = _generate_random_text(2500)
@pytest.fixture(scope="module")
def server_with_chunked_processing():
"""Start server with automatic chunking processing enabled."""
args = [
"--runner",
"pooling",
"--dtype",
DTYPE,
"--enforce-eager",
"--max-model-len",
"512", # Set smaller max_model_len to trigger chunking mechanism
"--pooler-config",
(
'{"pooling_type": "MEAN", "normalize": true, '
'"enable_chunked_processing": true, "max_embed_len": 10000}'
),
"--gpu-memory-utilization",
"0.8",
]
# ROCm: Use Flex Attention to support encoder-only self-attention.
if current_platform.is_rocm():
args.extend(["--attention-backend", "FLEX_ATTENTION"])
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
yield remote_server
@pytest_asyncio.fixture
async def client_with_chunked_processing(server_with_chunked_processing):
"""Create async client with chunking processing support."""
async with server_with_chunked_processing.get_async_client() as async_client:
yield async_client
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_long_text_embedding_1500_chars(
client_with_chunked_processing: openai.AsyncOpenAI, model_name: str
):
"""Test embedding processing for ~1500 character long text
(~1028 tokens, exceeding 512 token limit)."""
# Verify text length
# Verify text has sufficient word count (approximately 1500 words)
word_count = len(LONG_TEXT_1500_WORDS.split())
assert word_count >= 1400, f"Test text word count insufficient: {word_count} words"
# Send embedding request
embedding_response = await client_with_chunked_processing.embeddings.create(
model=model_name,
input=[LONG_TEXT_1500_WORDS],
encoding_format="float",
)
# Verify response structure
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 1
assert (
len(embeddings.data[0].embedding) == 384
) # multilingual-e5-small embedding dimension
assert embeddings.usage.completion_tokens == 0
# Due to chunked processing, token count should
# reflect actual processed tokens
# With ~1500 words, we expect roughly
# 1024+ tokens (exceeding 512 token limit)
# Should exceed single chunk limit of 512
assert embeddings.usage.prompt_tokens > 800
assert embeddings.usage.total_tokens == embeddings.usage.prompt_tokens
# Verify embedding vector validity
embedding_vector = embeddings.data[0].embedding
assert all(isinstance(x, float) for x in embedding_vector), (
"Embedding vector should contain floats"
)
assert not all(x == 0 for x in embedding_vector), (
"Embedding vector should not be all zeros"
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_long_text_embedding_2500_chars(
client_with_chunked_processing: openai.AsyncOpenAI, model_name: str
):
"""Test embedding processing for ~2500 character long text
(~2048 tokens, requiring multiple chunks)."""
# Verify text length
# Verify text has sufficient word count (approximately 2500 words)
word_count = len(LONG_TEXT_2500_WORDS.split())
assert word_count >= 2300, f"Test text word count insufficient: {word_count} words"
# Send embedding request
embedding_response = await client_with_chunked_processing.embeddings.create(
model=model_name,
input=[LONG_TEXT_2500_WORDS],
encoding_format="float",
)
# Verify response structure
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 1
assert (
len(embeddings.data[0].embedding) == 384
) # multilingual-e5-small embedding dimension
assert embeddings.usage.completion_tokens == 0
# Due to chunked processing, token count should
# reflect actual processed tokens
# With ~2500 words, we expect
# roughly 2048+ tokens (requiring multiple chunks)
# Should require multiple chunks for processing
assert embeddings.usage.prompt_tokens > 1500
assert embeddings.usage.total_tokens == embeddings.usage.prompt_tokens
# Verify embedding vector validity
embedding_vector = embeddings.data[0].embedding
assert all(isinstance(x, float) for x in embedding_vector), (
"Embedding vector should contain floats"
)
assert not all(x == 0 for x in embedding_vector), (
"Embedding vector should not be all zeros"
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_batch_long_text_embedding(
client_with_chunked_processing: openai.AsyncOpenAI, model_name: str
):
"""Test batch long text embedding processing."""
input_texts = [
LONG_TEXT_1500_WORDS,
LONG_TEXT_2500_WORDS,
"This is a short text test.", # Short text for comparison
]
# Send batch embedding request
embedding_response = await client_with_chunked_processing.embeddings.create(
model=model_name,
input=input_texts,
encoding_format="float",
)
# Verify response structure
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 3 # Three input texts
# Verify each embedding dimension
for i, embedding_data in enumerate(embeddings.data):
assert len(embedding_data.embedding) == 384
assert embedding_data.index == i
# Verify embedding vector validity
embedding_vector = embedding_data.embedding
assert all(isinstance(x, float) for x in embedding_vector)
assert not all(x == 0 for x in embedding_vector)
# Verify token usage
assert embeddings.usage.completion_tokens == 0
# Total token count should be very substantial
assert embeddings.usage.prompt_tokens > 1000
assert embeddings.usage.total_tokens == embeddings.usage.prompt_tokens
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_chunked_vs_normal_consistency(
client_with_chunked_processing: openai.AsyncOpenAI, model_name: str
):
"""Test consistency between chunked and
normal processing (using short text)."""
# Use a short text within the 512 token limit
short_text = (
"Artificial intelligence technology is changing our world, "
"bringing unprecedented opportunities and challenges."
)
# Send embedding request
embedding_response = await client_with_chunked_processing.embeddings.create(
model=model_name,
input=[short_text],
encoding_format="float",
)
# Verify response structure
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 1
assert len(embeddings.data[0].embedding) == 384
assert embeddings.usage.completion_tokens == 0
# Short text should not require chunked processing
assert embeddings.usage.prompt_tokens < 512
assert embeddings.usage.total_tokens == embeddings.usage.prompt_tokens
# 验证embedding向量的有效性
embedding_vector = embeddings.data[0].embedding
assert all(isinstance(x, float) for x in embedding_vector)
assert not all(x == 0 for x in embedding_vector)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_chunked_processing_response_format(
client_with_chunked_processing: openai.AsyncOpenAI, model_name: str
):
"""Test response format and structure during chunked processing."""
# Test with long text to trigger chunking
embedding_response = await client_with_chunked_processing.embeddings.create(
model=model_name,
input=[LONG_TEXT_1500_WORDS],
encoding_format="float",
)
# Verify response structure
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 1
assert embeddings.data[0].object == "embedding"
assert embeddings.data[0].index == 0
# Verify embedding vector properties
embedding_vector = embeddings.data[0].embedding
import math
vector_norm = math.sqrt(sum(x * x for x in embedding_vector))
# Check that the vector is normalized
# (default behavior for most embedding models)
assert 0.8 < vector_norm < 1.2, (
f"Vector norm should be reasonable, actual: {vector_norm}"
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/embed/test_online.py | tests/entrypoints/pooling/embed/test_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
import json
import numpy as np
import openai
import pytest
import pytest_asyncio
import requests
import torch
import torch.nn.functional as F
from tests.models.language.pooling.embed_utils import run_embedding_correctness_test
from tests.models.utils import check_embeddings_close
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.pooling.embed.protocol import EmbeddingResponse
from vllm.entrypoints.pooling.pooling.protocol import PoolingResponse
from vllm.platforms import current_platform
from vllm.tokenizers import get_tokenizer
from vllm.utils.serial_utils import (
EMBED_DTYPE_TO_TORCH_DTYPE,
ENDIANNESS,
MetadataItem,
binary2tensor,
build_metadata_items,
decode_pooling_output,
)
MODEL_NAME = "intfloat/multilingual-e5-small"
DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\n'}}{% endfor %}""" # noqa: E501
DTYPE = "bfloat16"
if current_platform.is_rocm():
# Disable Flash/MemEfficient SDP on ROCm to avoid HF Transformers
# accuracy issues: https://github.com/vllm-project/vllm/issues/30167
# TODO: Remove once ROCm SDP accuracy issues are resolved on HuggingFace
torch.backends.cuda.enable_flash_sdp(False)
torch.backends.cuda.enable_mem_efficient_sdp(False)
torch.backends.cuda.enable_math_sdp(True)
@pytest.fixture(scope="module")
def server():
args = [
"--runner",
"pooling",
# use half precision for speed and memory savings in CI environment
"--dtype",
DTYPE,
"--enforce-eager",
"--max-model-len",
"512",
"--chat-template",
DUMMY_CHAT_TEMPLATE,
]
# ROCm: Use Flex Attention to support encoder-only self-attention.
if current_platform.is_rocm():
args.extend(["--attention-backend", "FLEX_ATTENTION"])
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
yield remote_server
@pytest_asyncio.fixture
async def client(server):
async with server.get_async_client() as async_client:
yield async_client
@pytest.fixture(scope="module")
def hf_model(hf_runner):
with hf_runner(MODEL_NAME, dtype=DTYPE, is_sentence_transformer=True) as hf_model:
yield hf_model
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_single_embedding(hf_model, client: openai.AsyncOpenAI, model_name: str):
input_texts = [
"The chef prepared a delicious meal.",
]
# test single embedding
embedding_response = await client.embeddings.create(
model=model_name,
input=input_texts,
encoding_format="float",
)
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 1
assert len(embeddings.data[0].embedding) == 384
assert embeddings.usage.completion_tokens == 0
assert embeddings.usage.prompt_tokens == 11
assert embeddings.usage.total_tokens == 11
vllm_outputs = [d.embedding for d in embeddings.data]
run_embedding_correctness_test(hf_model, input_texts, vllm_outputs)
# test using token IDs
input_tokens = [1, 1, 1, 1, 1]
embedding_response = await client.embeddings.create(
model=model_name,
input=input_tokens,
encoding_format="float",
)
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 1
assert len(embeddings.data[0].embedding) == 384
assert embeddings.usage.completion_tokens == 0
assert embeddings.usage.prompt_tokens == 5
assert embeddings.usage.total_tokens == 5
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_batch_embedding(hf_model, client: openai.AsyncOpenAI, model_name: str):
# test list[str]
input_texts = [
"The cat sat on the mat.",
"A feline was resting on a rug.",
"Stars twinkle brightly in the night sky.",
]
embedding_response = await client.embeddings.create(
model=model_name,
input=input_texts,
encoding_format="float",
)
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 3
assert len(embeddings.data[0].embedding) == 384
assert embeddings.usage.completion_tokens == 0
assert embeddings.usage.prompt_tokens == 33
assert embeddings.usage.total_tokens == 33
vllm_outputs = [d.embedding for d in embeddings.data]
run_embedding_correctness_test(hf_model, input_texts, vllm_outputs)
# test list[list[int]]
input_tokens = [
[4, 5, 7, 9, 20],
[15, 29, 499],
[24, 24, 24, 24, 24],
[25, 32, 64, 77],
]
embedding_response = await client.embeddings.create(
model=model_name,
input=input_tokens,
encoding_format="float",
)
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 4
assert len(embeddings.data[0].embedding) == 384
assert embeddings.usage.completion_tokens == 0
assert embeddings.usage.prompt_tokens == 17
assert embeddings.usage.total_tokens == 17
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_conversation_embedding(
server: RemoteOpenAIServer, client: openai.AsyncOpenAI, model_name: str
):
messages = [
{
"role": "user",
"content": "The cat sat on the mat.",
},
{
"role": "assistant",
"content": "A feline was resting on a rug.",
},
{
"role": "user",
"content": "Stars twinkle brightly in the night sky.",
},
]
chat_response = requests.post(
server.url_for("v1/embeddings"),
json={
"model": model_name,
"messages": messages,
"encoding_format": "float",
},
)
chat_response.raise_for_status()
chat_embeddings = EmbeddingResponse.model_validate(chat_response.json())
tokenizer = get_tokenizer(tokenizer_name=model_name)
prompt = tokenizer.apply_chat_template(
messages,
chat_template=DUMMY_CHAT_TEMPLATE,
add_generation_prompt=True,
continue_final_message=False,
tokenize=False,
)
completion_response = await client.embeddings.create(
model=model_name,
input=prompt,
encoding_format="float",
# To be consistent with chat
extra_body={"add_special_tokens": False},
)
completion_embeddings = EmbeddingResponse.model_validate(
completion_response.model_dump(mode="json")
)
assert chat_embeddings.id is not None
assert completion_embeddings.id is not None
assert chat_embeddings.created <= completion_embeddings.created
assert chat_embeddings.model_dump(exclude={"id", "created"}) == (
completion_embeddings.model_dump(exclude={"id", "created"})
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_batch_base64_embedding(
hf_model, client: openai.AsyncOpenAI, model_name: str
):
input_texts = [
"Hello my name is",
"The best thing about vLLM is that it supports many different models",
]
responses_float = await client.embeddings.create(
input=input_texts, model=model_name, encoding_format="float"
)
float_data = [d.embedding for d in responses_float.data]
run_embedding_correctness_test(hf_model, input_texts, float_data)
responses_base64 = await client.embeddings.create(
input=input_texts, model=model_name, encoding_format="base64"
)
base64_data = []
for data in responses_base64.data:
base64_data.append(
np.frombuffer(base64.b64decode(data.embedding), dtype="float32").tolist()
)
run_embedding_correctness_test(hf_model, input_texts, base64_data)
# Default response is float32 decoded from base64 by OpenAI Client
responses_default = await client.embeddings.create(
input=input_texts, model=model_name
)
default_data = [d.embedding for d in responses_default.data]
run_embedding_correctness_test(hf_model, input_texts, default_data)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_base64_embed_dtype_and_endianness(
server: RemoteOpenAIServer, client: openai.AsyncOpenAI, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
responses_float = await client.embeddings.create(
input=input_texts, model=model_name, encoding_format="float"
)
float_data = [d.embedding for d in responses_float.data]
for embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE:
for endianness in ENDIANNESS:
responses_base64 = requests.post(
server.url_for("/v1/embeddings"),
json={
"model": model_name,
"input": input_texts,
"encoding_format": "base64",
"embed_dtype": embed_dtype,
"endianness": endianness,
},
)
base64_data = []
for data in responses_base64.json()["data"]:
binary = base64.b64decode(data["embedding"])
tensor = binary2tensor(binary, (-1,), embed_dtype, endianness)
base64_data.append(tensor.to(torch.float32).tolist())
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=base64_data,
name_0="float_data",
name_1="base64_data",
tol=1e-2,
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_bytes_embed_dtype_and_endianness(
server: RemoteOpenAIServer, client: openai.AsyncOpenAI, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
responses_float = await client.embeddings.create(
input=input_texts, model=model_name, encoding_format="float"
)
float_data = [d.embedding for d in responses_float.data]
for embed_dtype in list(EMBED_DTYPE_TO_TORCH_DTYPE.keys()):
for endianness in ENDIANNESS:
responses_bytes = requests.post(
server.url_for("/v1/embeddings"),
json={
"model": model_name,
"input": input_texts,
"encoding_format": "bytes",
"embed_dtype": embed_dtype,
"endianness": endianness,
},
)
metadata = json.loads(responses_bytes.headers["metadata"])
body = responses_bytes.content
items = [MetadataItem(**x) for x in metadata["data"]]
bytes_data = decode_pooling_output(items=items, body=body)
bytes_data = [x.to(torch.float32).tolist() for x in bytes_data]
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=bytes_data,
name_0="float_data",
name_1="bytes_data",
tol=1e-2,
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_bytes_only_embed_dtype_and_endianness(
server: RemoteOpenAIServer, client: openai.AsyncOpenAI, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
] * 2
responses_float = await client.embeddings.create(
input=input_texts, model=model_name, encoding_format="float"
)
float_data = [d.embedding for d in responses_float.data]
embedding_size = len(float_data[0])
for embed_dtype in list(EMBED_DTYPE_TO_TORCH_DTYPE.keys()):
for endianness in ENDIANNESS:
responses_bytes = requests.post(
server.url_for("/v1/embeddings"),
json={
"model": model_name,
"input": input_texts,
"encoding_format": "bytes_only",
"embed_dtype": embed_dtype,
"endianness": endianness,
},
)
assert "metadata" not in responses_bytes.headers
body = responses_bytes.content
items = build_metadata_items(
embed_dtype=embed_dtype,
endianness=endianness,
shape=(embedding_size,),
n_request=len(input_texts),
)
bytes_data = decode_pooling_output(items=items, body=body)
bytes_data = [x.to(torch.float32).tolist() for x in bytes_data]
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=bytes_data,
name_0="float_data",
name_1="bytes_data",
tol=1e-2,
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("param_name", ["encoding_format", "embed_dtype", "endianness"])
async def test_params_not_supported(
server: RemoteOpenAIServer, model_name: str, param_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
responses_base64 = requests.post(
server.url_for("/v1/embeddings"),
json={
"model": model_name,
"input": input_texts,
"encoding_format": "base64",
param_name: f"bad_{param_name}",
},
)
assert responses_base64.status_code == 400
assert "literal_error" in responses_base64.json()["error"]["message"]
assert f"bad_{param_name}" in responses_base64.json()["error"]["message"]
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_single_embedding_truncation(client: openai.AsyncOpenAI, model_name: str):
input_texts = [
"Como o Brasil pode fomentar o desenvolvimento de modelos de IA?",
]
# test single embedding
embedding_response = await client.embeddings.create(
model=model_name, input=input_texts, extra_body={"truncate_prompt_tokens": 10}
)
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 1
assert len(embeddings.data[0].embedding) == 384
assert embeddings.usage.completion_tokens == 0
assert embeddings.usage.prompt_tokens == 10
assert embeddings.usage.total_tokens == 10
input_tokens = [
1,
24428,
289,
18341,
26165,
285,
19323,
283,
289,
26789,
3871,
28728,
9901,
340,
2229,
385,
340,
315,
28741,
28804,
2,
]
embedding_response = await client.embeddings.create(
model=model_name, input=input_tokens, extra_body={"truncate_prompt_tokens": 10}
)
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 1
assert len(embeddings.data[0].embedding) == 384
assert embeddings.usage.completion_tokens == 0
assert embeddings.usage.prompt_tokens == 10
assert embeddings.usage.total_tokens == 10
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_single_embedding_truncation_invalid(
client: openai.AsyncOpenAI, model_name: str
):
input_texts = [
"Como o Brasil pode fomentar o desenvolvimento de modelos de IA?",
]
with pytest.raises(openai.BadRequestError):
response = await client.embeddings.create(
model=model_name,
input=input_texts,
extra_body={"truncate_prompt_tokens": 8193},
)
assert "error" in response.object
assert (
"truncate_prompt_tokens value is greater than max_model_len. "
"Please, select a smaller truncation size." in response.message
)
@pytest.mark.asyncio
async def test_invocations(server: RemoteOpenAIServer, client: openai.AsyncOpenAI):
input_texts = [
"The chef prepared a delicious meal.",
]
request_args = {
"model": MODEL_NAME,
"input": input_texts,
"encoding_format": "float",
}
completion_response = await client.embeddings.create(**request_args)
invocation_response = requests.post(
server.url_for("invocations"), json=request_args
)
invocation_response.raise_for_status()
completion_output = completion_response.model_dump()
invocation_output = invocation_response.json()
assert completion_output.keys() == invocation_output.keys()
for completion_data, invocation_data in zip(
completion_output["data"], invocation_output["data"]
):
assert completion_data.keys() == invocation_data.keys()
check_embeddings_close(
embeddings_0_lst=[completion_data["embedding"]],
embeddings_1_lst=[invocation_data["embedding"]],
name_0="completion",
name_1="invocation",
)
@pytest.mark.asyncio
async def test_invocations_conversation(server: RemoteOpenAIServer):
messages = [
{
"role": "user",
"content": "The cat sat on the mat.",
},
{
"role": "assistant",
"content": "A feline was resting on a rug.",
},
{
"role": "user",
"content": "Stars twinkle brightly in the night sky.",
},
]
request_args = {
"model": MODEL_NAME,
"messages": messages,
"encoding_format": "float",
}
chat_response = requests.post(server.url_for("v1/embeddings"), json=request_args)
chat_response.raise_for_status()
invocation_response = requests.post(
server.url_for("invocations"), json=request_args
)
invocation_response.raise_for_status()
chat_output = chat_response.json()
invocation_output = invocation_response.json()
assert chat_output.keys() == invocation_output.keys()
for chat_data, invocation_data in zip(
chat_output["data"], invocation_output["data"]
):
assert chat_data.keys() == invocation_data.keys()
check_embeddings_close(
embeddings_0_lst=[chat_data["embedding"]],
embeddings_1_lst=[invocation_data["embedding"]],
name_0="chat",
name_1="invocation",
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_normalize(server: RemoteOpenAIServer, model_name: str):
input_text = ["The chef prepared a delicious meal."]
async def get_outputs(normalize):
request_args = {
"model": MODEL_NAME,
"input": input_text,
"encoding_format": "float",
"normalize": normalize,
}
response = requests.post(server.url_for("v1/embeddings"), json=request_args)
outputs = response.json()
return torch.tensor([x["embedding"] for x in outputs["data"]])
default = await get_outputs(normalize=None)
w_normal = await get_outputs(normalize=True)
wo_normal = await get_outputs(normalize=False)
assert torch.allclose(default, w_normal, atol=1e-2), "Default should use normal."
assert not torch.allclose(w_normal, wo_normal, atol=1e-2), (
"wo_normal should not use normal."
)
assert torch.allclose(w_normal, F.normalize(wo_normal, p=2, dim=-1), atol=1e-2), (
"w_normal should be close to normal(wo_normal)."
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_pooling_embed(server: RemoteOpenAIServer, model_name: str):
task = "embed"
input_text = ["The chef prepared a delicious meal."]
response = requests.post(
server.url_for("pooling"),
json={
"model": model_name,
"input": input_text,
"encoding_format": "float",
"task": task,
},
)
poolings = PoolingResponse.model_validate(response.json())
assert len(poolings.data) == 1
assert len(poolings.data[0].data) == 384
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_pooling_token_embed(server: RemoteOpenAIServer, model_name: str):
task = "token_embed"
input_text = ["The chef prepared a delicious meal."]
response = requests.post(
server.url_for("pooling"),
json={
"model": model_name,
"input": input_text,
"encoding_format": "float",
"task": task,
},
)
poolings = PoolingResponse.model_validate(response.json())
assert len(poolings.data) == 1
assert len(poolings.data[0].data) == 11
assert len(poolings.data[0].data[0]) == 384
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("task", ["classify", "token_classify", "plugin"])
async def test_pooling_not_supported(
server: RemoteOpenAIServer, model_name: str, task: str
):
response = requests.post(
server.url_for("pooling"),
json={
"model": model_name,
"input": "test",
"encoding_format": "float",
"task": task,
},
)
assert response.json()["error"]["type"] == "BadRequestError"
assert response.json()["error"]["message"].startswith(
f"Task {task} is not supported"
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/embed/test_offline.py | tests/entrypoints/pooling/embed/test_offline.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import weakref
import pytest
import torch
import torch.nn.functional as F
from vllm import LLM, PoolingParams
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.platforms import current_platform
MODEL_NAME = "intfloat/multilingual-e5-small"
prompts = ["The chef prepared a delicious meal."]
@pytest.fixture(scope="module")
def llm():
# ROCm: Use FLEX_ATTENTION backend as it's the only attention backend
# that supports encoder-only models on ROCm.
attention_config = None
if current_platform.is_rocm():
attention_config = {"backend": "FLEX_ATTENTION"}
# pytest caches the fixture so we use weakref.proxy to
# enable garbage collection
llm = LLM(
model=MODEL_NAME,
max_num_batched_tokens=32768,
tensor_parallel_size=1,
gpu_memory_utilization=0.75,
enforce_eager=True,
seed=0,
attention_config=attention_config,
)
yield weakref.proxy(llm)
del llm
cleanup_dist_env_and_memory()
@pytest.mark.skip_global_cleanup
def test_token_embed(llm: LLM):
outputs = llm.encode(prompts, pooling_task="token_embed", use_tqdm=False)
multi_vector = outputs[0].outputs.data
assert multi_vector.shape == (11, 384)
def test_pooling_params(llm: LLM):
def get_outputs(normalize):
outputs = llm.embed(
prompts, pooling_params=PoolingParams(normalize=normalize), use_tqdm=False
)
return torch.tensor([x.outputs.embedding for x in outputs])
default = get_outputs(normalize=None)
w_normal = get_outputs(normalize=True)
wo_normal = get_outputs(normalize=False)
assert torch.allclose(default, w_normal, atol=1e-2), "Default should use normal."
assert not torch.allclose(w_normal, wo_normal, atol=1e-2), (
"wo_normal should not use normal."
)
assert torch.allclose(w_normal, F.normalize(wo_normal, p=2, dim=-1), atol=1e-2), (
"w_normal should be close to normal(wo_normal)."
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/embed/test_online_vision.py | tests/entrypoints/pooling/embed/test_online_vision.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
import pytest
import requests
from transformers import AutoProcessor
from tests.utils import VLLM_PATH, RemoteOpenAIServer
from vllm.entrypoints.pooling.embed.protocol import EmbeddingResponse
from vllm.multimodal.base import MediaWithBytes
from vllm.multimodal.utils import fetch_image
MODEL_NAME = "TIGER-Lab/VLM2Vec-Full"
MAXIMUM_IMAGES = 2
vlm2vec_jinja_path = VLLM_PATH / "examples/template_vlm2vec_phi3v.jinja"
assert vlm2vec_jinja_path.exists()
# Test different image extensions (JPG/PNG) and formats (gray/RGB/RGBA)
TEST_IMAGE_ASSETS = [
"2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
"Grayscale_8bits_palette_sample_image.png", # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/Grayscale_8bits_palette_sample_image.png",
"1280px-Venn_diagram_rgb.svg.png", # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/1280px-Venn_diagram_rgb.svg.png",
"RGBA_comp.png", # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/RGBA_comp.png",
]
@pytest.fixture(scope="module")
def server():
args = [
"--runner",
"pooling",
"--max-model-len",
"2048",
"--max-num-seqs",
"5",
"--enforce-eager",
"--trust-remote-code",
"--limit-mm-per-prompt",
json.dumps({"image": MAXIMUM_IMAGES}),
"--chat-template",
str(vlm2vec_jinja_path),
]
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
yield remote_server
def get_hf_prompt_tokens(model_name, content, image_url):
processor = AutoProcessor.from_pretrained(
model_name, trust_remote_code=True, num_crops=4
)
placeholder = "<|image_1|> "
prompt = f"{placeholder}{content}"
image = fetch_image(image_url)
# Unwrap MediaWithBytes if present
if isinstance(image, MediaWithBytes):
image = image.media
images = [image]
inputs = processor(prompt, images, return_tensors="pt")
return inputs.input_ids.shape[1]
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("image_url", TEST_IMAGE_ASSETS, indirect=True)
async def test_image_embedding(
server: RemoteOpenAIServer, model_name: str, image_url: str
):
content_text = "Represent the given image."
messages = [
{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": image_url}},
{"type": "text", "text": content_text},
],
}
]
response = requests.post(
server.url_for("v1/embeddings"),
json={"model": model_name, "messages": messages, "encoding_format": "float"},
)
response.raise_for_status()
embeddings = EmbeddingResponse.model_validate(response.json())
hf_prompt_tokens = get_hf_prompt_tokens(model_name, content_text, image_url)
assert embeddings.id is not None
assert len(embeddings.data) == 1
assert len(embeddings.data[0].embedding) == 3072
assert embeddings.usage.completion_tokens == 0
assert embeddings.usage.prompt_tokens == hf_prompt_tokens
assert embeddings.usage.total_tokens == hf_prompt_tokens
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/embed/conftest.py | tests/entrypoints/pooling/embed/conftest.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Pytest configuration for vLLM pooling embed tests."""
import warnings
import torch
from vllm.platforms import current_platform
def pytest_collection_modifyitems(config, items):
"""Configure ROCm-specific settings based on collected tests."""
if not current_platform.is_rocm():
return
# Disable Flash/MemEfficient SDP on ROCm to avoid HF Transformers
# accuracy issues: https://github.com/vllm-project/vllm/issues/30167
# TODO: Remove once ROCm SDP accuracy issues are resolved on HuggingFace
torch.backends.cuda.enable_flash_sdp(False)
torch.backends.cuda.enable_mem_efficient_sdp(False)
torch.backends.cuda.enable_math_sdp(True)
warnings.warn(
"ROCm: Disabled flash_sdp and mem_efficient_sdp, enabled math_sdp "
"to avoid HuggingFace Transformers accuracy issues",
UserWarning,
stacklevel=1,
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/embed/__init__.py | tests/entrypoints/pooling/embed/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/embed/test_online_dimensions.py | tests/entrypoints/pooling/embed/test_online_dimensions.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Run `pytest tests/entrypoints/openai/test_embedding_dimensions.py`.
"""
import openai
import pytest
from tests.conftest import HfRunner
from tests.models.language.pooling.embed_utils import run_embedding_correctness_test
from tests.models.utils import EmbedModelInfo
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.pooling.embed.protocol import EmbeddingResponse
from vllm.platforms import current_platform
MODELS = [
EmbedModelInfo("intfloat/multilingual-e5-small", is_matryoshka=False),
EmbedModelInfo(
"Snowflake/snowflake-arctic-embed-m-v1.5",
is_matryoshka=True,
matryoshka_dimensions=[256],
),
]
input_texts = [
"The chef prepared a delicious meal.",
]
@pytest.fixture(scope="module", params=MODELS)
def model_info(request):
return request.param
@pytest.fixture(scope="module", params=["bfloat16"])
def dtype(request):
return request.param
@pytest.fixture(scope="module")
def server(model_info, dtype: str):
args = [
"--runner",
"pooling",
# use half precision for speed and memory savings in CI environment
"--dtype",
dtype,
"--enforce-eager",
"--max-model-len",
"512",
]
if model_info.name == "Snowflake/snowflake-arctic-embed-m-v1.5":
# Manually enable Matryoshka Embeddings
args.extend(
["--trust_remote_code", "--hf_overrides", '{"matryoshka_dimensions":[256]}']
)
# ROCm: Use Flex Attention to support encoder-only self-attention.
if current_platform.is_rocm():
args.extend(["--attention-backend", "FLEX_ATTENTION"])
with RemoteOpenAIServer(model_info.name, args) as remote_server:
yield remote_server
@pytest.fixture(scope="module")
def hf_model(hf_runner, model_info, dtype: str):
with hf_runner(
model_info.name, dtype=dtype, is_sentence_transformer=True
) as hf_model:
yield hf_model
@pytest.mark.asyncio
async def test_matryoshka(
model_info: EmbedModelInfo, server: RemoteOpenAIServer, hf_model: HfRunner
):
client = server.get_async_client()
async def make_request_and_correctness_test(dimensions):
prompts = input_texts * 3
embedding_response = await client.embeddings.create(
model=model_info.name,
input=prompts,
dimensions=dimensions,
encoding_format="float",
)
embeddings = EmbeddingResponse.model_validate(
embedding_response.model_dump(mode="json")
)
assert embeddings.id is not None
assert len(embeddings.data) == 3
assert len(embeddings.data[0].embedding) > 0
assert embeddings.usage.completion_tokens == 0
assert embeddings.usage.prompt_tokens > 0
assert embeddings.usage.total_tokens > 0
if dimensions is not None:
assert len(embeddings.data[0].embedding) == dimensions
vllm_outputs = [d.embedding for d in embeddings.data]
run_embedding_correctness_test(hf_model, prompts, vllm_outputs, dimensions)
if model_info.is_matryoshka:
valid_dimensions: list[int | None] = [None]
if model_info.matryoshka_dimensions is not None:
valid_dimensions += model_info.matryoshka_dimensions[:2]
for dimensions in valid_dimensions:
await make_request_and_correctness_test(dimensions)
invalid_dimensions: list[int | None] = [-1]
if model_info.matryoshka_dimensions is not None:
assert 5 not in model_info.matryoshka_dimensions
invalid_dimensions.append(5)
for dimensions in invalid_dimensions:
with pytest.raises(openai.BadRequestError):
await make_request_and_correctness_test(dimensions)
else:
for dimensions in [None]:
await make_request_and_correctness_test(dimensions)
for dimensions in [-1, 16]:
with pytest.raises(openai.BadRequestError):
await make_request_and_correctness_test(dimensions)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/embed/test_correctness_mteb.py | tests/entrypoints/pooling/embed/test_correctness_mteb.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import pytest
from tests.models.language.pooling_mteb_test.mteb_embed_utils import (
MTEB_EMBED_TASKS,
MTEB_EMBED_TOL,
OpenAIClientMtebEncoder,
run_mteb_embed_task,
)
from tests.utils import RemoteOpenAIServer
from vllm.platforms import current_platform
os.environ["VLLM_LOGGING_LEVEL"] = "WARNING"
MODEL_NAME = "intfloat/e5-small"
MAIN_SCORE = 0.7422994752439667
@pytest.fixture(scope="module")
def server():
args = ["--runner", "pooling", "--enforce-eager", "--disable-uvicorn-access-log"]
# ROCm: Use Flex Attention to support encoder-only self-attention.
if current_platform.is_rocm():
args.extend(["--attention-backend", "FLEX_ATTENTION"])
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
yield remote_server
def test_mteb_embed(server):
client = server.get_client()
encoder = OpenAIClientMtebEncoder(MODEL_NAME, client)
vllm_main_score = run_mteb_embed_task(encoder, MTEB_EMBED_TASKS)
st_main_score = MAIN_SCORE
print("VLLM main score: ", vllm_main_score)
print("SentenceTransformer main score: ", st_main_score)
print("Difference: ", st_main_score - vllm_main_score)
# We are not concerned that the vllm mteb results are better
# than SentenceTransformers, so we only perform one-sided testing.
assert st_main_score - vllm_main_score < MTEB_EMBED_TOL
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/classify/test_online.py | tests/entrypoints/pooling/classify/test_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import requests
import torch
import torch.nn.functional as F
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.pooling.classify.protocol import ClassificationResponse
from vllm.entrypoints.pooling.pooling.protocol import PoolingResponse
MODEL_NAME = "jason9693/Qwen2.5-1.5B-apeach"
DTYPE = "float32" # Use float32 to avoid NaN issue
@pytest.fixture(scope="module")
def server():
args = [
"--enforce-eager",
"--max-model-len",
"512",
"--dtype",
DTYPE,
]
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
yield remote_server
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_single_input_classification(server: RemoteOpenAIServer, model_name: str):
    """A single text input yields exactly one labelled classification."""
    payload = {
        "model": model_name,
        "input": "This product was excellent and exceeded my expectations",
    }
    resp = requests.post(server.url_for("classify"), json=payload)
    resp.raise_for_status()
    output = ClassificationResponse.model_validate(resp.json())
    assert output.object == "list"
    assert output.model == MODEL_NAME
    assert len(output.data) == 1
    assert hasattr(output.data[0], "label")
    assert hasattr(output.data[0], "probs")
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_add_special_tokens_false(server: RemoteOpenAIServer, model_name: str):
    """The endpoint accepts requests that disable special-token insertion."""
    resp = requests.post(
        server.url_for("classify"),
        json={
            "model": model_name,
            "input": "hello",
            "add_special_tokens": False,
        },
    )
    resp.raise_for_status()
    # Successful schema validation is the assertion here.
    ClassificationResponse.model_validate(resp.json())
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_multiple_inputs_classification(server: RemoteOpenAIServer, model_name: str):
    """A batch of texts yields one indexed, labelled result per input."""
    input_texts = [
        "The product arrived on time and works perfectly",
        "I'm very satisfied with my purchase, would buy again",
        "The customer service was helpful and resolved my issue quickly",
        "This product broke after one week, terrible quality",
        "I'm very disappointed with this purchase, complete waste of money",
        "The customer service was rude and unhelpful",
    ]
    classification_response = requests.post(
        server.url_for("classify"),
        json={"model": model_name, "input": input_texts},
    )
    # Fail fast on HTTP errors instead of surfacing a confusing schema
    # validation failure below (consistent with the sibling tests).
    classification_response.raise_for_status()
    output = ClassificationResponse.model_validate(classification_response.json())
    assert len(output.data) == len(input_texts)
    for i, item in enumerate(output.data):
        # Results must come back in request order.
        assert item.index == i
        assert hasattr(item, "label")
        assert hasattr(item, "probs")
        assert len(item.probs) == item.num_classes
        assert item.label in ["Default", "Spoiled"]
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_truncate_prompt_tokens(server: RemoteOpenAIServer, model_name: str):
    """truncate_prompt_tokens caps the prompt at the requested token count."""
    long_text = "hello " * 600  # far longer than the 5-token budget below
    resp = requests.post(
        server.url_for("classify"),
        json={
            "model": model_name,
            "input": long_text,
            "truncate_prompt_tokens": 5,
        },
    )
    resp.raise_for_status()
    output = ClassificationResponse.model_validate(resp.json())
    assert len(output.data) == 1
    assert output.data[0].index == 0
    assert hasattr(output.data[0], "probs")
    # Usage must reflect the truncated prompt, not the original text.
    assert output.usage.prompt_tokens == 5
    assert output.usage.total_tokens == 5
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_invalid_truncate_prompt_tokens_error(
    server: RemoteOpenAIServer, model_name: str
):
    """Truncation beyond the 512-token model length is a 400 error."""
    resp = requests.post(
        server.url_for("classify"),
        json={
            "model": model_name,
            "input": "test",
            "truncate_prompt_tokens": 513,
        },
    )
    body = resp.json()
    assert resp.status_code == 400
    assert "truncate_prompt_tokens" in body["error"]["message"]
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_empty_input_error(server: RemoteOpenAIServer, model_name: str):
    """An empty string input is rejected with a 400 error payload."""
    resp = requests.post(
        server.url_for("classify"),
        json={"model": model_name, "input": ""},
    )
    body = resp.json()
    assert resp.status_code == 400
    assert "error" in body
@pytest.mark.parametrize("model_name", [MODEL_NAME])
def test_batch_classification_empty_list(server: RemoteOpenAIServer, model_name: str):
    """An empty batch is valid and yields an empty data list."""
    resp = requests.post(
        server.url_for("classify"),
        json={"model": model_name, "input": []},
    )
    resp.raise_for_status()
    output = ClassificationResponse.model_validate(resp.json())
    assert output.object == "list"
    assert isinstance(output.data, list)
    assert len(output.data) == 0
@pytest.mark.asyncio
async def test_invocations(server: RemoteOpenAIServer):
    """/invocations must mirror /classify for an identical request body."""
    request_args = {
        "model": MODEL_NAME,
        "input": "This product was excellent and exceeded my expectations",
    }
    classify_resp = requests.post(server.url_for("classify"), json=request_args)
    classify_resp.raise_for_status()
    invoke_resp = requests.post(server.url_for("invocations"), json=request_args)
    invoke_resp.raise_for_status()
    classify_out = classify_resp.json()
    invoke_out = invoke_resp.json()
    assert classify_out.keys() == invoke_out.keys()
    # Probabilities may drift slightly between runs, so compare loosely.
    for classify_item, invoke_item in zip(classify_out["data"], invoke_out["data"]):
        assert classify_item.keys() == invoke_item.keys()
        assert classify_item["probs"] == pytest.approx(invoke_item["probs"], rel=0.01)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_use_activation(server: RemoteOpenAIServer, model_name: str):
    """use_activation toggles softmax over the raw classifier outputs."""
    input_text = ["This product was excellent and exceeded my expectations"]

    async def get_outputs(use_activation):
        # Fetch class probabilities with the given activation setting.
        resp = requests.post(
            server.url_for("classify"),
            json={
                "model": model_name,
                "input": input_text,
                "use_activation": use_activation,
            },
        )
        body = resp.json()
        return torch.tensor([row["probs"] for row in body["data"]])

    default = await get_outputs(use_activation=None)
    w_activation = await get_outputs(use_activation=True)
    wo_activation = await get_outputs(use_activation=False)
    assert torch.allclose(default, w_activation, atol=1e-2), (
        "Default should use activation."
    )
    assert not torch.allclose(w_activation, wo_activation, atol=1e-2), (
        "wo_activation should not use activation."
    )
    assert torch.allclose(F.softmax(wo_activation, dim=-1), w_activation, atol=1e-2), (
        "w_activation should be close to activation(wo_activation)."
    )
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_score(server: RemoteOpenAIServer, model_name: str):
    """Score API rejects this classifier (only enabled for num_labels == 1)."""
    resp = requests.post(
        server.url_for("score"),
        json={"model": model_name, "text_1": "ping", "text_2": "pong"},
    )
    assert resp.json()["error"]["type"] == "BadRequestError"
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_rerank(server: RemoteOpenAIServer, model_name: str):
    """Rerank API rejects this classifier (only enabled for num_labels == 1)."""
    resp = requests.post(
        server.url_for("rerank"),
        json={"model": model_name, "query": "ping", "documents": ["pong"]},
    )
    assert resp.json()["error"]["type"] == "BadRequestError"
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_pooling_classify(server: RemoteOpenAIServer, model_name: str):
    """task='classify' pooling returns a single 2-class vector."""
    resp = requests.post(
        server.url_for("pooling"),
        json={
            "model": model_name,
            "input": "This product was excellent and exceeded my expectations",
            "encoding_format": "float",
            "task": "classify",
        },
    )
    poolings = PoolingResponse.model_validate(resp.json())
    assert len(poolings.data) == 1
    # One score per class of the binary classifier.
    assert len(poolings.data[0].data) == 2
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_pooling_token_classify(server: RemoteOpenAIServer, model_name: str):
    """task='token_classify' yields one 2-class vector per prompt token."""
    resp = requests.post(
        server.url_for("pooling"),
        json={
            "model": model_name,
            "input": ["This product was excellent and exceeded my expectations"],
            "encoding_format": "float",
            "task": "token_classify",
        },
    )
    poolings = PoolingResponse.model_validate(resp.json())
    assert len(poolings.data) == 1
    # 8 prompt tokens for this input, each scored over the 2 classes.
    assert len(poolings.data[0].data) == 8
    assert len(poolings.data[0].data[0]) == 2
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("task", ["embed", "token_embed", "plugin"])
async def test_pooling_not_supported(
    server: RemoteOpenAIServer, model_name: str, task: str
):
    """Pooling tasks unsupported by a classifier come back as 400 errors."""
    resp = requests.post(
        server.url_for("pooling"),
        json={
            "model": model_name,
            "input": "test",
            "encoding_format": "float",
            "task": task,
        },
    )
    # Parse the body once instead of re-decoding per assertion.
    error = resp.json()["error"]
    assert error["type"] == "BadRequestError"
    assert error["message"].startswith(f"Task {task} is not supported")
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/classify/test_offline.py | tests/entrypoints/pooling/classify/test_offline.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import weakref
import pytest
import torch
from tests.models.utils import softmax
from vllm import LLM, PoolingParams
from vllm.distributed import cleanup_dist_env_and_memory
# Binary sequence-classification model exercised by these offline tests.
MODEL_NAME = "jason9693/Qwen2.5-1.5B-apeach"
# Single shared prompt batch for classify()/encode() calls below.
prompts = ["The chef prepared a delicious meal."]
@pytest.fixture(scope="module")
def llm():
    """Module-scoped offline LLM, handed out as a GC-able weakref proxy."""
    # pytest caches the fixture, so we yield a weakref.proxy to
    # enable garbage collection after the module finishes.
    engine = LLM(
        model=MODEL_NAME,
        max_num_batched_tokens=32768,
        tensor_parallel_size=1,
        gpu_memory_utilization=0.75,
        enforce_eager=True,
        seed=0,
    )
    yield weakref.proxy(engine)
    del engine
    cleanup_dist_env_and_memory()
@pytest.mark.skip_global_cleanup
def test_pooling_params(llm: LLM):
    """use_activation controls whether classify() applies softmax."""

    def probs_for(use_activation):
        # Run classification with the given activation setting.
        results = llm.classify(
            prompts,
            pooling_params=PoolingParams(use_activation=use_activation),
            use_tqdm=False,
        )
        return torch.tensor([r.outputs.probs for r in results])

    default = probs_for(use_activation=None)
    w_activation = probs_for(use_activation=True)
    wo_activation = probs_for(use_activation=False)
    assert torch.allclose(default, w_activation, atol=1e-2), (
        "Default should use activation."
    )
    assert not torch.allclose(w_activation, wo_activation, atol=1e-2), (
        "wo_activation should not use activation."
    )
    assert torch.allclose(softmax(wo_activation), w_activation, atol=1e-2), (
        "w_activation should be close to activation(wo_activation)."
    )
@pytest.mark.skip_global_cleanup
def test_token_classify(llm: LLM):
    """Smoke test: token_classify pooling runs without raising."""
    llm.encode(prompts, pooling_task="token_classify", use_tqdm=False)
def test_score_api(llm: LLM):
    """Score API must refuse models whose num_labels != 1."""
    with pytest.raises(
        ValueError, match="Score API is only enabled for num_labels == 1."
    ):
        llm.score("ping", "pong", use_tqdm=False)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/classify/test_online_vision.py | tests/entrypoints/pooling/classify/test_online_vision.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
import pytest
import requests
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.pooling.classify.protocol import ClassificationResponse
# Video-classification VLM under test.
VLM_MODEL_NAME = "muziyongshixin/Qwen2.5-VL-7B-for-VideoCls"
# At most one video per prompt is allowed by the server fixture.
MAXIMUM_VIDEOS = 1
TEST_VIDEO_URL = "https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4"
# Force the sequence-classification architecture onto the text backbone.
HF_OVERRIDES = {
    "text_config": {
        "architectures": ["Qwen2_5_VLForSequenceClassification"],
    },
}
@pytest.fixture(scope="module")
def server_vlm_classify():
    """Serve the video-classification VLM with a per-prompt video cap."""
    cli_args = [
        "--runner",
        "pooling",
        "--max-model-len",
        "5000",
        "--enforce-eager",
        "--limit-mm-per-prompt",
        json.dumps({"video": MAXIMUM_VIDEOS}),
    ]
    with RemoteOpenAIServer(
        VLM_MODEL_NAME, cli_args, override_hf_configs=HF_OVERRIDES
    ) as remote_server:
        yield remote_server
@pytest.mark.parametrize("model_name", [VLM_MODEL_NAME])
def test_classify_accepts_chat_text_only(
    server_vlm_classify: RemoteOpenAIServer, model_name: str
) -> None:
    """Chat-style text-only input is accepted by the classify endpoint."""
    chat = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Please classify this text request."},
            ],
        }
    ]
    resp = requests.post(
        server_vlm_classify.url_for("classify"),
        json={"model": model_name, "messages": chat},
    )
    resp.raise_for_status()
    output = ClassificationResponse.model_validate(resp.json())
    assert output.object == "list"
    assert output.model == model_name
    assert len(output.data) == 1
    assert len(output.data[0].probs) == 2
    # Known prompt token count for this message/template combination.
    assert output.usage.prompt_tokens == 22
@pytest.mark.parametrize("model_name", [VLM_MODEL_NAME])
def test_classify_accepts_chat_video_url(
    server_vlm_classify: RemoteOpenAIServer, model_name: str
) -> None:
    """Chat input with a video URL is fetched, tokenized, and classified."""
    chat = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Please classify this video."},
                {"type": "video_url", "video_url": {"url": TEST_VIDEO_URL}},
            ],
        }
    ]
    resp = requests.post(
        server_vlm_classify.url_for("classify"),
        json={"model": model_name, "messages": chat},
    )
    resp.raise_for_status()
    output = ClassificationResponse.model_validate(resp.json())
    assert output.object == "list"
    assert output.model == model_name
    assert len(output.data) == 1
    assert len(output.data[0].probs) == 2
    # Known prompt token count once the video frames are embedded.
    assert output.usage.prompt_tokens == 4807
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/pooling/classify/__init__.py | tests/entrypoints/pooling/classify/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/llm/test_chat.py | tests/entrypoints/llm/test_chat.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import weakref
import pytest
from vllm import LLM
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.sampling_params import SamplingParams
from ..openai.test_vision import TEST_IMAGE_ASSETS
@pytest.fixture(scope="function")
def text_llm():
    """Fresh text-only Llama instance per test, GC-able via weakref proxy."""
    # pytest caches the fixture, so we yield a weakref.proxy to
    # enable garbage collection.
    engine = LLM(model="meta-llama/Llama-3.2-1B-Instruct", enforce_eager=True, seed=0)
    yield weakref.proxy(engine)
    del engine
    cleanup_dist_env_and_memory()
@pytest.fixture(scope="function")
def llm_for_failure_test():
    """
    Fixture for testing issue #26081.
    Uses a small max_model_len to easily trigger length errors.
    """
    # pytest caches the fixture, so we yield a weakref.proxy to enable GC.
    engine = LLM(
        model="meta-llama/Llama-3.2-1B-Instruct",
        enforce_eager=True,
        seed=0,
        max_model_len=128,
        disable_log_stats=True,
    )
    yield weakref.proxy(engine)
    del engine
    cleanup_dist_env_and_memory()
def test_chat(text_llm):
    """A single conversation produces a single output."""
    conversation = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "Explain the concept of entropy."},
    ]
    outputs = text_llm.chat(conversation)
    assert len(outputs) == 1
def test_multi_chat(text_llm):
    """A batch of two conversations produces two outputs."""
    conversations = [
        [
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": "Explain the concept of entropy."},
        ],
        [
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": "Explain what among us is."},
        ],
    ]
    outputs = text_llm.chat(conversations)
    assert len(outputs) == 2
@pytest.fixture(scope="function")
def vision_llm():
    """Phi-3.5-vision instance capped at two images per prompt."""
    # pytest caches the fixture, so we yield a weakref.proxy to enable GC.
    engine = LLM(
        model="microsoft/Phi-3.5-vision-instruct",
        max_model_len=4096,
        max_num_seqs=5,
        enforce_eager=True,
        trust_remote_code=True,
        limit_mm_per_prompt={"image": 2},
        seed=0,
    )
    yield weakref.proxy(engine)
    del engine
    cleanup_dist_env_and_memory()
@pytest.mark.parametrize(
    "image_urls", [[TEST_IMAGE_ASSETS[0], TEST_IMAGE_ASSETS[1]]], indirect=True
)
def test_chat_multi_image(vision_llm, image_urls: list[str]):
    """A single conversation with two images yields exactly one output."""
    messages = [
        {
            "role": "user",
            "content": [
                *(
                    {"type": "image_url", "image_url": {"url": image_url}}
                    for image_url in image_urls
                ),
                {"type": "text", "text": "What's in this image?"},
            ],
        }
    ]
    outputs = vision_llm.chat(messages)
    # The old `len(outputs) >= 0` check was vacuously true; one conversation
    # must produce exactly one output (consistent with test_chat above).
    assert len(outputs) == 1
def test_llm_chat_tokenization_no_double_bos(text_llm):
    """
    LLM.chat() should not add special tokens when using chat templates.
    Check we get a single BOS token for llama chat.
    """
    outputs = text_llm.chat(
        [
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": "Hello!"},
        ]
    )
    assert len(outputs) == 1
    prompt_token_ids = outputs[0].prompt_token_ids
    assert prompt_token_ids is not None
    bos = text_llm.get_tokenizer().bos_token_id
    # Exactly one BOS at the front — not duplicated by tokenization.
    assert prompt_token_ids[0] == bos
    assert prompt_token_ids[1] != bos, "Double BOS"
@pytest.fixture(scope="function")
def thinking_llm():
    """Qwen3 instance whose chat template supports enable_thinking."""
    # pytest caches the fixture, so we yield a weakref.proxy to enable GC.
    engine = LLM(
        model="Qwen/Qwen3-0.6B",
        max_model_len=4096,
        enforce_eager=True,
        seed=0,
    )
    yield weakref.proxy(engine)
    del engine
    cleanup_dist_env_and_memory()
@pytest.mark.parametrize("enable_thinking", [True, False])
def test_chat_extra_kwargs(thinking_llm, enable_thinking):
    """chat_template_kwargs are forwarded into the chat template."""
    conversation = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "What is 1+1?"},
    ]
    outputs = thinking_llm.chat(
        conversation,
        chat_template_kwargs={"enable_thinking": enable_thinking},
    )
    assert len(outputs) == 1
    prompt_token_ids = outputs[0].prompt_token_ids
    assert prompt_token_ids is not None
    think_id = thinking_llm.get_tokenizer().get_vocab()["<think>"]
    if enable_thinking:
        # With thinking enabled the prompt carries no pre-filled <think> token.
        assert think_id not in prompt_token_ids
    else:
        # The chat template includes dummy thinking process
        assert think_id in prompt_token_ids
def test_chat_batch_failure_cleanup(llm_for_failure_test):
    """
    Tests that if a batch call to llm.chat() fails mid-way
    (e.g., due to one invalid prompt), the requests that
    were already enqueued are properly aborted and do not
    pollute the queue for subsequent calls.
    (Fixes Issue #26081)
    """
    llm = llm_for_failure_test
    ok_msg = [{"role": "user", "content": "Hello"}]
    # 50 repeats pushes the prompt well past max_model_len=128.
    too_long_msg = [
        {"role": "user", "content": "This is a very long text to test the error " * 50}
    ]
    sampling_params = SamplingParams(temperature=0, max_tokens=10)
    with pytest.raises(ValueError, match="longer than the maximum model length"):
        llm.chat([ok_msg, ok_msg, too_long_msg], sampling_params=sampling_params)
    follow_up_batch = [ok_msg, ok_msg]
    outputs = llm.chat(follow_up_batch, sampling_params=sampling_params)
    assert len(outputs) == len(follow_up_batch)
    # Nothing from the failed batch may remain queued.
    assert llm.llm_engine.get_num_unfinished_requests() == 0
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/llm/test_accuracy.py | tests/entrypoints/llm/test_accuracy.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This file test accuracy of the vLLM server via LMEval.
It uses local-completions, which interacts with vLLM
through the OAI API with N concurrent connections.
This simulates real work usage of the API and makes
sure that the zmq frontend mp RPC message passing and
AsyncLLMEngine are working correctly.
"""
import lm_eval
import pytest
from vllm.platforms import current_platform
# Models evaluated end-to-end on GSM8K via lm-eval.
MODEL_NAMES = [
    "Qwen/Qwen3-1.7B",
    "google/gemma-3-1b-it",
]
# Subset additionally exercised with an FP8 KV cache.
FP8_KV_MODEL_NAMES = [
    "Qwen/Qwen3-1.7B",
]
NUM_CONCURRENT = 500
TASK = "gsm8k"
FILTER = "exact_match,strict-match"
RTOL = 0.03  # tolerance band around the expected score
EXPECTED_VALUES = {
    "Qwen/Qwen3-1.7B": 0.68,
    "google/gemma-3-1b-it": 0.25,
}
def run_test(model_name, more_args=None):
    """Run the end to end accuracy test.

    Args:
        model_name: HF model id; must have an entry in EXPECTED_VALUES.
        more_args: Optional extra ``key=value`` pairs appended to model_args.
    """
    model_args = f"pretrained={model_name},max_model_len=4096"
    if more_args is not None:
        model_args = f"{model_args},{more_args}"
    results = lm_eval.simple_evaluate(
        model="vllm",
        model_args=model_args,
        # Use the TASK constant (previously hard-coded to "gsm8k") so the
        # evaluated task and the result lookup below cannot drift apart.
        tasks=TASK,
        batch_size="auto",
    )
    measured_value = results["results"][TASK][FILTER]
    assert model_name in EXPECTED_VALUES, (
        f"Cannot find the expected value for the model {model_name=}"
    )
    expected_value = EXPECTED_VALUES[model_name]
    # Equivalent to the old two-sided check: |measured - expected| < RTOL.
    assert abs(measured_value - expected_value) < RTOL, (
        f"Expected: {expected_value} | Measured: {measured_value}"
    )
# Extra TPU tensor-parallel model_args, appended only when non-empty.
# TODO: [AlexM] Fix it with new CI/CD tests
TPU_TP_TEST_STR = ""  # "tensor_parallel_size=4"
@pytest.mark.parametrize("model", MODEL_NAMES)
def test_lm_eval_accuracy_v1_engine(model):
    """Run with the V1 Engine."""
    more_args = None
    if current_platform.is_tpu():
        # Limit compilation time for TPU V1.
        more_args = "max_model_len=2048,max_num_seqs=64"
        # Add TP test (if provided)
        if TPU_TP_TEST_STR:
            more_args = f"{more_args},{TPU_TP_TEST_STR}"
    run_test(model, more_args)
@pytest.mark.parametrize("model", FP8_KV_MODEL_NAMES)
def test_lm_eval_accuracy_v1_engine_fp8_kv_cache(model):
    """Run with the V1 Engine."""
    more_args = None
    if current_platform.is_tpu():
        # Limit compilation time for TPU V1.
        more_args = "max_model_len=2048,max_num_seqs=128,kv_cache_dtype=fp8"
        # Add TP test (if provided)
        if TPU_TP_TEST_STR:
            more_args = f"{more_args},{TPU_TP_TEST_STR}"
    run_test(model, more_args)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/llm/__init__.py | tests/entrypoints/llm/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/llm/test_collective_rpc.py | tests/entrypoints/llm/test_collective_rpc.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
from vllm import LLM
from ...utils import create_new_process_for_each_test
@pytest.mark.parametrize("tp_size", [1, 2])
@pytest.mark.parametrize("backend", ["mp", "ray"])
@create_new_process_for_each_test()
def test_collective_rpc(tp_size, backend, monkeypatch):
    """collective_rpc must run a locally-defined callable on each worker."""
    if torch.cuda.device_count() < tp_size:
        pytest.skip(f"Not enough GPUs for tensor parallelism {tp_size}")
    if tp_size == 1 and backend == "ray":
        pytest.skip("Skip duplicate test case")
    if tp_size == 1:
        backend = None

    # Defined inside the test on purpose: verifies that locally-defined
    # callables can be serialized and shipped to the workers.
    def echo_rank(self):
        return self.rank

    monkeypatch.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")
    llm = LLM(
        model="hmellor/tiny-random-LlamaForCausalLM",
        enforce_eager=True,
        load_format="dummy",
        tensor_parallel_size=tp_size,
        distributed_executor_backend=backend,
    )
    assert llm.collective_rpc(echo_rank) == list(range(tp_size))
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/llm/test_gpu_utilization.py | tests/entrypoints/llm/test_gpu_utilization.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm import LLM, SamplingParams
def test_gpu_memory_utilization():
    """gpu_memory_utilization is a per-instance limit, not a global one."""
    prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]
    sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
    # Three instances at 0.3 utilization each must coexist on one device.
    llms = [
        LLM(model="facebook/opt-125m", gpu_memory_utilization=0.3, enforce_eager=True)
        for _ in range(3)
    ]
    for llm in llms:
        for output in llm.generate(prompts, sampling_params):
            generated_text = output.outputs[0].text
            print(f"Prompt: {output.prompt!r}, Generated text: {generated_text!r}")
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/llm/test_mm_cache_stats.py | tests/entrypoints/llm/test_mm_cache_stats.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import logging
import pytest
import regex as re
from vllm import LLM
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
from vllm.v1.metrics import loggers as stat_loggers
from vllm.v1.metrics.reader import Counter, Metric
from ..openai.test_vision import TEST_IMAGE_ASSETS
def _make_messages(image_url: str) -> list[ChatCompletionMessageParam]:
    """Wrap a single image URL in a one-message user conversation."""
    image_part = {
        "type": "image_url",
        "image_url": {"url": image_url},
    }
    return [{"role": "user", "content": [image_part]}]
def _get_counter_value(metrics: list[Metric], name: str):
    """Return the value of the named Counter metric from *metrics*."""
    matching = next(m for m in metrics if m.name == name)
    assert isinstance(matching, Counter)
    return matching.value
def _get_mm_cache_stats(metrics: list[Metric]):
    """Return (queries, hits) counters for the multimodal processor cache."""
    return (
        _get_counter_value(metrics, "vllm:mm_cache_queries"),
        _get_counter_value(metrics, "vllm:mm_cache_hits"),
    )
def _get_mm_cache_log(llm: LLM, caplog_vllm: pytest.LogCaptureFixture) -> float:
    """Trigger one stats log line and parse the MM cache hit rate percent."""
    caplog_vllm.clear()
    with caplog_vllm.at_level(logging.INFO, logger=stat_loggers.__name__):
        llm.llm_engine.do_log_stats()
    assert len(caplog_vllm.records) == 1
    message = caplog_vllm.records[0].getMessage()
    assert "MM cache hit rate" in message
    rate = re.search(r"MM cache hit rate: ([0-9.]+)%", message)
    assert rate is not None
    return float(rate.group(1))
@pytest.mark.parametrize("image_urls", [TEST_IMAGE_ASSETS[:2]], indirect=True)
@pytest.mark.parametrize("mm_processor_cache_type", ["lru", "shm"])
def test_mm_cache_stats(
    num_gpus_available,
    image_urls,
    mm_processor_cache_type,
    caplog_vllm,
):
    """MM-cache query/hit counters and the logged hit rate track each chat."""
    llm = LLM(
        model="llava-hf/llava-1.5-7b-hf",
        max_model_len=4096,
        max_num_seqs=5,
        enforce_eager=True,
        mm_processor_cache_type=mm_processor_cache_type,
        disable_log_stats=False,
        limit_mm_per_prompt={"image": 2},
    )

    def chat_and_check(url, expected_counts, expected_rate):
        # One chat turn, then verify raw counters and the logged hit rate.
        llm.chat(_make_messages(url))
        assert _get_mm_cache_stats(llm.get_metrics()) == expected_counts
        assert _get_mm_cache_log(llm, caplog_vllm) == pytest.approx(expected_rate)

    chat_and_check(image_urls[0], (1, 0), 0.0)
    chat_and_check(image_urls[1], (2, 0), 0.0)
    # Repeating the first image is a cache hit: 1 hit in 3 queries -> 33.3%.
    chat_and_check(image_urls[0], (3, 1), 33.3)

    # NOTE: This only resets hit rate stats in CachingMetrics
    # The raw queries and hits counts remain unaffected
    llm.reset_mm_cache()

    chat_and_check(image_urls[0], (4, 1), 0.0)
    chat_and_check(image_urls[1], (5, 1), 0.0)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/llm/test_prompt_validation.py | tests/entrypoints/llm/test_prompt_validation.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
from vllm import LLM
def test_empty_prompt():
    """Empty decoder prompts are rejected with a clear error."""
    llm = LLM(model="openai-community/gpt2", enforce_eager=True)
    with pytest.raises(ValueError, match="decoder prompt cannot be empty"):
        llm.generate([""])
def test_out_of_vocab_token():
    """Prompt token IDs beyond the vocabulary size are rejected."""
    llm = LLM(model="openai-community/gpt2", enforce_eager=True)
    with pytest.raises(ValueError, match="out of vocabulary"):
        llm.generate({"prompt_token_ids": [999999]})
def test_require_mm_embeds():
    """Passing multimodal embeddings requires --enable-mm-embeds."""
    llm = LLM(
        model="llava-hf/llava-1.5-7b-hf",
        enforce_eager=True,
        enable_mm_embeds=False,
    )
    prompt_with_embeds = {
        "prompt": "<image>",
        "multi_modal_data": {"image": torch.empty(1, 1, 1)},
    }
    with pytest.raises(ValueError, match="--enable-mm-embeds"):
        llm.generate(prompt_with_embeds)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/entrypoints/llm/test_generate.py | tests/entrypoints/llm/test_generate.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import weakref
import pytest
from vllm import LLM, SamplingParams
from vllm.distributed import cleanup_dist_env_and_memory
# Small model shared by every generate() test in this module.
MODEL_NAME = "distilbert/distilgpt2"
# Text prompts used as the standard batch.
PROMPTS = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
# Token-ID prompt variants of increasing length (not used in this excerpt).
TOKEN_IDS = [
    [0],
    [0, 1],
    [0, 2, 1],
    [0, 3, 1, 2],
]
@pytest.fixture(scope="module")
def llm():
    """Small GPT-2 engine shared module-wide, GC-able via weakref proxy."""
    # pytest caches the fixture, so we yield a weakref.proxy to
    # enable garbage collection.
    engine = LLM(
        model=MODEL_NAME,
        max_num_batched_tokens=4096,
        tensor_parallel_size=1,
        gpu_memory_utilization=0.10,
        enforce_eager=True,
    )
    yield weakref.proxy(engine)
    del engine
    cleanup_dist_env_and_memory()
@pytest.mark.skip_global_cleanup
def test_multiple_sampling_params(llm: LLM):
    """generate() accepts per-prompt, single, or default sampling params."""
    params_per_prompt = [
        SamplingParams(temperature=0.01, top_p=0.95),
        SamplingParams(temperature=0.3, top_p=0.95),
        SamplingParams(temperature=0.7, top_p=0.95),
        SamplingParams(temperature=0.99, top_p=0.95),
    ]
    # One SamplingParams per prompt is matched positionally.
    outputs = llm.generate(PROMPTS, sampling_params=params_per_prompt)
    assert len(PROMPTS) == len(outputs)
    # A params list whose length mismatches the prompts is an error.
    with pytest.raises(ValueError):
        outputs = llm.generate(PROMPTS, sampling_params=params_per_prompt[:3])
    # A single SamplingParams is broadcast to every prompt.
    shared_params = SamplingParams(temperature=0.3, top_p=0.95)
    outputs = llm.generate(PROMPTS, sampling_params=shared_params)
    assert len(PROMPTS) == len(outputs)
    # None falls back to default sampling params.
    outputs = llm.generate(PROMPTS, sampling_params=None)
    assert len(PROMPTS) == len(outputs)
def test_multiple_priority(llm: LLM):
    """priority must be None or a list matching len(PROMPTS)."""
    # No priority given: works.
    outputs = llm.generate(PROMPTS, sampling_params=None, priority=None)
    assert len(PROMPTS) == len(outputs)
    # One priority per prompt: works.
    outputs = llm.generate(PROMPTS, sampling_params=None, priority=[0] * len(PROMPTS))
    assert len(PROMPTS) == len(outputs)
    # A wrong-length priority list is an error.
    with pytest.raises(ValueError):
        outputs = llm.generate(
            PROMPTS, sampling_params=None, priority=[0] * (len(PROMPTS) - 1)
        )
    # An empty priority list is an error too.
    with pytest.raises(ValueError):
        outputs = llm.generate(PROMPTS, sampling_params=None, priority=[])
def test_max_model_len():
    """Total prompt+generated tokens never exceed max_model_len."""
    max_model_len = 20
    llm = LLM(
        model=MODEL_NAME,
        max_model_len=max_model_len,
        gpu_memory_utilization=0.10,
        enforce_eager=True,  # reduce test time
    )
    # Ask for more new tokens than the model length allows.
    outputs = llm.generate(PROMPTS, SamplingParams(max_tokens=max_model_len + 10))
    for output in outputs:
        total = len(output.prompt_token_ids) + len(output.outputs[0].token_ids)
        # Total tokens must not exceed max_model_len; it can be smaller when
        # generation finishes early for other reasons (e.g., EOS).
        assert total <= max_model_len
def test_log_stats():
    """With stats logging enabled, every output carries metrics."""
    llm = LLM(
        model=MODEL_NAME,
        disable_log_stats=False,
        gpu_memory_utilization=0.10,
        enforce_eager=True,  # reduce test time
    )
    outputs = llm.generate(PROMPTS, sampling_params=None)
    # disable_log_stats is False, so metrics must be populated everywhere.
    assert all(output.metrics is not None for output in outputs)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/test_outputs.py | tests/v1/test_outputs.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest import TestCase
from vllm.v1.outputs import LogprobsLists
class TestLogprobsLists(TestCase):
def setUp(self):
self.logprobsLists = LogprobsLists(
logprob_token_ids=[
[1, 2], # Request 0 token 0
[3, 4], # Request 0 token 1
[5, 6], # Request 1 token 0
[7, 8], # Request 1 token 1
[9, 10], # Request 1 token 2
[11, 12], # Request 2 token 0
[13, 14], # Request 2 token 1
[15, 16], # Request 2 token 2
[17, 18], # Request 2 token 3
],
logprobs=[
[0.1, 0.2],
[0.3, 0.4],
[0.5, 0.6],
[0.7, 0.8],
[0.9, 1.0],
[1.1, 1.2],
[1.3, 1.4],
[1.5, 1.6],
[1.7, 1.8],
],
sampled_token_ranks=[1, 3, 5, 7, 9, 11, 13, 15, 17],
cu_num_generated_tokens=[0, 2, 5, 9],
)
def test_slice_without_cu_num_generated_tokens(self):
"""Test slicing without cu_num_generated_tokens"""
logprobsLists = LogprobsLists(
logprob_token_ids=[[1], [2], [3]],
logprobs=[[0.1], [0.2], [0.3]],
sampled_token_ranks=[1, 2, 3],
cu_num_generated_tokens=None,
)
sliced = logprobsLists.slice_request(1, num_positions=2)
assert sliced.logprob_token_ids == [[2], [3]]
assert sliced.logprobs == [[0.2], [0.3]]
assert sliced.sampled_token_ranks == [2, 3]
assert sliced.cu_num_generated_tokens is None
def test_slice_from_start(self):
"""Test slicing from the start position"""
sliced = self.logprobsLists.slice_request(0, num_positions=5)
assert len(sliced.logprob_token_ids) == 5
assert sliced.logprob_token_ids == [
[1, 2],
[3, 4],
[5, 6],
[7, 8],
[9, 10],
]
assert sliced.cu_num_generated_tokens is None
def test_slice_from_middle(self):
"""Test slicing from the middle position"""
sliced = self.logprobsLists.slice_request(1, num_positions=7)
assert len(sliced.logprob_token_ids) == 7
assert sliced.logprob_token_ids == [
[5, 6],
[7, 8],
[9, 10],
[11, 12],
[13, 14],
[15, 16],
[17, 18],
]
assert sliced.cu_num_generated_tokens is None
def test_slice_single_request(self):
"""Test slicing a single request"""
sliced = self.logprobsLists.slice_request(1, num_positions=3)
assert len(sliced.logprob_token_ids) == 3
assert sliced.logprob_token_ids == [[5, 6], [7, 8], [9, 10]]
assert sliced.cu_num_generated_tokens is None
def test_slice_last_request(self):
"""Test slicing the last request"""
sliced = self.logprobsLists.slice_request(2, num_positions=4)
assert len(sliced.logprob_token_ids) == 4
assert sliced.logprob_token_ids == [[11, 12], [13, 14], [15, 16], [17, 18]]
assert sliced.cu_num_generated_tokens is None
def test_slice_all_requests(self):
"""Test slicing all requests (full slice)"""
sliced = self.logprobsLists.slice_request(0, num_positions=9)
assert len(sliced.logprob_token_ids) == 9 # All tokens
assert sliced.logprob_token_ids == self.logprobsLists.logprob_token_ids
assert sliced.cu_num_generated_tokens is None
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/test_serial_utils.py | tests/v1/test_serial_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections import UserDict
from dataclasses import dataclass
import msgspec
import numpy as np
import pytest
import torch
from vllm.multimodal.inputs import (
MultiModalBatchedField,
MultiModalFieldElem,
MultiModalFlatField,
MultiModalKwargsItem,
MultiModalKwargsItems,
MultiModalSharedField,
NestedTensors,
)
from vllm.v1.serial_utils import MsgpackDecoder, MsgpackEncoder
pytestmark = pytest.mark.cpu_test
class UnrecognizedType(UserDict):
def __init__(self, an_int: int):
super().__init__()
self.an_int = an_int
@dataclass
class MyType:
tensor1: torch.Tensor
a_string: str
list_of_tensors: list[torch.Tensor]
numpy_array: np.ndarray
unrecognized: UnrecognizedType
small_f_contig_tensor: torch.Tensor
large_f_contig_tensor: torch.Tensor
small_non_contig_tensor: torch.Tensor
large_non_contig_tensor: torch.Tensor
empty_tensor: torch.Tensor
def test_encode_decode(monkeypatch: pytest.MonkeyPatch):
"""Test encode/decode loop with zero-copy tensors."""
with monkeypatch.context() as m:
m.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")
obj = MyType(
tensor1=torch.randint(low=0, high=100, size=(1024,), dtype=torch.int32),
a_string="hello",
list_of_tensors=[
torch.rand((1, 10), dtype=torch.float32),
torch.rand((3, 5, 4000), dtype=torch.float64),
torch.tensor(1984), # test scalar too
# Make sure to test bf16 which numpy doesn't support.
torch.rand((3, 5, 1000), dtype=torch.bfloat16),
torch.tensor(
[float("-inf"), float("inf")] * 1024, dtype=torch.bfloat16
),
],
numpy_array=np.arange(512),
unrecognized=UnrecognizedType(33),
small_f_contig_tensor=torch.rand(5, 4).t(),
large_f_contig_tensor=torch.rand(1024, 4).t(),
small_non_contig_tensor=torch.rand(2, 4)[:, 1:3],
large_non_contig_tensor=torch.rand(1024, 512)[:, 10:20],
empty_tensor=torch.empty(0),
)
encoder = MsgpackEncoder(size_threshold=256)
decoder = MsgpackDecoder(MyType)
encoded = encoder.encode(obj)
# There should be the main buffer + 4 large tensor buffers
# + 1 large numpy array. "large" is <= 512 bytes.
# The two small tensors are encoded inline.
assert len(encoded) == 8
decoded: MyType = decoder.decode(encoded)
assert_equal(decoded, obj)
# Test encode_into case
preallocated = bytearray()
encoded2 = encoder.encode_into(obj, preallocated)
assert len(encoded2) == 8
assert encoded2[0] is preallocated
decoded2: MyType = decoder.decode(encoded2)
assert_equal(decoded2, obj)
class MyRequest(msgspec.Struct):
mm: list[MultiModalKwargsItems] | None
def test_multimodal_kwargs():
e1 = MultiModalFieldElem(
"audio",
"a0",
torch.zeros(1000, dtype=torch.bfloat16),
MultiModalBatchedField(),
)
e2 = MultiModalFieldElem(
"video",
"v0",
[torch.zeros(1000, dtype=torch.int8) for _ in range(4)],
MultiModalFlatField(
slices=[[slice(1, 2, 3), slice(4, 5, 6)], [slice(None, 2)]],
dim=0,
),
)
e3 = MultiModalFieldElem(
"image",
"i0",
torch.zeros(1000, dtype=torch.int32),
MultiModalSharedField(batch_size=4),
)
e4 = MultiModalFieldElem(
"image",
"i1",
torch.zeros(1000, dtype=torch.int32),
MultiModalFlatField(slices=[slice(1, 2, 3), slice(4, 5, 6)], dim=2),
)
audio = MultiModalKwargsItem.from_elems([e1])
video = MultiModalKwargsItem.from_elems([e2])
image = MultiModalKwargsItem.from_elems([e3, e4])
mm = MultiModalKwargsItems.from_seq([audio, video, image])
# pack mm kwargs into a mock request so that it can be decoded properly
req = MyRequest([mm])
encoder = MsgpackEncoder()
decoder = MsgpackDecoder(MyRequest)
encoded = encoder.encode(req)
assert len(encoded) == 8
total_len = sum(memoryview(x).cast("B").nbytes for x in encoded)
# expected total encoding length, should be 14395, +-20 for minor changes
assert 14375 <= total_len <= 14425
decoded = decoder.decode(encoded).mm[0]
assert isinstance(decoded, MultiModalKwargsItems)
# check all modalities were recovered and do some basic sanity checks
assert len(decoded) == 3
images = decoded["image"]
assert len(images) == 1
assert len(images[0].items()) == 2
assert list(images[0].keys()) == ["i0", "i1"]
# check the tensor contents and layout in the main dict
mm_data = mm.get_data()
decoded_data = decoded.get_data()
assert all(nested_equal(mm_data[k], decoded_data[k]) for k in mm_data)
def nested_equal(a: NestedTensors, b: NestedTensors):
if isinstance(a, torch.Tensor):
return torch.equal(a, b)
return all(nested_equal(x, y) for x, y in zip(a, b))
def assert_equal(obj1: MyType, obj2: MyType):
assert torch.equal(obj1.tensor1, obj2.tensor1)
assert obj1.a_string == obj2.a_string
assert all(
torch.equal(a, b) for a, b in zip(obj1.list_of_tensors, obj2.list_of_tensors)
)
assert np.array_equal(obj1.numpy_array, obj2.numpy_array)
assert obj1.unrecognized.an_int == obj2.unrecognized.an_int
assert torch.equal(obj1.small_f_contig_tensor, obj2.small_f_contig_tensor)
assert torch.equal(obj1.large_f_contig_tensor, obj2.large_f_contig_tensor)
assert torch.equal(obj1.small_non_contig_tensor, obj2.small_non_contig_tensor)
assert torch.equal(obj1.large_non_contig_tensor, obj2.large_non_contig_tensor)
assert torch.equal(obj1.empty_tensor, obj2.empty_tensor)
def test_dict_serialization():
"""Test encoding and decoding of a generic Python object using pickle."""
encoder = MsgpackEncoder()
decoder = MsgpackDecoder()
# Create a sample Python object
obj = {"key": "value", "number": 42}
# Encode the object
encoded = encoder.encode(obj)
# Decode the object
decoded = decoder.decode(encoded)
# Verify the decoded object matches the original
assert obj == decoded, "Decoded object does not match the original object."
def test_tensor_serialization():
"""Test encoding and decoding of a torch.Tensor."""
encoder = MsgpackEncoder()
decoder = MsgpackDecoder(torch.Tensor)
# Create a sample tensor
tensor = torch.rand(10, 10)
# Encode the tensor
encoded = encoder.encode(tensor)
# Decode the tensor
decoded = decoder.decode(encoded)
# Verify the decoded tensor matches the original
assert torch.allclose(tensor, decoded), (
"Decoded tensor does not match the original tensor."
)
def test_numpy_array_serialization():
"""Test encoding and decoding of a numpy array."""
encoder = MsgpackEncoder()
decoder = MsgpackDecoder(np.ndarray)
# Create a sample numpy array
array = np.random.rand(10, 10)
# Encode the numpy array
encoded = encoder.encode(array)
# Decode the numpy array
decoded = decoder.decode(encoded)
# Verify the decoded array matches the original
assert np.allclose(array, decoded), (
"Decoded numpy array does not match the original array."
)
class CustomClass:
def __init__(self, value):
self.value = value
def __eq__(self, other):
return isinstance(other, CustomClass) and self.value == other.value
def test_custom_class_serialization_allowed_with_pickle(
monkeypatch: pytest.MonkeyPatch,
):
"""Test that serializing a custom class succeeds when allow_pickle=True."""
with monkeypatch.context() as m:
m.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")
encoder = MsgpackEncoder()
decoder = MsgpackDecoder(CustomClass)
obj = CustomClass("test_value")
# Encode the custom class
encoded = encoder.encode(obj)
# Decode the custom class
decoded = decoder.decode(encoded)
# Verify the decoded object matches the original
assert obj == decoded, "Decoded object does not match the original object."
def test_custom_class_serialization_disallowed_without_pickle():
"""Test that serializing a custom class fails when allow_pickle=False."""
encoder = MsgpackEncoder()
obj = CustomClass("test_value")
with pytest.raises(TypeError):
# Attempt to encode the custom class
encoder.encode(obj)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/test_request.py | tests/v1/test_request.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.v1.request import RequestStatus
def test_request_status_fmt_str():
"""Test that the string representation of RequestStatus is correct."""
assert f"{RequestStatus.WAITING}" == "WAITING"
assert f"{RequestStatus.WAITING_FOR_FSM}" == "WAITING_FOR_FSM"
assert f"{RequestStatus.WAITING_FOR_REMOTE_KVS}" == "WAITING_FOR_REMOTE_KVS"
assert f"{RequestStatus.RUNNING}" == "RUNNING"
assert f"{RequestStatus.PREEMPTED}" == "PREEMPTED"
assert f"{RequestStatus.FINISHED_STOPPED}" == "FINISHED_STOPPED"
assert f"{RequestStatus.FINISHED_LENGTH_CAPPED}" == "FINISHED_LENGTH_CAPPED"
assert f"{RequestStatus.FINISHED_ABORTED}" == "FINISHED_ABORTED"
assert f"{RequestStatus.FINISHED_IGNORED}" == "FINISHED_IGNORED"
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/utils.py | tests/v1/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import regex as re
import requests
from tests.utils import RemoteOpenAIServer
# Prometheus metrics utilities for testing
def get_prometheus_metrics(server: RemoteOpenAIServer) -> dict[str, dict[str, float]]:
"""Fetch and parse Prometheus metrics from the /metrics endpoint.
Returns:
Dict mapping metric names to their values grouped by labels.
For example: {"vllm:request_success": {
"engine=0": 5.0, "engine=1": 3.0}
}
"""
try:
response = requests.get(server.url_for("metrics"), timeout=10)
response.raise_for_status()
metrics: dict[str, dict[str, float]] = {}
# Regex patterns for Prometheus metrics
metric_with_labels = re.compile(
r"^([a-zA-Z_:][a-zA-Z0-9_:]*)\{([^}]*)\}\s+([\d\.\-\+e]+)$"
)
metric_simple = re.compile(r"^([a-zA-Z_:][a-zA-Z0-9_:]*)\s+([\d\.\-\+e]+)$")
for line in response.text.split("\n"):
line = line.strip()
# Skip comments and empty lines
if not line or line.startswith("#"):
continue
# Try to match metric with labels first
match = metric_with_labels.match(line)
if match:
metric_name, labels_part, value_str = match.groups()
try:
value = float(value_str)
if metric_name not in metrics:
metrics[metric_name] = {}
metrics[metric_name][f"{{{labels_part}}}"] = value
except ValueError:
continue
else:
# Try simple metric without labels
match = metric_simple.match(line)
if match:
metric_name, value_str = match.groups()
try:
value = float(value_str)
if metric_name not in metrics:
metrics[metric_name] = {}
metrics[metric_name][""] = value
except ValueError:
continue
return metrics
except Exception as e:
pytest.fail(f"Failed to fetch Prometheus metrics: {e}")
return {}
def get_engine_request_counts(metrics: dict[str, dict[str, float]]) -> dict[str, float]:
"""Extract request counts per engine from Prometheus metrics.
Returns:
Dict mapping engine indices to request counts.
For example: {"0": 15.0, "1": 12.0}
"""
engine_counts = {}
# Look for request success metrics with engine labels
success_metrics = metrics.get("vllm:request_success_total", {})
engine_pattern = re.compile(r'engine="([^"]*)"')
for labels, count in success_metrics.items():
# Extract engine ID from labels using regex
match = engine_pattern.search(labels)
if match:
engine_id = match.group(1)
if engine_id not in engine_counts:
engine_counts[engine_id] = 0.0
engine_counts[engine_id] += count
return engine_counts
def check_request_balancing(server: RemoteOpenAIServer, dp_size: int):
"""Check request balancing via Prometheus metrics if dp_size > 1.
Args:
server: The RemoteOpenAIServer instance
dp_size: Number of data parallel ranks
"""
if dp_size <= 1:
return
# Get metrics after all requests are completed
metrics = get_prometheus_metrics(server)
engine_counts = get_engine_request_counts(metrics)
# Check that multiple engines received requests
engines_with_requests = [
engine for engine, count in engine_counts.items() if count > 0
]
assert len(engines_with_requests) == dp_size, (
f"Expected requests to be distributed across multiple engines,"
f" but only engine(s) {engines_with_requests} received "
f"requests. Engine counts: {engine_counts}"
)
# Verify that the load is reasonably balanced
# (no engine should handle all requests)
total_requests = sum(engine_counts.values())
for count in engine_counts.values():
assert count > total_requests // (dp_size + 1), (
f"requests are imbalanced: {engine_counts}"
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/__init__.py | tests/v1/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/test_oracle.py | tests/v1/test_oracle.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from vllm.engine.arg_utils import AsyncEngineArgs
MODEL = "meta-llama/Llama-3.2-1B-Instruct"
def test_unsupported_configs():
with pytest.raises(NotImplementedError):
AsyncEngineArgs(
model=MODEL,
speculative_config={
"model": MODEL,
},
).create_engine_config()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/attention/test_attention_backends.py | tests/v1/attention/test_attention_backends.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for v1 attention backends without GPUModelRunner dependency."""
from functools import partial
import pytest
import torch
from torch.nn.attention.flex_attention import create_block_mask, flex_attention
from tests.v1.attention.utils import (
BatchSpec,
create_common_attn_metadata,
create_standard_kv_cache_spec,
create_vllm_config,
try_get_attention_backend,
)
from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.config import ModelConfig
from vllm.platforms import current_platform
from vllm.utils.math_utils import cdiv
from vllm.utils.torch_utils import STR_DTYPE_TO_TORCH_DTYPE, is_torch_equal_or_newer
from vllm.v1.attention.backends.utils import (
CommonAttentionMetadata,
set_kv_cache_layout,
)
from vllm.v1.kv_cache_interface import FullAttentionSpec
BACKENDS_TO_TEST = [
AttentionBackendEnum.FLASH_ATTN,
AttentionBackendEnum.FLASHINFER,
AttentionBackendEnum.FLEX_ATTENTION,
AttentionBackendEnum.TRITON_ATTN,
AttentionBackendEnum.TREE_ATTN,
"FLEX_ATTENTION_SLOW",
]
# Remove flashinfer from the list if it's not available
try:
import flashinfer # noqa: F401
except ImportError:
BACKENDS_TO_TEST.remove(AttentionBackendEnum.FLASHINFER)
def _convert_dtype_to_torch(dtype):
"""Convert ModelDType to torch.dtype."""
if isinstance(dtype, str):
if dtype == "auto":
return torch.float16 # Default dtype for testing
elif dtype in STR_DTYPE_TO_TORCH_DTYPE:
return STR_DTYPE_TO_TORCH_DTYPE[dtype]
else:
raise ValueError(f"Unknown dtype: {dtype}")
elif isinstance(dtype, torch.dtype):
return dtype
else:
raise ValueError(f"Unknown dtype: {dtype}")
# Define common batch configurations
BATCH_SPECS = {
"small_decode": BatchSpec(seq_lens=[32, 40], query_lens=[1, 1]),
"small_prefill": BatchSpec(seq_lens=[32, 40], query_lens=[8, 8]),
"mixed_small": BatchSpec(seq_lens=[32, 40, 48, 56], query_lens=[1, 1, 5, 5]),
"medium_decode": BatchSpec(
seq_lens=[128, 256, 512, 1024, 128, 256, 512, 1024],
query_lens=[1, 1, 1, 1, 1, 1, 1, 1],
),
"medium_prefill": BatchSpec(
seq_lens=[256, 512, 1024, 2048], query_lens=[16, 16, 16, 16]
),
"mixed_medium": BatchSpec(
seq_lens=[512, 1024, 2048, 512, 1024, 2048], query_lens=[1, 1, 1, 7, 7, 7]
),
"large_decode": BatchSpec(seq_lens=[2048] * 32, query_lens=[1] * 32),
"large_prefill": BatchSpec(seq_lens=[4096] * 8, query_lens=[32] * 8),
"mixed_large": BatchSpec(
seq_lens=[1024, 2048, 4096, 1024, 2048, 4096], query_lens=[1, 1, 1, 32, 32, 32]
),
"single_decode": BatchSpec(seq_lens=[1024], query_lens=[1]),
"single_prefill": BatchSpec(seq_lens=[1024], query_lens=[64]),
}
def create_and_prepopulate_kv_cache(
k_contexts: list[torch.Tensor],
v_contexts: list[torch.Tensor],
block_size: int,
num_kv_heads: int,
head_size: int,
dtype: torch.dtype,
device: torch.device,
num_blocks: int,
common_attn_metadata: CommonAttentionMetadata,
randomize_blocks: bool = True,
) -> torch.Tensor:
"""Create and prepopulate a KV cache with context data.
Args:
k_contexts: List of key context tensors for each sequence
v_contexts: List of value context tensors for each sequence
seq_lens: List of sequence lengths
block_size: Size of each block
num_kv_heads: Number of KV heads
head_size: Size of each head
dtype: Data type for the cache
device: Device to create the cache on
num_blocks: Total number of blocks in the cache
block_table: Block table tensor to populate
randomize_blocks: Whether to randomly permute blocks
or use sequential order
Returns:
Tuple of (kv_cache, updated_block_table)
"""
batch_size = len(k_contexts)
seq_lens = common_attn_metadata.seq_lens_cpu
query_lens = (
common_attn_metadata.query_start_loc_cpu[1:]
- common_attn_metadata.query_start_loc_cpu[:-1]
)
context_lens = common_attn_metadata.num_computed_tokens_cpu
block_table = common_attn_metadata.block_table_tensor
slot_mapping = common_attn_metadata.slot_mapping
# Create KV cache
kv_cache = torch.empty(
2, num_blocks, block_size, num_kv_heads, head_size, dtype=dtype, device=device
)
kv_cache_flat = kv_cache.view(2, -1, num_kv_heads, head_size)
# Populate the cache with the context tokens
# Start from block_id=1 since block_id=0 is considered the null block
start_block_idx = 1
for i in range(batch_size):
k_context, v_context = k_contexts[i], v_contexts[i]
start = start_block_idx * block_size
end = start + k_context.shape[0]
kv_cache_flat[0, start:end, ...] = k_context
kv_cache_flat[1, start:end, ...] = v_context
# Stay block aligned and allocate enough blocks for the new tokens
start_block_idx += cdiv(int(seq_lens[i]), block_size)
blocks_end = start_block_idx
# Permute the context blocks (excluding block 0 which is null)
if randomize_blocks:
# Random permutation starting from block 1
perm = torch.randperm(blocks_end - 1) + 1
else:
# Sequential order starting from block 1
perm = torch.arange(1, blocks_end)
inv_perm = torch.zeros(blocks_end, dtype=torch.long, device=device)
# Add 1 to account for starting from block 1
inv_perm[1:] = torch.argsort(perm) + 1
kv_cache[:, 1:blocks_end, ...] = kv_cache[:, perm, ...]
# Construct the right block table
# Start from block_id=1 since block_id=0 is considered the null block
start_block_idx = 1
for i in range(batch_size):
num_blocks_for_seq = cdiv(int(seq_lens[i]), block_size)
start = start_block_idx
end = start + num_blocks_for_seq
block_table[i, :num_blocks_for_seq] = inv_perm[start:end]
start_block_idx += num_blocks_for_seq
# Create a realistic slot mapping that corresponds to the block table
for i in range(batch_size):
token_offsets = torch.arange(int(query_lens[i])) + int(context_lens[i])
block_indices = token_offsets // block_size
token_inter_block_offsets = token_offsets % block_size
start = common_attn_metadata.query_start_loc_cpu[i]
end = common_attn_metadata.query_start_loc_cpu[i + 1]
slot_mapping[start:end] = block_table[
i, block_indices
] * block_size + token_inter_block_offsets.to(device)
return kv_cache
class MockAttentionLayer:
"""A mock attention layer for testing."""
def __init__(self, device: torch.device):
self._q_scale = torch.tensor(1.0, device=device)
self._k_scale = torch.tensor(1.0, device=device)
self._v_scale = torch.tensor(1.0, device=device)
# Add float versions for flashinfer
self._q_scale_float = 1.0
self._k_scale_float = 1.0
self._v_scale_float = 1.0
def run_attention_backend(
backend: AttentionBackendEnum,
kv_cache_spec: FullAttentionSpec,
layer_names: list[str],
vllm_config,
device: torch.device,
common_attn_metadata: CommonAttentionMetadata,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
kv_cache: torch.Tensor,
sliding_window: int | None = None,
) -> torch.Tensor:
"""Run attention computation using the specified backend's AttentionImpl."""
# Handle special case for FLEX_ATTENTION_SLOW
actual_backend = backend
use_direct_block_mask = is_torch_equal_or_newer("2.9.0.dev0")
if backend == "FLEX_ATTENTION_SLOW":
actual_backend = AttentionBackendEnum.FLEX_ATTENTION
use_direct_block_mask = False
builder_cls, impl_cls = try_get_attention_backend(actual_backend)
# Mock flashinfer's get_per_layer_parameters if needed
if actual_backend == AttentionBackendEnum.FLASHINFER:
import unittest.mock
from vllm.v1.attention.backends.utils import PerLayerParameters
def mock_get_per_layer_parameters(vllm_config, layer_names, impl_cls):
# Return mock parameters for a single layer
head_size = vllm_config.model_config.get_head_size()
return {
layer_name: PerLayerParameters(
window_left=-1, # No sliding window
logits_soft_cap=0.0, # No soft cap
sm_scale=1.0 / (head_size**0.5), # Standard scale
)
for layer_name in layer_names
}
with unittest.mock.patch(
"vllm.v1.attention.backends.flashinfer.get_per_layer_parameters",
mock_get_per_layer_parameters,
):
builder = builder_cls(kv_cache_spec, layer_names, vllm_config, device)
attn_metadata = builder.build(
common_prefix_len=0,
common_attn_metadata=common_attn_metadata,
)
else:
# Build metadata
builder = builder_cls(kv_cache_spec, layer_names, vllm_config, device)
if actual_backend == AttentionBackendEnum.FLEX_ATTENTION:
builder.direct_build = use_direct_block_mask
attn_metadata = builder.build(
common_prefix_len=0,
common_attn_metadata=common_attn_metadata,
)
# Instantiate implementation
num_heads = vllm_config.model_config.get_num_attention_heads(
vllm_config.parallel_config
)
num_kv_heads = vllm_config.model_config.get_num_kv_heads(
vllm_config.parallel_config
)
head_size = vllm_config.model_config.get_head_size()
scale = 1.0 / (head_size**0.5)
impl = impl_cls(
num_heads=num_heads,
head_size=head_size,
scale=scale,
num_kv_heads=num_kv_heads,
alibi_slopes=None,
sliding_window=sliding_window,
kv_cache_dtype="auto",
)
# Create mock layer and output buffer
mock_layer = MockAttentionLayer(device)
output = torch.empty_like(query)
# Run forward pass
# NOTE: The query, key, and value are already shaped correctly
# in the calling test function.
output = impl.forward(
mock_layer, query, key, value, kv_cache, attn_metadata, output=output
)
return output
def _test_backend_correctness(
batch_spec: BatchSpec,
model: str,
backend_to_test: list[AttentionBackendEnum | str],
mask_mod,
*,
block_size: int = 16,
atol: float = 1e-2,
rtol: float = 1e-2,
tensor_parallel_size: int = 1,
):
"""
Test that all backends produce similar outputs to a reference implementation
using torch.nn.functional.scaled_dot_product_attention.
This test works by:
1. Generating a batch of sequences with specified context and query lengths.
2. Computing a ground-truth attention output using torch.sdpa on
contiguous Q, K, and V tensors.
3. Simulating vLLM's paged KV cache: It takes the context portion of the
K/V tensors and manually places them into a paged buffer according to
the test's (randomly generated) block table.
4. Running each vLLM attention backend with the new queries and the
simulated paged KV cache.
5. Comparing the vLLM backend's output to the ground-truth SDPA output.
Note: When tensor_parallel_size > 1, we simulate the head partitioning
by overriding the model config to use fewer heads, without requiring
multiple GPUs. This tests that backends work correctly with different
head counts.
"""
current_platform.seed_everything(42)
hf_config_override = None
if tensor_parallel_size > 1:
from vllm.config import ModelConfig
temp_config = ModelConfig(model=model, max_model_len=1)
original_num_heads = temp_config.hf_text_config.num_attention_heads
original_num_kv_heads = getattr(
temp_config.hf_text_config, "num_key_value_heads", None
)
hf_config_override = {
"num_attention_heads": original_num_heads // tensor_parallel_size,
}
if original_num_kv_heads is not None:
hf_config_override["num_key_value_heads"] = max(
1, original_num_kv_heads // tensor_parallel_size
)
vllm_config = create_vllm_config(
model_name=model,
tensor_parallel_size=1, # Always use TP=1 to avoid multi-GPU requirements
max_model_len=max(batch_spec.seq_lens),
block_size=block_size,
num_gpu_blocks=8192,
hf_config_override=hf_config_override,
)
device = torch.device("cuda:0")
kv_cache_spec = create_standard_kv_cache_spec(vllm_config)
# 1. Setup
batch_size = batch_spec.batch_size
seq_lens = batch_spec.seq_lens
query_lens = batch_spec.query_lens
num_q_heads = vllm_config.model_config.get_num_attention_heads(
vllm_config.parallel_config
)
num_kv_heads = vllm_config.model_config.get_num_kv_heads(
vllm_config.parallel_config
)
head_size = vllm_config.model_config.get_head_size()
sliding_window = vllm_config.model_config.get_sliding_window()
dtype = _convert_dtype_to_torch(vllm_config.model_config.dtype)
block_size = vllm_config.cache_config.block_size
scale = 1.0 / (head_size**0.5)
# 2. Generate data and compute SDPA reference output
all_q_vllm, all_k_vllm, all_v_vllm = [], [], []
all_sdpa_outputs = []
k_contexts, v_contexts = [], []
for i in range(batch_size):
s_len = seq_lens[i]
q_len = query_lens[i]
context_len = s_len - q_len
# Generate Q, K, V for the whole sequence to be used in SDPA
q = torch.randn(q_len, num_q_heads, head_size, dtype=dtype, device=device)
k_full = torch.randn(s_len, num_kv_heads, head_size, dtype=dtype, device=device)
v_full = torch.randn(s_len, num_kv_heads, head_size, dtype=dtype, device=device)
# SDPA expects (N, H, L, D), so unsqueeze batch and permute
q_sdpa_in = q.unsqueeze(0).transpose(1, 2)
k_sdpa_in = k_full.unsqueeze(0).transpose(1, 2)
v_sdpa_in = v_full.unsqueeze(0).transpose(1, 2)
if num_q_heads != num_kv_heads:
assert num_q_heads % num_kv_heads == 0, (
f"num_q_heads ({num_q_heads}) must be divisible by "
f"num_kv_heads ({num_kv_heads})"
)
repeats = num_q_heads // num_kv_heads
k_sdpa_in = k_sdpa_in.repeat_interleave(repeats, dim=1)
v_sdpa_in = v_sdpa_in.repeat_interleave(repeats, dim=1)
# Create causal mask: query token i attends to positions 0 to
# (context_len + i)
kv_len = s_len
final_mask_mod = partial(mask_mod, context_len=context_len)
block_mask = create_block_mask(
final_mask_mod, B=None, H=None, Q_LEN=q_len, KV_LEN=kv_len, device=device
)
sdpa_out_i = flex_attention(
q_sdpa_in,
k_sdpa_in,
v_sdpa_in,
block_mask=block_mask,
scale=scale,
enable_gqa=True,
)
all_sdpa_outputs.append(sdpa_out_i.transpose(1, 2).squeeze(0))
# Inputs for vLLM backends are just the new tokens
all_q_vllm.append(q)
all_k_vllm.append(k_full[context_len:])
all_v_vllm.append(v_full[context_len:])
# Contextual K/V data used to populate the paged cache
k_contexts.append(k_full[:context_len])
v_contexts.append(v_full[:context_len])
query_vllm = torch.cat(all_q_vllm, dim=0)
key_vllm = torch.cat(all_k_vllm, dim=0)
value_vllm = torch.cat(all_v_vllm, dim=0)
sdpa_output = torch.cat(all_sdpa_outputs, dim=0)
common_attn_metadata = create_common_attn_metadata(
batch_spec, vllm_config.cache_config.block_size, device
)
# 3. Simulate Paged KV Cache and a realistic slot_mapping
kv_cache = create_and_prepopulate_kv_cache(
k_contexts=k_contexts,
v_contexts=v_contexts,
block_size=block_size,
num_kv_heads=num_kv_heads,
head_size=head_size,
dtype=dtype,
device=device,
num_blocks=vllm_config.cache_config.num_gpu_blocks or 1000,
common_attn_metadata=common_attn_metadata,
randomize_blocks=True,
)
# 4. Run vLLM backends and compare
# Note: flex_attention has known Triton kernel compatibility issues
# with test infrastructures
for backend_name in backend_to_test:
# FlashAttentionm + FlexAttention:
# [2, num_blocks, block_size, num_kv_heads, head_size]
# FlashInfer + Triton:
# [num_blocks, 2, block_size, num_kv_heads, head_size]
# Select the appropriate KV cache format for each backend
kv_cache_for_backend = kv_cache
reset_kv_cache_layout = False
if backend_name in (
AttentionBackendEnum.FLASHINFER,
AttentionBackendEnum.TRITON_ATTN,
):
kv_cache_for_backend = kv_cache.transpose(0, 1)
if backend_name == AttentionBackendEnum.FLASHINFER:
# For FlashInfer default to HND layout and
kv_cache_for_backend = (
kv_cache_for_backend.transpose(2, 3).contiguous().transpose(2, 3)
)
set_kv_cache_layout("HND")
reset_kv_cache_layout = True
elif backend_name == AttentionBackendEnum.TRITON_ATTN:
kv_cache_for_backend = kv_cache_for_backend.contiguous()
try:
backend_output = run_attention_backend(
backend_name,
kv_cache_spec,
["placeholder"],
vllm_config,
device,
common_attn_metadata,
query_vllm,
key_vllm,
value_vllm,
kv_cache_for_backend,
sliding_window=sliding_window,
)
finally:
if reset_kv_cache_layout:
set_kv_cache_layout(None)
# Check shape and dtype consistency
assert backend_output.shape == sdpa_output.shape, (
f"[{backend_name}] shape {backend_output.shape} != "
f"SDPA shape {sdpa_output.shape}"
)
assert backend_output.dtype == sdpa_output.dtype, (
f"[{backend_name}] dtype {backend_output.dtype} != "
f"SDPA dtype {sdpa_output.dtype}"
)
assert torch.isfinite(backend_output).all(), (
f"[{backend_name}] produced non-finite values"
)
# Check numerical similarity
def error_msg(msg: str, backend_name: str):
return f"[{backend_name}] output differs from SDPA baseline. {msg}"
torch.testing.assert_close(
backend_output,
sdpa_output,
rtol=rtol,
atol=atol,
msg=partial(error_msg, backend_name=backend_name),
)
@pytest.mark.parametrize(
    "batch_spec_name",
    [
        "small_decode",
        "small_prefill",
        "mixed_small",
        "medium_decode",
        "medium_prefill",
        "mixed_medium",
        "large_decode",
        "large_prefill",
        "single_decode",
        "single_prefill",
    ],
)
@pytest.mark.parametrize("model", ["meta-llama/Meta-Llama-3-8B"])
@pytest.mark.parametrize("tensor_parallel_size", [1, 2, 4])
def test_causal_backend_correctness(
    batch_spec_name: str, model: str, tensor_parallel_size: int
):
    """Test backend's correctness with causal attention.

    Runs every backend in BACKENDS_TO_TEST against the SDPA reference with a
    plain causal mask.  FlexAttention on torch >= 2.9.0.dev0 needs
    block_size=128, so it is exercised in a second, separate pass.
    """

    def causal_mask_mod(
        b: torch.Tensor,
        h: torch.Tensor,
        q_idx: torch.Tensor,
        kv_idx: torch.Tensor,
        *,
        context_len: int,
    ):
        # A query token may attend to any KV position at or before its
        # absolute position (context_len + q_idx).
        return (q_idx + context_len) >= kv_idx

    batch_spec = BATCH_SPECS[batch_spec_name]
    # Backends that must run with the large (128) block size.
    LARGE_BLOCK_BACKENDS = (
        [AttentionBackendEnum.FLEX_ATTENTION]
        if is_torch_equal_or_newer("2.9.0.dev0")
        else []
    )
    if current_platform.is_rocm():
        # FLASH_ATTN is excluded on ROCm.
        SMALL_BLOCK_BACKENDS = [
            x
            for x in BACKENDS_TO_TEST
            if (
                x not in LARGE_BLOCK_BACKENDS
                and x is not AttentionBackendEnum.FLASH_ATTN
            )
        ]
    else:
        SMALL_BLOCK_BACKENDS = [
            x for x in BACKENDS_TO_TEST if x not in LARGE_BLOCK_BACKENDS
        ]
    _test_backend_correctness(
        batch_spec,
        model,
        SMALL_BLOCK_BACKENDS,
        causal_mask_mod,
        tensor_parallel_size=tensor_parallel_size,
    )
    # Fast FlexAttention needs to run with block_size=128
    if LARGE_BLOCK_BACKENDS:
        _test_backend_correctness(
            batch_spec,
            model,
            LARGE_BLOCK_BACKENDS,
            causal_mask_mod,
            block_size=128,
            tensor_parallel_size=tensor_parallel_size,
        )
# Platform-dependent backend list for the sliding-window tests below.
# "FLEX_ATTENTION_SLOW" is a string tag rather than an enum member; the
# test harness presumably interprets it specially — see _test_backend_correctness.
if current_platform.is_rocm():
    # FLASH_ATTN is not supported on ROCm
    SLIDING_WINDOW_BACKENDS_TO_TEST = [
        AttentionBackendEnum.FLEX_ATTENTION,
        AttentionBackendEnum.TRITON_ATTN,
        "FLEX_ATTENTION_SLOW",
    ]
else:
    SLIDING_WINDOW_BACKENDS_TO_TEST = [
        AttentionBackendEnum.FLASH_ATTN,
        AttentionBackendEnum.FLEX_ATTENTION,
        AttentionBackendEnum.TRITON_ATTN,
        "FLEX_ATTENTION_SLOW",
    ]
@pytest.mark.parametrize(
    "batch_spec_name",
    [
        "small_decode",
        "small_prefill",
        "mixed_medium",
        "large_decode",
        "large_prefill",
        "mixed_large",
    ],
)
@pytest.mark.parametrize("model", ["microsoft/Phi-tiny-MoE-instruct"])
@pytest.mark.parametrize("tensor_parallel_size", [1, 2, 4])
def test_sliding_window_backend_correctness(
    batch_spec_name: str, model: str, tensor_parallel_size: int
):
    """Test backend's correctness with sliding window attention.

    The sliding window size is taken from the model's own config, and the
    reference mask combines causality with the window constraint.
    """

    def sliding_window_mask_mod(
        b: torch.Tensor,
        h: torch.Tensor,
        q_idx: torch.Tensor,
        kv_idx: torch.Tensor,
        *,
        context_len: int,
        sliding_window: int,
    ):
        # Causal: KV position must not be ahead of the absolute query position.
        causal_mask = q_idx + context_len >= kv_idx
        # Window: KV position must be within `sliding_window` of the query.
        window_mask = q_idx + context_len - kv_idx < sliding_window
        return causal_mask & window_mask

    batch_spec = BATCH_SPECS[batch_spec_name]
    model_config = ModelConfig(model=model, max_model_len=max(batch_spec.seq_lens))
    sliding_window = model_config.get_sliding_window()
    # Bind the model's window size so the mask has the standard 4-arg signature.
    sliding_window_mask_mod_fn = partial(
        sliding_window_mask_mod, sliding_window=sliding_window
    )
    # Backends that must run with the large (128) block size.
    LARGE_BLOCK_BACKENDS = (
        [AttentionBackendEnum.FLEX_ATTENTION]
        if is_torch_equal_or_newer("2.9.0.dev0")
        else []
    )
    SMALL_BLOCK_BACKENDS = [
        x for x in SLIDING_WINDOW_BACKENDS_TO_TEST if x not in LARGE_BLOCK_BACKENDS
    ]
    _test_backend_correctness(
        batch_spec,
        model,
        SMALL_BLOCK_BACKENDS,
        sliding_window_mask_mod_fn,
        tensor_parallel_size=tensor_parallel_size,
    )
    # Fast FlexAttention needs to run with block_size=128
    if LARGE_BLOCK_BACKENDS:
        _test_backend_correctness(
            batch_spec,
            model,
            LARGE_BLOCK_BACKENDS,
            sliding_window_mask_mod_fn,
            block_size=128,
            tensor_parallel_size=tensor_parallel_size,
        )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/attention/test_attention_splitting.py | tests/v1/attention/test_attention_splitting.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
from tests.v1.attention.test_attention_backends import BATCH_SPECS
from tests.v1.attention.utils import BatchSpec, create_common_attn_metadata
from vllm.v1.attention.backends.utils import (
UBatchSlice,
_make_metadata_with_slice,
slice_query_start_locs,
split_attn_metadata,
split_decodes_and_prefills,
)
from vllm.v1.worker.ubatch_utils import maybe_create_ubatch_slices
@pytest.fixture
def sample_query_start_loc():
    """Fixture: fixed cumulative query-start boundaries for slicing tests."""
    boundaries = [0, 5, 12, 20, 35, 50]
    return torch.tensor(boundaries)
def test_basic_slice_middle(sample_query_start_loc):
    """Slicing an interior request range re-bases the start locations to 0."""
    sliced = slice_query_start_locs(sample_query_start_loc, slice(1, 3))
    assert torch.equal(sliced, torch.tensor([0, 7, 15]))
def test_slice_from_beginning(sample_query_start_loc):
    """A slice anchored at index 0 keeps the original start locations."""
    sliced = slice_query_start_locs(sample_query_start_loc, slice(0, 2))
    assert torch.equal(sliced, torch.tensor([0, 5, 12]))
def test_slice_to_end(sample_query_start_loc):
    """Slicing through the final index re-bases the tail of the tensor."""
    sliced = slice_query_start_locs(sample_query_start_loc, slice(3, 5))
    assert torch.equal(sliced, torch.tensor([0, 15, 30]))
def test_single_element_slice(sample_query_start_loc):
    """A one-request slice yields a two-element [0, qlen] tensor."""
    sliced = slice_query_start_locs(sample_query_start_loc, slice(2, 3))
    assert torch.equal(sliced, torch.tensor([0, 8]))
def test_full_tensor_slice(sample_query_start_loc):
    """Slicing the full range is the identity transformation."""
    sliced = slice_query_start_locs(sample_query_start_loc, slice(0, 5))
    assert torch.equal(sliced, torch.tensor([0, 5, 12, 20, 35, 50]))
def test_slice_bounds_edge_cases(sample_query_start_loc):
    """A slice ending exactly at the last element is handled correctly."""
    sliced = slice_query_start_locs(sample_query_start_loc, slice(4, 5))
    assert torch.equal(sliced, torch.tensor([0, 15]))
@pytest.fixture
def small_decode_metadata():
    """Common attention metadata built from the "small_decode" batch spec."""
    spec = BATCH_SPECS["small_decode"]
    return create_common_attn_metadata(
        spec, block_size=16, device=torch.device("cpu")
    )
@pytest.fixture
def large_decode_metadata():
    """Create metadata for large decode batch"""
    batch_spec = BATCH_SPECS["large_decode"]
    device = torch.device("cpu")
    return create_common_attn_metadata(batch_spec, block_size=16, device=device)
@pytest.fixture
def mixed_small_metadata():
    """Common attention metadata built from the "mixed_small" batch spec."""
    spec = BATCH_SPECS["mixed_small"]
    return create_common_attn_metadata(
        spec, block_size=16, device=torch.device("cpu")
    )
# Tests for _make_metadata_with_slice
def test_make_metadata_with_slice_decode_batch(small_decode_metadata):
    """Slicing out the first decode request yields single-request metadata."""
    first_req_only = UBatchSlice(slice(0, 1), slice(0, 1))
    sliced = _make_metadata_with_slice(first_req_only, small_decode_metadata)
    # One request, one token, re-based start locations.
    assert sliced.num_reqs == 1
    assert sliced.num_actual_tokens == 1
    assert sliced.max_query_len == 1
    assert torch.equal(sliced.query_start_loc, torch.tensor([0, 1]))
    assert torch.equal(sliced.seq_lens, torch.tensor([32]))
def test_make_metadata_with_slice_mixed_batch(mixed_small_metadata):
    """Slicing requests 1-2 (tokens 1-6) of a mixed batch re-bases metadata."""
    middle = UBatchSlice(slice(1, 3), slice(1, 7))
    sliced = _make_metadata_with_slice(middle, mixed_small_metadata)
    assert sliced.num_reqs == 2
    assert sliced.num_actual_tokens == 6
    assert sliced.max_query_len == 5
    assert torch.equal(sliced.query_start_loc, torch.tensor([0, 1, 6]))
    assert torch.equal(sliced.seq_lens, torch.tensor([40, 48]))
def test_split_attn_metadata_decode_batch(large_decode_metadata):
    """Splitting a decode batch down the middle yields two equal halves."""
    n_reqs = large_decode_metadata.num_reqs
    half = n_reqs // 2
    splits = split_attn_metadata(
        [
            UBatchSlice(slice(0, half), slice(0, half)),
            UBatchSlice(slice(half, n_reqs), slice(half, n_reqs)),
        ],
        large_decode_metadata,
    )
    assert len(splits) == 2
    # Decode batch: one token per request, so both halves look identical.
    for part in splits:
        assert part.num_reqs == half
        assert part.num_actual_tokens == half
        assert torch.equal(part.seq_lens, torch.tensor([2048] * half))
def apply_split_decodes_and_prefills(
    query_lens: list[int],
    decode_threshold: int,
    require_uniform: bool,
    padded_num_tokens: int | None = None,
):
    """Helper function to apply split_decodes_and_prefills and return
    the results.

    Args:
        query_lens: Per-request query lengths for the synthetic batch.
        decode_threshold: Maximum query length still classified as a decode.
        require_uniform: Whether decodes must all share the same query length.
        padded_num_tokens: If set, overrides num_actual_tokens to emulate a
            padded batch.

    Returns:
        The (num_decodes, num_prefills, num_decode_tokens,
        num_prefill_tokens) 4-tuple produced by split_decodes_and_prefills,
        as unpacked by the tests below.
    """
    device = torch.device("cpu")
    # Arbitrary increasing seq_lens; only query_lens drive the split logic.
    seq_lens = [10 * (i + 1) for i in range(len(query_lens))]
    common_metadata = create_common_attn_metadata(
        BatchSpec(seq_lens=seq_lens, query_lens=query_lens),
        block_size=16,
        device=device,
    )
    if padded_num_tokens is not None:
        common_metadata.num_actual_tokens = padded_num_tokens
    return split_decodes_and_prefills(
        common_metadata,
        decode_threshold=decode_threshold,
        require_uniform=require_uniform,
    )
def test_split_decodes_and_prefills_nonuniform_all_ones():
    """Every length-1 query counts as a decode when the threshold is 1."""
    nd, npf, ndt, npt = apply_split_decodes_and_prefills([1, 1, 1], 1, False)
    assert (nd, npf, ndt, npt) == (3, 0, 3, 0)
def test_split_decodes_and_prefills_nonuniform_all_short_decodes():
    """Mixed short lengths all at or under the threshold are all decodes."""
    qlens = [1, 2, 1, 3, 2, 1, 2]
    nd, npf, ndt, npt = apply_split_decodes_and_prefills(qlens, 3, False)
    assert (nd, npf, ndt, npt) == (7, 0, sum(qlens), 0)
def test_split_decodes_and_prefills_nonuniform_all_prefills():
    """Query lengths above the threshold are all classified as prefills."""
    qlens = [4, 5, 6, 7]
    nd, npf, ndt, npt = apply_split_decodes_and_prefills(qlens, 3, False)
    assert (nd, npf, ndt, npt) == (0, 4, 0, sum(qlens))
def test_split_decodes_and_prefills_nonuniform_mixed_batch():
    """Requests split at the threshold: <=4 are decodes, >4 are prefills."""
    nd, npf, ndt, npt = apply_split_decodes_and_prefills(
        [2, 1, 3, 4, 5, 6, 7, 8], 4, False
    )
    # Decodes: 2+1+3+4 tokens; prefills: 5+6+7+8 tokens.
    assert (nd, npf, ndt, npt) == (4, 4, 10, 26)
def test_split_decodes_and_prefills_uniform_all_ones():
    """Uniform length-1 queries are all decodes even with require_uniform."""
    nd, npf, ndt, npt = apply_split_decodes_and_prefills([1, 1, 1], 1, True)
    assert (nd, npf, ndt, npt) == (3, 0, 3, 0)
def test_split_decodes_and_prefills_uniform_all_short_decodes():
    """With require_uniform, only the leading run of equal lengths decodes."""
    nd, npf, ndt, npt = apply_split_decodes_and_prefills(
        [2, 2, 1, 3, 2, 1, 2], 3, True
    )
    # Only the leading [2, 2] is uniform; the rest fall through to prefill.
    assert (nd, npf, ndt, npt) == (2, 5, 4, 1 + 3 + 2 + 1 + 2)
def test_split_decodes_and_prefills_uniform_all_prefills():
    """Above-threshold queries are prefills regardless of uniformity."""
    qlens = [4, 5, 6, 7]
    nd, npf, ndt, npt = apply_split_decodes_and_prefills(qlens, 3, True)
    assert (nd, npf, ndt, npt) == (0, 4, 0, sum(qlens))
def test_split_decodes_and_prefills_uniform_mixed_batch_all_uniform_decodes():
    """A uniform leading run of decodes is kept; the rest become prefills."""
    nd, npf, ndt, npt = apply_split_decodes_and_prefills(
        [2, 2, 2, 4, 5, 6, 7, 8], 4, True
    )
    # Leading [2, 2, 2] is uniform and under the threshold.
    assert (nd, npf, ndt, npt) == (3, 5, 6, 4 + 5 + 6 + 7 + 8)
def test_split_decodes_and_prefills_uniform_mixed_batch_non_uniform_decodes():
    """A non-uniform second request cuts the decode run after one request."""
    qlens = [2, 1, 2, 4, 5, 6, 7, 8]
    nd, npf, ndt, npt = apply_split_decodes_and_prefills(qlens, 4, True)
    # Only the very first request (length 2) stays a decode.
    assert (nd, npf, ndt, npt) == (1, 7, 2, sum(qlens) - 2)
def test_split_decodes_and_prefills_uniform_padded_batch_all_same():
    """Uniform batch of identical query lengths plus zero-length padded reqs."""
    # Every real request has query length 2 (<= threshold 3); the trailing 0
    # models a padded request, and num_actual_tokens is overridden to 8.
    padded_tokens = 8
    nd, npf, ndt, npt = apply_split_decodes_and_prefills(
        [2, 2, 2, 0], 3, True, padded_tokens
    )
    # With a uniform batch, every request (padding included) counts as decode.
    assert (nd, npf, ndt, npt) == (4, 0, padded_tokens, 0)
@pytest.mark.parametrize(
    "seq_lens,query_lens,split_point,expected_first_reqs,expected_second_reqs",
    [
        # Split in the middle of request 1
        ([32, 40], [8, 8], 12, 2, 1),
        # Split inside the first request
        ([32, 40], [8, 8], 4, 1, 2),
    ],
)
def test_prefill_split_across_ubatches(
    seq_lens, query_lens, split_point, expected_first_reqs, expected_second_reqs
):
    """Test splitting a prefill across ubatches.

    When the token-level split point lands inside a request, that request
    appears in both micro-batches: its tail query tokens go to the second
    ubatch while its seq_len bookkeeping must stay consistent in each half.
    """
    import numpy as np
    device = torch.device("cpu")
    batch_spec = BatchSpec(seq_lens=seq_lens, query_lens=query_lens)
    common = create_common_attn_metadata(batch_spec, block_size=16, device=device)
    num_scheduled_tokens = np.array(query_lens, dtype=np.int32)
    qsl_np = common.query_start_loc_cpu.numpy()
    num_tokens = common.num_actual_tokens
    ubatch_slices, _ = maybe_create_ubatch_slices(
        True,
        num_scheduled_tokens,
        num_tokens,
        batch_spec.batch_size,
        split_point=split_point,
        num_ubatches=2,
    )
    assert ubatch_slices is not None and len(ubatch_slices) == 2
    first_meta = _make_metadata_with_slice(ubatch_slices[0], common)
    second_meta = _make_metadata_with_slice(ubatch_slices[1], common)
    # Token counts match the split
    assert first_meta.num_actual_tokens == split_point
    assert second_meta.num_actual_tokens == num_tokens - split_point
    # Number of requests per ubatch
    assert first_meta.num_reqs == expected_first_reqs
    assert second_meta.num_reqs == expected_second_reqs
    # Identify which request is split and how many tokens are in the first chunk
    split_req_idx = int(np.searchsorted(qsl_np, split_point, side="right") - 1)
    tokens_in_first_chunk = split_point - int(qsl_np[split_req_idx])
    # Per-request query lengths are the diffs of the cumulative start locs.
    orig_q_lens = common.query_start_loc_cpu[1:] - common.query_start_loc_cpu[:-1]
    # Check query length continuity: first-chunk + second-chunk == original qlen
    # First ubatch last request query length
    qlen_first_last = int(
        first_meta.query_start_loc_cpu[-1] - first_meta.query_start_loc_cpu[-2]
    )
    # Second ubatch first request query length
    qlen_second_first = int(
        second_meta.query_start_loc_cpu[1] - second_meta.query_start_loc_cpu[0]
    )
    assert qlen_first_last == tokens_in_first_chunk
    assert qlen_first_last + qlen_second_first == int(orig_q_lens[split_req_idx])
    # Check seq_lens adjustments
    # Context lengths per original request
    context_lens = [s - q for s, q in zip(seq_lens, query_lens)]
    # First ubatch: last request's seq_len should be
    # context + tokens_in_first_chunk
    expected_seqlen = context_lens[split_req_idx] + tokens_in_first_chunk
    assert int(first_meta.seq_lens[-1]) == expected_seqlen
    # For full preceding requests in first ubatch, seq_lens should match
    # originals
    for i in range(first_meta.num_reqs - 1):
        assert int(first_meta.seq_lens[i]) == seq_lens[i]
    # Second ubatch: first request (continuation) seq_len should be full
    # original
    assert int(second_meta.seq_lens[0]) == seq_lens[split_req_idx]
    # Any following full requests in second ubatch should match originals
    for j in range(1, second_meta.num_reqs):
        # Map to original request index
        orig_idx = split_req_idx + j
        assert int(second_meta.seq_lens[j]) == seq_lens[orig_idx]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/attention/utils.py | tests/v1/attention/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Utility functions for attention-related v1 tests."""
from dataclasses import dataclass
import pytest
import torch
from vllm.attention.backends.abstract import AttentionImpl
from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.config import (
CacheConfig,
CompilationConfig,
DeviceConfig,
LoadConfig,
ModelConfig,
ParallelConfig,
SchedulerConfig,
VllmConfig,
)
from vllm.config.model import ModelDType
from vllm.v1.attention.backends.utils import (
AttentionMetadataBuilder,
CommonAttentionMetadata,
)
from vllm.v1.kv_cache_interface import FullAttentionSpec
@dataclass
class BatchSpec:
    """Specification for a batch configuration (workload shape only)."""

    # Total sequence length (context + new query tokens) per request.
    seq_lens: list[int]
    # Number of new query tokens per request; must align with seq_lens.
    query_lens: list[int]
    # Human-readable label for the spec.
    name: str = "unnamed"

    @property
    def batch_size(self):
        # One request per entry in seq_lens.
        return len(self.seq_lens)

    def __post_init__(self):
        # Each request needs both a sequence length and a query length.
        assert len(self.seq_lens) == len(self.query_lens)

    def compute_num_tokens(self):
        # Total number of new tokens scheduled across the batch.
        return sum(self.query_lens)
def create_common_attn_metadata(
    batch_spec: BatchSpec,
    block_size: int,
    device: torch.device,
    max_block_idx: int = 1000,
    arange_block_indices: bool = False,
) -> CommonAttentionMetadata:
    """Create CommonAttentionMetadata from a BatchSpec and ModelParams.

    Args:
        batch_spec: Workload shape (per-request seq_lens and query_lens).
        block_size: KV-cache block size in tokens.
        device: Device for the tensors placed on the accelerator.
        max_block_idx: Exclusive upper bound for random block/slot indices.
        arange_block_indices: If True, use deterministic sequential block
            indices and slot mapping instead of random ones.
    """
    # Create query start locations
    query_start_loc = torch.zeros(
        batch_spec.batch_size + 1, dtype=torch.int32, device=device
    )
    query_start_loc[1:] = torch.tensor(
        batch_spec.query_lens, dtype=torch.int32, device=device
    ).cumsum(0)
    query_start_loc_cpu = query_start_loc.cpu()
    num_tokens = batch_spec.compute_num_tokens()
    # Create sequence lengths
    seq_lens = torch.tensor(batch_spec.seq_lens, dtype=torch.int32, device=device)
    seq_lens_cpu = seq_lens.cpu()
    max_seq_len = int(seq_lens_cpu.max())
    # Create computed tokens (context length for each sequence)
    context_lens = [
        batch_spec.seq_lens[i] - batch_spec.query_lens[i]
        for i in range(batch_spec.batch_size)
    ]
    num_computed_tokens_cpu = torch.tensor(context_lens, dtype=torch.int32)
    # Create block table and slot mapping
    # Blocks needed to cover the longest sequence, rounded up.
    max_blocks = (max(batch_spec.seq_lens) + block_size - 1) // block_size
    if arange_block_indices:
        # Deterministic layout: each request gets its own contiguous run of
        # block indices and tokens map to sequential slots.
        num_blocks = batch_spec.batch_size * max_blocks
        block_table_tensor = torch.arange(
            num_blocks, dtype=torch.int32, device=device
        ).view(batch_spec.batch_size, max_blocks)
        slot_mapping = torch.arange(num_tokens, dtype=torch.int64, device=device).view(
            num_tokens
        )
    else:
        # Random layout: indices are arbitrary (tests that care about exact
        # placement should pass arange_block_indices=True).
        block_table_tensor = torch.randint(
            0,
            max_block_idx,
            (batch_spec.batch_size, max_blocks),
            dtype=torch.int32,
            device=device,
        )
        slot_mapping = torch.randint(
            0, max_block_idx, (num_tokens,), dtype=torch.int64, device=device
        )
    # Calculate max query length
    max_query_len = max(batch_spec.query_lens)
    return CommonAttentionMetadata(
        query_start_loc=query_start_loc,
        query_start_loc_cpu=query_start_loc_cpu,
        seq_lens=seq_lens,
        _seq_lens_cpu=seq_lens_cpu,
        _num_computed_tokens_cpu=num_computed_tokens_cpu,
        num_reqs=batch_spec.batch_size,
        num_actual_tokens=num_tokens,
        max_query_len=max_query_len,
        max_seq_len=max_seq_len,
        block_table_tensor=block_table_tensor,
        slot_mapping=slot_mapping,
        causal=True,
    )
def try_get_attention_backend(
    backend: AttentionBackendEnum,
) -> tuple[type[AttentionMetadataBuilder], type[AttentionImpl]]:
    """Try to get the attention backend class, skipping test if not found."""
    try:
        backend_class = backend.get_class()
    except ImportError as exc:
        pytest.skip(f"{backend.name} not available: {exc}")
        # pytest.skip raises, so this is unreachable; it satisfies type checkers.
        raise AssertionError("unreachable") from None
    return backend_class.get_builder_cls(), backend_class.get_impl_cls()
def create_standard_kv_cache_spec(vllm_config: VllmConfig) -> FullAttentionSpec:
    """Create a FullAttentionSpec from ModelParams only."""
    model_cfg = vllm_config.model_config
    kv_heads = model_cfg.get_num_kv_heads(vllm_config.parallel_config)
    return FullAttentionSpec(
        block_size=vllm_config.cache_config.block_size,
        num_kv_heads=kv_heads,
        head_size=model_cfg.get_head_size(),
        dtype=model_cfg.dtype,
        sliding_window=model_cfg.get_sliding_window(),
    )
def create_vllm_config(
    model_name: str = "meta-llama/Meta-Llama-3-8B",
    tensor_parallel_size: int = 1,
    max_model_len: int = 1024,
    dtype: ModelDType | torch.dtype = "auto",
    num_gpu_blocks: int = 1000,
    block_size: int = 16,
    max_num_seqs: int = 256,
    max_num_batched_tokens: int = 8192,
    enable_chunked_prefill: bool = True,
    add_mock_model_methods: bool = True,
    hf_config_override: dict | None = None,
) -> VllmConfig:
    """Create a VllmConfig for testing with reasonable defaults.

    Args:
        model_name: HF model id used for both model and tokenizer.
        tensor_parallel_size: TP degree for the parallel config.
        max_model_len: Maximum model sequence length.
        dtype: Model dtype ("auto" resolves per-model).
        num_gpu_blocks: KV-cache GPU block count to set directly.
        block_size: KV-cache block size in tokens.
        max_num_seqs: Scheduler cap on concurrent sequences.
        max_num_batched_tokens: Scheduler cap on tokens per step.
        enable_chunked_prefill: Whether chunked prefill is enabled.
        add_mock_model_methods: Patch per-layer accessor methods onto the
            model config so backends can run without a real model.
        hf_config_override: Optional dict merged into model_config.hf_config.
    """
    model_config = ModelConfig(
        model=model_name,
        tokenizer=model_name,
        trust_remote_code=False,
        dtype=dtype,
        seed=0,
        max_model_len=max_model_len,
    )
    cache_config = CacheConfig(
        block_size=block_size,
        cache_dtype="auto",
        swap_space=0,
    )
    # Set cache blocks for testing
    # (these may be set during initialization normally)
    cache_config.num_gpu_blocks = num_gpu_blocks
    cache_config.num_cpu_blocks = 0
    parallel_config = ParallelConfig(
        tensor_parallel_size=tensor_parallel_size,
    )
    scheduler_config = SchedulerConfig(
        max_num_seqs=max_num_seqs,
        max_num_batched_tokens=max_num_batched_tokens,
        enable_chunked_prefill=enable_chunked_prefill,
        max_model_len=model_config.max_model_len,
        is_encoder_decoder=model_config.is_encoder_decoder,
    )
    device_config = DeviceConfig()
    load_config = LoadConfig()
    compilation_config = CompilationConfig()
    if add_mock_model_methods:
        # Add mock methods to satisfy backends that need them
        # This is a workaround because tests don't build full, real models,
        # but some backends expect to query the model for layer-specific
        # parameters
        import types
        model_config.get_num_layers = types.MethodType(lambda self: 1, model_config)
        model_config.get_sliding_window_for_layer = types.MethodType(
            lambda self, i: None, model_config
        )
        model_config.get_logits_soft_cap_for_layer = types.MethodType(
            lambda self, i: 0.0, model_config
        )
        model_config.get_sm_scale_for_layer = types.MethodType(
            lambda self, i: 1.0 / model_config.get_head_size() ** 0.5, model_config
        )
    if hf_config_override:
        model_config.hf_config.update(hf_config_override)
    return VllmConfig(
        model_config=model_config,
        cache_config=cache_config,
        parallel_config=parallel_config,
        scheduler_config=scheduler_config,
        device_config=device_config,
        load_config=load_config,
        compilation_config=compilation_config,
    )
def create_dummy_kv_cache(
    block_size: int,
    num_kv_heads: int,
    head_size: int,
    dtype: torch.dtype,
    device: torch.device,
    num_blocks: int = 100,
) -> torch.Tensor:
    """Create a dummy KV cache tensor for testing.

    Layout: [num_blocks, 2 (K/V), block_size, num_kv_heads, head_size].
    """
    cache_shape = (num_blocks, 2, block_size, num_kv_heads, head_size)
    return torch.randn(cache_shape, dtype=dtype, device=device)
@dataclass
class BackendConfig:
    """Describes one attention-backend configuration to exercise in tests."""

    # Display name of the configuration.
    name: str
    # Options selecting/configuring the attention backend.
    attention_config: dict
    # Compilation config overrides (e.g. cudagraph_mode).
    comp_config: dict
    # (major, minor) compute capability this config is restricted to, if any.
    specific_gpu_arch: tuple | None = None
# Define all backend configurations of full cudagraph to be tested.
# Keys are short backend labels; entries with specific_gpu_arch only run on
# that compute capability (Hopper = (9, 0), Blackwell = (10, 0)).
full_cg_backend_configs = {
    # FA3 on Hopper
    "FA3": BackendConfig(
        name="FA3",
        attention_config={
            "backend": "FLASH_ATTN",
            "flash_attn_version": 3,
            "flash_attn_max_num_splits_for_cuda_graph": 16,
        },
        comp_config={
            "cudagraph_mode": "FULL",
        },
        specific_gpu_arch=(9, 0),
    ),
    # FlashMLA on Hopper
    "FlashMLA": BackendConfig(
        name="FlashMLA",
        attention_config={"backend": "FLASHMLA"},
        comp_config={
            "cudagraph_mode": "FULL_AND_PIECEWISE",
        },
        specific_gpu_arch=(9, 0),
    ),
    # Cutlass MLA on Blackwell
    "CutlassMLA": BackendConfig(
        name="CutlassMLA",
        attention_config={"backend": "CUTLASS_MLA"},
        comp_config={
            "cudagraph_mode": "FULL_AND_PIECEWISE",
        },
        specific_gpu_arch=(10, 0),
    ),
    # FlashInfer MLA on Blackwell
    "FlashInferMLA": BackendConfig(
        name="FlashInferMLA",
        attention_config={"backend": "FLASHINFER_MLA"},
        comp_config={
            "cudagraph_mode": "FULL_AND_PIECEWISE",
        },
        specific_gpu_arch=(10, 0),
    ),
    # FlashAttention MLA on Hopper
    "FlashAttentionMLA": BackendConfig(
        name="FlashAttentionMLA",
        attention_config={
            "backend": "FLASH_ATTN_MLA",
            "flash_attn_max_num_splits_for_cuda_graph": 16,
        },
        comp_config={
            "cudagraph_mode": "FULL_DECODE_ONLY",
        },
        specific_gpu_arch=(9, 0),
    ),
    # FA2
    "FA2": BackendConfig(
        name="FA2",
        attention_config={
            "backend": "FLASH_ATTN",
            "flash_attn_version": 2,
            "flash_attn_max_num_splits_for_cuda_graph": 16,
        },
        comp_config={
            "cudagraph_mode": "FULL_AND_PIECEWISE",
        },
    ),
    # Triton Attention
    "TritonAttn": BackendConfig(
        name="TritonAttn",
        attention_config={"backend": "TRITON_ATTN"},
        comp_config={
            "cudagraph_mode": "FULL_AND_PIECEWISE",
        },
    ),
    # FlashInfer
    "FlashInfer": BackendConfig(
        name="FlashInfer",
        attention_config={"backend": "FLASHINFER"},
        comp_config={
            "cudagraph_mode": "FULL_AND_PIECEWISE",
        },
    ),
    # ROCm-specific attention backend
    "RocmAttn": BackendConfig(
        name="RocmAttn",
        attention_config={
            "backend": "ROCM_ATTN",
            "use_prefill_decode_attention": True,
        },
        comp_config={
            "cudagraph_mode": "FULL",
        },
    ),
}
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/attention/test_sparse_mla_backends.py | tests/v1/attention/test_sparse_mla_backends.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Unit tests for the FlashMLA sparse backend utilities."""
import math
from types import MethodType, SimpleNamespace
import numpy as np
import pytest
import torch
from tests.v1.attention.test_mla_backends import (
BATCH_SPECS,
BatchSpec,
MockAttentionLayer,
create_and_prepopulate_kv_cache,
)
from tests.v1.attention.utils import (
create_common_attn_metadata,
create_standard_kv_cache_spec,
create_vllm_config,
)
from vllm import _custom_ops as ops
from vllm.attention.ops import flashmla
from vllm.config import set_current_vllm_config
from vllm.model_executor.layers.linear import ColumnParallelLinear
from vllm.platforms import current_platform
from vllm.utils.math_utils import cdiv
from vllm.v1.attention.backends.mla.flashmla_sparse import (
FlashMLASparseBackend,
triton_convert_req_index_to_global_index,
)
from vllm.v1.attention.backends.utils import split_prefill_chunks
# Subset of the shared MLA batch specs exercised by the sparse backend tests.
SPARSE_BACKEND_BATCH_SPECS = {
    name: BATCH_SPECS[name]
    for name in [
        "mixed_small",
        "mixed_medium",
        "small_prefill",
        "medium_prefill",
        "single_prefill",
    ]
}
# Larger prefill shapes not present in the shared BATCH_SPECS table.
SPARSE_BACKEND_BATCH_SPECS["large_q_prefill"] = BatchSpec(
    seq_lens=[1024] * 2, query_lens=[256] * 2
)
# Pure prefill: query length equals sequence length (no prior context).
SPARSE_BACKEND_BATCH_SPECS["large_q_pure_prefill"] = BatchSpec(
    seq_lens=[256] * 2, query_lens=[256] * 2
)
def _dequantize_fp8_ds_mla_entry(
    cache_slice: torch.Tensor, kv_lora_rank: int, rope_dim: int, dtype: torch.dtype
) -> tuple[torch.Tensor, torch.Tensor]:
    """Dequantize a single fp8_ds_mla cache entry back to latent + rope.

    Args:
        cache_slice: One uint8 cache entry (see entry layout below).
        kv_lora_rank: Number of latent channels (FP8, one byte each).
        rope_dim: Number of rope channels stored after the scales.
        dtype: Output dtype for both returned tensors.

    Returns:
        (latent, rope_vals) dequantized to `dtype`.
    """
    # The first kv_lora_rank bytes store FP8 latent values with one scale per
    # 128 element tile written as float32 right after the latent payload.
    # Viewing as float32 puts the scales at elements
    # [kv_lora_rank/4, kv_lora_rank/4 + 4) (4 bytes per float32).
    scales = cache_slice.view(torch.float32)[kv_lora_rank // 4 : kv_lora_rank // 4 + 4]
    latent = torch.empty(kv_lora_rank, dtype=torch.float16, device=cache_slice.device)
    # Dequantize each 128-element tile with its own scale.
    for tile_idx in range(4):
        tile_start = tile_idx * 128
        tile_end = tile_start + 128
        ops.convert_fp8(
            latent[tile_start:tile_end],
            cache_slice[tile_start:tile_end],
            float(scales[tile_idx].item()),
            kv_dtype="fp8",
        )
    latent = latent.to(dtype)
    # Rope offset is measured in `dtype` elements; assumes a 16-bit dtype
    # (kv_lora_rank bytes of latent -> kv_lora_rank/2 elements, plus 16 bytes
    # of scales -> 8 elements) — matches the bf16 usage in these tests.
    rope_offset = kv_lora_rank // 2 + 8
    rope_vals = cache_slice.view(dtype)[rope_offset : rope_offset + rope_dim]
    # Clone to detach the rope values from the cache buffer's storage.
    return latent, rope_vals.clone()
def _quantize_dequantize_fp8_ds_mla(
    kv_c: torch.Tensor, k_pe: torch.Tensor, block_size: int, scale: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
    """Round-trip kv_c/k_pe through the fp8_ds_mla cache layout.

    Writes the inputs into a temporary fp8_ds_mla paged cache via
    ops.concat_and_cache_mla, then reads every token back out with
    _dequantize_fp8_ds_mla_entry.  Used to obtain the quantization-accurate
    reference values for the sparse MLA tests.
    """
    if kv_c.numel() == 0:
        # Nothing to quantize; return copies so callers can mutate safely.
        return kv_c.clone(), k_pe.clone()
    kv_lora_rank = kv_c.shape[-1]
    rope_dim = k_pe.shape[-1]
    num_tokens = kv_c.shape[0]
    num_blocks = max(1, math.ceil(num_tokens / block_size))
    # Entry layout: kv_lora_rank FP8 bytes + 4 float32 scales + rope in 16-bit.
    entry_size = kv_lora_rank + 4 * 4 + 2 * rope_dim
    tmp_cache = torch.zeros(
        num_blocks, block_size, entry_size, dtype=torch.uint8, device=kv_c.device
    )
    # Identity slot mapping: token i lands in slot i.
    slot_mapping = torch.arange(num_tokens, dtype=torch.long, device=kv_c.device)
    ops.concat_and_cache_mla(
        kv_c, k_pe, tmp_cache, slot_mapping, kv_cache_dtype="fp8_ds_mla", scale=scale
    )
    dequant_kv_c = torch.empty_like(kv_c)
    dequant_k_pe = torch.empty_like(k_pe)
    # Read each token back out of its (block, offset) cache slot.
    for token_idx in range(num_tokens):
        slot = slot_mapping[token_idx].item()
        block_idx = slot // block_size
        block_offset = slot % block_size
        cache_slice = tmp_cache[block_idx, block_offset]
        latent, rope_vals = _dequantize_fp8_ds_mla_entry(
            cache_slice, kv_lora_rank, rope_dim, kv_c.dtype
        )
        dequant_kv_c[token_idx] = latent
        dequant_k_pe[token_idx] = rope_vals
    return dequant_kv_c, dequant_k_pe
@pytest.mark.parametrize("batch_name", list(SPARSE_BACKEND_BATCH_SPECS.keys()))
@pytest.mark.parametrize("kv_cache_dtype", ["fp8_ds_mla", "auto"])
@pytest.mark.parametrize("tensor_parallel_size", [1, 2, 4])
# Guard is_available() first so collection does not raise on CUDA-less
# hosts (get_device_capability() errors without CUDA). The condition tests
# compute capability (9, 0), i.e. Hopper, not the CUDA toolkit version.
@pytest.mark.skipif(
    not torch.cuda.is_available() or torch.cuda.get_device_capability() < (9, 0),
    reason="FlashMLASparseBackend requires compute capability 9.0 or higher",
)
def test_sparse_backend_decode_correctness(
    dist_init, batch_name, kv_cache_dtype, tensor_parallel_size, workspace_init
):
    """Compare FlashMLASparseBackend output against an SDPA reference.

    Builds a synthetic decode workload, optionally round-trips the latent
    KV through the fp8_ds_mla cache layout so the reference carries the
    same quantization error, computes a dense MQA reference with
    torch.sdpa, then runs the sparse backend with top-k indices covering
    all causal positions and checks the two outputs agree.
    """
    if current_platform.is_rocm():
        pytest.skip("ROCm does not support fp8_ds_mla data type for kv cache.")
    if not torch.cuda.is_available():
        pytest.skip("CUDA is required for sparse MLA decode test")
    device = torch.device("cuda")
    dtype = torch.bfloat16
    batch_spec = SPARSE_BACKEND_BATCH_SPECS[batch_name]
    # Model hyper-parameters (kept intentionally small for the unit test)
    num_heads = 128
    kv_lora_rank = 512
    qk_nope_head_dim = 128
    qk_rope_head_dim = 64
    v_head_dim = 128
    head_size = kv_lora_rank + qk_rope_head_dim
    topk_tokens = 2048
    max_seqlen = max(batch_spec.seq_lens)
    total_cache_tokens = sum(batch_spec.seq_lens)
    block_size = 64
    # Note: We use TP=1 to avoid multi-GPU requirements in CI.
    # The test simulates head partitioning via mocked methods below.
    vllm_config = create_vllm_config(
        model_name="deepseek-ai/DeepSeek-V2-Lite-Chat",
        tensor_parallel_size=1,
        max_model_len=max_seqlen,
        num_gpu_blocks=max(2048, cdiv(total_cache_tokens, block_size) + 1),
        block_size=block_size,
        hf_config_override={
            "index_topk": topk_tokens,
            "attn_module_list_cfg": [{"topk_tokens": topk_tokens}],
        },
    )
    model_config = vllm_config.model_config
    # Replace the HF text config with a minimal namespace carrying only the
    # MLA dimensions the backend reads.
    model_config.hf_text_config = SimpleNamespace(
        q_lora_rank=None,
        kv_lora_rank=kv_lora_rank,
        qk_nope_head_dim=qk_nope_head_dim,
        qk_rope_head_dim=qk_rope_head_dim,
        v_head_dim=v_head_dim,
        model_type="deepseek_v2",
    )
    model_config.dtype = dtype
    # Simulate TP head partitioning without multiple GPUs by overriding the
    # head-count accessors on this config instance.
    model_config.get_num_attention_heads = MethodType(
        lambda self, parallel_config: max(1, num_heads // tensor_parallel_size),
        model_config,
    )
    model_config.get_num_kv_heads = MethodType(
        lambda self, parallel_config: 1, model_config
    )
    model_config.get_head_size = MethodType(lambda self: head_size, model_config)
    model_config.get_sliding_window = MethodType(lambda self: None, model_config)
    kv_cache_spec = create_standard_kv_cache_spec(vllm_config)
    torch.manual_seed(0)
    scale = 1.0 / math.sqrt(head_size)
    # Shared MLA projection weights to keep reference and backend in sync
    W_UK = torch.randn(
        kv_lora_rank, num_heads, qk_nope_head_dim, dtype=dtype, device=device
    )
    W_UV = torch.randn(kv_lora_rank, num_heads, v_head_dim, dtype=dtype, device=device)
    # Build synthetic decode-only workload
    seq_lens = batch_spec.seq_lens
    query_lens = batch_spec.query_lens
    all_q_vllm, all_kv_c_vllm, all_k_pe_vllm = [], [], []
    kv_c_contexts, k_pe_contexts = [], []
    reference_outputs = []
    kv_cache_scale = torch.tensor(1.0, dtype=torch.float32, device=device)
    for i in range(batch_spec.batch_size):
        s_len = seq_lens[i]
        q_len = query_lens[i]
        ctx_len = s_len - q_len
        q_c = torch.rand(
            q_len,
            num_heads,
            qk_nope_head_dim + qk_rope_head_dim,
            dtype=dtype,
            device=device,
        )
        kv_c_full = torch.rand(s_len, kv_lora_rank, dtype=dtype, device=device)
        k_pe_full = torch.rand(s_len, 1, qk_rope_head_dim, dtype=dtype, device=device)
        # Round-trip through the fp8 layout so the reference sees the same
        # quantization error as the backend's cache.
        kv_c_full, k_pe_full = _quantize_dequantize_fp8_ds_mla(
            kv_c_full,
            k_pe_full.squeeze(1),
            block_size=vllm_config.cache_config.block_size,
            scale=kv_cache_scale,
        )
        # MQA formulation: project the nope part of q into latent space.
        q_nope, q_pe = q_c.split([qk_nope_head_dim, qk_rope_head_dim], dim=-1)
        ql_nope = torch.einsum("qnh,lnh->qnl", q_nope, W_UK)
        q_mqa = torch.cat([ql_nope, q_pe], dim=-1)
        k_mqa = torch.cat([kv_c_full, k_pe_full], dim=-1)
        k_mqa = k_mqa.unsqueeze(1).expand(-1, num_heads, -1)
        v_mqa = kv_c_full.unsqueeze(1).expand(-1, num_heads, -1)
        # Full visibility over the context, causal within the query block.
        attn_mask = torch.ones(q_len, s_len, dtype=torch.bool, device=device)
        causal_mask = torch.tril(torch.ones(q_len, q_len, device=device))
        attn_mask[:, ctx_len:] = causal_mask
        q_sdpa_in = q_mqa.unsqueeze(0).transpose(1, 2)
        k_sdpa_in = k_mqa.unsqueeze(0).transpose(1, 2)
        v_sdpa_in = v_mqa.unsqueeze(0).transpose(1, 2)
        sdpa_out = torch.nn.functional.scaled_dot_product_attention(
            q_sdpa_in, k_sdpa_in, v_sdpa_in, attn_mask=attn_mask, scale=scale
        )
        sdpa_out = sdpa_out.transpose(1, 2).squeeze(0)
        # Project latent attention output back into value space.
        sdpa_out = torch.einsum("qnl,lnv->qnv", sdpa_out, W_UV)
        reference_outputs.append(sdpa_out.flatten(start_dim=-2))
        all_q_vllm.append(q_c)
        all_kv_c_vllm.append(kv_c_full[ctx_len:])
        all_k_pe_vllm.append(k_pe_full[ctx_len:])
        kv_c_contexts.append(kv_c_full[: ctx_len + 1])
        k_pe_contexts.append(k_pe_full[: ctx_len + 1])
    query_vllm = torch.cat(all_q_vllm, dim=0)
    kv_c_vllm = torch.cat(all_kv_c_vllm, dim=0)
    k_pe_vllm = torch.cat(all_k_pe_vllm, dim=0)
    sdpa_reference = torch.cat(reference_outputs, dim=0)
    vllm_config.cache_config.cache_dtype = kv_cache_dtype
    vllm_config.model_config.hf_config.index_topk = topk_tokens
    common_attn_metadata = create_common_attn_metadata(
        batch_spec,
        vllm_config.cache_config.block_size,
        device,
        arange_block_indices=True,
    )
    kv_cache = create_and_prepopulate_kv_cache(
        kv_c_contexts=kv_c_contexts,
        k_pe_contexts=k_pe_contexts,
        block_size=vllm_config.cache_config.block_size,
        head_size=head_size,
        dtype=dtype,
        device=device,
        num_blocks=vllm_config.cache_config.num_gpu_blocks,
        common_attn_metadata=common_attn_metadata,
        randomize_blocks=False,
        kv_cache_dtype=vllm_config.cache_config.cache_dtype,
        scale=kv_cache_scale,
    )
    builder_cls = FlashMLASparseBackend.get_builder_cls()
    builder = builder_cls(kv_cache_spec, ["placeholder"], vllm_config, device)
    metadata = builder.build(
        common_prefix_len=0, common_attn_metadata=common_attn_metadata
    )
    # Recover each token's absolute position in its sequence from the
    # query_start_loc / seq_lens metadata.
    starts = np.asarray(common_attn_metadata.query_start_loc_cpu, dtype=np.int32)
    seg_lengths = np.diff(starts)
    positions = np.arange(starts[-1], dtype=np.int32) - np.repeat(
        starts[:-1], seg_lengths
    )
    seq_lengths = np.asarray(common_attn_metadata.seq_lens_cpu, dtype=np.int32)
    prefix_lengths = seq_lengths - seg_lengths
    positions += np.repeat(prefix_lengths, seg_lengths)
    pos_gpu = torch.as_tensor(positions, device=device, dtype=torch.int32)
    topk = metadata.topk_tokens
    # Debug top-k indices cover every causal position; non-causal slots
    # are masked with -1.
    debug_indices = torch.arange(topk, device=device, dtype=torch.int32).unsqueeze(0)
    token_positions = pos_gpu.unsqueeze(1)
    causal_mask = debug_indices <= token_positions
    debug_indices = torch.where(
        causal_mask, debug_indices, torch.full_like(debug_indices, -1)
    )
    # FlashMLASparseImpl now reads top-k indices from the indexer-provided
    # buffer, so emulate that contract with a simple namespace mock.
    debug_indices = debug_indices.expand(metadata.num_actual_tokens, -1).clone()
    mock_indexer = SimpleNamespace(topk_indices_buffer=debug_indices)
    ok, reason = flashmla.is_flashmla_sparse_supported()
    if not ok:
        pytest.skip(reason)
    # Pack W_UK/W_UV into the flat layout kv_b_proj expects.
    kv_b_proj_weight = torch.cat([W_UK, W_UV], dim=-1)
    kv_b_proj_weight = kv_b_proj_weight.view(
        kv_lora_rank, num_heads * (qk_nope_head_dim + v_head_dim)
    )
    mock_kv_b_proj = ColumnParallelLinear(
        input_size=kv_lora_rank,
        output_size=num_heads * (qk_nope_head_dim + v_head_dim),
        bias=False,
    ).to(device=device, dtype=dtype)
    mock_kv_b_proj.weight = torch.nn.Parameter(kv_b_proj_weight.T.contiguous())
    impl_cls = FlashMLASparseBackend.get_impl_cls()
    with set_current_vllm_config(vllm_config):
        impl = impl_cls(
            num_heads=num_heads,
            head_size=head_size,
            scale=scale,
            num_kv_heads=1,
            alibi_slopes=None,
            sliding_window=None,
            kv_cache_dtype=vllm_config.cache_config.cache_dtype,
            logits_soft_cap=None,
            attn_type="decoder",
            kv_sharing_target_layer_name=None,
            q_lora_rank=None,
            kv_lora_rank=kv_lora_rank,
            qk_nope_head_dim=qk_nope_head_dim,
            qk_rope_head_dim=qk_rope_head_dim,
            qk_head_dim=qk_nope_head_dim + qk_rope_head_dim,
            v_head_dim=v_head_dim,
            kv_b_proj=mock_kv_b_proj,
            indexer=mock_indexer,
        )
        impl.process_weights_after_loading(dtype)
    layer = MockAttentionLayer(device)
    out_buffer = torch.empty(
        metadata.num_actual_tokens, num_heads * v_head_dim, dtype=dtype, device=device
    )
    with torch.inference_mode():
        backend_output = impl.forward(
            layer,
            query_vllm,
            kv_c_vllm,
            k_pe_vllm,
            kv_cache,
            metadata,
            output=out_buffer,
        )
    assert backend_output.shape == sdpa_reference.shape
    assert backend_output.dtype == sdpa_reference.dtype
    assert torch.isfinite(backend_output).all()
    # Loose tolerances: fp8 round-trip plus bf16 accumulation differences.
    torch.testing.assert_close(backend_output, sdpa_reference, rtol=0.5, atol=0.5)
def _triton_convert_reference_impl(
req_ids: torch.Tensor,
block_table: torch.Tensor,
token_indices: torch.Tensor,
block_size: int,
num_topk_tokens: int,
HAS_PREFILL_WORKSPACE: bool = False,
prefill_workspace_request_ids: torch.Tensor | None = None,
prefill_workspace_starts: torch.Tensor | None = None,
) -> torch.Tensor:
"""Reference implementation for triton_convert_req_index_to_global_index."""
num_tokens = req_ids.shape[0]
max_blocks_per_req = block_table.shape[1]
result = torch.empty(
num_tokens, num_topk_tokens, dtype=torch.int32, device=req_ids.device
)
for token_id in range(num_tokens):
req_id = req_ids[token_id].item()
# Determine if this token uses workspace or paged cache
use_prefill_workspace = False
workspace_start = 0
if HAS_PREFILL_WORKSPACE and prefill_workspace_request_ids is not None:
assert prefill_workspace_starts is not None
prefill_req_id = prefill_workspace_request_ids[token_id].item()
if prefill_req_id >= 0:
use_prefill_workspace = True
workspace_start = prefill_workspace_starts[prefill_req_id].item()
for idx_id in range(num_topk_tokens):
token_idx = token_indices[token_id, idx_id].item()
if token_idx == -1:
result[token_id, idx_id] = -1
elif use_prefill_workspace:
# Prefill + using prefill workspace: map to workspace offset
result[token_id, idx_id] = workspace_start + token_idx
else:
# Decode: map to paged cache
block_id = token_idx // block_size
if block_id >= max_blocks_per_req:
result[token_id, idx_id] = -1
else:
block_num = block_table[req_id, block_id].item()
offset = token_idx % block_size
result[token_id, idx_id] = block_num * block_size + offset
return result
@pytest.mark.parametrize("block_size", [16, 64, 128])
@pytest.mark.parametrize("num_topk_tokens", [128, 256, 512])
# Guard is_available() first so collection does not raise on CUDA-less
# hosts; the condition tests compute capability (9, 0), not CUDA version.
@pytest.mark.skipif(
    not torch.cuda.is_available() or torch.cuda.get_device_capability() < (9, 0),
    reason="FlashMLASparseBackend requires compute capability 9.0 or higher",
)
def test_triton_convert_req_index_to_global_index_decode_only(
    block_size, num_topk_tokens
):
    """Decode-only path: the Triton kernel must match the Python reference."""
    device = torch.device("cuda")
    num_tokens = 8
    num_requests = 4
    max_blocks_per_req = 10
    # Random request assignment and a random block table.
    req_id = torch.randint(
        0, num_requests, (num_tokens,), dtype=torch.int32, device=device
    )
    block_table = torch.randint(
        0, 100, (num_requests, max_blocks_per_req), dtype=torch.int32, device=device
    )
    token_indices = torch.randint(
        0,
        block_size * max_blocks_per_req,
        (num_tokens, num_topk_tokens),
        dtype=torch.int32,
        device=device,
    )
    # Inject masked (-1) and out-of-range entries so both the masking and
    # the bounds check in the kernel get exercised.
    out_of_range = max_blocks_per_req * block_size
    token_indices[0, :10] = -1
    token_indices[3, 50:60] = -1
    token_indices[2, 100:110] = out_of_range
    token_indices[6, 150:160] = out_of_range
    kernel_out = triton_convert_req_index_to_global_index(
        req_id,
        block_table,
        token_indices,
        BLOCK_SIZE=block_size,
        NUM_TOPK_TOKENS=num_topk_tokens,
    )
    expected = _triton_convert_reference_impl(
        req_id,
        block_table,
        token_indices,
        block_size,
        num_topk_tokens,
    )
    # Exact integer comparison: both sides compute the same index math.
    torch.testing.assert_close(kernel_out, expected, rtol=0, atol=0)
@pytest.mark.parametrize("block_size", [16])
# Guard is_available() first so collection does not raise on CUDA-less
# hosts; the condition tests compute capability (9, 0), not CUDA version.
@pytest.mark.skipif(
    not torch.cuda.is_available() or torch.cuda.get_device_capability() < (9, 0),
    reason="FlashMLASparseBackend requires compute capability 9.0 or higher",
)
def test_triton_convert_req_index_to_global_index_with_prefill_workspace(block_size):
    """Mixed batch: decode tokens map through the paged cache while
    prefill tokens are redirected into the contiguous prefill workspace."""
    device = torch.device("cuda")
    num_requests = 4
    max_blocks_per_req = 8
    num_topk_tokens = 128
    # 12 tokens: reqs 0/1 are decodes, reqs 2/3 are prefills (3 tokens each).
    req_id = torch.tensor(
        [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], dtype=torch.int32, device=device
    )
    # -1 marks "not a workspace token"; prefill reqs 2/3 map to workspace
    # request ids 0/1 respectively.
    prefill_workspace_request_ids = torch.tensor(
        [-1, -1, -1, -1, -1, -1, 0, 0, 0, 1, 1, 1], dtype=torch.int32, device=device
    )
    # Workspace base offsets for the two prefill requests: 0 and 100.
    prefill_workspace_starts = torch.tensor([0, 100], dtype=torch.int32, device=device)
    block_table = torch.randint(
        0, 50, (num_requests, max_blocks_per_req), dtype=torch.int32, device=device
    )
    token_indices = torch.randint(
        0,
        block_size * max_blocks_per_req,
        (req_id.shape[0], num_topk_tokens),
        dtype=torch.int32,
        device=device,
    )
    # Inject masked (-1) and out-of-range entries to exercise both paths.
    out_of_range = max_blocks_per_req * block_size
    token_indices[0, :10] = -1
    token_indices[3, 50:60] = -1
    token_indices[2, 100:110] = out_of_range
    token_indices[6, 150:160] = out_of_range
    kernel_out = triton_convert_req_index_to_global_index(
        req_id,
        block_table,
        token_indices,
        BLOCK_SIZE=block_size,
        NUM_TOPK_TOKENS=num_topk_tokens,
        HAS_PREFILL_WORKSPACE=True,
        prefill_workspace_request_ids=prefill_workspace_request_ids,
        prefill_workspace_starts=prefill_workspace_starts,
    )
    expected = _triton_convert_reference_impl(
        req_id,
        block_table,
        token_indices,
        block_size,
        num_topk_tokens,
        HAS_PREFILL_WORKSPACE=True,
        prefill_workspace_request_ids=prefill_workspace_request_ids,
        prefill_workspace_starts=prefill_workspace_starts,
    )
    # Exact integer comparison: both sides compute the same index math.
    torch.testing.assert_close(kernel_out, expected, rtol=0, atol=0)
@pytest.mark.parametrize(
    "seq_lens,max_buf,expected",
    [
        # Basic split: totals per chunk ≤ max_buf
        (torch.tensor([2, 3, 4, 2]), 5, [(0, 2), (2, 3), (3, 4)]),
        # Exact fits should split between items when adding the next would overflow
        (torch.tensor([5, 5, 5]), 5, [(0, 1), (1, 2), (2, 3)]),
        # All requests fit in a single chunk
        (torch.tensor([1, 1, 1]), 10, [(0, 3)]),
        # Large buffer
        (torch.tensor([4, 4, 4]), 100, [(0, 3)]),
    ],
)
def test_split_prefill_chunks(seq_lens, max_buf, expected):
    """split_prefill_chunks packs consecutive requests into (start, end)
    index chunks whose sequence-length totals fit within max_buf."""
    out = split_prefill_chunks(seq_lens, max_buf)
    assert out == expected
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/attention/test_batch_reordering.py | tests/v1/attention/test_batch_reordering.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
import numpy as np
import pytest
from vllm.v1.attention.backends.utils import reorder_batch_to_split_decodes_and_prefills
class MockInputBatch:
    """Minimal stand-in for the runner's InputBatch.

    Carries only the per-request ids and computed-token counts the reorder
    helper touches, plus the swap_states hook it calls.
    """

    def __init__(self, req_ids, num_computed_tokens_cpu):
        self.req_ids = req_ids
        self.num_computed_tokens_cpu = num_computed_tokens_cpu

    def swap_states(self, i, j):
        """Exchange all per-request state between batch slots i and j."""
        ids = self.req_ids
        ids[i], ids[j] = ids[j], ids[i]
        counts = self.num_computed_tokens_cpu
        counts[i], counts[j] = counts[j], counts[i]
class MockSchedulerOutput:
    """Minimal stand-in for SchedulerOutput: only carries the per-request
    scheduled-token mapping that the reorder helper reads."""

    def __init__(self, num_scheduled_tokens):
        # Mapping of request id -> number of scheduled tokens.
        self.num_scheduled_tokens = num_scheduled_tokens
@dataclass
class ReorderTestCase:
    """One reorder scenario: the input batch layout plus expected outcome."""

    requests: list[tuple[int, int]]  # (num_scheduled_tokens, num_computed_tokens)
    # Expected permutation of the original request indices after reordering.
    expected_order: list[int]
    # Whether the helper is expected to report that it modified the batch.
    expected_modified: bool
    # Max query length still treated as a decode.
    decode_threshold: int = 1
# Test cases for batch reordering
# Each case lists the batch as (num_scheduled_tokens, num_computed_tokens)
# per request, the expected request order after reordering (decodes first,
# prefills after), and whether the helper should report a modification.
REORDER_TEST_CASES = {
    "all_decodes": ReorderTestCase(
        requests=[(1, 10), (1, 20), (1, 30)],
        expected_order=[0, 1, 2],
        expected_modified=False,
    ),
    "all_prefills": ReorderTestCase(
        requests=[(100, 100), (200, 200), (300, 300)],
        expected_order=[0, 1, 2],
        expected_modified=False,
    ),
    "mixed_interleaved": ReorderTestCase(
        requests=[(100, 100), (1, 10), (200, 200), (1, 20)],
        expected_order=[3, 1, 2, 0],  # Only swap 0↔3, keep 1 and 2 in place
        expected_modified=True,
    ),
    "already_ordered": ReorderTestCase(
        requests=[(1, 10), (1, 20), (100, 100), (200, 0)],
        expected_order=[0, 1, 2, 3],
        expected_modified=False,
    ),
    "single_request": ReorderTestCase(
        requests=[(1, 10)],
        expected_order=[0],
        expected_modified=False,
    ),
    "higher_threshold": ReorderTestCase(
        requests=[(2, 10), (3, 20), (5, 30), (6, 40)],
        expected_order=[0, 1, 2, 3],
        expected_modified=False,
        decode_threshold=4,
    ),
    "decodes_at_end": ReorderTestCase(
        requests=[(100, 100), (200, 200), (1, 10), (1, 20)],
        expected_order=[2, 3, 0, 1],
        expected_modified=True,
    ),
    "decode_extend_prefill": ReorderTestCase(
        requests=[(100, 0), (10, 50), (1, 10)],
        expected_order=[2, 1, 0],
        expected_modified=True,
    ),
    "extend_prefill_only": ReorderTestCase(
        requests=[(100, 0), (10, 50), (200, 0), (20, 75)],
        expected_order=[3, 1, 2, 0],  # Only swap 0↔3, keep 1 and 2 in place
        expected_modified=True,
    ),
    "complicated_mixed_interleaved": ReorderTestCase(
        requests=[
            (1, 20),
            (1, 50),
            (374, 0),
            (300, 20),
            (1, 20),
            (256, 0),
            (1, 5),
            (27, 0),
            (1, 4),
        ],
        expected_order=[0, 1, 6, 8, 4, 3, 2, 7, 5],
        expected_modified=True,
    ),
}
@pytest.mark.parametrize(
    "test_case", REORDER_TEST_CASES.values(), ids=REORDER_TEST_CASES.keys()
)
def test_reorder_batch_to_split_decodes_and_prefills(test_case: ReorderTestCase):
    """Run the reorder helper on a mock batch and verify the final layout."""
    scheduled = [r[0] for r in test_case.requests]
    computed = [r[1] for r in test_case.requests]
    ids = [f"r{i}" for i in range(len(test_case.requests))]
    input_batch = MockInputBatch(ids, np.array(computed, dtype=np.int32))
    scheduler_output = MockSchedulerOutput(dict(zip(ids, scheduled)))
    modified = reorder_batch_to_split_decodes_and_prefills(
        input_batch, scheduler_output, decode_threshold=test_case.decode_threshold
    )
    expected_req_ids = [f"r{i}" for i in test_case.expected_order]
    assert modified == test_case.expected_modified, (
        f"Expected modified={test_case.expected_modified}, got {modified}"
    )
    assert input_batch.req_ids == expected_req_ids, (
        f"Expected order {expected_req_ids}, got {input_batch.req_ids}"
    )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/attention/test_attention_backends_selection.py | tests/v1/attention/test_attention_backends_selection.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for mamba attention backend selectors."""
from types import SimpleNamespace
import pytest
from vllm.model_executor.layers.mamba.mamba_mixer import MambaMixer
from vllm.model_executor.layers.mamba.mamba_mixer2 import MambaMixer2
from vllm.model_executor.layers.mamba.short_conv import ShortConv
from vllm.model_executor.models.minimax_text_01 import MiniMaxText01LinearAttention
from vllm.v1.attention.backends.linear_attn import LinearAttentionBackend
from vllm.v1.attention.backends.mamba1_attn import Mamba1AttentionBackend
from vllm.v1.attention.backends.mamba2_attn import Mamba2AttentionBackend
from vllm.v1.attention.backends.short_conv_attn import ShortConvAttentionBackend
@pytest.mark.parametrize(
    "layer_class, init_kwargs, expected_backend, expected_mamba_type",
    [
        # Mamba v1 mixer
        (
            MambaMixer,
            dict(
                hidden_size=128,
                ssm_state_size=16,
                conv_kernel_size=4,
                intermediate_size=256,
                time_step_rank=8,
                use_conv_bias=True,
                use_bias=False,
                use_rms_norm=True,
            ),
            Mamba1AttentionBackend,
            "mamba1",
        ),
        # Mamba v2 mixer
        (
            MambaMixer2,
            dict(
                hidden_size=128,
                ssm_state_size=16,
                conv_kernel_size=4,
                intermediate_size=256,
                use_conv_bias=True,
                use_bias=False,
                n_groups=1,
                num_heads=8,
                head_dim=32,
            ),
            Mamba2AttentionBackend,
            "mamba2",
        ),
        # MiniMax linear attention
        (
            MiniMaxText01LinearAttention,
            dict(
                hidden_size=128,
                hidden_inner_size=256,
                num_heads=8,
                head_dim=32,
                max_position=2048,
                block_size=64,
                num_hidden_layer=12,
                layer_idx=0,
                linear_layer_idx=0,
            ),
            LinearAttentionBackend,
            "linear_attention",
        ),
        # Short convolution layer
        (
            ShortConv,
            dict(
                config=SimpleNamespace(conv_L_cache=32, conv_bias=True),
                dim=128,
                layer_idx=0,
            ),
            ShortConvAttentionBackend,
            "short_conv",
        ),
    ],
)
def test_mamba_layers_get_attn_backend(
    dist_init, layer_class, init_kwargs, expected_backend, expected_mamba_type
):
    """Test that Mamba-like layers return the correct attention backend."""
    # Instantiate the layer with small hyperparameters and check both the
    # backend class it reports and its advertised mamba_type string.
    layer = layer_class(**init_kwargs)
    backend_class = layer.get_attn_backend()
    assert backend_class is expected_backend
    assert layer.mamba_type == expected_mamba_type
# NOTE(review): expected_backend/expected_mamba_type are unused in the body;
# the parametrize list is kept symmetric with the instantiation test above.
@pytest.mark.parametrize(
    "layer_class,expected_backend,expected_mamba_type",
    [
        (MambaMixer, Mamba1AttentionBackend, "mamba1"),
        (MambaMixer2, Mamba2AttentionBackend, "mamba2"),
        (MiniMaxText01LinearAttention, LinearAttentionBackend, "linear_attention"),
        (ShortConv, ShortConvAttentionBackend, "short_conv"),
    ],
)
def test_mamba_layers_have_unified_interface(
    layer_class, expected_backend, expected_mamba_type
):
    """Test that all Mamba layers have the unified get_attn_backend
    interface."""
    # Interface-only check: no instantiation, just attribute presence on
    # the class itself.
    assert hasattr(layer_class, "get_attn_backend"), (
        f"{layer_class.__name__} should have get_attn_backend method"
    )
    assert hasattr(layer_class, "mamba_type"), (
        f"{layer_class.__name__} should have mamba_type property"
    )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/attention/test_mla_backends.py | tests/v1/attention/test_mla_backends.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for v1 MLA backends without GPUModelRunner dependency.
Known Issues:
- FLASH_ATTN_MLA backend occasionally produces NaN values in
test_backend_correctness[mixed_small] when run after
test_backend_correctness[small_prefill], but passes when run alone.
"""
import pytest
import torch
from tests.v1.attention.utils import (
BatchSpec,
create_common_attn_metadata,
create_vllm_config,
try_get_attention_backend,
)
from vllm import _custom_ops as ops
from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.attention.ops.flashmla import is_flashmla_dense_supported
from vllm.attention.utils.fa_utils import flash_attn_supports_mla
from vllm.config.vllm import set_current_vllm_config
from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase
from vllm.utils.math_utils import cdiv
from vllm.utils.torch_utils import STR_DTYPE_TO_TORCH_DTYPE
from vllm.v1.attention.backends.mla.common import QueryLenSupport
from vllm.v1.attention.backends.utils import CommonAttentionMetadata
from vllm.v1.kv_cache_interface import FullAttentionSpec
# Candidate MLA backends; entries are pruned below based on what the
# current device/build actually supports.
BACKENDS_TO_TEST = [
    AttentionBackendEnum.CUTLASS_MLA,
    AttentionBackendEnum.FLASHMLA,
    AttentionBackendEnum.FLASH_ATTN_MLA,
    AttentionBackendEnum.FLASHINFER_MLA,
    AttentionBackendEnum.TRITON_MLA,
]
# Remove sm100 backends from the list if not using sm100
if not torch.cuda.is_available() or torch.cuda.get_device_properties(0).major < 10:
    BACKENDS_TO_TEST.remove(AttentionBackendEnum.CUTLASS_MLA)
    BACKENDS_TO_TEST.remove(AttentionBackendEnum.FLASHINFER_MLA)
# Remove FLASH_ATTN_MLA from the list if not supported
if not flash_attn_supports_mla():
    BACKENDS_TO_TEST.remove(AttentionBackendEnum.FLASH_ATTN_MLA)
# Remove FLASHMLA from the list if not supported
if not is_flashmla_dense_supported()[0]:
    BACKENDS_TO_TEST.remove(AttentionBackendEnum.FLASHMLA)
# Backends whose metadata builder supports query lengths > 1 can run the
# spec-decode batches; builders without the attribute default to
# SINGLE_ONLY and are excluded.
SPEC_DECODE_BACKENDS = []
for backend in BACKENDS_TO_TEST:
    builder_cls, _ = try_get_attention_backend(backend)
    query_len_support = getattr(
        builder_cls, "query_len_support", QueryLenSupport.SINGLE_ONLY
    )
    if query_len_support != QueryLenSupport.SINGLE_ONLY:
        SPEC_DECODE_BACKENDS.append(backend)
# Pick each backend's default kernel block size: the first supported entry
# (or its base when it is not a plain int), falling back to 16 when the
# backend does not constrain block sizes.
BACKEND_BLOCK_SIZES = {}
for backend in BACKENDS_TO_TEST:
    supported_sizes = backend.get_class().get_supported_kernel_block_sizes()
    if supported_sizes:
        default_size = supported_sizes[0]
        block_size = (
            default_size if isinstance(default_size, int) else default_size.base
        )
    else:
        block_size = 16
    BACKEND_BLOCK_SIZES[backend] = block_size
# Fixed seed so the randomized batches are reproducible across runs.
torch.manual_seed(42)
def _convert_dtype_to_torch(dtype):
"""Convert ModelDType to torch.dtype."""
if isinstance(dtype, str):
if dtype == "auto":
return torch.float16 # Default dtype for testing
elif dtype in STR_DTYPE_TO_TORCH_DTYPE:
return STR_DTYPE_TO_TORCH_DTYPE[dtype]
else:
raise ValueError(f"Unknown dtype: {dtype}")
elif isinstance(dtype, torch.dtype):
return dtype
else:
raise ValueError(f"Unknown dtype: {dtype}")
# Define common batch configurations
# Naming: "decode" batches use query_len 1, "prefill" batches use longer
# queries, "mixed" combines both, and "spec_decode" uses short multi-token
# queries of uniform length.
BATCH_SPECS = {
    "small_decode": BatchSpec(seq_lens=[32, 40], query_lens=[1, 1]),
    "small_prefill": BatchSpec(seq_lens=[32, 40], query_lens=[8, 8]),
    "mixed_small": BatchSpec(seq_lens=[32, 40, 48, 56], query_lens=[1, 1, 5, 5]),
    "medium_decode": BatchSpec(
        seq_lens=[128, 256, 512, 1024, 128, 256, 512, 1024],
        query_lens=[1, 1, 1, 1, 1, 1, 1, 1],
    ),
    "medium_prefill": BatchSpec(
        seq_lens=[256, 512, 1024, 2048], query_lens=[16, 16, 16, 16]
    ),
    "mixed_medium": BatchSpec(
        seq_lens=[512, 1024, 2048, 512, 1024, 2048], query_lens=[1, 1, 1, 7, 7, 7]
    ),
    "large_decode": BatchSpec(seq_lens=[2048] * 32, query_lens=[1] * 32),
    "large_prefill": BatchSpec(seq_lens=[4096] * 8, query_lens=[32] * 8),
    "single_decode": BatchSpec(seq_lens=[1024], query_lens=[1]),
    "single_prefill": BatchSpec(seq_lens=[1024], query_lens=[64]),
    "spec_decode_small": BatchSpec(
        seq_lens=[128, 256, 512, 1024], query_lens=[4, 4, 4, 4]
    ),
    "spec_decode_medium": BatchSpec(
        seq_lens=[512, 1024, 2048, 512, 1024, 2048], query_lens=[8, 8, 8, 8, 8, 8]
    ),
}
def create_and_prepopulate_kv_cache(
    kv_c_contexts: list[torch.Tensor],
    k_pe_contexts: list[torch.Tensor],
    block_size: int,
    head_size: int,
    dtype: torch.dtype,
    device: torch.device,
    num_blocks: int,
    common_attn_metadata: CommonAttentionMetadata,
    randomize_blocks: bool = True,
    kv_cache_dtype: str | None = None,
    scale: float | torch.Tensor = 1.0,
) -> torch.Tensor:
    """Create and prepopulate an MLA KV cache with context data.

    Args:
        kv_c_contexts: List of latent KV context tensors for each sequence
        k_pe_contexts: List of key positional embedding context tensors
            for each sequence
        block_size: Size of each block
        head_size: Size of each head (latent dimension)
        dtype: Data type for the cache
        device: Device to create the cache on
        num_blocks: Total number of blocks in the cache
        common_attn_metadata: Common attention metadata
        randomize_blocks: Whether to randomly permute blocks
            or use sequential order
        kv_cache_dtype: Optional kv cache dtype string. When set to
            "fp8_ds_mla" the cache is populated using the
            fp8 DeepSeek MLA layout via concat_and_cache_mla.
        scale: Scaling factor forwarded to concat_and_cache_mla when the
            fp8 cache layout is requested.

    Returns:
        MLA KV cache tensor
    """
    batch_size = len(kv_c_contexts)
    seq_lens = common_attn_metadata.seq_lens_cpu
    # Per-request query lengths, derived from consecutive start offsets.
    query_lens = (
        common_attn_metadata.query_start_loc_cpu[1:]
        - common_attn_metadata.query_start_loc_cpu[:-1]
    )
    context_lens = common_attn_metadata.num_computed_tokens_cpu
    # block_table and slot_mapping are filled in-place below.
    block_table = common_attn_metadata.block_table_tensor
    slot_mapping = common_attn_metadata.slot_mapping
    use_fp8_ds_mla = kv_cache_dtype == "fp8_ds_mla"
    if use_fp8_ds_mla:
        if not kv_c_contexts:
            raise ValueError(
                "kv_c_contexts cannot be empty when using fp8_ds_mla cache dtype"
            )
        kv_lora_rank = kv_c_contexts[0].shape[-1]
        rope_dim = k_pe_contexts[0].shape[-1]
        # fp8_ds_mla entry: fp8 latent bytes + 4 fp32 tile scales + rope
        # values stored in a 2-byte dtype.
        entry_size = kv_lora_rank + 4 * 4 + 2 * rope_dim
        kv_cache = torch.zeros(
            num_blocks, block_size, entry_size, dtype=torch.uint8, device=device
        )
        # Normalize the scale to a float32 tensor on the target device.
        scale_tensor = (
            scale
            if isinstance(scale, torch.Tensor)
            else torch.tensor(scale, dtype=torch.float32, device=device)
        )
        scale_tensor = scale_tensor.to(device=device, dtype=torch.float32)
    else:
        # Create MLA KV cache: (num_blocks, block_size, head_size)
        kv_cache = torch.zeros(
            num_blocks, block_size, head_size, dtype=dtype, device=device
        )
        kv_cache_flat = kv_cache.view(-1, head_size)
    # Populate the cache with the context tokens
    # Start from block_id=1 since block_id=0 is considered the null block
    start_block_idx = 1
    for i in range(batch_size):
        kv_c_context, k_pe_context = kv_c_contexts[i], k_pe_contexts[i]
        context_len = kv_c_context.shape[0]
        if context_len == 0:
            # No context, but still reserve this sequence's blocks.
            start_block_idx += cdiv(int(seq_lens[i]), block_size)
            continue
        start = start_block_idx * block_size
        if use_fp8_ds_mla:
            slots = torch.arange(context_len, device=device, dtype=torch.long) + start
            ops.concat_and_cache_mla(
                kv_c_context,
                k_pe_context.squeeze(1),
                kv_cache,
                slots,
                kv_cache_dtype="fp8_ds_mla",
                scale=scale_tensor,
            )
        else:
            kv_context = torch.cat([kv_c_context, k_pe_context.squeeze(1)], dim=-1)
            end = start + kv_context.shape[0]
            kv_cache_flat[start:end, ...] = kv_context
        # Stay block aligned and allocate enough blocks for the new tokens
        start_block_idx += cdiv(int(seq_lens[i]), block_size)
    blocks_end = start_block_idx
    # Permute the context blocks (excluding block 0 which is null)
    if randomize_blocks:
        perm = (
            torch.randperm(blocks_end - 1) + 1
        )  # Random permutation starting from block 1
    else:
        perm = torch.arange(1, blocks_end)  # Sequential order starting from block 1
    inv_perm = torch.zeros(blocks_end, dtype=torch.long, device=device)
    inv_perm[1:] = torch.argsort(perm) + 1  # Add 1 to account for starting from block 1
    kv_cache[1:blocks_end, ...] = kv_cache[perm, ...]
    # Construct the right block table
    # Start from block_id=1 since block_id=0 is considered the null block
    start_block_idx = 1
    for i in range(batch_size):
        num_blocks_for_seq = cdiv(int(seq_lens[i]), block_size)
        start = start_block_idx
        end = start + num_blocks_for_seq
        # inv_perm maps pre-permutation block ids to their new locations.
        block_table[i, :num_blocks_for_seq] = inv_perm[start:end]
        block_table[i, num_blocks_for_seq:] = 0
        start_block_idx += num_blocks_for_seq
    # Create a realistic slot mapping that corresponds to the block table
    for i in range(batch_size):
        # New tokens for request i occupy positions context_len..seq_len-1.
        token_offsets = torch.arange(int(query_lens[i])) + int(context_lens[i])
        block_indices = token_offsets // block_size
        token_inter_block_offsets = token_offsets % block_size
        start = common_attn_metadata.query_start_loc_cpu[i]
        end = common_attn_metadata.query_start_loc_cpu[i + 1]
        slot_mapping[start:end] = block_table[
            i, block_indices
        ] * block_size + token_inter_block_offsets.to(device)
    return kv_cache
class MockAttentionLayer:
    """A mock attention layer for testing.

    Exposes the unit q/k/v/prob scaling factors attention impls read, both
    as device tensors and as plain floats; forward() is never meant to run.
    """

    def __init__(self, device: torch.device):
        # Tensor scales (one independent tensor per attribute).
        for attr in ("_q_scale", "_k_scale", "_v_scale", "_prob_scale"):
            setattr(self, attr, torch.tensor(1.0, device=device))
        # Float mirrors of the q/k/v scales.
        for attr in ("_q_scale_float", "_k_scale_float", "_v_scale_float"):
            setattr(self, attr, 1.0)

    def forward(self, *_args, **_kwargs):
        raise NotImplementedError
class MockMLAAttentionLayer(AttentionLayerBase):
    """A mock MLA attention layer for populating static_forward_context.

    run_attention_backend registers instances of this class under each
    layer name so metadata builders can resolve layer_name -> impl; only
    .impl is populated, the abstract accessors stay unimplemented.
    """

    def __init__(self, impl):
        # The backend AttentionImpl instance this layer wraps.
        self.impl = impl

    def get_attn_backend(self):
        raise NotImplementedError

    def get_kv_cache_spec(self, vllm_config):
        raise NotImplementedError
def run_attention_backend(
    backend: AttentionBackendEnum,
    kv_cache_spec: FullAttentionSpec,
    layer_names: list[str],
    vllm_config,
    device: torch.device,
    common_attn_metadata: CommonAttentionMetadata,
    query: torch.Tensor,
    kv_c: torch.Tensor,
    k_pe: torch.Tensor,
    kv_cache: torch.Tensor,
    kv_lora_rank: int,
    qk_nope_head_dim: int,
    qk_rope_head_dim: int,
    v_head_dim: int,
    mock_kv_b_proj,
) -> torch.Tensor:
    """Run attention computation using the specified backend's AttentionImpl.

    Instantiates the backend's MLA impl from the given dimensions,
    registers mock layers in static_forward_context so the metadata
    builder can find them, builds attention metadata, and runs a single
    forward pass, returning a (num_tokens, num_heads * v_head_dim) output.
    """
    builder_cls, impl_cls = try_get_attention_backend(backend)
    # Set the current vllm config so that get_current_vllm_config() works
    # in the backend implementations
    with set_current_vllm_config(vllm_config):
        # Instantiate MLA implementation
        num_heads = vllm_config.model_config.get_num_attention_heads(
            vllm_config.parallel_config
        )
        num_kv_heads = vllm_config.model_config.get_num_kv_heads(
            vllm_config.parallel_config
        )
        head_size = vllm_config.model_config.get_head_size()
        scale = 1.0 / (head_size**0.5)
        impl = impl_cls(
            num_heads=num_heads,
            head_size=head_size,
            scale=scale,
            num_kv_heads=num_kv_heads,
            alibi_slopes=None,
            sliding_window=None,
            kv_cache_dtype="auto",
            logits_soft_cap=None,
            attn_type="decoder",
            kv_sharing_target_layer_name=None,
            q_lora_rank=None,
            kv_lora_rank=kv_lora_rank,
            qk_nope_head_dim=qk_nope_head_dim,
            qk_rope_head_dim=qk_rope_head_dim,
            qk_head_dim=qk_nope_head_dim + qk_rope_head_dim,
            v_head_dim=v_head_dim,
            kv_b_proj=mock_kv_b_proj,
        )
        # Process weights to create W_UK_T and W_UV attributes needed by MLA
        act_dtype = _convert_dtype_to_torch(vllm_config.model_config.dtype)
        impl.process_weights_after_loading(act_dtype)
        # Populate static_forward_context with mock attention layers
        for layer_name in layer_names:
            vllm_config.compilation_config.static_forward_context[layer_name] = (
                MockMLAAttentionLayer(impl)
            )
        # Build metadata
        builder = builder_cls(kv_cache_spec, layer_names, vllm_config, device)
        attn_metadata = builder.build(
            common_prefix_len=0,
            common_attn_metadata=common_attn_metadata,
        )
        # Create mock layer and output buffer
        mock_layer = MockAttentionLayer(device)
        num_tokens = query.shape[0]
        output = torch.empty(
            num_tokens, num_heads * v_head_dim, dtype=query.dtype, device=query.device
        )
        # Run forward pass
        # NOTE: The query, key, and value are already shaped correctly
        # in the calling test function.
        output = impl.forward(
            mock_layer, query, kv_c, k_pe, kv_cache, attn_metadata, output=output
        )
        return output
@pytest.mark.parametrize(
"batch_spec_name",
[
"small_decode",
"small_prefill",
"mixed_small",
"medium_decode",
"medium_prefill",
"mixed_medium",
"large_decode",
"large_prefill",
"single_decode",
"single_prefill",
"spec_decode_small",
"spec_decode_medium",
],
)
@pytest.mark.parametrize("model", ["deepseek-ai/DeepSeek-R1"])
@pytest.mark.parametrize("tensor_parallel_size", [1, 4, 8, 16])
def test_backend_correctness(
dist_init, batch_spec_name: str, model: str, tensor_parallel_size: int
):
"""
Test that all backends produce similar outputs to a reference implementation
using torch.nn.functional.scaled_dot_product_attention.
This test works by:
1. Generating a batch of sequences with specified context and query lengths.
2. Computing a ground-truth attention output using torch.sdpa on
contiguous Q, K, and V tensors.
3. Simulating vLLM's paged KV cache: It takes the context portion of the
K/V tensors and manually places them into a paged buffer according to
the test's (randomly generated) block table.
4. Running each vLLM attention backend with the new queries and the
simulated paged KV cache.
5. Comparing the vLLM backend's output to the ground-truth SDPA output.
Note: When tensor_parallel_size > 1, we simulate the head partitioning
by overriding the model config to use fewer heads, without requiring
multiple GPUs. This tests that backends work correctly with different
head counts.
"""
batch_spec = BATCH_SPECS[batch_spec_name]
is_spec_decode_test = batch_spec_name.startswith("spec_decode")
unique_block_sizes = sorted(set(BACKEND_BLOCK_SIZES.values()))
default_block_size = unique_block_sizes[0]
required_blocks = sum(
(seq_len + default_block_size - 1) // default_block_size
for seq_len in batch_spec.seq_lens
)
# Add 1 for null block at index 0, and some buffer
num_gpu_blocks = required_blocks + 1 + 100
hf_config_override = None
if tensor_parallel_size > 1:
from vllm.config import ModelConfig
temp_config = ModelConfig(model=model, max_model_len=1)
original_num_heads = temp_config.hf_text_config.num_attention_heads
original_num_kv_heads = getattr(
temp_config.hf_text_config, "num_key_value_heads", None
)
hf_config_override = {
"num_attention_heads": original_num_heads // tensor_parallel_size,
}
if original_num_kv_heads is not None:
hf_config_override["num_key_value_heads"] = max(
1, original_num_kv_heads // tensor_parallel_size
)
vllm_config = create_vllm_config(
model_name=model,
tensor_parallel_size=1, # Always use TP=1 to avoid multi-GPU requirements
max_model_len=max(batch_spec.seq_lens),
num_gpu_blocks=num_gpu_blocks,
block_size=default_block_size,
hf_config_override=hf_config_override,
)
# For spec decode tests, add a speculative_config to set the reorder_batch_threshold
if is_spec_decode_test:
from vllm.config import SpeculativeConfig
# Get the query length from the batch spec (they should all be uniform)
query_len = batch_spec.query_lens[0]
# Set num_speculative_tokens to query_len - 1
# (since threshold is 1 + num_spec_tokens)
# Use ngram method which doesn't require a draft model
vllm_config.speculative_config = SpeculativeConfig(
method="ngram", num_speculative_tokens=query_len - 1
)
device = torch.device("cuda:0")
# 1. Setup
batch_size = batch_spec.batch_size
seq_lens = batch_spec.seq_lens
query_lens = batch_spec.query_lens
num_q_heads = vllm_config.model_config.get_num_attention_heads(
vllm_config.parallel_config
)
head_size = vllm_config.model_config.get_head_size()
dtype = _convert_dtype_to_torch(vllm_config.model_config.dtype)
kv_lora_rank = 512
qk_rope_head_dim = 64
qk_nope_head_dim = 128
v_head_dim = 128
total_head_size = kv_lora_rank + qk_rope_head_dim
assert kv_lora_rank + qk_rope_head_dim == head_size, (
f"MLA dimensions don't match: {total_head_size} != {head_size}"
)
scale = 1.0 / (total_head_size**0.5)
# 2. Generate data and compute SDPA reference output for MLA
all_q_vllm, all_kv_c_vllm, all_k_pe_vllm = [], [], []
all_sdpa_outputs: list[list[torch.Tensor]] = []
kv_c_contexts, k_pe_contexts = [], []
# Create shared MLA weight matrices for consistency across all sequences
W_UK = torch.randn(
kv_lora_rank, num_q_heads, qk_nope_head_dim, dtype=dtype, device=device
)
W_UV = torch.randn(
kv_lora_rank, num_q_heads, v_head_dim, dtype=dtype, device=device
)
kv_b_proj_weight = torch.cat([W_UK, W_UV], dim=-1)
for i, backend in enumerate(BACKENDS_TO_TEST):
all_sdpa_outputs.append([])
for i in range(batch_size):
s_len = seq_lens[i]
q_len = query_lens[i]
context_len = s_len - q_len
# Generate MLA tensors
# Q has both nope and rope components:
# [q_len, num_heads, qk_nope_head_dim + qk_rope_head_dim]
q_c = torch.randn(
q_len,
num_q_heads,
qk_nope_head_dim + qk_rope_head_dim,
dtype=dtype,
device=device,
)
# KV_C (latent K/V): [s_len, kv_lora_rank]
kv_c_full = torch.randn(s_len, kv_lora_rank, dtype=dtype, device=device)
# K_PE (rope component): [s_len, 1, qk_rope_head_dim]
k_pe_full = torch.randn(s_len, 1, qk_rope_head_dim, dtype=dtype, device=device)
# Determine if this sequence uses the decode pipeline or prefill
# pipeline for each backend
# NOTE: For spec decode tests with uniform query_len > 1, backends that
# support spec decode (FLASH_ATTN_MLA with varlen support, FLASHMLA with
# uniform support) will use the decode pipeline (MQA-style), while
# backends that only support single-token queries will use the prefill
# pipeline (MHA-style). This ensures the reference implementation
# matches each backend's actual decode/prefill pipeline path.
is_decode = []
for backend_idx, backend in enumerate(BACKENDS_TO_TEST):
builder_cls, _ = try_get_attention_backend(backend)
if is_spec_decode_test:
query_len_support = getattr(
builder_cls, "query_len_support", QueryLenSupport.SINGLE_ONLY
)
supports_spec = query_len_support != QueryLenSupport.SINGLE_ONLY
is_decode.append(supports_spec)
else:
threshold = getattr(builder_cls, "reorder_batch_threshold", None)
query_len_support = getattr(
builder_cls, "query_len_support", QueryLenSupport.SINGLE_ONLY
)
within_threshold = q_len <= threshold if threshold else False
if (
within_threshold
and query_len_support == QueryLenSupport.UNIFORM
and i > 0
):
first_q_len = query_lens[0]
within_threshold = q_len == first_q_len
is_decode.append(within_threshold)
# Split q into nope and rope components
q_nope, q_pe = q_c.split([qk_nope_head_dim, qk_rope_head_dim], dim=-1)
#######################################################
# Decode path: MQA-style attention in latent space
# Transform q_nope to latent space: q_nope @ W_UK
# q_nope: [1, num_heads, qk_nope_head_dim]
# W_UK: [kv_lora_rank, num_heads, qk_nope_head_dim]
ql_nope = torch.einsum(
"qnh,lnh->qnl", q_nope, W_UK
) # [1, num_heads, kv_lora_rank]
# Build MQA attention inputs
# Q: [1, num_heads, kv_lora_rank + qk_rope_head_dim]
q_mqa = torch.cat([ql_nope, q_pe], dim=-1)
# K: [s_len, kv_lora_rank + qk_rope_head_dim]
# (broadcasted to all heads)
k_mqa = torch.cat([kv_c_full, k_pe_full.squeeze(1)], dim=-1)
k_mqa = k_mqa.unsqueeze(1).expand(-1, num_q_heads, -1)
# V: [s_len, kv_lora_rank] (broadcasted to all heads)
v_mqa = kv_c_full.unsqueeze(1).expand(-1, num_q_heads, -1)
# Create custom attention mask for decode path:
# - Query tokens can attend to all context tokens
# - Query tokens can only attend to query tokens up to their position
attn_mask = torch.ones(q_len, s_len, dtype=torch.bool, device=device)
# Apply causal mask only to the query portion (context_len onwards)
causal_mask = torch.tril(torch.ones(q_len, q_len, device=device))
attn_mask[:, context_len:] = causal_mask
# SDPA expects (N, H, L, D)
q_sdpa_in = q_mqa.unsqueeze(0).transpose(1, 2)
k_sdpa_in = k_mqa.unsqueeze(0).transpose(1, 2)
v_sdpa_in = v_mqa.unsqueeze(0).transpose(1, 2)
sdpa_out_i_decode = torch.nn.functional.scaled_dot_product_attention(
q_sdpa_in, k_sdpa_in, v_sdpa_in, attn_mask=attn_mask, scale=scale
)
sdpa_out_i_decode = sdpa_out_i_decode.transpose(1, 2).squeeze(
0
) # [1, num_heads, kv_lora_rank]
# Project back to output space: sdpa_out @ W_UV
sdpa_out_i_decode = torch.einsum("qnl,lnv->qnv", sdpa_out_i_decode, W_UV)
sdpa_out_i_decode = sdpa_out_i_decode.flatten(start_dim=-2)
#######################################################
# Prefill path: MHA-style attention with full sequence
# Apply kv_b_proj to the full kv_c tensor
kv_nope_full = torch.einsum("sl,lnh->snh", kv_c_full, kv_b_proj_weight)
k_nope_full, v_full = kv_nope_full.split([qk_nope_head_dim, v_head_dim], dim=-1)
# Build attention inputs for full sequence
q_mha = torch.cat([q_nope, q_pe], dim=-1) # [q_len, num_heads, total_dim]
k_pe_full_expanded = k_pe_full.expand(-1, num_q_heads, -1)
k_full = torch.cat([k_nope_full, k_pe_full_expanded], dim=-1)
# Create custom attention mask:
# - Query tokens can attend to all context tokens
# - Query tokens can only attend to query tokens up to their pos
attn_mask = torch.ones(q_len, s_len, dtype=torch.bool, device=device)
# Apply causal mask only to the query portion (context_len onwards)
causal_mask = torch.tril(torch.ones(q_len, q_len, device=device))
attn_mask[:, context_len:] = causal_mask
# SDPA expects (N, H, L, D)
q_sdpa_in = q_mha.unsqueeze(0).transpose(1, 2)
k_sdpa_in = k_full.unsqueeze(0).transpose(1, 2)
v_sdpa_in = v_full.unsqueeze(0).transpose(1, 2)
# Single attention call with custom mask
sdpa_out_i_prefill = torch.nn.functional.scaled_dot_product_attention(
q_sdpa_in, k_sdpa_in, v_sdpa_in, attn_mask=attn_mask, scale=scale
)
sdpa_out_i_prefill = sdpa_out_i_prefill.transpose(1, 2).squeeze(0)
sdpa_out_i_prefill = sdpa_out_i_prefill.flatten(start_dim=-2)
for backend_idx, backend in enumerate(BACKENDS_TO_TEST):
if is_decode[backend_idx]:
all_sdpa_outputs[backend_idx].append(sdpa_out_i_decode)
else:
all_sdpa_outputs[backend_idx].append(sdpa_out_i_prefill)
# Inputs for vLLM MLA backends are just the new tokens
all_q_vllm.append(q_c)
all_kv_c_vllm.append(kv_c_full[context_len:]) # New kv_c tokens
all_k_pe_vllm.append(k_pe_full[context_len:]) # New k_pe tokens
# Contextual K/V data used to populate the paged cache (MLA format)
kv_c_contexts.append(kv_c_full[:context_len])
k_pe_contexts.append(k_pe_full[:context_len])
# Concatenate all sequences (no reordering needed)
query_vllm = torch.cat(all_q_vllm, dim=0)
kv_c_vllm = torch.cat(all_kv_c_vllm, dim=0)
k_pe_vllm = torch.cat(all_k_pe_vllm, dim=0)
sdpa_outputs = {}
for backend_idx, backend in enumerate(BACKENDS_TO_TEST):
sdpa_outputs[backend] = torch.cat(all_sdpa_outputs[backend_idx], dim=0)
# Create mock kv_b_proj using the same weights as reference implementation
from vllm.model_executor.layers.linear import ColumnParallelLinear
mock_kv_b_proj = ColumnParallelLinear(
input_size=kv_lora_rank,
output_size=num_q_heads * (qk_nope_head_dim + v_head_dim),
bias=False,
).to(device=device, dtype=dtype)
# Set the mock weights to match our reference implementation
# Reshape W_UK and W_UV to match the expected kv_b_proj format
# [kv_lora_rank, num_heads, qk_nope_head_dim + v_head_dim]
kv_b_proj_weight = kv_b_proj_weight.view(
kv_lora_rank, num_q_heads * (qk_nope_head_dim + v_head_dim)
)
mock_kv_b_proj.weight = torch.nn.Parameter(kv_b_proj_weight.T, requires_grad=False)
# 3. Create metadata and KV caches for each block size
# Group backends by block size and test each group
metadata_per_block_size = {}
kv_cache_per_block_size = {}
for block_size in unique_block_sizes:
# Create metadata for this block size
common_attn_metadata = create_common_attn_metadata(
batch_spec, block_size, device
)
# Pad block table to meet requirement:
# block_num % (128 / block_size) == 0
required_divisor = int(128 / block_size)
current_block_num = common_attn_metadata.block_table_tensor.shape[1]
if current_block_num % required_divisor != 0:
# Pad to next multiple of required_divisor
padded_block_num = (
(current_block_num + required_divisor - 1) // required_divisor
) * required_divisor
padding_cols = padded_block_num - current_block_num
padding = torch.zeros(
(common_attn_metadata.block_table_tensor.shape[0], padding_cols),
dtype=torch.int32,
device=device,
)
common_attn_metadata.block_table_tensor = torch.cat(
[common_attn_metadata.block_table_tensor, padding], dim=1
)
metadata_per_block_size[block_size] = common_attn_metadata
# Create KV cache for this block size
required_blocks_for_size = sum(
(seq_len + block_size - 1) // block_size for seq_len in batch_spec.seq_lens
)
num_blocks_for_size = required_blocks_for_size + 1 + 100
kv_cache = create_and_prepopulate_kv_cache(
kv_c_contexts=kv_c_contexts,
k_pe_contexts=k_pe_contexts,
block_size=block_size,
head_size=head_size,
dtype=dtype,
device=device,
num_blocks=num_blocks_for_size,
common_attn_metadata=common_attn_metadata,
randomize_blocks=True,
)
kv_cache_per_block_size[block_size] = kv_cache
# 4. Run vLLM backends and compare
failures = []
for backend_idx, backend_name in enumerate(BACKENDS_TO_TEST):
# Skip backends that don't support spec decode for spec decode tests
if is_spec_decode_test and backend_name not in SPEC_DECODE_BACKENDS:
continue
# Get the appropriate block_size, metadata, and cache for this backend
block_size = BACKEND_BLOCK_SIZES[backend_name]
common_attn_metadata = metadata_per_block_size[block_size]
kv_cache = kv_cache_per_block_size[block_size]
# Create kv_cache_spec with the correct block_size for this backend
backend_kv_cache_spec = FullAttentionSpec(
block_size=block_size,
num_kv_heads=vllm_config.model_config.get_num_kv_heads(
vllm_config.parallel_config
),
head_size=vllm_config.model_config.get_head_size(),
dtype=vllm_config.model_config.dtype,
sliding_window=vllm_config.model_config.get_sliding_window(),
)
backend_output = run_attention_backend(
backend_name,
backend_kv_cache_spec,
["placeholder"],
vllm_config,
device,
common_attn_metadata,
query_vllm,
kv_c_vllm,
k_pe_vllm,
kv_cache,
kv_lora_rank,
qk_nope_head_dim,
qk_rope_head_dim,
v_head_dim,
mock_kv_b_proj,
)
# Use backend_idx to get the correct SDPA output for this backend
expected_output = sdpa_outputs[backend_name]
# Check shape and dtype consistency
try:
assert backend_output.shape == expected_output.shape, (
f"[{backend_name}] shape {backend_output.shape} != "
f"SDPA shape {expected_output.shape}"
)
assert backend_output.dtype == expected_output.dtype, (
f"[{backend_name}] dtype {backend_output.dtype} != "
f"SDPA dtype {expected_output.dtype}"
)
assert torch.isfinite(backend_output).all(), (
f"[{backend_name}] produced non-finite values"
)
# Check numerical similarity
rtol = 1e-2
atol = 5e-1
max_diff = torch.max(torch.abs(backend_output - expected_output)).item()
max_rel_diff = torch.max(
torch.abs(backend_output - expected_output) / torch.abs(expected_output)
).item()
all_close = torch.allclose(
backend_output, expected_output, rtol=rtol, atol=atol
)
assert all_close, (
f"[{backend_name}] output differs from SDPA baseline. "
f"Max diff: {max_diff:.6f}, max rel diff: {max_rel_diff:.6f})"
)
except AssertionError as e:
failures.append(str(e))
# Report all failures at once
if failures:
# Create a summary for the single-line failure message
backend_names = []
for f in failures:
if "[AttentionBackendEnum." in f:
backend_name = f.split("[")[1].split("]")[0]
backend_names.append(backend_name)
summary = f"{len(failures)} backend(s) failed: {', '.join(backend_names)}"
detailed_msg = "\n".join(failures)
pytest.fail(f"{summary}\n{detailed_msg}")
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/attention/test_rocm_attention_backends_selection.py | tests/v1/attention/test_rocm_attention_backends_selection.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for attention backend selectors."""
from unittest.mock import MagicMock, patch
import pytest
import torch
from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.attention.selector import AttentionSelectorConfig
from vllm.platforms import current_platform
# ROCm-specific attention backend selection tests
pytestmark = pytest.mark.skipif(
not current_platform.is_rocm(), reason="ROCm-specific tests"
)
@pytest.fixture
def mock_vllm_config():
"""Create a mock VllmConfig for testing."""
config = MagicMock()
config.model_config.dtype = torch.float16
config.model_config.hf_config.architectures = ["LlamaForCausalLM"]
config.cache_config.block_size = 16
return config
@pytest.fixture
def mock_on_gfx9():
"""Mock the on_gfx9 function to return True."""
with patch("vllm.platforms.rocm.on_gfx9", return_value=True):
yield
@pytest.mark.parametrize(
"env_vars, selected_backend, expected_backend_path",
[
# Test Case: Explicit FLEX_ATTENTION backend
(
{},
"FLEX_ATTENTION",
AttentionBackendEnum.FLEX_ATTENTION.get_path(),
),
# Test Case 1: Default (no env vars, no explicit backend)
(
{},
None,
AttentionBackendEnum.TRITON_ATTN.get_path(),
),
# Test Case 2: Explicit TRITON_ATTN backend
(
{},
"TRITON_ATTN",
AttentionBackendEnum.TRITON_ATTN.get_path(),
),
# Test Case 3: Explicit ROCM_ATTN backend
(
{},
"ROCM_ATTN",
AttentionBackendEnum.ROCM_ATTN.get_path(),
),
# Test Case 4: Explicit ROCM_AITER_FA backend
(
{},
"ROCM_AITER_FA",
AttentionBackendEnum.ROCM_AITER_FA.get_path(),
),
# Test Case 5: Explicit ROCM_AITER_UNIFIED_ATTN backend
(
{},
"ROCM_AITER_UNIFIED_ATTN",
AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN.get_path(),
),
# Test Case 6: VLLM_ROCM_USE_AITER=1
# (defaults to AITER FA when MHA not explicitly disabled)
(
{"VLLM_ROCM_USE_AITER": "1"},
None,
AttentionBackendEnum.ROCM_AITER_FA.get_path(),
),
# Test Case 7: VLLM_ROCM_USE_AITER=1 + VLLM_ROCM_USE_AITER_MHA=1
(
{"VLLM_ROCM_USE_AITER": "1", "VLLM_ROCM_USE_AITER_MHA": "1"},
None,
AttentionBackendEnum.ROCM_AITER_FA.get_path(),
),
# Test Case 8: VLLM_ROCM_USE_AITER=1 + VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION=1
(
{
"VLLM_ROCM_USE_AITER": "1",
"VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION": "1",
},
None,
AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN.get_path(),
),
# Test Case 9: VLLM_ROCM_USE_AITER=1 + explicit TRITON_ATTN
(
{"VLLM_ROCM_USE_AITER": "1"},
"TRITON_ATTN",
AttentionBackendEnum.TRITON_ATTN.get_path(),
),
# Test Case 10: VLLM_ROCM_USE_AITER=1 + VLLM_ROCM_USE_AITER_MHA=0
# (explicitly disabled)
(
{"VLLM_ROCM_USE_AITER": "1", "VLLM_ROCM_USE_AITER_MHA": "0"},
None,
AttentionBackendEnum.TRITON_ATTN.get_path(),
),
# Test Case 11: VLLM_ROCM_USE_AITER=1 + explicit ROCM_ATTN
(
{"VLLM_ROCM_USE_AITER": "1"},
"ROCM_ATTN",
AttentionBackendEnum.ROCM_ATTN.get_path(),
),
],
)
def test_standard_attention_backend_selection(
env_vars,
selected_backend,
expected_backend_path,
mock_vllm_config,
mock_on_gfx9,
monkeypatch,
):
"""Test standard attention backend selection with various configurations."""
# Set environment variables
for key, value in env_vars.items():
monkeypatch.setenv(key, value)
# Import after setting env vars to ensure they're picked up
# Reload envs to pick up new environment variables
import importlib
import vllm.envs as envs
importlib.reload(envs)
# Convert string backend to enum if provided
backend_enum = None
if selected_backend:
backend_enum = getattr(AttentionBackendEnum, selected_backend)
# Get the backend class path
from vllm.platforms.rocm import RocmPlatform
attn_selector_config = AttentionSelectorConfig(
head_size=128,
dtype=torch.float16,
kv_cache_dtype="auto",
block_size=16,
use_mla=False,
has_sink=False,
use_sparse=False,
)
backend_path = RocmPlatform.get_attn_backend_cls(
selected_backend=backend_enum, attn_selector_config=attn_selector_config
)
assert backend_path == expected_backend_path
@pytest.mark.parametrize(
"env_vars, selected_backend, block_size, expected_backend_path, should_raise",
[
# Test Case 1: TRITON_MLA with block_size != 1
(
{},
"TRITON_MLA",
16,
AttentionBackendEnum.TRITON_MLA.get_path(),
False,
),
# Test Case 2: TRITON_MLA with block_size == 1 (should raise)
(
{},
"TRITON_MLA",
1,
None,
True,
),
# Test Case 3: ROCM_AITER_MLA with block_size == 1
(
{},
"ROCM_AITER_MLA",
1,
AttentionBackendEnum.ROCM_AITER_MLA.get_path(),
False,
),
# Test Case 4: ROCM_AITER_MLA with block_size != 1 (should raise)
(
{},
"ROCM_AITER_MLA",
16,
AttentionBackendEnum.ROCM_AITER_MLA.get_path(),
False,
),
# Test Case 5: VLLM_ROCM_USE_AITER=1 with block_size == 1
(
{"VLLM_ROCM_USE_AITER": "1"},
None,
1,
AttentionBackendEnum.ROCM_AITER_MLA.get_path(),
False,
),
# Test Case 6: VLLM_ROCM_USE_AITER=1 with block_size == 16
# (should use ROCM_AITER_MLA now, as it supports block_size 16)
(
{"VLLM_ROCM_USE_AITER": "1"},
None,
16,
AttentionBackendEnum.ROCM_AITER_MLA.get_path(),
False,
),
# Test Case 7: VLLM_ROCM_USE_AITER=1 + explicit TRITON_MLA
(
{"VLLM_ROCM_USE_AITER": "1"},
"TRITON_MLA",
16,
AttentionBackendEnum.TRITON_MLA.get_path(),
False,
),
# Test Case 8: Explicit ROCM_AITER_TRITON_MLA
(
{},
"ROCM_AITER_TRITON_MLA",
16,
AttentionBackendEnum.ROCM_AITER_TRITON_MLA.get_path(),
False,
),
],
)
def test_mla_backend_selection(
env_vars,
selected_backend,
block_size,
expected_backend_path,
should_raise,
mock_vllm_config,
monkeypatch,
):
"""Test MLA backend selection with various configurations."""
# Set environment variables
for key, value in env_vars.items():
monkeypatch.setenv(key, value)
# Import after setting env vars
# Reload envs
import importlib
import vllm.envs as envs
importlib.reload(envs)
# Mock is_aiter_mla_enabled based on env vars and block_size
aiter_enabled = env_vars.get("VLLM_ROCM_USE_AITER") == "1"
mock_rocm_ops = MagicMock()
mock_rocm_ops.is_mla_enabled.return_value = aiter_enabled
mock_aiter_module = MagicMock()
mock_aiter_module.rocm_aiter_ops = mock_rocm_ops
with patch.dict("sys.modules", {"vllm._aiter_ops": mock_aiter_module}):
# Convert string backend to enum if provided
backend_enum = None
if selected_backend:
backend_enum = getattr(AttentionBackendEnum, selected_backend)
from vllm.platforms.rocm import RocmPlatform
if should_raise:
with pytest.raises(ValueError):
attn_selector_config = AttentionSelectorConfig(
head_size=128,
dtype=torch.float16,
kv_cache_dtype="auto",
block_size=block_size,
use_mla=True,
has_sink=False,
use_sparse=False,
)
attn_selector_config = AttentionSelectorConfig(
head_size=128,
dtype=torch.float16,
kv_cache_dtype="auto",
block_size=block_size,
use_mla=True,
has_sink=False,
use_sparse=False,
)
backend_path = RocmPlatform.get_attn_backend_cls(
selected_backend=backend_enum,
attn_selector_config=attn_selector_config,
)
else:
attn_selector_config = AttentionSelectorConfig(
head_size=128,
dtype=torch.float16,
kv_cache_dtype="auto",
block_size=block_size,
use_mla=True,
has_sink=False,
use_sparse=False,
)
backend_path = RocmPlatform.get_attn_backend_cls(
selected_backend=backend_enum, attn_selector_config=attn_selector_config
)
assert backend_path == expected_backend_path
def test_aiter_fa_requires_gfx9(mock_vllm_config):
"""Test that ROCM_AITER_FA requires gfx9 architecture."""
from vllm.platforms.rocm import RocmPlatform
# Mock on_gfx9 to return False
with (
patch("vllm.platforms.rocm.on_gfx9", return_value=False),
pytest.raises(
ValueError,
match="only supported on gfx9",
),
):
attn_selector_config = AttentionSelectorConfig(
head_size=128,
dtype=torch.float16,
kv_cache_dtype="auto",
block_size=16,
use_mla=False,
has_sink=False,
use_sparse=False,
)
RocmPlatform.get_attn_backend_cls(
selected_backend=AttentionBackendEnum.ROCM_AITER_FA,
attn_selector_config=attn_selector_config,
)
def test_sparse_not_supported(mock_vllm_config):
"""Test that sparse attention is not supported on ROCm."""
from vllm.platforms.rocm import RocmPlatform
with pytest.raises(
AssertionError, match="Sparse MLA backend on ROCm only supports block size 1"
):
attn_selector_config = AttentionSelectorConfig(
head_size=128,
dtype=torch.float16,
kv_cache_dtype="auto",
block_size=16,
use_mla=False,
has_sink=False,
use_sparse=True,
)
RocmPlatform.get_attn_backend_cls(
selected_backend=None, attn_selector_config=attn_selector_config
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/attention/test_chunked_local_attention.py | tests/v1/attention/test_chunked_local_attention.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
import numpy as np
import pytest
import torch
from tests.v1.attention.utils import BatchSpec, create_common_attn_metadata
from vllm.v1.attention.backends.utils import make_local_attention_virtual_batches
@dataclass
class LocalAttentionTestData:
# Input parameters
batch_spec: BatchSpec
attn_chunk_size: int
block_size: int
# Expected return values
expected_q_seqlens: list[int]
expected_k_seqlens: list[int]
expected_local_block_table: list[list[int]]
test_data_list = [
# Same as example in docstring of make_local_attention_virtual_batches
# except block table has 9 columns instead of 10
LocalAttentionTestData(
batch_spec=BatchSpec(
query_lens=[4, 10, 5],
seq_lens=[6, 17, 9],
),
attn_chunk_size=4,
block_size=2,
expected_q_seqlens=[2, 2, 1, 4, 4, 1, 4, 1],
expected_k_seqlens=[4, 2, 4, 4, 4, 1, 4, 1],
# 2 pages per local branch
# (chunk size 4 // block size 2)
expected_local_block_table=[
[0, 1], # local-batch 0, (batch 0, starting from k[0])
[2, 3], # local-batch 1, (batch 0, starting from k[4])
[11, 12], # local-batch 2, (batch 1, starting from k[4])
[13, 14], # local-batch 3, (batch 1, starting from k[8])
[15, 16], # local-batch 4, (batch 1, starting from k[12])
[17, 17], # local-batch 5, (batch 1, starting from k[16])
[20, 21], # local-batch 6, (batch 2, starting from k[4])
[22, 23], # local-batch 7, (batch 2, starting from k[8])
],
),
# Case where block indices are not clipped to block table ncols-1
# because tokens_in_last_block == attn_chunk_size
LocalAttentionTestData(
batch_spec=BatchSpec(
query_lens=[8],
seq_lens=[12],
),
attn_chunk_size=4,
block_size=2,
expected_q_seqlens=[4, 4],
expected_k_seqlens=[4, 4],
expected_local_block_table=[
[2, 3],
[4, 5],
],
),
# Case where all kv_seq positions are involved in attn
LocalAttentionTestData(
batch_spec=BatchSpec(
query_lens=[7],
# 10 - 7 = 3 previously computed tokens
seq_lens=[10],
),
attn_chunk_size=4,
block_size=2,
expected_q_seqlens=[1, 4, 2],
expected_k_seqlens=[4, 4, 2],
expected_local_block_table=[
[0, 1],
[2, 3],
[4, 4],
],
),
# Case where attn_chunk_size > kv_seq_len
# so no extra mini virtual batches are created
LocalAttentionTestData(
batch_spec=BatchSpec(
query_lens=[4],
seq_lens=[6],
),
# Larger than kv_seq_len
attn_chunk_size=10,
block_size=2,
# No change to q_seqlens and k_seqlens
expected_q_seqlens=[4],
expected_k_seqlens=[6],
# In this case, we only need a block-table like:
# block_table = [ [0, 1, 2] ] # 1 batch, 3 pages
# But we need to pad it to 5 pages per local batch
# because currently the pages_per_local_batch
# is calculated as (attn_chunk_size // block_size)
expected_local_block_table=[
[0, 1, 2, 2, 2],
],
),
# Block size equal to chunk size
# Expect single page per batch in local batch table
LocalAttentionTestData(
batch_spec=BatchSpec(
query_lens=[6, 6],
seq_lens=[8, 8],
),
attn_chunk_size=4,
block_size=4,
expected_q_seqlens=[2, 4, 2, 4],
expected_k_seqlens=[4, 4, 4, 4],
# Initial block table = [
# [0, 1], < batch 0
# [2, 3], < batch 1
# ]
expected_local_block_table=[
[0], # local-batch 0, (batch 0, starting from k[0])
[1], # local-batch 1, (batch 0, starting from k[4])
[2], # local-batch 1, (batch 0, starting from k[0])
[3], # local-batch 1, (batch 0, starting from k[4])
],
),
# Case where query falls in the second attention chunk
# k_toks > 0 1 2 3 4
# q_toks v _____________
# 0 | 1
# 1 | 1 1
# 2 | 1 1 1
# 3 | 1 1 1 1
# 4 | 1
# where tokens 0,1,2,3 have been pre-computed
LocalAttentionTestData(
batch_spec=BatchSpec(
query_lens=[1],
seq_lens=[5],
),
attn_chunk_size=4,
block_size=2,
expected_q_seqlens=[1],
expected_k_seqlens=[1],
expected_local_block_table=[
[2, 2],
],
),
]
@pytest.mark.parametrize("test_data", test_data_list)
def test_local_attention_virtual_batches(test_data: LocalAttentionTestData):
device = torch.device("cuda:0")
batch_spec = test_data.batch_spec
attn_chunk_size = test_data.attn_chunk_size
block_size = test_data.block_size
expected_q_seqlens = test_data.expected_q_seqlens
expected_k_seqlens = test_data.expected_k_seqlens
expected_local_block_table = test_data.expected_local_block_table
# Create common attention metadata
common_attn_metadata = create_common_attn_metadata(
batch_spec,
block_size,
device,
# Use torch.arange instead of torch.randint so we can assert on
# block table tensor values. The block table will have shape
# (num_batches, cdiv(max_seq_len, block_size)) and the values will be
# arranged from 0 to cdiv(max_seq_len, block_size)-1
arange_block_indices=True,
)
# Call the function
result, _ = make_local_attention_virtual_batches(
attn_chunk_size, common_attn_metadata, block_size
)
# Convert to numpy for easier comparison
actual_q_seqlens = np.diff(result.query_start_loc_cpu.numpy())
actual_k_seqlens = result.seq_lens_cpu.numpy()
# Check that all query lengths are less than or equal to attn_chunk_size
assert all(q_len <= attn_chunk_size for q_len in actual_q_seqlens)
# Check that all key lengths are less than or equal to attn_chunk_size
assert all(k_len <= attn_chunk_size for k_len in actual_k_seqlens)
# Check that the total number of query tokens is preserved
assert sum(actual_q_seqlens) == sum(batch_spec.query_lens)
# Verify results
np.testing.assert_array_equal(actual_q_seqlens, expected_q_seqlens)
np.testing.assert_array_equal(actual_k_seqlens, expected_k_seqlens)
expected_block_table_tensor = torch.tensor(
expected_local_block_table, dtype=torch.int32, device=device
)
print(f"Expected block table:\n{expected_block_table_tensor}")
print(f"Actual block table:\n{result.block_table_tensor}")
torch.testing.assert_close(result.block_table_tensor, expected_block_table_tensor)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/__init__.py | tests/v1/kv_connector/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_decode_bench_connector.py | tests/v1/kv_connector/unit/test_decode_bench_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for DecodeBenchConnector.
Tests the functionality of the DecodeBenchConnector which fills KV cache
with dummy values for decode performance benchmarking.
"""
import pytest
import torch
from vllm import SamplingParams
from vllm.config import KVTransferConfig
from vllm.distributed.kv_transfer.kv_connector.v1 import KVConnectorRole
# ruff: noqa: E501
from vllm.distributed.kv_transfer.kv_connector.v1.decode_bench_connector import (
DecodeBenchConnector,
DecodeBenchConnectorMetadata,
)
from vllm.forward_context import ForwardContext
from vllm.utils.hashing import sha256
from vllm.v1.core.kv_cache_utils import get_request_block_hasher, init_none_hash
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.request import Request
from .utils import (
EOS_TOKEN_ID,
create_model_runner_output,
create_scheduler,
create_vllm_config,
)
class DecodeBenchTestRunner:
"""Test runner for DecodeBenchConnector."""
def __init__(self, block_size: int, num_gpu_blocks: int):
self.block_size = block_size
self.num_gpu_blocks = num_gpu_blocks
self.req_id = -1
# Create vllm config with DecodeBenchConnector
vllm_config = create_vllm_config(
block_size=block_size, max_num_batched_tokens=1000
)
vllm_config.kv_transfer_config = KVTransferConfig(
kv_connector="DecodeBenchConnector",
kv_role="kv_both",
)
self.vllm_config = vllm_config
self.scheduler: Scheduler = create_scheduler(
vllm_config, num_blocks=num_gpu_blocks
)
# Create worker-side connector
self.worker_connector = DecodeBenchConnector(
vllm_config, KVConnectorRole.WORKER
)
# Create dummy KV caches for testing
# Shape: [num_blocks, 2, num_heads, block_size, head_dim]
# Using simplified shape for testing
num_heads = 4
head_dim = 64
self.kv_caches = {
f"layer_{i}": torch.zeros(
num_gpu_blocks, 2, num_heads, block_size, head_dim
)
for i in range(2) # 2 layers for testing
}
# Register KV caches with worker connector
self.worker_connector.register_kv_caches(self.kv_caches)
# Extract scheduler-side connector
scheduler_connector = self.scheduler.connector
assert scheduler_connector is not None
assert isinstance(scheduler_connector, DecodeBenchConnector)
self.scheduler_connector: DecodeBenchConnector = scheduler_connector
init_none_hash(sha256)
self._block_hasher = get_request_block_hasher(block_size, sha256)
self._dummy_ctx: ForwardContext = ForwardContext(
no_compile_layers={}, attn_metadata={}, virtual_engine=0
)
def new_request(self, token_ids: list[int]) -> Request:
"""Create a new request with given token IDs."""
self.req_id += 1
req = Request(
request_id=str(self.req_id),
prompt_token_ids=token_ids,
sampling_params=SamplingParams(max_tokens=100),
pooling_params=None,
eos_token_id=EOS_TOKEN_ID,
block_hasher=self._block_hasher,
)
self.scheduler.add_request(req)
return req
def run_single_step(self, token_id: int = 0):
"""Run a single scheduler + worker step."""
scheduler_output = self.scheduler.schedule()
# Get connector metadata
kv_connector_metadata = scheduler_output.kv_connector_metadata
assert kv_connector_metadata is not None
assert isinstance(kv_connector_metadata, DecodeBenchConnectorMetadata)
# Bind metadata and load KV
self.worker_connector.bind_connector_metadata(kv_connector_metadata)
self.worker_connector.start_load_kv(self._dummy_ctx)
if scheduler_output.total_num_scheduled_tokens > 0:
self.worker_connector.wait_for_save()
self.worker_connector.clear_connector_metadata()
# Create model runner output
model_runner_output = create_model_runner_output(
reqs=self.scheduler.running,
token_id=token_id,
)
self.scheduler.update_from_output(scheduler_output, model_runner_output)
return scheduler_output, kv_connector_metadata
def test_decode_bench_connector_basic():
"""Test basic functionality of DecodeBenchConnector."""
block_size = 16
num_gpu_blocks = 100
runner = DecodeBenchTestRunner(block_size=block_size, num_gpu_blocks=num_gpu_blocks)
# Create a request with multiple blocks worth of tokens
num_tokens = block_size * 3 # 3 blocks
token_ids = [1] * num_tokens
req = runner.new_request(token_ids)
# Run first step - should fill KV cache with dummy values
scheduler_output, metadata = runner.run_single_step()
# Check that get_num_new_matched_tokens returned correct value
# Should be num_tokens - 1 (all except the last token for decode)
expected_fill_tokens = num_tokens - 1
# Check metadata has the request to fill
assert len(metadata.reqs_to_fill) == 1
assert req.request_id in metadata.reqs_to_fill
block_ids_per_group, num_tokens_to_fill = metadata.reqs_to_fill[req.request_id]
assert num_tokens_to_fill == expected_fill_tokens
# For standard attention, there's only one group
assert len(block_ids_per_group) == 1
block_ids = block_ids_per_group[0]
# Calculate expected number of blocks
expected_num_blocks = (expected_fill_tokens + block_size - 1) // block_size
assert len(block_ids) == expected_num_blocks
# Verify KV caches were filled with constant value
for layer_name, kv_cache in runner.kv_caches.items():
for block_id in block_ids:
# Check that the block was filled
block_data = kv_cache[block_id]
# Should be filled with constant value 0.015
assert torch.allclose(block_data, torch.tensor(0.015))
def test_decode_bench_connector_no_refill():
"""Test that DecodeBenchConnector only fills once per request."""
block_size = 16
num_gpu_blocks = 100
runner = DecodeBenchTestRunner(block_size=block_size, num_gpu_blocks=num_gpu_blocks)
# Create a request
num_tokens = block_size * 2
token_ids = [1] * num_tokens
runner.new_request(token_ids)
# Run first step - should fill KV cache
_, metadata1 = runner.run_single_step()
assert len(metadata1.reqs_to_fill) == 1
# Run second step - should NOT fill again (already filled)
_, metadata2 = runner.run_single_step()
assert len(metadata2.reqs_to_fill) == 0
def test_decode_bench_connector_single_token():
"""Test DecodeBenchConnector with single token request."""
block_size = 16
num_gpu_blocks = 100
runner = DecodeBenchTestRunner(block_size=block_size, num_gpu_blocks=num_gpu_blocks)
# Create a request with just 1 token
# Should not fill anything (need at least 2 tokens: 1 to fill, 1 to decode)
token_ids = [1]
runner.new_request(token_ids)
# Run step - should NOT fill KV cache
_, metadata = runner.run_single_step()
assert len(metadata.reqs_to_fill) == 0
def test_decode_bench_connector_two_tokens():
"""Test DecodeBenchConnector with two token request."""
block_size = 16
num_gpu_blocks = 100
runner = DecodeBenchTestRunner(block_size=block_size, num_gpu_blocks=num_gpu_blocks)
# Create a request with 2 tokens
# Should fill 1 token (first token), decode the second
token_ids = [1, 2]
req = runner.new_request(token_ids)
# Run step
_, metadata = runner.run_single_step()
assert len(metadata.reqs_to_fill) == 1
assert req.request_id in metadata.reqs_to_fill
block_ids_per_group, num_tokens_to_fill = metadata.reqs_to_fill[req.request_id]
assert num_tokens_to_fill == 1
# For standard attention, there's only one group
assert len(block_ids_per_group) == 1
assert len(block_ids_per_group[0]) == 1 # 1 token needs 1 block
def test_decode_bench_connector_large_context():
"""Test DecodeBenchConnector with large context size."""
block_size = 16
num_gpu_blocks = 1000
runner = DecodeBenchTestRunner(block_size=block_size, num_gpu_blocks=num_gpu_blocks)
# Create a request with many blocks
num_blocks = 20
num_tokens = block_size * num_blocks
token_ids = list(range(num_tokens))
req = runner.new_request(token_ids)
# Run step
_, metadata = runner.run_single_step()
assert len(metadata.reqs_to_fill) == 1
assert req.request_id in metadata.reqs_to_fill
block_ids_per_group, num_tokens_to_fill = metadata.reqs_to_fill[req.request_id]
# Should fill all tokens except the last one
expected_fill_tokens = num_tokens - 1
assert num_tokens_to_fill == expected_fill_tokens
# For standard attention, there's only one group
assert len(block_ids_per_group) == 1
block_ids = block_ids_per_group[0]
# Calculate expected number of blocks
expected_num_blocks = (expected_fill_tokens + block_size - 1) // block_size
assert len(block_ids) == expected_num_blocks
# Verify blocks were filled
for layer_name, kv_cache in runner.kv_caches.items():
for block_id in block_ids:
block_data = kv_cache[block_id]
assert torch.allclose(block_data, torch.tensor(0.015))
def test_decode_bench_connector_multiple_requests():
"""Test DecodeBenchConnector with multiple sequential requests."""
block_size = 16
num_gpu_blocks = 100
runner = DecodeBenchTestRunner(block_size=block_size, num_gpu_blocks=num_gpu_blocks)
# First request
req1 = runner.new_request([1] * (block_size * 2))
_, metadata1 = runner.run_single_step()
assert len(metadata1.reqs_to_fill) == 1
assert req1.request_id in metadata1.reqs_to_fill
# Complete first request
while runner.scheduler.running:
runner.run_single_step()
# Add EOS to finish
scheduler_output = runner.scheduler.schedule()
model_runner_output = create_model_runner_output(
reqs=runner.scheduler.running,
token_id=EOS_TOKEN_ID,
use_eos=True,
)
runner.scheduler.update_from_output(scheduler_output, model_runner_output)
# Second request - should also get filled
req2 = runner.new_request([2] * (block_size * 3))
_, metadata2 = runner.run_single_step()
assert len(metadata2.reqs_to_fill) == 1
assert req2.request_id in metadata2.reqs_to_fill
# Different request should have different metadata
_, num_tokens1 = metadata1.reqs_to_fill[req1.request_id]
_, num_tokens2 = metadata2.reqs_to_fill[req2.request_id]
assert num_tokens1 == block_size * 2 - 1
assert num_tokens2 == block_size * 3 - 1
def test_decode_bench_connector_partial_block():
"""Test DecodeBenchConnector with partial block filling."""
block_size = 16
num_gpu_blocks = 100
runner = DecodeBenchTestRunner(block_size=block_size, num_gpu_blocks=num_gpu_blocks)
# Create a request that doesn't align to block boundaries
# e.g., 2.5 blocks worth of tokens
num_tokens = block_size * 2 + block_size // 2
token_ids = [1] * num_tokens
req = runner.new_request(token_ids)
# Run step
_, metadata = runner.run_single_step()
assert len(metadata.reqs_to_fill) == 1
assert req.request_id in metadata.reqs_to_fill
block_ids_per_group, num_tokens_to_fill = metadata.reqs_to_fill[req.request_id]
# Should fill all tokens except the last one
expected_fill_tokens = num_tokens - 1
assert num_tokens_to_fill == expected_fill_tokens
# For standard attention, there's only one group
assert len(block_ids_per_group) == 1
block_ids = block_ids_per_group[0]
# Should allocate 3 blocks to hold the partial data
expected_num_blocks = 3
assert len(block_ids) == expected_num_blocks
def test_decode_bench_connector_concurrent_requests():
"""Test DecodeBenchConnector with multiple concurrent requests in the same batch."""
block_size = 16
num_gpu_blocks = 1000
runner = DecodeBenchTestRunner(block_size=block_size, num_gpu_blocks=num_gpu_blocks)
# Create multiple requests that will be batched together
req1 = runner.new_request([1] * (block_size * 2))
req2 = runner.new_request([2] * (block_size * 3))
req3 = runner.new_request([3] * (block_size * 1))
# Run first step - all requests should be filled concurrently
_, metadata = runner.run_single_step()
# All three requests should be in the metadata
assert len(metadata.reqs_to_fill) == 3
assert req1.request_id in metadata.reqs_to_fill
assert req2.request_id in metadata.reqs_to_fill
assert req3.request_id in metadata.reqs_to_fill
# Verify each request has correct fill info
block_ids_per_group1, num_tokens1 = metadata.reqs_to_fill[req1.request_id]
block_ids_per_group2, num_tokens2 = metadata.reqs_to_fill[req2.request_id]
block_ids_per_group3, num_tokens3 = metadata.reqs_to_fill[req3.request_id]
# Verify token counts (all tokens except last one)
assert num_tokens1 == block_size * 2 - 1
assert num_tokens2 == block_size * 3 - 1
assert num_tokens3 == block_size * 1 - 1
# Verify block counts for each request
assert len(block_ids_per_group1[0]) == 2 # 2 blocks
assert len(block_ids_per_group2[0]) == 3 # 3 blocks
assert len(block_ids_per_group3[0]) == 1 # 1 block
# Verify all blocks are filled in KV cache
for req_id, (block_ids_per_group, _) in metadata.reqs_to_fill.items():
block_ids = block_ids_per_group[0]
for layer_name, kv_cache in runner.kv_caches.items():
for block_id in block_ids:
block_data = kv_cache[block_id]
assert torch.allclose(block_data, torch.tensor(0.015))
# Run second step - should NOT fill again (already filled)
_, metadata2 = runner.run_single_step()
assert len(metadata2.reqs_to_fill) == 0
if __name__ == "__main__":
pytest.main([__file__, "-v"])
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_invalid_blocks_correctness.py | tests/v1/kv_connector/unit/test_invalid_blocks_correctness.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for correctness in invalid block handling.
These tests verify correct behavior in three scenarios:
1. Sync recompute case: Blocks should not be freed for running requests
that need to recompute invalid blocks
2. Sync fail case: Invalid blocks must be evicted from cache when request fails
3. Async recompute case: Invalid blocks should not be cached after transfer
"""
from collections.abc import Callable
from unittest.mock import Mock
import pytest
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.request import FinishReason, Request, RequestStatus
from .utils import (
create_model_runner_output,
create_request,
create_scheduler,
create_vllm_config,
)
pytestmark = pytest.mark.cpu_test
def _make_get_num_new_matched_tokens(
req_num_new_matched_tokens: dict[str, int],
async_load: bool,
) -> Callable[[Request, int], tuple[int, bool]]:
def get_num_new_matched_tokens(request: Request, _: int) -> tuple[int, bool]:
value = req_num_new_matched_tokens.get(request.request_id, 0)
return value, async_load
return get_num_new_matched_tokens
@pytest.fixture
def fail_scheduler():
"""scheduler with kv_load_failure_policy='fail'"""
vllm_config = create_vllm_config()
vllm_config.kv_transfer_config.kv_load_failure_policy = "fail"
return create_scheduler(vllm_config)
@pytest.fixture
def recompute_scheduler():
"""scheduler with kv_load_failure_policy='recompute'"""
vllm_config = create_vllm_config()
vllm_config.kv_transfer_config.kv_load_failure_policy = "recompute"
return create_scheduler(vllm_config)
def test_sync_recompute_blocks_not_freed_for_running_requests(
recompute_scheduler: Scheduler,
):
"""
Test sync recompute case - blocks must not be freed for running requests.
When a running request has invalid blocks and retry_policy is 'recompute':
1. Request should remain in RUNNING state
2. num_computed_tokens should be truncated to invalid block boundary
3. Blocks should NOT be freed (request still needs them for recomputation)
4. Request should remain in scheduler.requests and scheduler.running
"""
num_prompt_blocks = 100
num_external_computed_blocks = 99
invalid_block_idx = 50
num_prompt_tokens = num_prompt_blocks * recompute_scheduler.block_size
num_external_computed_tokens = (
num_external_computed_blocks * recompute_scheduler.block_size
)
request = create_request(num_tokens=num_prompt_tokens)
recompute_scheduler.add_request(request=request)
req_num_new_matched_tokens = {
request.request_id: num_external_computed_tokens,
}
# mock connector indicating sync load
recompute_scheduler.connector = Mock()
recompute_scheduler.connector.get_num_new_matched_tokens.side_effect = (
_make_get_num_new_matched_tokens(req_num_new_matched_tokens, False)
)
recompute_scheduler.connector.request_finished.return_value = (False, None)
recompute_scheduler.connector.take_events.return_value = ()
scheduler_output = recompute_scheduler.schedule()
# request should be running with sync KV load
assert len(recompute_scheduler.running) == 1
assert len(scheduler_output.scheduled_new_reqs) == 1
assert request.status == RequestStatus.RUNNING
# get the allocated block IDs before invalid blocks are reported
req_block_ids = scheduler_output.scheduled_new_reqs[0].block_ids[0]
invalid_block_ids = {req_block_ids[invalid_block_idx]}
# store original num_computed_tokens for comparison
original_num_computed_tokens = request.num_computed_tokens
model_runner_output = create_model_runner_output(
[request],
invalid_block_ids=invalid_block_ids,
use_eos=False, # not finished - should continue running
)
outputs = recompute_scheduler.update_from_output(
scheduler_output, model_runner_output
)
# critical assertions for recompute case:
# 1. request should still be RUNNING (not finished, not aborted)
assert request.status == RequestStatus.RUNNING, (
f"Request should remain RUNNING for recompute, got {request.status}"
)
# 2. num_computed_tokens should be truncated to first invalid block
expected_truncated_tokens = invalid_block_idx * recompute_scheduler.block_size
assert request.num_computed_tokens == expected_truncated_tokens, (
f"num_computed_tokens should be truncated to {expected_truncated_tokens}, "
f"got {request.num_computed_tokens}"
)
assert request.num_computed_tokens < original_num_computed_tokens, (
"num_computed_tokens should be reduced after invalid block detection"
)
# 3. no output should be generated (request is still running)
# the request should be skipped in the output loop
assert len(outputs) == 0 or request.request_id not in [
out.request_id for outs in outputs.values() for out in outs.outputs
], "No output should be generated for recompute requests"
# 4. request should still be in running queue
assert request in recompute_scheduler.running, (
"Request should remain in running queue for recomputation"
)
# 5. request should still be in scheduler.requests (not deleted)
assert request.request_id in recompute_scheduler.requests, (
"Request should not be deleted from scheduler.requests"
)
# 6. blocks should NOT be freed - verify blocks are still allocated
try:
allocated_blocks = recompute_scheduler.kv_cache_manager.get_block_ids(
request.request_id
)
assert allocated_blocks is not None
assert len(allocated_blocks[0]) > 0, (
"Blocks should still be allocated for recomputation"
)
except KeyError:
pytest.fail(
"Blocks were freed incorrectly! Running requests need their blocks "
"to recompute invalid portions."
)
# 7. verify request can be rescheduled in next step
scheduler_output_2 = recompute_scheduler.schedule()
# request should appear in the new schedule to recompute invalid blocks
scheduled_req_ids = [
req.request_id for req in scheduler_output_2.scheduled_new_reqs
]
if scheduler_output_2.num_scheduled_tokens:
scheduled_req_ids.extend(scheduler_output_2.num_scheduled_tokens.keys())
assert (
request.request_id in scheduled_req_ids or len(recompute_scheduler.running) > 0
), "Request should be reschedulable for recomputation"
def test_sync_fail_invalid_blocks_evicted(fail_scheduler: Scheduler):
"""
Test sync fail case - invalid blocks must be evicted from cache.
When a request fails with policy='fail' and has invalid blocks from sync loading:
1. Request should be finished with FINISHED_ERROR
2. Invalid blocks should be evicted from the KV cache
3. Valid blocks (if shared) should remain in cache
4. Future requests should not reuse the invalid blocks
This test verifies that invalid blocks are properly evicted to prevent
cache corruption and reuse of invalid data.
"""
num_prompt_blocks = 100
num_external_computed_blocks = 99
invalid_block_idx = 50
num_prompt_tokens = num_prompt_blocks * fail_scheduler.block_size
num_external_computed_tokens = (
num_external_computed_blocks * fail_scheduler.block_size
)
request = create_request(num_tokens=num_prompt_tokens)
fail_scheduler.add_request(request=request)
req_num_new_matched_tokens = {
request.request_id: num_external_computed_tokens,
}
# mock connector indicating sync load
fail_scheduler.connector = Mock()
fail_scheduler.connector.get_num_new_matched_tokens.side_effect = (
_make_get_num_new_matched_tokens(req_num_new_matched_tokens, False)
)
fail_scheduler.connector.request_finished.return_value = (False, None)
fail_scheduler.connector.take_events.return_value = ()
scheduler_output = fail_scheduler.schedule()
# request should be running with sync KV load
assert len(fail_scheduler.running) == 1
assert request.status == RequestStatus.RUNNING
# get allocated block IDs
req_block_ids = scheduler_output.scheduled_new_reqs[0].block_ids[0]
invalid_block_id = req_block_ids[invalid_block_idx]
invalid_block_ids = {invalid_block_id}
# verify the block is in the block pool before we report it as invalid
block = fail_scheduler.kv_cache_manager.block_pool.blocks[invalid_block_id]
assert block is not None
# report invalid blocks - request should fail
model_runner_output = create_model_runner_output(
[request],
invalid_block_ids=invalid_block_ids,
use_eos=True,
)
outputs = fail_scheduler.update_from_output(scheduler_output, model_runner_output)
# verify request is finished with error
assert request.status == RequestStatus.FINISHED_ERROR
assert request.get_finished_reason() == FinishReason.ERROR
# verify output is generated
assert len(outputs) == 1
engine_outputs = next(iter(outputs.values()))
assert len(engine_outputs.outputs) == 1
output = engine_outputs.outputs[0]
assert output.request_id == request.request_id
assert output.finish_reason == FinishReason.ERROR
# verify the request was removed from scheduler
assert request.request_id not in fail_scheduler.requests
assert len(fail_scheduler.running) == 0
# critical: verify invalid block was actually freed from cache
# this is the key assertion - the invalid block should no longer be
# tracked by the KV cache manager for this request
# if it's still there, a future request could reuse the invalid data
try:
block_ids = fail_scheduler.kv_cache_manager.get_block_ids(request.request_id)
# if we get here, check if blocks were actually freed
if block_ids is not None and len(block_ids[0]) > 0:
pytest.fail(
f"Invalid blocks still tracked for finished request! "
f"Request {request.request_id} should have been freed but "
f"still has {len(block_ids[0])} blocks allocated."
)
# blocks list exists but is empty - this is fine, they were freed
except KeyError:
# expected - request completely removed from tracking
pass
# critical: verify invalid block was evicted from prefix cache
# the block should no longer have a hash (hash is reset on eviction)
assert block.block_hash is None, (
f"Invalid block {invalid_block_id} should have been evicted from cache "
f"(hash should be None), but hash is still {block.block_hash}"
)
def test_async_recompute_blocks_not_cached_when_invalid(
recompute_scheduler: Scheduler,
):
"""
Test async recompute case - invalid blocks not cached after transfer.
When async KV loading has invalid blocks and retry_policy is 'recompute':
1. Blocks are allocated but not cached yet
2. When async transfer completes, only valid blocks should be cached
3. Invalid blocks should never enter the prefix cache
This test verifies correctness, the failed_recving_kv_req_ids protection
ensures only valid blocks are cached when the transfer completes, and we
only evict blocks from cache that are already hashed in the block table.
"""
from unittest.mock import patch
num_prompt_blocks = 100
num_external_computed_blocks = 99
invalid_block_idx = 50
num_prompt_tokens = num_prompt_blocks * recompute_scheduler.block_size
num_external_computed_tokens = (
num_external_computed_blocks * recompute_scheduler.block_size
)
request = create_request(num_tokens=num_prompt_tokens)
recompute_scheduler.add_request(request=request)
req_num_new_matched_tokens = {
request.request_id: num_external_computed_tokens,
}
# mock connector indicating async load
recompute_scheduler.connector = Mock()
recompute_scheduler.connector.get_num_new_matched_tokens.side_effect = (
_make_get_num_new_matched_tokens(req_num_new_matched_tokens, True)
)
recompute_scheduler.connector.request_finished.return_value = (False, None)
recompute_scheduler.connector.take_events.return_value = ()
scheduler_output = recompute_scheduler.schedule()
# request should be waiting for remote KVs
assert len(recompute_scheduler.waiting) == 1
assert request.status == RequestStatus.WAITING_FOR_REMOTE_KVS
assert request.num_computed_tokens == 0
# get the allocated block IDs
(req_block_ids,) = recompute_scheduler.kv_cache_manager.get_block_ids(
request.request_id
)
invalid_block_id = req_block_ids[invalid_block_idx]
invalid_block_ids = {invalid_block_id}
# get the block object to verify it's not cached yet and stays uncached
block = recompute_scheduler.kv_cache_manager.block_pool.blocks[invalid_block_id]
# verify block has no hash before invalid blocks are reported
assert block.block_hash is None, (
"Async loading blocks should not be cached yet (no hash)"
)
# report invalid blocks (transfer not finished yet)
model_runner_output = create_model_runner_output(
reqs=[],
finished_recving=None, # transfer NOT finished
invalid_block_ids=invalid_block_ids,
use_eos=False,
)
# critical: spy on evict_blocks to verify it's NOT called for async blocks
original_evict_blocks = recompute_scheduler.kv_cache_manager.evict_blocks
evict_blocks_calls = []
def evict_blocks_spy(block_ids):
evict_blocks_calls.append(set(block_ids))
return original_evict_blocks(block_ids)
with patch.object(
recompute_scheduler.kv_cache_manager, "evict_blocks", evict_blocks_spy
):
recompute_scheduler.update_from_output(scheduler_output, model_runner_output)
# verify evict_blocks was NOT called (async blocks excluded from eviction)
assert len(evict_blocks_calls) == 0, (
f"evict_blocks should not be called for async-only invalid blocks, "
f"but was called {len(evict_blocks_calls)} time(s) with {evict_blocks_calls}"
)
# request should still be waiting (not finished with error due to recompute policy)
assert request.status == RequestStatus.WAITING_FOR_REMOTE_KVS
assert request.request_id in recompute_scheduler.failed_recving_kv_req_ids
# verify num_computed_tokens was truncated to before invalid block
expected_valid_tokens = invalid_block_idx * recompute_scheduler.block_size
assert request.num_computed_tokens == expected_valid_tokens
# verify invalid block still has no hash (was not evicted)
assert block.block_hash is None, (
f"Async loading blocks shouldn't be cached or evicted. "
f"Block {invalid_block_id} hash should be None but is {block.block_hash}"
)
# now simulate async transfer completing
model_runner_output_2 = create_model_runner_output(
reqs=[],
finished_recving={request.request_id},
invalid_block_ids=None,
use_eos=False,
)
recompute_scheduler.update_from_output(scheduler_output, model_runner_output_2)
# verify request is now marked as finished receiving and ready to be processed
assert request.request_id in recompute_scheduler.finished_recving_kv_req_ids
assert request.request_id in recompute_scheduler.failed_recving_kv_req_ids
# critical: verify invalid block still has no hash before recompute
# the async transfer invalid data was never cached
assert block.block_hash is None, (
f"Invalid block {invalid_block_id} should not be cached before recompute "
f"(hash should be None), but hash is {block.block_hash}"
)
# critical end-to-end test: spy on cache_blocks to verify it's called with
# the truncated num_computed_tokens value
original_cache_blocks = recompute_scheduler.kv_cache_manager.cache_blocks
cache_blocks_calls = []
def cache_blocks_spy(req, num_tokens):
cache_blocks_calls.append((req.request_id, num_tokens))
return original_cache_blocks(req, num_tokens)
with patch.object(
recompute_scheduler.kv_cache_manager, "cache_blocks", cache_blocks_spy
):
# call schedule() again - this triggers _update_waiting_for_remote_kv()
# which should call cache_blocks with the truncated value
recompute_scheduler.schedule()
# verify cache_blocks was called with the truncated value
assert len(cache_blocks_calls) == 1, (
f"cache_blocks should be called exactly once, "
f"got {len(cache_blocks_calls)} calls"
)
cached_req_id, cached_num_tokens = cache_blocks_calls[0]
assert cached_req_id == request.request_id
assert cached_num_tokens == expected_valid_tokens, (
f"cache_blocks should be called with truncated value {expected_valid_tokens}, "
f"but was called with {cached_num_tokens}"
)
# request should now be RUNNING (scheduled immediately after transfer completes)
# the flow is: WAITING_FOR_REMOTE_KVS -> WAITING -> RUNNING in same schedule() call
assert request.status == RequestStatus.RUNNING
# num_computed_tokens should be >= expected_valid_tokens because the scheduler
# will schedule additional new tokens (up to max_num_batched_tokens) for the request
assert request.num_computed_tokens >= expected_valid_tokens, (
f"num_computed_tokens should be at least {expected_valid_tokens}, "
f"got {request.num_computed_tokens}"
)
# request should no longer be in the failed/finished receiving sets
assert request.request_id not in recompute_scheduler.failed_recving_kv_req_ids
assert request.request_id not in recompute_scheduler.finished_recving_kv_req_ids
# request should be in the running queue
assert request in recompute_scheduler.running
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_kv_load_failure_recovery.py | tests/v1/kv_connector/unit/test_kv_load_failure_recovery.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from unittest.mock import Mock
import pytest
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.request import Request, RequestStatus
from .utils import (
create_model_runner_output,
create_request,
create_scheduler,
create_vllm_config,
)
def _make_get_num_new_matched_tokens(
req_num_new_matched_tokens: dict[str, int],
async_load,
) -> Callable[[Request, int], tuple[int, bool]]:
def get_num_new_matched_tokens(request: Request, _: int) -> tuple[int, bool]:
value = req_num_new_matched_tokens.get(request.request_id, 0)
return value, async_load
return get_num_new_matched_tokens
@pytest.fixture
def scheduler():
vllm_config = create_vllm_config()
return create_scheduler(vllm_config)
@pytest.mark.parametrize(
"num_prompt_blocks,num_external_computed_blocks,invalid_block_idxs",
[
(100, 99, {0, 98}),
(100, 99, {50, 98}),
(100, 99, {98}),
],
)
def test_async_load_failure(
scheduler: Scheduler,
num_prompt_blocks: int,
num_external_computed_blocks: int,
invalid_block_idxs: set[int],
):
assert num_prompt_blocks >= num_external_computed_blocks
num_prompt_tokens = num_prompt_blocks * scheduler.block_size
num_external_computed_tokens = num_external_computed_blocks * scheduler.block_size
request1 = create_request(num_tokens=num_prompt_tokens)
scheduler.add_request(request=request1)
request2 = create_request(num_tokens=num_prompt_tokens)
scheduler.add_request(request=request2)
request3 = create_request(num_tokens=num_prompt_tokens)
scheduler.add_request(request=request3)
# Mock KV connector method.
# req_id -> num_external_computed_tokens
req_num_new_matched_tokens = {
request1.request_id: num_external_computed_tokens,
request2.request_id: num_external_computed_tokens,
request3.request_id: num_external_computed_tokens,
}
scheduler.connector = Mock()
scheduler.connector.get_num_new_matched_tokens.side_effect = (
_make_get_num_new_matched_tokens(req_num_new_matched_tokens, async_load=True)
)
scheduler.connector.take_events.return_value = ()
scheduler_output = scheduler.schedule()
assert len(scheduler.waiting) == 3
for request in scheduler.waiting:
assert request.num_computed_tokens == 0
assert request.status == RequestStatus.WAITING_FOR_REMOTE_KVS
assert scheduler.connector.get_num_new_matched_tokens.call_count == 3
# Simulate a failure in loading some of request2 blocks.
(req2_block_ids,) = scheduler.kv_cache_manager.get_block_ids(request2.request_id)
invalid_block_ids = {req2_block_ids[i] for i in invalid_block_idxs}
model_runner_output = create_model_runner_output(
reqs=[],
finished_recving={request1.request_id, request3.request_id},
invalid_block_ids=invalid_block_ids,
use_eos=True,
)
scheduler.update_from_output(scheduler_output, model_runner_output)
min_invalid_block_idx = min(invalid_block_idxs)
assert len(scheduler.waiting) == 3
for request in scheduler.waiting:
if request.request_id == request2.request_id:
assert request.num_computed_tokens == (
min_invalid_block_idx * scheduler.block_size
)
else:
assert request.num_computed_tokens == 0
assert request.status == RequestStatus.WAITING_FOR_REMOTE_KVS
assert scheduler.failed_recving_kv_req_ids == {request2.request_id}
assert scheduler.connector.get_num_new_matched_tokens.call_count == 3
@pytest.mark.parametrize(
"num_prompt_blocks,num_external_computed_blocks,invalid_block_idxs",
[
(100, 99, {0, 98}),
(100, 99, {50, 98}),
(100, 99, {98}),
],
)
def test_sync_load_failure(
scheduler: Scheduler,
num_prompt_blocks: int,
num_external_computed_blocks: int,
invalid_block_idxs: set[int],
):
assert num_prompt_blocks >= num_external_computed_blocks
num_prompt_tokens = num_prompt_blocks * scheduler.block_size
num_external_computed_tokens = num_external_computed_blocks * scheduler.block_size
request1 = create_request(num_tokens=num_prompt_tokens)
scheduler.add_request(request=request1)
request2 = create_request(num_tokens=num_prompt_tokens)
scheduler.add_request(request=request2)
request3 = create_request(num_tokens=num_prompt_tokens)
scheduler.add_request(request=request3)
# Mock KV connector method.
# req_id -> num_external_computed_tokens
req_num_new_matched_tokens = {
request1.request_id: num_external_computed_tokens,
request2.request_id: num_external_computed_tokens,
request3.request_id: num_external_computed_tokens,
}
scheduler.connector = Mock()
scheduler.connector.get_num_new_matched_tokens.side_effect = (
_make_get_num_new_matched_tokens(req_num_new_matched_tokens, async_load=False)
)
scheduler.connector.request_finished.return_value = (False, None)
scheduler.connector.take_events.return_value = ()
scheduler_output = scheduler.schedule()
# req_id -> num_computed_tokens
expected_computed_tokens = {
request1.request_id: num_external_computed_tokens,
request2.request_id: num_external_computed_tokens,
request3.request_id: num_external_computed_tokens,
}
assert len(scheduler.running) == 3
assert len(scheduler_output.scheduled_new_reqs) == 3
for request in scheduler_output.scheduled_new_reqs:
assert request.num_computed_tokens == expected_computed_tokens[request.req_id]
assert scheduler.connector.get_num_new_matched_tokens.call_count == 3
# Simulate a failure in loading some of request2 blocks.
req2_block_ids = scheduler_output.scheduled_new_reqs[1].block_ids[0]
invalid_block_ids = {req2_block_ids[i] for i in invalid_block_idxs}
model_runner_output = create_model_runner_output(
[request1, request2, request3],
invalid_block_ids=invalid_block_ids,
use_eos=True,
)
scheduler.update_from_output(scheduler_output, model_runner_output)
assert len(scheduler.running) == 1
assert scheduler.running[0].request_id == request2.request_id
assert scheduler.running[0].num_computed_tokens == (
min(invalid_block_idxs) * scheduler.block_size
)
assert scheduler.connector.get_num_new_matched_tokens.call_count == 3
assert scheduler.connector.request_finished.call_count == 2
@pytest.mark.parametrize(
"num_prompt_blocks,"
"num_external_computed_blocks,"
"num_common_prefix_blocks,"
"invalid_block_idxs",
[
(100, 99, 50, {0, 49}),
(100, 99, 50, {25, 49}),
(100, 99, 50, {49}),
],
)
def test_sync_load_failure_with_shared_blocks(
scheduler: Scheduler,
num_prompt_blocks: int,
num_external_computed_blocks: int,
num_common_prefix_blocks: int,
invalid_block_idxs: set[int],
):
assert num_prompt_blocks >= num_external_computed_blocks >= num_common_prefix_blocks
num_prompt_tokens = num_prompt_blocks * scheduler.block_size
num_external_computed_tokens = num_external_computed_blocks * scheduler.block_size
common_prefix_len = num_common_prefix_blocks * scheduler.block_size
request1 = create_request(
num_tokens=num_prompt_tokens, common_prefix_len=common_prefix_len
)
scheduler.add_request(request=request1)
request2 = create_request(
num_tokens=num_prompt_tokens, common_prefix_len=common_prefix_len
)
scheduler.add_request(request=request2)
# Mock KV connector method.
# req_id -> num_external_computed_tokens
req_num_new_matched_tokens = {
request1.request_id: num_external_computed_tokens,
}
scheduler.connector = Mock()
scheduler.connector.get_num_new_matched_tokens.side_effect = (
_make_get_num_new_matched_tokens(req_num_new_matched_tokens, async_load=False)
)
scheduler.connector.take_events.return_value = ()
scheduler_output = scheduler.schedule()
# req_id -> num_computed_tokens
expected_computed_tokens = {
request1.request_id: num_external_computed_tokens,
request2.request_id: common_prefix_len,
}
assert len(scheduler.running) == 2
assert len(scheduler_output.scheduled_new_reqs) == 2
for request in scheduler_output.scheduled_new_reqs:
assert request.num_computed_tokens == expected_computed_tokens[request.req_id]
assert scheduler.connector.get_num_new_matched_tokens.call_count == 2
# Simulate a failure in loading some of the shared blocks.
req1_block_ids = scheduler_output.scheduled_new_reqs[0].block_ids[0]
invalid_block_ids = {req1_block_ids[i] for i in invalid_block_idxs}
model_runner_output = create_model_runner_output(
[request1, request2], invalid_block_ids=invalid_block_ids, use_eos=True
)
scheduler.update_from_output(scheduler_output, model_runner_output)
# req_id -> num_computed_tokens
# all the common prefix blocks will be computed by request1
expected_computed_tokens = {
request1.request_id: min(invalid_block_idxs) * scheduler.block_size,
request2.request_id: common_prefix_len,
}
assert len(scheduler.running) == 2
for request in scheduler.running:
assert (
request.num_computed_tokens == expected_computed_tokens[request.request_id]
)
assert scheduler.connector.get_num_new_matched_tokens.call_count == 2
@pytest.mark.parametrize(
"num_prompt_blocks,num_external_computed_blocks,invalid_block_idxs",
[
(100, 99, {0, 50, 98}),
(100, 99, {98, 50, 0}),
],
)
def test_async_progressive_load_failure(
scheduler: Scheduler,
num_prompt_blocks: int,
num_external_computed_blocks: int,
invalid_block_idxs: set[int],
):
assert num_prompt_blocks >= num_external_computed_blocks
num_prompt_tokens = num_prompt_blocks * scheduler.block_size
num_external_computed_tokens = num_external_computed_blocks * scheduler.block_size
request = create_request(num_tokens=num_prompt_tokens)
scheduler.add_request(request=request)
# Mock KV connector method.
# req_id -> num_external_computed_tokens
req_num_new_matched_tokens = {
request.request_id: num_external_computed_tokens,
}
scheduler.connector = Mock()
scheduler.connector.get_num_new_matched_tokens.side_effect = (
_make_get_num_new_matched_tokens(req_num_new_matched_tokens, async_load=True)
)
scheduler.connector.take_events.return_value = ()
scheduler_output = scheduler.schedule()
assert len(scheduler.waiting) == 1
assert scheduler.waiting.peek_request().request_id == request.request_id
assert request.num_computed_tokens == 0
assert request.status == RequestStatus.WAITING_FOR_REMOTE_KVS
assert scheduler.connector.get_num_new_matched_tokens.call_count == 1
min_invalid_block_idx = max(invalid_block_idxs) + 1
# Simulate failures when progressively loading request blocks.
for invalid_block_idx in invalid_block_idxs:
(req_block_ids,) = scheduler.kv_cache_manager.get_block_ids(request.request_id)
invalid_block_ids = {req_block_ids[invalid_block_idx]}
model_runner_output = create_model_runner_output(
reqs=[],
finished_recving=set(),
invalid_block_ids=invalid_block_ids,
use_eos=True,
)
scheduler.update_from_output(scheduler_output, model_runner_output)
min_invalid_block_idx = min(min_invalid_block_idx, invalid_block_idx)
assert len(scheduler.waiting) == 1
assert scheduler.waiting.peek_request().request_id == request.request_id
assert request.num_computed_tokens == (
min_invalid_block_idx * scheduler.block_size
)
assert request.status == RequestStatus.WAITING_FOR_REMOTE_KVS
assert scheduler.failed_recving_kv_req_ids == {request.request_id}
assert scheduler.connector.get_num_new_matched_tokens.call_count == 1
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_lmcache_integration.py | tests/v1/kv_connector/unit/test_lmcache_integration.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# NOTE: if your PR has broken one of the tests here (sorry),
# kindly patch the corresponding integration in
# /vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py
# or reach out to @aposataC for assistance
# Assumption vs. Correctness Tests:
# these unit tests do *not* test correctness of LMCache-side or vLLM-side logic
# it is to ensure that assumptions LMCache makes about vLLM's interface are stable
import pytest
from vllm.platforms import current_platform
def assumes(obj, attr, is_callable=False, is_instance_of=None):
import inspect
from dataclasses import is_dataclass
assumption_msg = (
f"LMCache connector currently assumes that {obj} has a(n) {attr} attribute"
)
if hasattr(obj, attr):
attr_value = getattr(obj, attr)
elif is_dataclass(obj) and attr in getattr(obj, "__dataclass_fields__", {}):
field = obj.__dataclass_fields__[attr]
field_type = field.type
origin = getattr(field_type, "__origin__", None)
if origin is not None:
field_type = origin
attr_value = field_type
else:
raise AssertionError(assumption_msg)
if is_callable:
assumption_msg += f" and that {obj}.{attr} is a callable"
assert callable(attr_value), assumption_msg
if is_instance_of:
assumption_msg += f" and that {obj}.{attr} is an instance of {is_instance_of}"
if isinstance(attr_value, property):
fget = attr_value.fget
assert fget is not None, f"Property {obj}.{attr} has no fget"
sig = inspect.signature(fget)
ret_anno = sig.return_annotation
assert ret_anno is not inspect._empty, (
f"Property {obj}.{attr} has no return annotation"
)
assert ret_anno == is_instance_of, assumption_msg
else:
if isinstance(attr_value, type):
assert attr_value is is_instance_of, assumption_msg
else:
assert isinstance(attr_value, is_instance_of), assumption_msg
@pytest.mark.skipif(
current_platform.is_rocm(), reason="Requires libcudart.so, not available on ROCm"
)
def test_multimodal_interface():
# protect against interface changes
from vllm.multimodal.inputs import PlaceholderRange
assumes(PlaceholderRange, "offset")
assumes(PlaceholderRange, "length")
@pytest.mark.skipif(
current_platform.is_rocm(), reason="Requires libcudart.so, not available on ROCm"
)
def test_config_interface():
# protect against interface changes
from vllm.config import VllmConfig
from vllm.config.cache import CacheConfig
from vllm.config.kv_transfer import KVTransferConfig
from vllm.config.model import ModelConfig
from vllm.config.parallel import ParallelConfig
assumes(VllmConfig, "model_config")
assumes(VllmConfig, "cache_config")
assumes(VllmConfig, "parallel_config")
assumes(VllmConfig, "kv_transfer_config")
assumes(KVTransferConfig, "kv_role")
assumes(KVTransferConfig, "kv_connector_extra_config")
assumes(ModelConfig, "use_mla", is_instance_of=bool)
assumes(ModelConfig, "dtype")
assumes(ModelConfig, "max_model_len")
assumes(ModelConfig, "get_vocab_size", is_callable=True)
assumes(ModelConfig, "get_num_attention_heads", is_callable=True)
assumes(ModelConfig, "get_num_kv_heads", is_callable=True)
assumes(ModelConfig, "get_head_size", is_callable=True)
assumes(ModelConfig, "get_num_layers", is_callable=True)
assumes(ModelConfig, "get_num_kv_heads", is_callable=True)
assumes(ModelConfig, "model")
assumes(ParallelConfig, "world_size")
assumes(ParallelConfig, "rank")
assumes(ParallelConfig, "tensor_parallel_size")
assumes(ParallelConfig, "pipeline_parallel_size")
assumes(ParallelConfig, "data_parallel_size_local")
assumes(ParallelConfig, "data_parallel_rank_local")
assumes(CacheConfig, "cache_dtype")
assumes(CacheConfig, "block_size")
assumes(CacheConfig, "gpu_memory_utilization")
# kv metadata minimal case
from vllm.utils.torch_utils import get_kv_cache_torch_dtype
model_config = ModelConfig(dtype="bfloat16")
parallel_config = ParallelConfig()
cache_config = CacheConfig(cache_dtype="bfloat16")
kv_dtype = get_kv_cache_torch_dtype(cache_config.cache_dtype, model_config.dtype)
use_mla = False
chunk_size = 256
num_layer = model_config.get_num_layers(parallel_config)
num_kv_head = model_config.get_num_kv_heads(parallel_config)
head_size = model_config.get_head_size()
kv_shape = (num_layer, 1 if use_mla else 2, chunk_size, num_kv_head, head_size)
# dummy lmcache metadata creation example
_ = (
model_config.model,
parallel_config.world_size,
parallel_config.rank,
"vllm",
kv_dtype,
kv_shape,
use_mla,
)
@pytest.mark.skipif(
current_platform.is_rocm(), reason="Requires libcudart.so, not available on ROCm"
)
def test_request_interface():
# protect against interface changes
from types import NoneType
from vllm.sampling_params import SamplingParams
from vllm.v1.request import Request
req = Request(
request_id="test_request",
prompt_token_ids=[1, 2, 3],
sampling_params=SamplingParams(max_tokens=10),
pooling_params=None,
eos_token_id=100,
lora_request=None,
)
assumes(req, "mm_features", is_instance_of=(list, NoneType))
assumes(req, "request_id")
assumes(req, "priority")
assumes(req, "prompt_token_ids")
assumes(req, "sampling_params")
assumes(req, "num_tokens")
assumes(req, "kv_transfer_params", is_instance_of=(dict, NoneType))
from vllm.multimodal.inputs import MultiModalFeatureSpec
assumes(MultiModalFeatureSpec, "identifier")
assumes(MultiModalFeatureSpec, "mm_position")
def test_new_request_interface():
# protect against interface changes
from vllm.v1.core.sched.output import NewRequestData
assumes(NewRequestData, "req_id")
assumes(NewRequestData, "block_ids")
assumes(NewRequestData, "prompt_token_ids")
assumes(NewRequestData, "sampling_params")
def test_sampling_params_interface():
# protect against interface changes
from vllm.sampling_params import SamplingParams
assumes(SamplingParams, "extra_args")
# dumb example use case in LMCache
kv_transfer_params = {
"lmcache.tag.user": "example_user_1",
"lmcache.ttl": 60,
}
sampling_params = SamplingParams(
extra_args={"kv_transfer_params": kv_transfer_params}
)
assert sampling_params.extra_args["kv_transfer_params"] == kv_transfer_params
def test_tp_interface():
# protect against interface changes
import inspect
from vllm.distributed.parallel_state import get_tp_group
sig = inspect.signature(get_tp_group)
GroupCoordinator = sig.return_annotation
assumes(GroupCoordinator, "broadcast", is_callable=True)
assumes(GroupCoordinator, "broadcast_object", is_callable=True)
def test_forward_context_interface():
# protect against interface changes
from vllm.forward_context import ForwardContext
assumes(ForwardContext, "no_compile_layers", is_instance_of=dict)
assumes(ForwardContext, "virtual_engine")
assumes(ForwardContext, "attn_metadata")
def test_scheduler_output_interface():
# protect against interface changes
from vllm.v1.core.sched.output import SchedulerOutput
assumes(SchedulerOutput, "finished_req_ids")
assumes(SchedulerOutput, "scheduled_new_reqs", is_instance_of=list)
assumes(SchedulerOutput, "num_scheduled_tokens", is_instance_of=dict)
assumes(SchedulerOutput, "scheduled_cached_reqs")
from vllm.v1.core.sched.output import CachedRequestData
assumes(CachedRequestData, "req_ids", is_instance_of=list)
assumes(CachedRequestData, "new_block_ids", is_instance_of=list)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_offloading_connector.py | tests/v1/kv_connector/unit/test_offloading_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
from typing import Any
from unittest.mock import MagicMock
import pytest
import torch
from vllm import SamplingParams
from vllm.config import KVTransferConfig, VllmConfig
from vllm.distributed.kv_events import BlockRemoved, BlockStored
from vllm.distributed.kv_transfer.kv_connector.v1 import KVConnectorRole
from vllm.distributed.kv_transfer.kv_connector.v1.offloading_connector import (
OffloadingConnector,
OffloadingConnectorMetadata,
)
from vllm.forward_context import ForwardContext
from vllm.utils.hashing import sha256
from vllm.v1.attention.backends.flash_attn import FlashAttentionBackend
from vllm.v1.core.kv_cache_utils import (
BlockHash,
get_request_block_hasher,
init_none_hash,
)
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.kv_offload.abstract import (
LoadStoreSpec,
OffloadingEvent,
OffloadingManager,
PrepareStoreOutput,
)
from vllm.v1.kv_offload.mediums import GPULoadStoreSpec
from vllm.v1.kv_offload.spec import OffloadingSpec
from vllm.v1.kv_offload.worker.worker import (
OffloadingHandler,
TransferResult,
TransferSpec,
)
from vllm.v1.outputs import EMPTY_MODEL_RUNNER_OUTPUT, KVConnectorOutput
from vllm.v1.request import Request
from .utils import (
EOS_TOKEN_ID,
create_model_runner_output,
create_scheduler,
create_vllm_config,
)
class MockLoadStoreSpec(LoadStoreSpec):
def __init__(self, block_hashes: Iterable[BlockHash]):
self.block_hashes: list[BlockHash] = list(block_hashes)
@staticmethod
def medium() -> str:
return "Mock"
def __repr__(self) -> str:
return repr(self.block_hashes)
class MockOffloadingHandler(OffloadingHandler):
def __init__(self):
self.completed_transfers: list[TransferResult] = []
self.completed_specs: list[TransferSpec] = []
def get_finished(self) -> list[TransferResult]:
finished = self.completed_transfers
self.completed_transfers = []
return finished
def transfer_async(self, job_id: int, spec: TransferSpec) -> bool:
self.completed_specs.append(spec)
self.completed_transfers.append((job_id, True))
return True
class MockOffloadingSpec(OffloadingSpec):
def __init__(self, vllm_config: VllmConfig):
super().__init__(vllm_config)
self.manager = MagicMock(spec=OffloadingManager)
self.manager.lookup.return_value = 0
self.manager.prepare_load = lambda block_hashes: (
MockLoadStoreSpec(block_hashes)
)
self.handler = MockOffloadingHandler()
def get_manager(self) -> OffloadingManager:
return self.manager
def get_handlers(
self, _, __
) -> Iterator[tuple[type[LoadStoreSpec], type[LoadStoreSpec], OffloadingHandler]]:
yield GPULoadStoreSpec, MockLoadStoreSpec, self.handler
yield MockLoadStoreSpec, GPULoadStoreSpec, self.handler
def get_completed_transfers(self) -> list[TransferSpec]:
specs = self.handler.completed_specs
self.handler.completed_specs = []
return specs
@dataclass
class TransferSummary:
gpu_block_indices: list[int]
offload_addresses: list[Any]
class RequestRunner:
def __init__(
self, offloaded_block_size: int, gpu_block_size: int, num_gpu_blocks: int
):
self.offloaded_block_size: int = offloaded_block_size
self.gpu_block_size: int = gpu_block_size
self.num_gpu_blocks: int = num_gpu_blocks
self.req_id: int = -1
vllm_config = create_vllm_config(
block_size=gpu_block_size, max_num_batched_tokens=1000
)
vllm_config.kv_transfer_config = KVTransferConfig(
kv_connector="OffloadingConnector",
kv_role="kv_both",
kv_connector_extra_config={
"spec_name": "MockOffloadingSpec",
"spec_module_path": "tests.v1.kv_connector.unit.test_offloading_connector", # noqa: E501
"block_size": offloaded_block_size,
},
)
self.scheduler: Scheduler = create_scheduler(
vllm_config, num_blocks=num_gpu_blocks
)
self.worker_connector = OffloadingConnector(vllm_config, KVConnectorRole.WORKER)
# register worker kv_caches to enable OffloadingWorker creations
self.worker_connector.register_cross_layers_kv_cache(
kv_cache=torch.empty(0),
attn_backend=FlashAttentionBackend,
)
# extract connector of scheduler
scheduler_connector = self.scheduler.connector
assert scheduler_connector is not None
assert isinstance(scheduler_connector, OffloadingConnector)
self.scheduler_connector: OffloadingConnector = scheduler_connector
# extract mocked OffloadingManager of scheduler connector
connector_scheduler = scheduler_connector.connector_scheduler
assert connector_scheduler is not None
manager = connector_scheduler.manager
assert isinstance(manager, MagicMock)
self.manager: MagicMock = manager
assert connector_scheduler.gpu_block_size == gpu_block_size
assert connector_scheduler.offloaded_block_size == offloaded_block_size
# extract OffloadingSpec of worker_connector
connector_worker = self.worker_connector.connector_worker
assert connector_worker is not None
offloading_spec = connector_worker.spec
assert isinstance(offloading_spec, MockOffloadingSpec)
self.offloading_spec: MockOffloadingSpec = offloading_spec
# mapping (offloading address) -> gpu_block_index
self.offloaded: dict[Any, int] = {}
self.pending_loads_count: int = 0
self.pending_stores_count: int = 0
self.completed_loads: list[TransferSummary] = []
self.completed_stores: list[TransferSummary] = []
# maps {block_id: block_offset}
self.gpu_block_index: dict[int, int] = {}
init_none_hash(sha256)
self._block_hasher = get_request_block_hasher(gpu_block_size, sha256)
self._dummy_ctx: ForwardContext = ForwardContext(
no_compile_layers={}, attn_metadata={}, virtual_engine=0
)
def new_request(self, token_ids: list[int]):
assert not self.scheduler.requests
self.req_id += 1
req = Request(
request_id=str(self.req_id),
prompt_token_ids=token_ids,
sampling_params=SamplingParams(max_tokens=1000),
pooling_params=None,
eos_token_id=EOS_TOKEN_ID,
block_hasher=self._block_hasher,
)
self.scheduler.add_request(req)
def _wait_for_transfers(self):
block_size_factor = self.offloaded_block_size // self.gpu_block_size
while self.pending_loads_count or self.pending_stores_count:
for transfer_spec in self.offloading_spec.get_completed_transfers():
src_spec, dst_spec = transfer_spec
if isinstance(src_spec, GPULoadStoreSpec):
store = True
gpu_spec = src_spec
offload_spec = dst_spec
else:
store = False
gpu_spec = dst_spec
offload_spec = src_spec
assert isinstance(offload_spec, MockLoadStoreSpec)
assert isinstance(gpu_spec, GPULoadStoreSpec)
gpu_block_indices: list[int] = []
for block_id in gpu_spec.block_ids:
gpu_block_indices.append(self.gpu_block_index[block_id.item()])
# list of (block_hash, sub_block_offset)
offload_addresses: list[Any] = []
for block_hash in offload_spec.block_hashes:
for sub_block_idx in range(block_size_factor):
offload_addresses.append((block_hash, sub_block_idx))
if store:
assert len(gpu_block_indices) == len(offload_addresses)
self.completed_stores.append(
TransferSummary(gpu_block_indices, offload_addresses)
)
self.pending_stores_count -= 1
else:
remainder_sub_block_count = len(offload_addresses) - len(
gpu_block_indices
)
assert remainder_sub_block_count >= 0
assert remainder_sub_block_count < block_size_factor
offload_addresses = offload_addresses[remainder_sub_block_count:]
self.completed_loads.append(
TransferSummary(gpu_block_indices, offload_addresses)
)
self.pending_loads_count -= 1
def _update_gpu_block_idx(self):
for blocks in self.scheduler.kv_cache_manager.coordinator.single_type_managers[
0
].req_to_blocks.values():
for block_idx, block in enumerate(blocks):
self.gpu_block_index[block.block_id] = block_idx
def _run(self, decoded_tokens: list[int]):
"""
Runs multiple engine (scheduler + worker) steps.
Assumes a single request is running.
Args:
decoded_tokens: the tokens to yield at each step.
"""
tokens_iter = iter(decoded_tokens)
token_id = next(tokens_iter, None)
while token_id is not None:
assert self.scheduler.requests
scheduler_output = self.scheduler.schedule()
self._update_gpu_block_idx()
kv_connector_metadata = scheduler_output.kv_connector_metadata
assert kv_connector_metadata is not None
assert isinstance(kv_connector_metadata, OffloadingConnectorMetadata)
self.pending_loads_count += len(kv_connector_metadata.reqs_to_load)
self.pending_stores_count += len(kv_connector_metadata.reqs_to_store)
self.worker_connector.bind_connector_metadata(kv_connector_metadata)
self.worker_connector.start_load_kv(self._dummy_ctx)
if scheduler_output.total_num_scheduled_tokens > 0:
self.worker_connector.wait_for_save()
finished_sending, finished_recving = self.worker_connector.get_finished(
scheduler_output.finished_req_ids
)
self.worker_connector.clear_connector_metadata()
model_runner_output = create_model_runner_output(
reqs=self.scheduler.running,
finished_sending=finished_sending,
finished_recving=finished_recving,
token_id=token_id,
)
if self.scheduler.running:
token_id = next(tokens_iter, None)
self.scheduler.update_from_output(scheduler_output, model_runner_output)
self._wait_for_transfers()
# run one more step to update finished stored
if EOS_TOKEN_ID in decoded_tokens:
assert not self.scheduler.running
while self.scheduler.requests:
scheduler_output = self.scheduler.schedule()
finished_sending, finished_recving = self.worker_connector.get_finished(
scheduler_output.finished_req_ids
)
assert not finished_recving
model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
model_runner_output.kv_connector_output = KVConnectorOutput(
finished_sending=finished_sending
)
self.scheduler.update_from_output(scheduler_output, model_runner_output)
def run(
self,
decoded_tokens: list[int],
expected_stored_gpu_block_indexes: tuple[int, ...] = (),
expected_loaded_gpu_block_indexes: tuple[int, ...] = (),
):
"""
Runs multiple engine (scheduler + worker) steps.
Assumes a single request is running.
Args:
decoded_tokens: the tokens to yield at each step.
expected_stored_gpu_block_indexes: GPU block indexes
that are expected to be written during the run.
expected_loaded_gpu_block_indexes: GPU block indexes
that are expected to be loaded during the run.
"""
self.manager.reset_mock()
self._run(decoded_tokens)
loaded_gpu_block_indexes: set[int] = set()
for transfer in self.completed_loads:
for gpu_block_idx, offloaded_address in zip(
transfer.gpu_block_indices, transfer.offload_addresses
):
loaded_gpu_block_indexes.add(gpu_block_idx)
assert gpu_block_idx == self.offloaded[offloaded_address]
assert set(expected_loaded_gpu_block_indexes) == loaded_gpu_block_indexes
self.completed_loads.clear()
stored_gpu_block_indexes: set[int] = set()
for transfer in self.completed_stores:
for gpu_block_idx, offloaded_address in zip(
transfer.gpu_block_indices, transfer.offload_addresses
):
stored_gpu_block_indexes.add(gpu_block_idx)
self.offloaded[offloaded_address] = gpu_block_idx
assert set(expected_stored_gpu_block_indexes) == stored_gpu_block_indexes
self.completed_stores.clear()
@pytest.fixture
def request_runner():
runners = []
def runner_factory(offloaded_block_size, gpu_block_size, num_gpu_blocks):
runner = RequestRunner(
offloaded_block_size=offloaded_block_size,
gpu_block_size=gpu_block_size,
num_gpu_blocks=num_gpu_blocks,
)
runners.append(runner)
return runner
yield runner_factory # pass factory to the test
def generate_store_output(block_hashes: Iterable[BlockHash]):
block_hashes = list(block_hashes)
return PrepareStoreOutput(
block_hashes_to_store=list(block_hashes),
store_spec=MockLoadStoreSpec(block_hashes),
block_hashes_evicted=[],
)
def test_offloading_connector(request_runner):
offloaded_block_size = 12
gpu_block_size = 4
num_gpu_blocks = 100
block_size_factor = offloaded_block_size // gpu_block_size
runner = request_runner(
offloaded_block_size=offloaded_block_size,
gpu_block_size=gpu_block_size,
num_gpu_blocks=num_gpu_blocks,
)
# 3 blocks, store just the middle block (skip first and last)
# blocks = [0, 1, 2], [3, 4, 5], [6, 7, 8]
runner.new_request(token_ids=[0] * offloaded_block_size * 3)
runner.manager.prepare_store.side_effect = (
lambda block_hashes: generate_store_output(list(block_hashes)[1:2])
)
runner.run(decoded_tokens=[0], expected_stored_gpu_block_indexes=(3, 4, 5))
# add block missing 1 token -> no offload
runner.run(decoded_tokens=[0] * (offloaded_block_size - 1))
runner.manager.prepare_store.assert_not_called()
# +1 token -> single block, fail prepare_store
runner.manager.prepare_store.side_effect = lambda block_hashes: None
runner.run(decoded_tokens=[0])
runner.manager.prepare_store.assert_called()
# 1 more block, now set block_hashes_to_store = []
runner.manager.prepare_store.side_effect = (
lambda block_hashes: generate_store_output([])
)
runner.run(decoded_tokens=[0] * offloaded_block_size)
# 1 more block, now check touch was called with all 6 blocks
runner.manager.prepare_store.side_effect = (
lambda block_hashes: generate_store_output(block_hashes)
)
runner.run(
decoded_tokens=[0] * offloaded_block_size,
expected_stored_gpu_block_indexes=(15, 16, 17),
)
runner.manager.touch.assert_called()
block_hashes1 = list(runner.manager.touch.call_args.args[0])
assert len(block_hashes1) == 6
# terminate request
runner.run(decoded_tokens=[EOS_TOKEN_ID])
# create a new request differing only on the last token
runner.new_request(token_ids=[0] * (offloaded_block_size * 6 - 1) + [1])
runner.run(
decoded_tokens=[0],
expected_stored_gpu_block_indexes=tuple(range(6 * block_size_factor)),
)
runner.manager.touch.assert_called()
block_hashes2 = list(runner.manager.touch.call_args.args[0])
assert len(block_hashes2) == 6
# verify hashes are the same, except for the last block
assert block_hashes1[:5] == block_hashes2[:5]
assert block_hashes1[5] != block_hashes2[5]
# terminate request
runner.run(decoded_tokens=[EOS_TOKEN_ID])
# full_block_tokens - num_computed_tokens < offloaded_block_size
runner.new_request(
token_ids=[0] * gpu_block_size + [1] * (offloaded_block_size - gpu_block_size)
)
runner.manager.prepare_store.side_effect = (
lambda block_hashes: generate_store_output([])
)
runner.run(decoded_tokens=[EOS_TOKEN_ID])
runner.manager.lookup.assert_not_called()
# single block lookup with no hits
runner.new_request(token_ids=[1] * offloaded_block_size)
runner.manager.prepare_store.side_effect = (
lambda block_hashes: generate_store_output([])
)
runner.run(decoded_tokens=[EOS_TOKEN_ID])
runner.manager.lookup.assert_called()
assert len(list(runner.manager.lookup.call_args.args[0])) == 1
# single block lookup with a hit
runner.scheduler.reset_prefix_cache()
runner.new_request(token_ids=[0] * offloaded_block_size)
runner.manager.prepare_store.side_effect = (
lambda block_hashes: generate_store_output([])
)
runner.manager.lookup.return_value = 1
runner.run(
decoded_tokens=[EOS_TOKEN_ID], expected_loaded_gpu_block_indexes=(0, 1, 2)
)
# single block lookup with a hit in a middle block
runner.new_request(
token_ids=[0] * offloaded_block_size * 2 + [1] * offloaded_block_size
)
runner.manager.prepare_store.side_effect = (
lambda block_hashes: generate_store_output([])
)
runner.manager.lookup.return_value = 1
runner.run(
decoded_tokens=[EOS_TOKEN_ID], expected_loaded_gpu_block_indexes=(3, 4, 5)
)
# test take_events
def to_hashes(int_hashes: list[int]) -> list[BlockHash]:
return [BlockHash(str(i).encode()) for i in int_hashes]
def take_events() -> Iterable[OffloadingEvent]:
yield OffloadingEvent(
block_hashes=to_hashes([1, 2, 3]), block_size=16, medium="A", removed=False
)
yield OffloadingEvent(
block_hashes=to_hashes([4, 5, 6]), block_size=32, medium="B", removed=True
)
runner.manager.take_events.side_effect = take_events
events = list(runner.scheduler_connector.take_events())
assert len(events) == 2
event = events[0]
assert isinstance(event, BlockStored)
assert event.block_hashes == to_hashes([1, 2, 3])
assert event.block_size == 16
assert event.medium == "A"
assert event.token_ids == []
assert event.parent_block_hash is None
assert event.lora_id is None
assert event.lora_name is None
event = events[1]
assert isinstance(event, BlockRemoved)
assert event.block_hashes == to_hashes([4, 5, 6])
assert event.medium == "B"
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_remote_decode_lifecycle.py | tests/v1/kv_connector/unit/test_remote_decode_lifecycle.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
import pytest
from vllm.v1.outputs import EMPTY_MODEL_RUNNER_OUTPUT, KVConnectorOutput
from vllm.v1.request import FinishReason, RequestStatus
from .utils import (
assert_scheduler_empty,
create_model_runner_output,
create_request,
create_scheduler,
create_vllm_config,
)
pytestmark = pytest.mark.cpu_test
def test_basic_lifecycle():
    """Test lifecycle of a Remote Decode request.

    Drives the prefill-side scheduler through three steps:
    (1) prefill the prompt and finish after a single token (max_tokens=1),
    (2) hand the finished request over to the persistent batch while its KV
        blocks stay pinned for the remote transfer,
    (3) deliver the "finished sending" notification, which finally frees the
        blocks; the test then asserts no scheduler state leaked.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)
    # 2 Full Blocks and 1 Half Block.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
    # max_tokens=1 makes the request finish immediately after prefill.
    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        max_tokens=1,
        num_tokens=NUM_TOKENS,
        do_remote_decode=True,
    )
    scheduler.add_request(request)
    request_id = request.request_id
    # STEP (1): Prefill.
    # (1a): schedule()
    scheduler_output = scheduler.schedule()
    assert len(scheduler.requests) == 1
    assert len(scheduler.running) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 1
    # (1b): execute_model()
    model_runner_output = create_model_runner_output(reqs=[request])
    # (1c): update_from_output()
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )
    # Ensure the request is finished after 1 token.
    assert request.is_finished()
    assert request.status == RequestStatus.FINISHED_LENGTH_CAPPED
    output = engine_core_outputs[0].outputs[0]
    assert output.finish_reason == FinishReason.LENGTH
    # Remote-decode requests must carry transfer params for the decode side.
    assert output.kv_transfer_params is not None
    # Request freed in Scheduler and in Persistent Batch ...
    assert request_id in scheduler.finished_req_ids
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 0
    # ... but blocks should not be freed.
    assert len(scheduler.requests) == 1
    blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_id]
    for block in blocks:
        # Blocks stay pinned (ref_cnt == 1) until the KV transfer completes.
        assert block.ref_cnt == 1
    # STEP (2): Send Finished to PB.
    # (2a): schedule() - pass finished request to PB.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.requests) == 1
    assert len(scheduler.running) == 0
    assert len(scheduler_output.finished_req_ids) == 1
    assert request_id in scheduler_output.finished_req_ids
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 0
    # finished_req_ids is drained once forwarded to the scheduler output.
    assert len(scheduler.finished_req_ids) == 0
    # (2b): execute_model()
    model_runner_output = EMPTY_MODEL_RUNNER_OUTPUT
    # (2c): update_from_output()
    scheduler.update_from_output(scheduler_output, model_runner_output)
    # STEP (3): Finished sending.
    # (3a): schedule() - pass finished request to PB.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.requests) == 1
    assert len(scheduler.running) == 0
    assert len(scheduler_output.finished_req_ids) == 0
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 0
    assert len(scheduler.finished_req_ids) == 0
    # (3b): execute_model()
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    model_runner_output.kv_connector_output = KVConnectorOutput(
        finished_sending={request_id}
    )
    # (3c): update_from_output()
    scheduler.update_from_output(scheduler_output, model_runner_output)
    # Confirm we do not have any memory leaks after req lifecycle.
    assert_scheduler_empty(scheduler)
def test_short_prompt_lifecycle():
    """Test lifecycle of a Remote Decode request with short prompt."""
    config = create_vllm_config()
    sched = create_scheduler(config)
    # Prompt deliberately shorter than a single KV block.
    block_size = config.cache_config.block_size
    prompt_len = block_size // 2
    req = create_request(
        request_id=1,
        block_size=block_size,
        max_tokens=1,
        num_tokens=prompt_len,
        do_remote_decode=True,
    )
    sched.add_request(req)
    # Prefill step: schedule, run the model, process the output.
    sched_out = sched.schedule()
    assert len(sched.requests) == 1
    assert len(sched.running) == 1
    assert len(sched_out.scheduled_new_reqs) == 1
    runner_out = create_model_runner_output(reqs=[req])
    # Even though tokens < block_size, there will be kv xfer for partial block.
    outputs = sched.update_from_output(sched_out, runner_out)
    xfer_params = outputs[0].outputs[0].kv_transfer_params
    assert len(xfer_params["remote_block_ids"]) == 1
    # Mark the send as finished so the persistent-batch state is cleared,
    # then confirm nothing leaks in the scheduler.
    sched_out = sched.schedule()
    runner_out = create_model_runner_output(
        reqs=[req], finished_sending={req.request_id}
    )
    sched.update_from_output(sched_out, runner_out)
    assert_scheduler_empty(sched)
def test_prefix_cache_lifecycle():
    """Test that remote decode params still work with a prefix cache hit.

    First primes the prefix cache with a normal request, then submits a
    shorter remote-decode request that hits the cache, and asserts the
    transfer params still enumerate *all* its blocks (cached + partial).
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)
    # Prime the KVCache.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 3
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
    request_normal = create_request(
        request_id=1, block_size=BLOCK_SIZE, num_tokens=NUM_TOKENS
    )
    scheduler.add_request(request_normal)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_normal], use_eos=True
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    # Extra schedule/update round lets the finished request drain fully.
    scheduler_output = scheduler.schedule()
    scheduler.update_from_output(scheduler_output, EMPTY_MODEL_RUNNER_OUTPUT)
    #####################
    # Actual Test: confirm we send all blocks.
    # Step (1): Send the KV Transfer.
    # Remote request is one full block shorter, so its prefix is fully cached.
    NUM_EXTERNAL_FULL_BLOCKS -= 1
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
    # NOTE(review): request_id=1 is reused from the (already finished)
    # priming request — presumably safe here; confirm create_request
    # derives unique internal ids if this ever matters.
    request_remote = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_decode=True,
    )
    scheduler.add_request(request_remote)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_remote])
    eco = scheduler.update_from_output(scheduler_output, model_runner_output)
    kv_transfer_params = eco[0].outputs[0].kv_transfer_params
    # Ensure we send all block ids, including the partial blocks,
    # even if there is a cache hit.
    assert len(kv_transfer_params["remote_block_ids"]) == (NUM_EXTERNAL_FULL_BLOCKS + 1)
    # STEP (2): Ensure it is freed.
    scheduler_output = scheduler.schedule()
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    model_runner_output.kv_connector_output = KVConnectorOutput(
        finished_sending={request_remote.request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert_scheduler_empty(scheduler)
def test_abort_during_kv_transfer():
    """Test aborting request does not release blocks for remote decode.

    An aborted remote-decode request must keep its KV blocks pinned until
    the connector reports the send finished; only then may the scheduler
    free everything.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)
    # Prime the KVCache.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_decode=True,
    )
    scheduler.add_request(request)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    scheduler_output = scheduler.schedule()
    scheduler.update_from_output(scheduler_output, EMPTY_MODEL_RUNNER_OUTPUT)
    # Request removed from PB but blocks should not be freed.
    assert len(scheduler.requests) == 1
    # Abort the request, and check the blocks are still not freed
    scheduler.finish_requests([request.request_id], RequestStatus.FINISHED_ABORTED)
    assert len(scheduler.requests) == 1
    # Simulate a finished sending notification
    scheduler_output = scheduler.schedule()
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    # Fix: pass a set (not a list) for finished_sending, matching the type
    # used by every other KVConnectorOutput construction in these tests.
    model_runner_output.kv_connector_output = KVConnectorOutput(
        finished_sending={request.request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert_scheduler_empty(scheduler)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_config.py | tests/v1/kv_connector/unit/test_config.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for KV cache offloading configuration."""
import pytest
from vllm.config import CacheConfig, KVTransferConfig, ParallelConfig, VllmConfig
pytestmark = pytest.mark.cpu_test
@pytest.mark.parametrize(
    "kv_offloading_backend,kv_offloading_size,tp,pp,expected_backend,expected_bytes",
    [
        ("native", 4.0, 1, 1, "OffloadingConnector", 4.0 * (1 << 30)),
        # bytes per rank: 8.0 GiB / (2 * 2) = 2.0 GiB
        ("native", 8.0, 2, 2, "OffloadingConnector", 8.0 * (1 << 30) / 4),
        ("lmcache", 4.0, 1, 1, "LMCacheConnectorV1", 4.0),
        # size per rank: 8.0 GiB / (2 * 2) = 2.0 GiB
        ("lmcache", 8.0, 2, 2, "LMCacheConnectorV1", 2.0),
        (None, None, 1, 1, None, None),
    ],
)
def test_kv_connector(
    kv_offloading_backend, kv_offloading_size, tp, pp, expected_backend, expected_bytes
):
    """Check KV-offloading settings translate into the right connector config.

    Verifies that CacheConfig.kv_offloading_backend/size produce the expected
    kv_transfer_config (connector name, role, and per-rank sizing) under
    different TP/PP world sizes, and that no config is created when the
    feature is disabled.
    """
    kv_transfer_config = (
        KVTransferConfig(kv_connector_extra_config={"existing_key": "existing_value"})
        if expected_backend is not None
        else None
    )
    vllm_config = VllmConfig(
        cache_config=CacheConfig(
            kv_offloading_backend=kv_offloading_backend,
            kv_offloading_size=kv_offloading_size,
        ),
        kv_transfer_config=kv_transfer_config,
        parallel_config=ParallelConfig(
            tensor_parallel_size=tp, pipeline_parallel_size=pp
        ),
    )
    # No KV transfer config expected when offloading is disabled.
    if expected_backend is None:
        # Fix: compare against None directly — `is expected_backend` only
        # happened to work because expected_backend is None on this branch.
        assert vllm_config.kv_transfer_config is None
        return
    kv_transfer_config = vllm_config.kv_transfer_config
    kv_connector_extra_config = kv_transfer_config.kv_connector_extra_config
    assert kv_transfer_config.kv_connector == expected_backend
    assert kv_transfer_config.kv_role == "kv_both"
    if kv_offloading_backend == "native":
        assert kv_connector_extra_config["kv_bytes_per_rank"] == expected_bytes
        assert kv_connector_extra_config["num_cpu_blocks"] == 0
        # Existing config should be preserved
        assert kv_connector_extra_config["existing_key"] == "existing_value"
    elif kv_offloading_backend == "lmcache":
        assert kv_connector_extra_config["lmcache.local_cpu"] is True
        assert kv_connector_extra_config["lmcache.max_local_cpu_size"] == expected_bytes
        # Existing config should be replaced
        assert "existing_key" not in kv_connector_extra_config
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_multi_connector.py | tests/v1/kv_connector/unit/test_multi_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import filecmp
import shutil
import tempfile
from pathlib import Path
from typing import Any
import pytest
from vllm import LLM, SamplingParams
from vllm.config import KVTransferConfig
from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory
from vllm.distributed.kv_transfer.kv_connector.v1.base import KVConnectorBase_V1
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import KVConnectorStats
from vllm.distributed.kv_transfer.kv_connector.v1.multi_connector import (
MultiConnector,
MultiKVConnectorStats,
)
from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import (
NixlKVConnectorStats,
)
from vllm.platforms import current_platform
MODEL_NAME = "meta-llama/Llama-3.2-1B-Instruct"
PROMPT_CONTEXT = "Hi " * 100
PROMPTS = [
PROMPT_CONTEXT + "Hello, my name is",
PROMPT_CONTEXT + "The capital of France is",
]
SAMPLING_PARAMS = SamplingParams(temperature=0, max_tokens=20)
# Test connector with custom stats for testing MultiConnector
class MockConnectorStats(KVConnectorStats):
    """Stand-in stats type used to exercise MultiConnector stats plumbing."""
    # Inherits all behavior; only the distinct type identity matters here.
class MockConnector(KVConnectorBase_V1):
    """Mock connector that implements build_kv_connector_stats for testing."""

    @classmethod
    def build_kv_connector_stats(
        cls, data: dict[str, Any] | None = None
    ) -> KVConnectorStats | None:
        # No payload means this connector reports no custom stats.
        if data is None:
            return None
        return MockConnectorStats(data=data)
# Register the mock connector so KVConnectorFactory can resolve the name
# "MockConnector" when MultiConnector reconstructs per-connector stats below.
KVConnectorFactory.register_connector("MockConnector", __name__, MockConnector.__name__)
# Helper function to compare directories recursively
def _compare_directories(dir1: Path, dir2: Path) -> bool:
    """Compares two directories recursively for identical content.

    Returns True iff both trees have the same entries and every common file
    is byte-identical; any differences are printed for debugging.
    """
    dcmp = filecmp.dircmp(dir1, dir2)
    # Fix: dircmp.diff_files uses a shallow os.stat comparison, which can
    # report files with equal size/mtime as identical even when their bytes
    # differ. Re-compare the common files by content (shallow=False) so real
    # divergence in the saved KV cache data is caught.
    _, diff_files, funny_files = filecmp.cmpfiles(
        dir1, dir2, dcmp.common_files, shallow=False
    )
    if dcmp.left_only or dcmp.right_only or diff_files or funny_files:
        print(f"Differences found between {dir1} and {dir2}:")
        print(f"  Left only: {dcmp.left_only}")
        print(f"  Right only: {dcmp.right_only}")
        print(f"  Different files: {diff_files}")
        return False
    for sub_dir in dcmp.common_dirs:
        if not _compare_directories(dir1 / sub_dir, dir2 / sub_dir):
            return False
    return True
@pytest.mark.skipif(
    current_platform.is_rocm(),
    reason=(
        "hipErrorLaunchFailure when running this test, see issue:"
        "https://github.com/ROCm/pytorch/issues/2822"
    ),
)
def test_multi_example_connector_consistency():
    """
    Tests that MultiConnector with two ExampleConnectors saves
    identical KV cache data to separate storage locations.

    Also verifies, via the connectors' event logs, the scheduler/worker
    call order and that on a prefix-cache miss the *first* connector with
    a nonzero match is the one whose blocks get allocated.
    """
    storage_1_path = Path("storage_1/")
    storage_2_path = Path("storage_2/")
    # Start from a clean slate in case a previous run left state behind.
    shutil.rmtree(storage_1_path, ignore_errors=True)
    shutil.rmtree(storage_2_path, ignore_errors=True)
    storage_1_path.mkdir()
    storage_2_path.mkdir()
    # Configure MultiConnector with two ExampleConnectors
    kv_transfer_config = KVTransferConfig(
        kv_connector="MultiConnector",
        kv_role="kv_both",
        kv_connector_extra_config={
            "connectors": [
                {
                    "kv_connector": "TestExampleConnector",
                    "kv_role": "kv_both",
                    "kv_connector_extra_config": {
                        "shared_storage_path": str(storage_1_path),
                        "name": "storage1",
                    },
                    "kv_connector_module_path": "tests.v1.kv_connector.unit.utils",
                },
                {
                    "kv_connector": "TestExampleConnector",
                    "kv_role": "kv_both",
                    "kv_connector_extra_config": {
                        "shared_storage_path": str(storage_2_path),
                        "name": "storage2",
                    },
                    "kv_connector_module_path": "tests.v1.kv_connector.unit.utils",
                },
            ]
        },
    )
    llm = LLM(
        model=MODEL_NAME,
        enforce_eager=True,
        gpu_memory_utilization=0.5,
        kv_transfer_config=kv_transfer_config,
    )
    # Run generation - this should trigger saving KV cache
    _ = llm.generate(PROMPTS, SAMPLING_PARAMS)
    # --- Verification ---
    # Check that both storage directories were populated
    local_subdirs = list(storage_1_path.iterdir())
    external_subdirs = list(storage_2_path.iterdir())
    assert len(local_subdirs) > 0, (
        f"Local storage path {storage_1_path} is empty after generation."
    )
    assert len(external_subdirs) > 0, (
        f"External storage path {storage_2_path} is empty after generation."
    )
    assert len(local_subdirs) == len(external_subdirs), (
        f"Mismatch in number of cache entries: "
        f"Local={len(local_subdirs)}, External={len(external_subdirs)}"
    )
    # The subdirectories should correspond to the prompt hashes
    # Since prompts are the same, the hash directories should be the same name
    local_subdir_names = sorted([d.name for d in local_subdirs])
    external_subdir_names = sorted([d.name for d in external_subdirs])
    assert local_subdir_names == external_subdir_names, (
        "Cache directory names do not match between local and external storage"
    )
    # Compare the contents of each corresponding cache directory
    for subdir_name in local_subdir_names:
        print(f"Comparing contents of cache directory: {subdir_name}")
        assert _compare_directories(
            storage_1_path / subdir_name, storage_2_path / subdir_name
        ), (
            f"Contents differ for cache directory '{subdir_name}' between "
            f"{storage_1_path} and {storage_2_path}"
        )
    events = get_connector_events()
    # get_num_new_matched_tokens and update_state_after_alloc will be called
    # on each connector in turn.
    assert events["storage1-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[0] 0",
        "build_connector_meta",
    ]
    assert events["storage1-WORKER"][:5] == [
        "register_kv_caches",
        "bind_connector_metadata",
        "start_load_kv",
        "wait_for_layer_load",
        "save_kv_layer",
    ]
    assert events["storage2-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[0] 0",
        "build_connector_meta",
    ]
    assert events["storage2-WORKER"][:5] == [
        "register_kv_caches",
        "bind_connector_metadata",
        "start_load_kv",
        "wait_for_layer_load",
        "save_kv_layer",
    ]
    # Reset prefix cache or else we'll just get the tokens back from there.
    llm.reset_prefix_cache()
    # Run generation again - this should trigger loading from the first
    # connector.
    _ = llm.generate(PROMPTS, SAMPLING_PARAMS)
    events = get_connector_events()
    # get_num_new_matched_tokens will return new tokens from the first
    # connector so update_state_after_alloc will be with allocated blocks
    # on that one but with zero blocks for others (first nonzero match is
    # chosen).
    assert events["storage1-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[7] 96",
        "build_connector_meta",
    ]
    assert events["storage2-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[0] 0",
        "build_connector_meta",
    ]
    # Delete storage1 connector state
    shutil.rmtree(storage_1_path)
    # Reset prefix cache or else we'll just get the tokens back from there.
    llm.reset_prefix_cache()
    # Run generation again - since storage1's state was deleted, this
    # should trigger loading from the second connector instead.
    _ = llm.generate(PROMPTS, SAMPLING_PARAMS)
    events = get_connector_events()
    # get_num_new_matched_tokens will be called for both connectors but will
    # return 0 from the first connector, but the second connector should have
    # a hit, so update_state_after_alloc will only be called with allocated
    # blocks for the second connector.
    assert events["storage1-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[0] 0",
        "build_connector_meta",
    ]
    assert events["storage2-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[7] 96",
        "build_connector_meta",
    ]
    # Clean up
    shutil.rmtree(storage_1_path)
    shutil.rmtree(storage_2_path)
def get_connector_events() -> dict[str, list[str]]:
    """Collect each connector's logged events and reset its log file.

    Scans the temp directory for ``connector_<name>_events.log`` files,
    returns the non-empty stripped lines keyed by connector name, and
    truncates every file it read so the next call starts fresh.
    """
    import glob

    pattern = tempfile.gettempdir() + "/connector_*_events.log"
    events_by_name: dict[str, list[str]] = {}
    for path in glob.glob(pattern):
        # Connector name sits between "connector_" and "_events.log".
        name = path.split("connector_")[1].split("_events.log")[0]
        try:
            with open(path, "r+") as log_file:
                stripped = (raw.strip() for raw in log_file)
                events_by_name[name] = [entry for entry in stripped if entry]
                log_file.truncate(0)
        except Exception as e:
            print(f"[ERROR] Could not read connector events for {name}: {e}")
    return events_by_name
def test_engine_id_conflict():
    """Two independently-built KVTransferConfigs must get distinct engine ids."""
    first = KVTransferConfig()
    second = KVTransferConfig()
    ids = [first.engine_id, second.engine_id]
    assert ids[0] != ids[1], (
        f"Engine IDs should be different for different configs. Got {ids}"
    )
class TestMultiConnectorStats:
    """Tests for MultiConnector stats reconstruction and operations."""

    # These tests exercise MultiConnector.build_kv_connector_stats and the
    # MultiKVConnectorStats aggregate/reduce/reset/is_empty operations; no
    # engine or GPU is required.
    def test_build_kv_connector_stats_with_none(self):
        """Test that build_kv_connector_stats returns empty stats when given None."""
        stats = MultiConnector.build_kv_connector_stats(data=None)
        assert stats is not None
        assert isinstance(stats, MultiKVConnectorStats)
        assert len(stats.data) == 0
        assert stats.is_empty()
    def test_build_kv_connector_stats_with_empty_dict(self):
        """Test that build_kv_connector_stats returns empty stats with empty dict."""
        stats = MultiConnector.build_kv_connector_stats(data={})
        assert stats is not None
        assert isinstance(stats, MultiKVConnectorStats)
        assert len(stats.data) == 0
        assert stats.is_empty()
    def test_build_kv_connector_stats_reconstructs_nixl_stats(self):
        """Test that NixlConnector stats are properly reconstructed with
        correct data."""
        # Shape mimics what arrives after cross-process serialization:
        # {connector_name: {"data": {...}}}.
        serialized_data = {
            "NixlConnector": {
                "data": {
                    "transfer_duration": [1.5, 2.3],
                    "post_duration": [0.1, 0.2],
                    "bytes_transferred": [1024, 2048],
                    "num_descriptors": [10, 20],
                    "num_failed_transfers": [],
                    "num_failed_notifications": [],
                }
            }
        }
        stats = MultiConnector.build_kv_connector_stats(data=serialized_data)
        assert "NixlConnector" in stats.data
        nixl_stats = stats.data["NixlConnector"]
        assert isinstance(nixl_stats, NixlKVConnectorStats)
        assert nixl_stats.data["transfer_duration"] == [1.5, 2.3]
        assert nixl_stats.data["post_duration"] == [0.1, 0.2]
        assert nixl_stats.data["bytes_transferred"] == [1024, 2048]
        assert nixl_stats.data["num_descriptors"] == [10, 20]
    def test_build_kv_connector_stats_with_multiple_connectors(self):
        """Test reconstruction with multiple connector types that have custom stats."""
        serialized_data = {
            "NixlConnector": {
                "data": {
                    "transfer_duration": [1.5],
                    "post_duration": [0.1],
                    "bytes_transferred": [1024],
                    "num_descriptors": [10],
                    "num_failed_transfers": [],
                    "num_failed_notifications": [],
                }
            },
            "MockConnector": {"data": {"mock_field": [1, 2, 3]}},
        }
        stats = MultiConnector.build_kv_connector_stats(data=serialized_data)
        assert stats is not None
        assert isinstance(stats, MultiKVConnectorStats)
        # Both connectors should be reconstructed
        assert len(stats.data) == 2
        assert "NixlConnector" in stats.data
        assert "MockConnector" in stats.data
        assert isinstance(stats.data["NixlConnector"], NixlKVConnectorStats)
        assert isinstance(stats.data["MockConnector"], MockConnectorStats)
        # Verify data is preserved
        assert stats.data["MockConnector"].data == {"mock_field": [1, 2, 3]}
    def test_build_kv_connector_stats_raises_error_for_unknown_connector(self):
        """Test that unknown connectors raise an error."""
        serialized_data = {
            "UnknownConnector": {"data": {"some_field": [1, 2, 3]}},
            "NixlConnector": {
                "data": {
                    "transfer_duration": [1.5],
                    "post_duration": [0.1],
                    "bytes_transferred": [1024],
                    "num_descriptors": [10],
                    "num_failed_transfers": [],
                    "num_failed_notifications": [],
                }
            },
        }
        with pytest.raises(
            ValueError, match="Connector 'UnknownConnector' is not registered."
        ):
            MultiConnector.build_kv_connector_stats(data=serialized_data)
    def test_build_kv_connector_stats_with_already_instantiated_objects(self):
        """Test that already-instantiated stats objects are preserved (same process)."""
        # This simulates the in-process case where stats are not serialized
        nixl_stats = NixlKVConnectorStats(
            data={
                "transfer_duration": [1.5],
                "post_duration": [0.1],
                "bytes_transferred": [1024],
                "num_descriptors": [10],
                "num_failed_transfers": [],
                "num_failed_notifications": [],
            }
        )
        mock_stats = MockConnectorStats(data={"mock_field": [1, 2, 3]})
        data_with_objects = {
            "NixlConnector": nixl_stats,
            "MockConnector": mock_stats,
        }
        stats = MultiConnector.build_kv_connector_stats(data=data_with_objects)
        assert stats is not None
        assert isinstance(stats, MultiKVConnectorStats)
        assert len(stats.data) == 2
        # Verify objects are preserved as-is (identity, not just equality)
        assert stats.data["NixlConnector"] is nixl_stats
        assert stats.data["MockConnector"] is mock_stats
    def test_build_kv_connector_stats_with_mixed_objects_and_dicts(self):
        """Test handling mixed already-instantiated and serialized stats."""
        # This can happen during transition or partial serialization
        nixl_stats = NixlKVConnectorStats(
            data={
                "transfer_duration": [1.5],
                "post_duration": [0.1],
                "bytes_transferred": [1024],
                "num_descriptors": [10],
                "num_failed_transfers": [],
                "num_failed_notifications": [],
            }
        )
        mixed_data = {
            "NixlConnector": nixl_stats,  # Already instantiated
            "MockConnector": {"data": {"mock_field": [1, 2, 3]}},  # Serialized
        }
        stats = MultiConnector.build_kv_connector_stats(data=mixed_data)
        assert stats is not None
        assert isinstance(stats, MultiKVConnectorStats)
        assert len(stats.data) == 2
        # Instantiated object preserved
        assert stats.data["NixlConnector"] is nixl_stats
        # Serialized object reconstructed
        assert isinstance(stats.data["MockConnector"], MockConnectorStats)
        assert stats.data["MockConnector"].data == {"mock_field": [1, 2, 3]}
    def test_build_kv_connector_stats_skips_connectors_without_custom_stats(self):
        """Test that connectors without custom stats (return None) are skipped."""
        # ExampleConnector doesn't override build_kv_connector_stats,
        # so it returns None and should be skipped
        serialized_data = {
            "NixlConnector": {
                "data": {
                    "transfer_duration": [1.5],
                    "post_duration": [0.1],
                    "bytes_transferred": [1024],
                    "num_descriptors": [10],
                    "num_failed_transfers": [],
                    "num_failed_notifications": [],
                }
            },
            "ExampleConnector": {"data": {"some_field": [1, 2, 3]}},
        }
        stats = MultiConnector.build_kv_connector_stats(data=serialized_data)
        assert stats is not None
        assert isinstance(stats, MultiKVConnectorStats)
        # Only NixlConnector should be reconstructed
        assert len(stats.data) == 1
        assert "NixlConnector" in stats.data
        assert isinstance(stats.data["NixlConnector"], NixlKVConnectorStats)
        # ExampleConnector should be skipped (returns None)
        assert "ExampleConnector" not in stats.data
    def test_build_kv_connector_stats_handles_malformed_data(self):
        """Test that malformed data raises appropriate errors."""
        # Payload lacks the required "data" wrapper key.
        serialized_data = {
            "NixlConnector": {"wrong_field": {"transfer_duration": [1.5]}}
        }
        with pytest.raises(AssertionError, match="Expected a dict with a 'data' field"):
            MultiConnector.build_kv_connector_stats(data=serialized_data)
    def test_aggregate_same_connector(self):
        """Test aggregating stats from the same connector type."""
        stats1 = MultiKVConnectorStats(
            data={
                "NixlConnector": NixlKVConnectorStats(
                    data={
                        "transfer_duration": [1.0],
                        "post_duration": [0.1],
                        "bytes_transferred": [1024],
                        "num_descriptors": [10],
                        "num_failed_transfers": [],
                        "num_failed_notifications": [],
                    }
                )
            }
        )
        stats2 = MultiKVConnectorStats(
            data={
                "NixlConnector": NixlKVConnectorStats(
                    data={
                        "transfer_duration": [2.0],
                        "post_duration": [0.2],
                        "bytes_transferred": [2048],
                        "num_descriptors": [20],
                        "num_failed_transfers": [],
                        "num_failed_notifications": [],
                    }
                )
            }
        )
        result = stats1.aggregate(stats2)
        assert result is stats1  # Should return self
        assert "NixlConnector" in result.data
        nixl_stats = result.data["NixlConnector"]
        # Per-connector lists are concatenated by aggregation.
        assert nixl_stats.data["transfer_duration"] == [1.0, 2.0]
        assert nixl_stats.data["post_duration"] == [0.1, 0.2]
        assert nixl_stats.data["bytes_transferred"] == [1024, 2048]
        assert nixl_stats.data["num_descriptors"] == [10, 20]
    def test_aggregate_new_connector(self):
        """Test aggregating stats when a new connector type appears."""
        from vllm.distributed.kv_transfer.kv_connector.v1.metrics import (
            KVConnectorStats,
        )
        stats1 = MultiKVConnectorStats(
            data={
                "NixlConnector": NixlKVConnectorStats(
                    data={
                        "transfer_duration": [1.0],
                        "post_duration": [0.1],
                        "bytes_transferred": [1024],
                        "num_descriptors": [10],
                        "num_failed_transfers": [],
                        "num_failed_notifications": [],
                    }
                )
            }
        )
        stats2 = MultiKVConnectorStats(
            data={"ExampleConnector": KVConnectorStats(data={"field": [1, 2]})}
        )
        result = stats1.aggregate(stats2)
        # The previously-unknown connector is adopted into the aggregate.
        assert "NixlConnector" in result.data
        assert "ExampleConnector" in result.data
    def test_reduce(self):
        """Test that reduce() correctly reduces all nested connector stats."""
        stats = MultiKVConnectorStats(
            data={
                "NixlConnector": NixlKVConnectorStats(
                    data={
                        "transfer_duration": [1.0, 2.0],
                        "post_duration": [0.1, 0.2],
                        "bytes_transferred": [1024, 2048],
                        "num_descriptors": [10, 20],
                        "num_failed_transfers": [],
                        "num_failed_notifications": [],
                    }
                )
            }
        )
        reduced = stats.reduce()
        assert "NixlConnector" in reduced
        assert isinstance(reduced["NixlConnector"], dict)
        # Check that the stats were reduced (should have aggregated values)
        assert "Num successful transfers" in reduced["NixlConnector"]
        assert reduced["NixlConnector"]["Num successful transfers"] == 2
    def test_reset(self):
        """Test that reset() resets all nested connector stats."""
        stats = MultiKVConnectorStats(
            data={
                "NixlConnector": NixlKVConnectorStats(
                    data={
                        "transfer_duration": [1.0, 2.0],
                        "post_duration": [0.1, 0.2],
                        "bytes_transferred": [1024, 2048],
                        "num_descriptors": [10, 20],
                        "num_failed_transfers": [],
                        "num_failed_notifications": [],
                    }
                )
            }
        )
        assert not stats.is_empty()
        stats.reset()
        # After reset, stats should be empty
        assert stats.is_empty()
        nixl_stats = stats.data["NixlConnector"]
        assert len(nixl_stats.data["transfer_duration"]) == 0
    def test_is_empty_with_multiple_connectors(self):
        """Test is_empty() returns correct value with multiple connectors."""
        # All empty
        stats = MultiKVConnectorStats(
            data={
                "NixlConnector": NixlKVConnectorStats(data={}),
            }
        )
        # Initialize empty stats
        stats.data["NixlConnector"].reset()
        assert stats.is_empty()
        # One non-empty
        stats.data["NixlConnector"].data["transfer_duration"].append(1.0)
        assert not stats.is_empty()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_remote_prefill_lifecycle.py | tests/v1/kv_connector/unit/test_remote_prefill_lifecycle.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
import pytest
from vllm.v1.outputs import EMPTY_MODEL_RUNNER_OUTPUT, KVConnectorOutput
from vllm.v1.request import FinishReason, RequestStatus
from .utils import (
assert_scheduler_empty,
create_model_runner_output,
create_request,
create_scheduler,
create_vllm_config,
)
pytestmark = pytest.mark.cpu_test
def test_basic_lifecycle():
    """Test lifecycle of a remote prefill.

    A remote-prefill request must wait (with blocks pre-allocated but
    unhashed) until the remote KVs arrive, then get scheduled for the
    remainder of the prompt, decode to EOS, and leave no scheduler state.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)
    # 2 Full Blocks and 1 Half Block.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
    # Snapshot the free-block count so we can prove blocks were allocated.
    START_FREE_BLOCK_QUEUE_SIZE = (
        scheduler.kv_cache_manager.block_pool.free_block_queue.num_free_blocks
    )
    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_prefill=True,
    )
    scheduler.add_request(request)
    request_id = request.request_id
    # STEP (1):
    # (1a): schedule()
    scheduler_output = scheduler.schedule()
    # Nothing running and empty scheduler output.
    assert len(scheduler.running) == 0
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 0
    assert len(scheduler_output.num_scheduled_tokens) == 0
    assert scheduler_output.total_num_scheduled_tokens == 0
    # Req waiting for KVs with no computed/scheduled toks ...
    assert len(scheduler.waiting) == 1
    assert request in scheduler.waiting
    assert request.status == RequestStatus.WAITING_FOR_REMOTE_KVS
    assert request.num_computed_tokens == 0
    # ... but should have (uncached) blocks allocated to it.
    block_pool = scheduler.kv_cache_manager.block_pool
    assert block_pool.free_block_queue.num_free_blocks < START_FREE_BLOCK_QUEUE_SIZE
    assert len(block_pool.cached_block_hash_to_block) == 0
    blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_id]
    for block in blocks:
        # Blocks are reserved but not yet hashed (no content written yet).
        assert block._block_hash is None
    # (1b): forward()
    model_runner_output = EMPTY_MODEL_RUNNER_OUTPUT
    # (1c): update_from_output()
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )
    assert not engine_core_outputs or not engine_core_outputs[0].outputs
    # STEP (2):
    # (2a): schedule(): nothing happens!
    scheduler_output = scheduler.schedule()
    assert len(scheduler.waiting) == 1
    assert len(scheduler.running) == 0
    # (2b): forward(): request finishes recv.
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    model_runner_output.kv_connector_output = KVConnectorOutput(
        finished_recving={request_id}
    )
    # (2c): update_from_output():
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )
    assert len(scheduler.waiting) == 1
    assert request_id in scheduler.finished_recving_kv_req_ids
    # STEP (3):
    # (3a): schedule(): this should actually schedule.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 1
    # Confirm the block are actually allocated.
    num_hashed_blocks = 0
    blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_id]
    for block in blocks:
        assert block.ref_cnt == 1
        num_hashed_blocks += 1 if block._block_hash is not None else 0
    # Only the full external blocks get hashed; the partial block does not.
    assert num_hashed_blocks == NUM_EXTERNAL_FULL_BLOCKS
    # Confirm the rest of the prompt is scheduled in this step.
    scheduled_req = scheduler_output.scheduled_new_reqs[0]
    num_scheduled_tokens = scheduler_output.num_scheduled_tokens[request_id]
    num_computed_tokens = scheduled_req.num_computed_tokens
    total_prompt_tokens = len(scheduled_req.prompt_token_ids)
    assert num_scheduled_tokens == total_prompt_tokens - num_computed_tokens
    # (3b): execute_model()
    model_runner_output = create_model_runner_output([request])
    # (3c): update_from_output()
    scheduler.update_from_output(scheduler_output, model_runner_output)
    # Step (4): Hit EOS.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output([request], use_eos=True)
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )
    # One more schedule() drains the finished request from the scheduler.
    scheduler.schedule()
    outputs = engine_core_outputs[0].outputs
    assert len(outputs) == 1
    output = outputs[0]
    assert output.finish_reason == FinishReason.STOP
    assert_scheduler_empty(scheduler)
def test_interleaved_lifecycle():
    """Test Remote Prefills Work Well With Other Requests.

    Interleaves one remote-prefill request with two ordinary local
    requests and checks the scheduler's running/waiting bookkeeping
    at every engine step.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # 2 Full Blocks and 1 Half Block.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))

    # Remote request: stays in `waiting` until its KV transfer is
    # reported finished via `finished_recving` (see STEP 4).
    request_remote = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_prefill=True,
    )
    request_local_a = create_request(
        request_id=2,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
    )
    request_local_b = create_request(
        request_id=3,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
    )

    # STEP 1: Regular request is running.
    scheduler.add_request(request_local_a)
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 1
    model_runner_output = create_model_runner_output([request_local_a])
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP 2: Add a local and remote request.
    # Only the local request is scheduled as "new"; the remote one
    # waits for its KV transfer.
    scheduler.add_request(request_local_b)
    scheduler.add_request(request_remote)
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 1
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 1
    model_runner_output = create_model_runner_output([request_local_a, request_local_b])
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP 3: continue running, KVs not arrived yet.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 2
    model_runner_output = create_model_runner_output(
        reqs=[request_local_a, request_local_b]
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    # State unchanged after the update: remote KVs still pending.
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 2

    # STEP 4: KVs arrive.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 2
    model_runner_output = create_model_runner_output(
        [request_local_a, request_local_b], finished_recving={request_remote.request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP 5: RECVed KVs are sent to ModelRunner.
    # The remote request is finally scheduled, as a "new" request.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 3
    assert len(scheduler.waiting) == 0
    assert len(scheduler_output.scheduled_new_reqs) == 1
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 2
    model_runner_output = create_model_runner_output(
        [request_local_a, request_local_b, request_remote]
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP 6: Hit EOS and free.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        [request_local_a, request_local_b, request_remote],
        use_eos=True,
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    # Final schedule() pass before asserting everything was freed.
    scheduler.schedule()
    assert_scheduler_empty(scheduler)
def test_no_spurious_prefix_caching():
    """
    With P/D, blocks can be allocated but uncomputed for
    multiple engine steps. This test confirms that we do
    not accidentally have cache hits against uncomputed
    blocks.
    """
    # Fix: the config/scheduler pair was previously created twice in a
    # row; the first scheduler was immediately discarded. Create once.
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # 2 and a half full external blocks.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))

    # Both of these requests have prompts like [1,1,1,1,1, ...]
    # (common_prefix_len == num_tokens), so a spurious prefix-cache hit
    # between them is possible if uncomputed blocks were hashed.
    request_remote = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        common_prefix_len=NUM_TOKENS,
        do_remote_prefill=True,
    )
    request_local = create_request(
        request_id=2,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        common_prefix_len=NUM_TOKENS,
        do_remote_prefill=False,
    )

    # Schedule the remote prefill request. This should not
    # cause any blocks to be cached.
    scheduler.add_request(request_remote)
    scheduler_output = scheduler.schedule()
    scheduler.update_from_output(scheduler_output, EMPTY_MODEL_RUNNER_OUTPUT)
    assert len(scheduler.waiting) == 1

    # Schedule the local prefill request. This should cause its own
    # blocks to be cached, separately from the remote request's
    # (still uncomputed) blocks.
    scheduler.add_request(request_local)
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1

    local_blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_local.request_id]
    remote_blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_remote.request_id]

    # Local should have cached blocks (but not all due to preallocate).
    num_hashed_blocks = 0
    for block in local_blocks:
        assert block.ref_cnt == 1
        num_hashed_blocks += 1 if block._block_hash is not None else 0
    assert num_hashed_blocks > 0

    # Remote blocks should not be cached.
    for block in remote_blocks:
        assert block.ref_cnt == 1
        assert block._block_hash is None
def test_full_block_prompt():
    """Test that we handle a prompt that is the full block size."""
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # Prompt is exactly 2 full blocks (no partial block) -- this is the
    # edge case under test.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * NUM_EXTERNAL_FULL_BLOCKS)

    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_prefill=True,
    )

    scheduler.add_request(request)
    request_id = request.request_id

    # STEP (1): Initialize a recv.
    scheduler_output = scheduler.schedule()
    # All blocks should be allocated.
    num_blocks = len(
        scheduler.kv_cache_manager.coordinator.single_type_managers[0].req_to_blocks[
            request_id
        ]
    )
    assert num_blocks == NUM_EXTERNAL_FULL_BLOCKS
    model_runner_output = EMPTY_MODEL_RUNNER_OUTPUT
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP (2): Recv. The request stays in `waiting` and its id is
    # recorded as having finished receiving KVs.
    scheduler_output = scheduler.schedule()
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    model_runner_output.kv_connector_output = KVConnectorOutput(
        finished_recving={request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.waiting) == 1
    assert request_id in scheduler.finished_recving_kv_req_ids

    # STEP (3): Run as usual.
    scheduler_output = scheduler.schedule()
    # We need to recompute the final token of the prompt to generate
    # the first new token, so we should not have a new block.
    num_blocks = len(
        scheduler.kv_cache_manager.coordinator.single_type_managers[0].req_to_blocks[
            request_id
        ]
    )
    assert num_blocks == NUM_EXTERNAL_FULL_BLOCKS
    assert scheduler_output.scheduled_new_reqs[0].num_computed_tokens == NUM_TOKENS - 1
    assert scheduler_output.num_scheduled_tokens[request_id] == 1
    model_runner_output = create_model_runner_output([request])
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP (4): Hit EOS.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output([request], use_eos=True)
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )
    scheduler.schedule()

    outputs = engine_core_outputs[0].outputs
    assert len(outputs) == 1
    output = outputs[0]
    assert output.finish_reason == FinishReason.STOP
    assert_scheduler_empty(scheduler)
def test_cannot_schedule_after_recv():
    """
    Test that we can handle no schedule after recv due to not
    enough remaining KV blocks.
    """
    # NOTE: the KVCacheManager will use 1 null block.
    # So there are 5 total working blocks.
    TOTAL_NUM_BLOCKS = 6
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config, num_blocks=TOTAL_NUM_BLOCKS)

    # Prime the KVCache.
    NUM_PROMPT_BLOCKS = 2
    BLOCK_SIZE = vllm_config.cache_config.block_size
    # Prompt will use 2 blocks + 1 block after we schedule.
    NUM_TOKENS_LOCAL = int(BLOCK_SIZE * NUM_PROMPT_BLOCKS)
    NUM_TOKENS_REMOTE = int(BLOCK_SIZE * NUM_PROMPT_BLOCKS)

    request_normal = create_request(
        request_id=1, block_size=BLOCK_SIZE, num_tokens=NUM_TOKENS_LOCAL
    )
    request_remote = create_request(
        request_id=2,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS_REMOTE,
        do_remote_prefill=True,
    )

    # STEP 1: 3 blocks are in use (2 for prompt, 1 for decode).
    scheduler.add_request(request_normal)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_normal])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 0

    # STEP 2: 5 blocks are in use (2 new for remote blocks).
    scheduler.add_request(request_remote)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_normal])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1

    # STEP 3: finish recving (5 blocks in use).
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_normal], finished_recving={request_remote.request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1

    # STEP 4: try to schedule, remote request is put to running list
    # because the transfer is completed.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_normal, request_remote]
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 0

    # STEP 5: Remote request will be put back to waiting list
    # because it needs new block to hold generated token.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_normal])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1

    # STEP 6: finish the request, free it.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_normal], use_eos=True
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 1

    # STEP 7: now we can schedule (with 2 blocks computed),
    # request is retrieved from preempted list.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_remote])
    # The resumed request keeps its 2 already-computed prompt blocks.
    assert (
        scheduler_output.scheduled_cached_reqs.num_computed_tokens[0]
        == NUM_PROMPT_BLOCKS * BLOCK_SIZE
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 0

    # STEP 8: free everything.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_remote], use_eos=True
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    _ = scheduler.schedule()
    assert_scheduler_empty(scheduler)
def test_cannot_recv():
    """
    Test that we can handle no schedule KV block transfer due to not
    enough remaining KV blocks.
    """
    # NOTE: the KVCacheManager will use 1 null block.
    # So there are 5 total working blocks.
    TOTAL_NUM_BLOCKS = 6
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config, num_blocks=TOTAL_NUM_BLOCKS)

    # Prime the KVCache.
    NUM_PROMPT_BLOCKS = 2
    BLOCK_SIZE = vllm_config.cache_config.block_size
    # Prompt will use 2 blocks + 1 block after we schedule.
    NUM_TOKENS_LOCAL = int(BLOCK_SIZE * NUM_PROMPT_BLOCKS)
    # Remote prompt spans 3 blocks (2 full + 1 half), so its transfer
    # cannot start while only 2 blocks are free.
    NUM_TOKENS_REMOTE = int(BLOCK_SIZE * (NUM_PROMPT_BLOCKS + 0.5))

    request_normal = create_request(
        request_id=1, block_size=BLOCK_SIZE, num_tokens=NUM_TOKENS_LOCAL
    )
    request_remote = create_request(
        request_id=2,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS_REMOTE,
        do_remote_prefill=True,
    )

    # STEP 1: 3 blocks are in use (2 for prompt, 1 for decode).
    scheduler.add_request(request_normal)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_normal])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 0

    # STEP 2: 3 blocks are in use,
    # need 3 new for remote blocks but only 2 are available.
    scheduler.add_request(request_remote)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_normal])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1
    # Should not have KV transfer in progress.
    assert request_remote.status != RequestStatus.WAITING_FOR_REMOTE_KVS

    # STEP 3: finish the request, free it.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_normal], use_eos=True
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 1

    # STEP 4: now we can initiate KV transfer (with 2 blocks computed).
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 1
    assert request_remote.status == RequestStatus.WAITING_FOR_REMOTE_KVS

    # STEP 5: finish recving (5 blocks in use).
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[], finished_recving={request_remote.request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 1

    # STEP 6: schedule remote request.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_remote])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 0

    # STEP 7: free everything.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_remote], use_eos=True
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    _ = scheduler.schedule()
    assert_scheduler_empty(scheduler)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_cache_pollution_prevention.py | tests/v1/kv_connector/unit/test_cache_pollution_prevention.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
test that invalid blocks are evicted from prefix cache to prevent pollution.
verifies that when sync-loading fails, invalid blocks are removed from the
prefix cache hash table so future requests cannot match and reuse corrupted data.
"""
from collections.abc import Callable
from unittest.mock import Mock
import pytest
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.request import Request, RequestStatus
from .utils import (
create_model_runner_output,
create_request,
create_scheduler,
create_vllm_config,
)
pytestmark = pytest.mark.cpu_test
def _make_get_num_new_matched_tokens(
    req_num_new_matched_tokens: dict[str, int],
    async_load: bool,
) -> Callable[[Request, int], tuple[int, bool]]:
    """Build a stub for the connector's get_num_new_matched_tokens.

    The returned callable looks up the per-request matched-token count
    (0 for unknown request ids) and reports ``async_load`` as the
    load-mode flag.
    """

    def get_num_new_matched_tokens(request: Request, _: int) -> tuple[int, bool]:
        matched = req_num_new_matched_tokens.get(request.request_id, 0)
        return matched, async_load

    return get_num_new_matched_tokens
@pytest.fixture
def fail_scheduler():
    """Scheduler configured with kv_load_failure_policy='fail'."""
    config = create_vllm_config()
    config.kv_transfer_config.kv_load_failure_policy = "fail"
    return create_scheduler(config)
def test_invalid_blocks_evicted_prevents_cache_pollution(
    fail_scheduler: Scheduler,
):
    """
    verify invalid blocks are evicted to prevent future cache hits.

    scenario:
    1. request 1 loads externally-computed blocks (sync mode)
    2. some blocks fail to load and are marked invalid
    3. with fail policy, invalid blocks should be evicted from prefix cache
    4. request is marked as FINISHED_ERROR
    """
    num_prompt_blocks = 100
    num_external_computed_blocks = 99
    # index of the block we will report as failed-to-load
    invalid_block_idx = 50

    num_prompt_tokens = num_prompt_blocks * fail_scheduler.block_size
    num_external_computed_tokens = (
        num_external_computed_blocks * fail_scheduler.block_size
    )

    # request 1: will have invalid blocks
    request1 = create_request(num_tokens=num_prompt_tokens, request_id=1)
    fail_scheduler.add_request(request=request1)

    req_num_new_matched_tokens = {
        request1.request_id: num_external_computed_tokens,
    }

    # mock connector indicating sync load (async_load=False)
    fail_scheduler.connector = Mock()
    fail_scheduler.connector.get_num_new_matched_tokens.side_effect = (
        _make_get_num_new_matched_tokens(req_num_new_matched_tokens, False)
    )
    fail_scheduler.connector.request_finished.return_value = (False, None)
    fail_scheduler.connector.take_events.return_value = ()

    scheduler_output = fail_scheduler.schedule()

    # request should be running with sync KV load
    assert len(fail_scheduler.running) == 1
    assert request1.status == RequestStatus.RUNNING

    # get allocated block IDs
    req_block_ids = scheduler_output.scheduled_new_reqs[0].block_ids[0]
    invalid_block_id = req_block_ids[invalid_block_idx]
    invalid_block_ids = {invalid_block_id}

    # get the block object to verify eviction later
    block = fail_scheduler.kv_cache_manager.block_pool.blocks[invalid_block_id]

    # cache the blocks to simulate they've been computed and cached
    # (in real scenario blocks would be cached after compute)
    fail_scheduler.kv_cache_manager.cache_blocks(request1, num_external_computed_tokens)

    # verify block has a hash (is cached) before reporting invalid blocks
    assert block.block_hash is not None, (
        f"block {invalid_block_id} should be cached (have a hash) before "
        f"eviction test, but hash is None"
    )

    # report invalid blocks
    model_runner_output = create_model_runner_output(
        [request1],
        invalid_block_ids=invalid_block_ids,
        use_eos=False,
    )
    fail_scheduler.update_from_output(scheduler_output, model_runner_output)

    # verify request finished with error (fail policy)
    assert request1.status == RequestStatus.FINISHED_ERROR

    # critical assertion: invalid block and all subsequent blocks should be evicted
    # all blocks from invalid_block_idx onwards become invalid since they were
    # computed based on the failed block
    for idx in range(invalid_block_idx, len(req_block_ids)):
        block_id = req_block_ids[idx]
        block_obj = fail_scheduler.kv_cache_manager.block_pool.blocks[block_id]
        assert block_obj.block_hash is None, (
            f"block {block_id} at index {idx} should have been evicted "
            f"(hash reset to None), but hash is {block_obj.block_hash}. "
            f"All blocks from index {invalid_block_idx} onwards should be evicted "
            f"since they depend on the invalid block at index {invalid_block_idx}."
        )

    # verify cache contains exactly the valid blocks (before first affected block)
    # and none of the invalid blocks (from first affected block onwards)
    # valid blocks: all blocks before invalid_block_idx should be cached
    for idx in range(invalid_block_idx):
        block_id = req_block_ids[idx]
        block_obj = fail_scheduler.kv_cache_manager.block_pool.blocks[block_id]
        assert block_obj.block_hash is not None, (
            f"valid block {block_id} at index {idx} should still be cached "
            f"(have a hash), but hash is None. Only blocks from index "
            f"{invalid_block_idx} onwards should be evicted."
        )

    # invalid blocks: verify they're not in the cached_block_hash_to_block map
    cached_blocks = (
        fail_scheduler.kv_cache_manager.block_pool.cached_block_hash_to_block
    )
    # the cache may map a hash to a single block or a dict of blocks;
    # normalize both shapes into a flat set of cached block ids
    cached_block_ids = {
        b.block_id
        for blocks_val in cached_blocks._cache.values()
        for b in (
            [blocks_val] if not isinstance(blocks_val, dict) else blocks_val.values()
        )
    }
    for idx in range(invalid_block_idx, len(req_block_ids)):
        block_id = req_block_ids[idx]
        assert block_id not in cached_block_ids, (
            f"invalid block {block_id} at index {idx} should not be in cache hash table"
        )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_lmcache_connector.py | tests/v1/kv_connector/unit/test_lmcache_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest.mock import MagicMock
import pytest
from vllm.distributed.kv_events import BlockStored
from vllm.distributed.kv_transfer.kv_connector.v1.lmcache_connector import (
LMCacheConnectorV1,
LMCacheKVEvents,
)
from vllm.v1.outputs import KVConnectorOutput
@pytest.fixture
def mock_lmcache_engine_event():
    """Create a mock event object that mimics what the lmcache engine returns."""

    class _EngineEvent:
        """Bare attribute holder shaped like an lmcache engine event."""

    event = _EngineEvent()
    event.block_hashes = ["hash1", "hash2"]
    event.parent_block_hash = "parent_hash"
    event.token_ids = [1, 2, 3, 4]
    event.lora_id = None
    event.block_size = 16
    event.medium = "GPU"
    event.lora_name = None
    return event
@pytest.fixture
def mock_connector():
    """Create a mock LMCacheConnectorV1 instance with mocked dependencies."""
    connector = MagicMock(spec=LMCacheConnectorV1)
    connector._kv_cache_events = None
    connector._lmcache_engine = MagicMock()
    # Bind the real (unmocked) implementations of the methods under test
    # onto the mock, so they run against the mocked attributes above.
    for method_name in (
        "get_kv_connector_kv_cache_events",
        "update_connector_output",
        "take_events",
    ):
        unbound = getattr(LMCacheConnectorV1, method_name)
        setattr(connector, method_name, unbound.__get__(connector, LMCacheConnectorV1))
    return connector
class TestGetKVConnectorKVCacheEvents:
    """Test get_kv_connector_kv_cache_events method.

    The method pulls raw event objects from the lmcache engine and
    converts them into ``BlockStored`` events wrapped in ``LMCacheKVEvents``.
    """

    def test_returns_none_when_no_events(self, mock_connector):
        """Test that None is returned when lmcache engine has no events."""
        mock_connector._lmcache_engine.get_kv_events.return_value = None
        result = mock_connector.get_kv_connector_kv_cache_events()
        assert result is None
        mock_connector._lmcache_engine.get_kv_events.assert_called_once()

    def test_returns_none_when_empty_list(self, mock_connector):
        """Test that None is returned when lmcache engine returns empty list."""
        mock_connector._lmcache_engine.get_kv_events.return_value = []
        result = mock_connector.get_kv_connector_kv_cache_events()
        assert result is None

    def test_converts_single_event(self, mock_connector, mock_lmcache_engine_event):
        """Test conversion of a single event from lmcache engine format."""
        mock_connector._lmcache_engine.get_kv_events.return_value = [
            mock_lmcache_engine_event
        ]
        result = mock_connector.get_kv_connector_kv_cache_events()
        assert result is not None
        assert isinstance(result, LMCacheKVEvents)
        assert result.get_number_of_workers() == 1
        events = result.get_all_events()
        assert len(events) == 1
        assert isinstance(events[0], BlockStored)
        # every field must be carried over unchanged
        assert events[0].block_hashes == ["hash1", "hash2"]
        assert events[0].parent_block_hash == "parent_hash"
        assert events[0].token_ids == [1, 2, 3, 4]
        assert events[0].lora_id is None
        assert events[0].block_size == 16
        assert events[0].medium == "GPU"
        assert events[0].lora_name is None

    def test_converts_multiple_events(self, mock_connector):
        """Test conversion of multiple events from lmcache engine format."""

        class MockEvent:
            # minimal stand-in for an lmcache engine event
            def __init__(self, i):
                self.block_hashes = [f"hash{i}"]
                self.parent_block_hash = f"parent{i}"
                self.token_ids = [i]
                self.lora_id = None
                self.block_size = 16
                self.medium = "GPU"
                self.lora_name = None

        events = [MockEvent(i) for i in range(5)]
        mock_connector._lmcache_engine.get_kv_events.return_value = events
        result = mock_connector.get_kv_connector_kv_cache_events()
        assert result is not None
        assert isinstance(result, LMCacheKVEvents)
        converted_events = result.get_all_events()
        assert len(converted_events) == 5
        # order must be preserved across the conversion
        for i, event in enumerate(converted_events):
            assert isinstance(event, BlockStored)
            assert event.block_hashes == [f"hash{i}"]
            assert event.parent_block_hash == f"parent{i}"
            assert event.token_ids == [i]

    def test_preserves_event_attributes(self, mock_connector):
        """Test that all event attributes are correctly preserved."""

        class MockEventWithLora:
            # exercises non-default lora/medium/block_size values
            def __init__(self):
                self.block_hashes = ["hash_a", "hash_b", "hash_c"]
                self.parent_block_hash = "parent_xyz"
                self.token_ids = [100, 200, 300]
                self.lora_id = 42
                self.block_size = 32
                self.medium = "DISK"
                self.lora_name = "lora_example"

        mock_connector._lmcache_engine.get_kv_events.return_value = [
            MockEventWithLora()
        ]
        result = mock_connector.get_kv_connector_kv_cache_events()
        events = result.get_all_events()
        event = events[0]
        assert event.block_hashes == ["hash_a", "hash_b", "hash_c"]
        assert event.parent_block_hash == "parent_xyz"
        assert event.token_ids == [100, 200, 300]
        assert event.lora_id == 42
        assert event.block_size == 32
        assert event.medium == "DISK"
        assert event.lora_name == "lora_example"

    def test_handles_none_parent_block_hash(self, mock_connector):
        """Test handling of events with None parent_block_hash."""

        class MockEventNoParent:
            def __init__(self):
                self.block_hashes = ["hash1"]
                self.parent_block_hash = None
                self.token_ids = [1, 2]
                self.lora_id = None
                self.block_size = 16
                self.medium = "GPU"
                self.lora_name = None

        mock_connector._lmcache_engine.get_kv_events.return_value = [
            MockEventNoParent()
        ]
        result = mock_connector.get_kv_connector_kv_cache_events()
        events = result.get_all_events()
        assert events[0].parent_block_hash is None
class TestUpdateConnectorOutput:
    """Test update_connector_output method.

    The method merges the ``kv_cache_events`` carried in a
    ``KVConnectorOutput`` into the connector's accumulated
    ``_kv_cache_events`` (events are appended and worker counts summed).
    """

    def test_does_nothing_when_kv_cache_events_is_none(self, mock_connector):
        """Test that method returns early when kv_cache_events is None."""
        connector_output = KVConnectorOutput(kv_cache_events=None)
        mock_connector.update_connector_output(connector_output)
        assert mock_connector._kv_cache_events is None

    def test_does_nothing_when_kv_cache_events_is_not_lmcache_kv_events(
        self, mock_connector
    ):
        """Test that method returns early when kv_cache_events is not
        LMCacheKVEvents."""
        # Create a mock object that is not LMCacheKVEvents
        fake_events = MagicMock()
        connector_output = KVConnectorOutput(kv_cache_events=fake_events)
        mock_connector.update_connector_output(connector_output)
        assert mock_connector._kv_cache_events is None

    def test_sets_kv_cache_events_when_none(self, mock_connector):
        """Test that _kv_cache_events is set when it was None."""
        kv_events = LMCacheKVEvents(num_workers=1)
        event = BlockStored(
            block_hashes=["hash1"],
            parent_block_hash=None,
            token_ids=[1, 2],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        kv_events.add_events([event])
        connector_output = KVConnectorOutput(kv_cache_events=kv_events)
        mock_connector.update_connector_output(connector_output)
        # the incoming object is adopted directly (identity, not a copy)
        assert mock_connector._kv_cache_events is kv_events

    def test_adds_events_when_kv_cache_events_already_exists(self, mock_connector):
        """Test that events are added when _kv_cache_events already exists."""
        # Set up existing events
        existing_events = LMCacheKVEvents(num_workers=2)
        event1 = BlockStored(
            block_hashes=["hash1"],
            parent_block_hash=None,
            token_ids=[1],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        existing_events.add_events([event1])
        existing_events.add_events([event1])  # Simulate 2 workers reporting
        mock_connector._kv_cache_events = existing_events

        # Create new events to add
        new_events = LMCacheKVEvents(num_workers=1)
        event2 = BlockStored(
            block_hashes=["hash2"],
            parent_block_hash=None,
            token_ids=[2],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        new_events.add_events([event2])
        connector_output = KVConnectorOutput(kv_cache_events=new_events)
        mock_connector.update_connector_output(connector_output)

        # Check that events were added
        all_events = mock_connector._kv_cache_events.get_all_events()
        assert len(all_events) == 3  # 2 from existing + 1 from new
        assert event1 in all_events
        assert event2 in all_events

    def test_increments_workers_when_kv_cache_events_already_exists(
        self, mock_connector
    ):
        """Test that worker count is incremented correctly."""
        # Set up existing events with 2 workers
        existing_events = LMCacheKVEvents(num_workers=2)
        mock_connector._kv_cache_events = existing_events

        # Create new events from 3 workers
        new_events = LMCacheKVEvents(num_workers=3)
        event = BlockStored(
            block_hashes=["hash1"],
            parent_block_hash=None,
            token_ids=[1],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        new_events.add_events([event])
        connector_output = KVConnectorOutput(kv_cache_events=new_events)
        mock_connector.update_connector_output(connector_output)

        # Worker count should be 2 + 3 = 5
        assert mock_connector._kv_cache_events.get_number_of_workers() == 5

    def test_multiple_updates(self, mock_connector):
        """Test multiple consecutive updates."""
        # First update
        events1 = LMCacheKVEvents(num_workers=1)
        event1 = BlockStored(
            block_hashes=["hash1"],
            parent_block_hash=None,
            token_ids=[1],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        events1.add_events([event1])
        output1 = KVConnectorOutput(kv_cache_events=events1)
        mock_connector.update_connector_output(output1)

        # Second update
        events2 = LMCacheKVEvents(num_workers=2)
        event2 = BlockStored(
            block_hashes=["hash2"],
            parent_block_hash=None,
            token_ids=[2],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        events2.add_events([event2])
        output2 = KVConnectorOutput(kv_cache_events=events2)
        mock_connector.update_connector_output(output2)

        # Third update
        events3 = LMCacheKVEvents(num_workers=1)
        event3 = BlockStored(
            block_hashes=["hash3"],
            parent_block_hash=None,
            token_ids=[3],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        events3.add_events([event3])
        output3 = KVConnectorOutput(kv_cache_events=events3)
        mock_connector.update_connector_output(output3)

        # Check final state
        all_events = mock_connector._kv_cache_events.get_all_events()
        assert len(all_events) == 3
        assert mock_connector._kv_cache_events.get_number_of_workers() == 4  # 1+2+1

    def test_updates_with_empty_events(self, mock_connector):
        """Test updating with empty event lists."""
        # First update with actual events
        events1 = LMCacheKVEvents(num_workers=1)
        event1 = BlockStored(
            block_hashes=["hash1"],
            parent_block_hash=None,
            token_ids=[1],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        events1.add_events([event1])
        output1 = KVConnectorOutput(kv_cache_events=events1)
        mock_connector.update_connector_output(output1)

        # Second update with empty events
        events2 = LMCacheKVEvents(num_workers=2)
        # No events added
        output2 = KVConnectorOutput(kv_cache_events=events2)
        mock_connector.update_connector_output(output2)

        # Should still have the original event; workers still accumulate
        all_events = mock_connector._kv_cache_events.get_all_events()
        assert len(all_events) == 1
        assert mock_connector._kv_cache_events.get_number_of_workers() == 3
class TestTakeEvents:
"""Test take_events method."""
def test_yields_nothing_when_kv_cache_events_is_none(self, mock_connector):
"""Test that nothing is yielded when _kv_cache_events is None."""
mock_connector._kv_cache_events = None
events = list(mock_connector.take_events())
assert events == []
    def test_yields_events_and_clears(self, mock_connector):
        """Test that events are yielded and then cleared."""
        # Set up events
        kv_events = LMCacheKVEvents(num_workers=1)
        event1 = BlockStored(
            block_hashes=["hash1"],
            parent_block_hash=None,
            token_ids=[1],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        event2 = BlockStored(
            block_hashes=["hash2"],
            parent_block_hash=None,
            token_ids=[2],
            block_size=16,
            lora_id=None,
            medium="GPU",
            lora_name=None,
        )
        kv_events.add_events([event1, event2])
        mock_connector._kv_cache_events = kv_events

        # Take events
        events = list(mock_connector.take_events())

        # Check that events were yielded
        assert len(events) == 2
        assert event1 in events
        assert event2 in events
        # Check that _kv_cache_events was cleared (take_events is one-shot)
        assert mock_connector._kv_cache_events is None
def test_aggregates_before_yielding(self, mock_connector):
"""Test that events are aggregated before yielding."""
# Set up events from multiple workers
kv_events = LMCacheKVEvents(num_workers=3)
common_event = BlockStored(
block_hashes=["hash_common"],
parent_block_hash=None,
token_ids=[1],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
uncommon_event = BlockStored(
block_hashes=["hash_uncommon"],
parent_block_hash=None,
token_ids=[2],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
# All 3 workers report common_event
kv_events.add_events([common_event])
kv_events.add_events([common_event])
kv_events.add_events([common_event])
# Only 1 worker reports uncommon_event
kv_events.add_events([uncommon_event])
mock_connector._kv_cache_events = kv_events
# Take events
events = list(mock_connector.take_events())
# Only the common event should be yielded
assert len(events) == 1
assert events[0] == common_event
def test_multiple_take_events_calls(self, mock_connector):
"""Test calling take_events multiple times."""
# First call with events
kv_events1 = LMCacheKVEvents(num_workers=1)
event1 = BlockStored(
block_hashes=["hash1"],
parent_block_hash=None,
token_ids=[1],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
kv_events1.add_events([event1])
mock_connector._kv_cache_events = kv_events1
events1 = list(mock_connector.take_events())
assert len(events1) == 1
assert events1[0] == event1
assert mock_connector._kv_cache_events is None
# Second call with no events
events2 = list(mock_connector.take_events())
assert events2 == []
# Third call after adding new events
kv_events2 = LMCacheKVEvents(num_workers=1)
event2 = BlockStored(
block_hashes=["hash2"],
parent_block_hash=None,
token_ids=[2],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
kv_events2.add_events([event2])
mock_connector._kv_cache_events = kv_events2
events3 = list(mock_connector.take_events())
assert len(events3) == 1
assert events3[0] == event2
def test_yields_empty_after_aggregation_removes_all(self, mock_connector):
"""Test that nothing is yielded if aggregation removes all events."""
# Set up events from 2 workers with no common events
kv_events = LMCacheKVEvents(num_workers=2)
event1 = BlockStored(
block_hashes=["hash1"],
parent_block_hash=None,
token_ids=[1],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
event2 = BlockStored(
block_hashes=["hash2"],
parent_block_hash=None,
token_ids=[2],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
# Worker 1 reports event1
kv_events.add_events([event1])
# Worker 2 reports event2
kv_events.add_events([event2])
mock_connector._kv_cache_events = kv_events
# Take events
events = list(mock_connector.take_events())
# No common events, so nothing should be yielded
assert events == []
assert mock_connector._kv_cache_events is None
class TestIntegrationScenarios:
"""Test integration scenarios."""
def test_full_workflow(self, mock_connector, mock_lmcache_engine_event):
"""Test a complete workflow from getting events to taking them."""
# Step 1: Get events from lmcache engine
mock_connector._lmcache_engine.get_kv_events.return_value = [
mock_lmcache_engine_event
]
kv_events = mock_connector.get_kv_connector_kv_cache_events()
assert kv_events is not None
assert len(kv_events.get_all_events()) == 1
# Step 2: Update connector output (simulate receiving from worker)
output1 = KVConnectorOutput(kv_cache_events=kv_events)
mock_connector.update_connector_output(output1)
assert mock_connector._kv_cache_events is not None
# Step 3: Take events
taken_events = list(mock_connector.take_events())
assert len(taken_events) == 1
assert mock_connector._kv_cache_events is None
def test_multiple_workers_workflow(self, mock_connector):
"""Test workflow with multiple workers."""
class MockEvent:
def __init__(self, hash_val):
self.block_hashes = [hash_val]
self.parent_block_hash = None
self.token_ids = [1]
self.lora_id = None
self.block_size = 16
self.medium = "GPU"
self.lora_name = None
# Worker 1
mock_connector._lmcache_engine.get_kv_events.return_value = [
MockEvent("hash_common"),
MockEvent("hash_worker1"),
]
kv_events1 = mock_connector.get_kv_connector_kv_cache_events()
output1 = KVConnectorOutput(kv_cache_events=kv_events1)
mock_connector.update_connector_output(output1)
# Worker 2
mock_connector._lmcache_engine.get_kv_events.return_value = [
MockEvent("hash_common"),
MockEvent("hash_worker2"),
]
kv_events2 = mock_connector.get_kv_connector_kv_cache_events()
output2 = KVConnectorOutput(kv_cache_events=kv_events2)
mock_connector.update_connector_output(output2)
# Take events (should only get common events)
taken_events = list(mock_connector.take_events())
# With aggregation, only events reported by both workers should be present
# In this case, hash_common was reported by both
event_hashes = [e.block_hashes[0] for e in taken_events]
assert "hash_common" in event_hashes
def test_empty_workflow(self, mock_connector):
"""Test workflow when there are no events at any stage."""
# Get events returns None
mock_connector._lmcache_engine.get_kv_events.return_value = None
kv_events = mock_connector.get_kv_connector_kv_cache_events()
assert kv_events is None
# Update with None
output = KVConnectorOutput(kv_cache_events=None)
mock_connector.update_connector_output(output)
# Take events
taken_events = list(mock_connector.take_events())
assert taken_events == []
assert mock_connector._kv_cache_events is None
def test_repeated_cycles(self, mock_connector):
"""Test multiple cycles of the complete workflow."""
class MockEvent:
def __init__(self, cycle_num):
self.block_hashes = [f"hash_cycle_{cycle_num}"]
self.parent_block_hash = None
self.token_ids = [cycle_num]
self.lora_id = None
self.block_size = 16
self.medium = "GPU"
self.lora_name = None
for cycle in range(3):
# Get events
mock_connector._lmcache_engine.get_kv_events.return_value = [
MockEvent(cycle)
]
kv_events = mock_connector.get_kv_connector_kv_cache_events()
# Update
output = KVConnectorOutput(kv_cache_events=kv_events)
mock_connector.update_connector_output(output)
# Take
taken_events = list(mock_connector.take_events())
# Verify
assert len(taken_events) == 1
assert taken_events[0].block_hashes[0] == f"hash_cycle_{cycle}"
assert mock_connector._kv_cache_events is None
def test_lmcache_kv_events_aggregation(self):
"""
Test LMCacheKVEvents aggregation across TP ranks using
KVOutputAggregator (used by MultiprocExecutor).
"""
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
from vllm.v1.outputs import ModelRunnerOutput
# Create KVOutputAggregator for 3 workers (simulating TP=3)
aggregator = KVOutputAggregator(expected_finished_count=3)
# Define common and unique events
common_event = BlockStored(
block_hashes=["hash_common"],
parent_block_hash="parent_common",
token_ids=[1, 2, 3],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
worker1_unique_event = BlockStored(
block_hashes=["hash_worker1"],
parent_block_hash="parent_w1",
token_ids=[4, 5],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
worker2_unique_event = BlockStored(
block_hashes=["hash_worker2"],
parent_block_hash="parent_w2",
token_ids=[6, 7],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
worker3_unique_event = BlockStored(
block_hashes=["hash_worker3"],
parent_block_hash="parent_w3",
token_ids=[8, 9],
block_size=16,
lora_id=None,
medium="GPU",
lora_name=None,
)
# Create events for each worker
# Worker 0: reports common event and its unique event
worker0_events = LMCacheKVEvents(num_workers=1)
worker0_events.add_events([common_event, worker1_unique_event])
# Worker 1: reports common event and its unique event
worker1_events = LMCacheKVEvents(num_workers=1)
worker1_events.add_events([common_event, worker2_unique_event])
# Worker 2: reports common event and its unique event
worker2_events = LMCacheKVEvents(num_workers=1)
worker2_events.add_events([common_event, worker3_unique_event])
# Create ModelRunnerOutput instances for each worker
worker_outputs = []
for i, worker_events in enumerate(
[worker0_events, worker1_events, worker2_events]
):
output = ModelRunnerOutput(
req_ids=[f"req_{i}"],
req_id_to_index={f"req_{i}": 0},
sampled_token_ids=[[123]], # dummy token
logprobs=None,
prompt_logprobs_dict={},
pooler_output=[None],
kv_connector_output=KVConnectorOutput(
finished_sending=set([f"req_{i}_send"])
if i < 2
else None, # Workers 0,1 finished sending
finished_recving=set([f"req_{i}_recv"])
if i > 0
else None, # Workers 1,2 finished receiving
kv_cache_events=worker_events,
),
)
worker_outputs.append(output)
# Use the real aggregation mechanism (like MultiprocExecutor.execute_model)
aggregated_output = aggregator.aggregate(worker_outputs, output_rank=0)
kv_cache_events = aggregated_output.kv_connector_output.kv_cache_events
assert isinstance(kv_cache_events, LMCacheKVEvents)
# After aggregation, events should be combined from all workers
# The aggregator doesn't automatically aggregate events, so we need to call
# aggregate() to get only common events
kv_cache_events.aggregate()
aggregated_events = kv_cache_events.get_all_events()
# Only the common event should remain after aggregation
# because it's the only event reported by all 3 workers
assert len(aggregated_events) == 1
assert aggregated_events[0] == common_event
# Verify the common event properties
assert aggregated_events[0].block_hashes == ["hash_common"]
assert aggregated_events[0].parent_block_hash == "parent_common"
assert aggregated_events[0].token_ids == [1, 2, 3]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_kv_connector_lifecyle.py | tests/v1/kv_connector/unit/test_kv_connector_lifecyle.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.distributed.kv_transfer.kv_connector.v1.example_connector import ( # noqa: E501
ExampleConnectorMetadata,
)
from vllm.distributed.kv_transfer.kv_transfer_state import (
ensure_kv_transfer_initialized,
get_kv_transfer_group,
)
from vllm.v1.core.sched.output import CachedRequestData, SchedulerOutput
from vllm.v1.worker.kv_connector_model_runner_mixin import KVConnectorModelRunnerMixin
# Importing utils registers TestExampleConnector with the factory
from .utils import create_vllm_config
def _make_empty_scheduler_output():
return SchedulerOutput(
scheduled_new_reqs=[],
scheduled_cached_reqs=CachedRequestData.make_empty(),
num_scheduled_tokens={},
total_num_scheduled_tokens=0,
scheduled_spec_decode_tokens={},
scheduled_encoder_inputs={},
num_common_prefix_blocks=[],
finished_req_ids=set(),
free_encoder_mm_hashes=[],
kv_connector_metadata=ExampleConnectorMetadata(),
)
def test_kv_connector_mixin_clears_metadata():
vllm_config = create_vllm_config()
vllm_config.kv_transfer_config.kv_connector = "TestExampleConnector"
vllm_config.kv_transfer_config.kv_role = "kv_both"
vllm_config.kv_transfer_config.kv_connector_extra_config["name"] = "unit"
# Initialize the global connector instance
ensure_kv_transfer_initialized(vllm_config)
try:
# Minimal scheduler output with empty metadata; mixin should still
# bind/clear metadata even if no loads happen
scheduler_output = _make_empty_scheduler_output()
# Invoke the no-forward path which uses the mixin context manager
KVConnectorModelRunnerMixin.kv_connector_no_forward(
scheduler_output, vllm_config
)
# Verify clear_connector_metadata was called on the connector
connector = get_kv_transfer_group()
assert connector._connector_metadata is None
# Test connector wrapper records method calls
assert connector.call_record.get("bind_connector_metadata", 0) == 1
assert connector.call_record.get("clear_connector_metadata", 0) == 1
finally:
# Ensure we clean up the global connector between tests
KVConnectorModelRunnerMixin.ensure_kv_transfer_shutdown()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_error_propagation.py | tests/v1/kv_connector/unit/test_error_propagation.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from unittest.mock import Mock
import pytest
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.request import FinishReason, Request, RequestStatus
from .utils import (
create_model_runner_output,
create_request,
create_scheduler,
create_vllm_config,
)
pytestmark = pytest.mark.cpu_test
def _make_get_num_new_matched_tokens(
req_num_new_matched_tokens: dict[str, int],
async_load: bool,
) -> Callable[[Request, int], tuple[int, bool]]:
def get_num_new_matched_tokens(request: Request, _: int) -> tuple[int, bool]:
value = req_num_new_matched_tokens.get(request.request_id, 0)
return value, async_load
return get_num_new_matched_tokens
@pytest.fixture
def fail_scheduler():
"""scheduler with kv_load_failure_policy='fail'"""
vllm_config = create_vllm_config()
vllm_config.kv_transfer_config.kv_load_failure_policy = "fail"
return create_scheduler(vllm_config)
def test_error_propagation_sync_load(fail_scheduler: Scheduler):
"""test invalid_block_ids with fail policy -> FINISHED_ERROR (sync load)"""
num_prompt_blocks = 100
num_external_computed_blocks = 99
invalid_block_idx = 50
num_prompt_tokens = num_prompt_blocks * fail_scheduler.block_size
num_external_computed_tokens = (
num_external_computed_blocks * fail_scheduler.block_size
)
request = create_request(num_tokens=num_prompt_tokens)
fail_scheduler.add_request(request=request)
req_num_new_matched_tokens = {
request.request_id: num_external_computed_tokens,
}
fail_scheduler.connector = Mock()
fail_scheduler.connector.get_num_new_matched_tokens.side_effect = (
_make_get_num_new_matched_tokens(req_num_new_matched_tokens, False)
)
fail_scheduler.connector.request_finished.return_value = (False, None)
fail_scheduler.connector.take_events.return_value = ()
scheduler_output = fail_scheduler.schedule()
assert len(fail_scheduler.running) == 1
assert len(scheduler_output.scheduled_new_reqs) == 1
assert fail_scheduler.connector.get_num_new_matched_tokens.call_count == 1
req_block_ids = scheduler_output.scheduled_new_reqs[0].block_ids[0]
invalid_block_ids = {req_block_ids[invalid_block_idx]}
model_runner_output = create_model_runner_output(
[request],
invalid_block_ids=invalid_block_ids,
use_eos=True,
)
outputs = fail_scheduler.update_from_output(scheduler_output, model_runner_output)
assert request.status == RequestStatus.FINISHED_ERROR
assert request.get_finished_reason() == FinishReason.ERROR
assert len(outputs) == 1
engine_outputs = next(iter(outputs.values()))
assert len(engine_outputs.outputs) == 1
output = engine_outputs.outputs[0]
assert output.request_id == request.request_id
assert output.finish_reason == FinishReason.ERROR
assert len(fail_scheduler.running) == 0
def test_error_propagation_async_load(fail_scheduler: Scheduler):
"""test invalid_block_ids with fail policy -> FINISHED_ERROR (async load)"""
num_prompt_blocks = 100
num_external_computed_blocks = 99
invalid_block_idx = 50
num_prompt_tokens = num_prompt_blocks * fail_scheduler.block_size
num_external_computed_tokens = (
num_external_computed_blocks * fail_scheduler.block_size
)
request = create_request(num_tokens=num_prompt_tokens)
fail_scheduler.add_request(request=request)
req_num_new_matched_tokens = {
request.request_id: num_external_computed_tokens,
}
fail_scheduler.connector = Mock()
fail_scheduler.connector.get_num_new_matched_tokens.side_effect = (
_make_get_num_new_matched_tokens(req_num_new_matched_tokens, True)
)
fail_scheduler.connector.request_finished.return_value = (False, None)
fail_scheduler.connector.take_events.return_value = ()
scheduler_output = fail_scheduler.schedule()
assert len(fail_scheduler.waiting) == 1
assert request.status == RequestStatus.WAITING_FOR_REMOTE_KVS
assert request.num_computed_tokens == 0
(req_block_ids,) = fail_scheduler.kv_cache_manager.get_block_ids(request.request_id)
invalid_block_ids = {req_block_ids[invalid_block_idx]}
model_runner_output = create_model_runner_output(
reqs=[],
finished_recving=set(),
invalid_block_ids=invalid_block_ids,
use_eos=True,
)
outputs = fail_scheduler.update_from_output(scheduler_output, model_runner_output)
assert request.status == RequestStatus.FINISHED_ERROR
assert request.get_finished_reason() == FinishReason.ERROR
assert len(outputs) == 1
engine_outputs = next(iter(outputs.values()))
assert len(engine_outputs.outputs) == 1
output = engine_outputs.outputs[0]
assert output.request_id == request.request_id
assert output.finish_reason == FinishReason.ERROR
assert len(fail_scheduler.waiting) == 0
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_output_aggregator.py | tests/v1/kv_connector/unit/test_output_aggregator.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
from vllm.v1.outputs import KVConnectorOutput, ModelRunnerOutput
pytestmark = pytest.mark.cpu_test
class DummyModelRunnerOutput(ModelRunnerOutput):
def __init__(
self,
finished_sending: set[str] | None = None,
finished_recving: set[str] | None = None,
invalid_block_ids: set[int] | None = None,
expected_finished_count: int = 0,
):
self.kv_connector_output = KVConnectorOutput(
finished_sending=finished_sending,
finished_recving=finished_recving,
invalid_block_ids=invalid_block_ids or set(),
expected_finished_count=expected_finished_count,
)
def __repr__(self):
return (
f"DummyModelRunnerOutput("
f"finished_sending={self.kv_connector_output.finished_sending},"
f"finished_recving={self.kv_connector_output.finished_recving})"
f"invalid_block_ids={self.kv_connector_output.invalid_block_ids})"
)
def test_aggregate_workers_output():
aggregator = KVOutputAggregator(expected_finished_count=2)
output1 = DummyModelRunnerOutput()
output2 = DummyModelRunnerOutput()
aggregated = aggregator.aggregate([output1, output2])
assert aggregated is output1
aggregated = aggregated.kv_connector_output
assert aggregated.finished_sending is None
assert aggregated.finished_recving is None
assert not aggregated.invalid_block_ids
output1 = DummyModelRunnerOutput(
finished_sending={"req1"}, finished_recving={"req2"}
)
output2 = DummyModelRunnerOutput(invalid_block_ids={1})
aggregated = aggregator.aggregate([output1, output2])
assert aggregated is output1
aggregated = aggregated.kv_connector_output
assert aggregated.finished_sending is None
assert aggregated.finished_recving is None
assert aggregated.invalid_block_ids == {1}
output1 = DummyModelRunnerOutput(invalid_block_ids={2})
output2 = DummyModelRunnerOutput(finished_sending={"req1"})
aggregated = aggregator.aggregate([output1, output2])
assert aggregated is output1
aggregated = aggregated.kv_connector_output
assert aggregated.finished_sending == {"req1"}
assert aggregated.finished_recving is None
assert aggregated.invalid_block_ids == {2}
output1 = DummyModelRunnerOutput(invalid_block_ids={3, 4})
output2 = DummyModelRunnerOutput(
finished_recving={"req2"}, invalid_block_ids={4, 5}
)
aggregated = aggregator.aggregate([output1, output2])
assert aggregated is output1
aggregated = aggregated.kv_connector_output
assert aggregated.finished_sending is None
assert aggregated.finished_recving == {"req2"}
assert aggregated.invalid_block_ids == {3, 4, 5}
def test_aggregate_workers_output_with_expected_finished_count():
# We create the aggregator expecting to collect from 4 workers
aggregator = KVOutputAggregator(expected_finished_count=4)
assert aggregator._expected_finished_count == 4
# Some request with default expected finished requests
output1 = DummyModelRunnerOutput(finished_sending={"req1"})
aggregated = aggregator.aggregate([output1])
# still expecting to collect from 4 workers
assert aggregator._send_remaining_count["req1"] == 3
assert not aggregated.kv_connector_output.finished_sending
assert not aggregated.kv_connector_output.finished_recving
# Workers discover and find that in this setup they only need to
# collect from 2
output1 = DummyModelRunnerOutput(
finished_sending={"req1"}, expected_finished_count=2
)
output2 = DummyModelRunnerOutput(
finished_recving={"req2"}, expected_finished_count=2
)
output3 = DummyModelRunnerOutput(finished_recving={"req2"})
# Req2 only needs 2 acks
aggregated = aggregator.aggregate([output1, output2, output3])
assert aggregated.kv_connector_output.expected_finished_count == 2
assert not aggregated.kv_connector_output.finished_sending
# Req2 is finished
assert "req2" not in aggregator._recv_remaining_count
assert aggregated.kv_connector_output.finished_recving == {"req2"}
# Req1 is still waiting for 2 more acks (expected_finished_count has no effect)
# NOTE: This is to showcase dynamic update. Workers are responsible for
# ensuring "req1" termination in this case
assert aggregator._send_remaining_count["req1"] == 2
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/utils.py | tests/v1/kv_connector/unit/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import tempfile
from collections import defaultdict
from collections.abc import Callable
from dataclasses import dataclass
from itertools import chain, count
from typing import Any
import torch
from vllm import SamplingParams
from vllm.config import (
AttentionConfig,
CacheConfig,
DeviceConfig,
KVTransferConfig,
ModelConfig,
SchedulerConfig,
VllmConfig,
)
from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
KVConnectorBase_V1,
KVConnectorMetadata,
KVConnectorRole,
)
from vllm.distributed.kv_transfer.kv_connector.v1.example_connector import ( # noqa
ExampleConnector,
)
from vllm.utils.hashing import sha256
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.core.kv_cache_utils import get_request_block_hasher, init_none_hash
from vllm.v1.core.sched.scheduler import Scheduler, SchedulerOutput
from vllm.v1.kv_cache_interface import (
FullAttentionSpec,
KVCacheConfig,
KVCacheGroupSpec,
)
from vllm.v1.outputs import KVConnectorOutput, ModelRunnerOutput
from vllm.v1.request import Request
from vllm.v1.structured_output import StructuredOutputManager
EOS_TOKEN_ID = 50256
def assert_scheduler_empty(scheduler: Scheduler):
"""Confirm the scheduler is "empty" - i.e. no leaks."""
# Scheduler Metadata.
assert len(scheduler.requests) == 0
assert len(scheduler.waiting) == 0
assert len(scheduler.running) == 0
assert len(scheduler.finished_req_ids) == 0
assert len(scheduler.finished_recving_kv_req_ids) == 0
# EncoderCacheManager.
assert len(scheduler.encoder_cache_manager.freed) == 0
assert len(scheduler.encoder_cache_manager.cached) == 0
# KVCache Manager.
assert (
len(
scheduler.kv_cache_manager.coordinator.single_type_managers[0].req_to_blocks
)
== 0
)
assert (
len(
scheduler.kv_cache_manager.coordinator.single_type_managers[
0
].num_cached_block
)
== 0
)
num_free_blocks = (
scheduler.kv_cache_manager.block_pool.free_block_queue.num_free_blocks
)
assert num_free_blocks == (scheduler.kv_cache_manager.block_pool.num_gpu_blocks - 1)
# NOTE(rob): just the ref count on blocks will be 0. The hash
# value, etc will remain since we lazily evict for prefix cache.
for block in scheduler.kv_cache_manager.block_pool.blocks:
assert block.ref_cnt == 0
def create_vllm_config(
model: str = "facebook/opt-125m",
max_num_seqs: int = 16,
max_num_batched_tokens: int = 64,
block_size: int = 16,
max_model_len: int = 10000,
enable_chunked_prefill: bool = True,
enable_permute_local_kv: bool = False,
kv_connector_extra_config: dict[str, Any] | None = None,
dtype: str = "float16",
cache_dtype: str = "auto",
hf_overrides: dict[str, Any] | None = None,
attention_backend: str | None = None,
) -> VllmConfig:
"""Initialize VllmConfig For Testing."""
model_config = ModelConfig(
model=model,
trust_remote_code=True,
dtype=dtype,
seed=42,
hf_overrides=hf_overrides or {},
)
scheduler_config = SchedulerConfig(
max_num_seqs=max_num_seqs,
max_num_batched_tokens=max_num_batched_tokens,
max_model_len=max_model_len,
enable_chunked_prefill=enable_chunked_prefill,
is_encoder_decoder=model_config.is_encoder_decoder,
)
# Cache config, optionally force APC
cache_config = CacheConfig(
block_size=block_size,
gpu_memory_utilization=0.9,
swap_space=0,
cache_dtype=cache_dtype,
enable_prefix_caching=True,
)
kv_transfer_config = KVTransferConfig(
kv_connector="NixlConnector",
kv_role="kv_both",
enable_permute_local_kv=enable_permute_local_kv,
kv_connector_extra_config=kv_connector_extra_config or {},
)
attention_config = AttentionConfig(backend=attention_backend)
return VllmConfig(
scheduler_config=scheduler_config,
model_config=model_config,
cache_config=cache_config,
kv_transfer_config=kv_transfer_config,
device_config=DeviceConfig("cpu"),
attention_config=attention_config,
)
def create_scheduler(
vllm_config: VllmConfig,
num_blocks: int = 10000,
) -> Scheduler:
"""Initialize Scheduler For Testing."""
block_size = vllm_config.cache_config.block_size
kv_cache_config = KVCacheConfig(
num_blocks=num_blocks, # A large number of blocks to hold all requests
kv_cache_tensors=[],
kv_cache_groups=[
KVCacheGroupSpec(
["layer"], FullAttentionSpec(block_size, 1, 1, torch.float32, False)
)
],
)
vllm_config.cache_config.num_gpu_blocks = num_blocks
return Scheduler(
vllm_config=vllm_config,
kv_cache_config=kv_cache_config,
log_stats=True,
structured_output_manager=StructuredOutputManager(vllm_config),
block_size=block_size,
)
_request_count = count(1)
_none_hash_initialized = False
def create_request(
request_id: int | None = None,
num_tokens: int = 10,
common_prefix_len=0,
max_tokens: int = 16,
do_remote_decode: bool = False,
do_remote_prefill: bool = False,
num_remote_blocks: int = 3,
block_size: int = 16,
hash_fn: Callable = sha256,
) -> Request:
"""Make dummy request for testing."""
assert num_tokens >= common_prefix_len >= 0
if request_id is None:
request_id = next(_request_count)
global _none_hash_initialized
if not _none_hash_initialized:
init_none_hash(hash_fn)
_none_hash_initialized = True
kv_transfer_params: dict[str, Any] | None = None
if do_remote_decode:
assert not do_remote_prefill
kv_transfer_params = dict(do_remote_prefill=False, do_remote_decode=True)
elif do_remote_prefill:
kv_transfer_params = dict(
do_remote_prefill=True,
do_remote_decode=False,
remote_engine_id="my-engine-id",
remote_request_id=f"prefill-{request_id}",
remote_block_ids=list(range(num_remote_blocks)),
remote_host="my-host",
remote_port=1234,
)
max_tokens = 1 if do_remote_decode else max_tokens
sampling_params = SamplingParams(max_tokens=max_tokens)
common_prefix = [1] * common_prefix_len if common_prefix_len > 0 else []
suffix = [i * request_id for i in range(num_tokens - common_prefix_len)]
prompt_token_ids = common_prefix + suffix
req = Request(
request_id=f"id-{request_id}",
prompt_token_ids=prompt_token_ids,
sampling_params=sampling_params,
pooling_params=None,
mm_features=None,
eos_token_id=EOS_TOKEN_ID,
block_hasher=get_request_block_hasher(block_size, hash_fn),
)
req.kv_transfer_params = kv_transfer_params
return req
def create_model_runner_output(
reqs: list[Request],
finished_sending: set[str] | None = None,
finished_recving: set[str] | None = None,
invalid_block_ids: set[int] | None = None,
use_eos: bool = False,
token_id: int = 0,
) -> ModelRunnerOutput:
"""Make dummy model runner output for testing."""
# Make request data.
req_ids = [req.request_id for req in reqs]
req_id_to_index = {req_id: idx for idx, req_id in enumerate(req_ids)}
# Make sampled tokens.
sampled_token = EOS_TOKEN_ID if use_eos else token_id
sampled_token_ids = [[sampled_token] for _ in req_ids]
kv_connector_output = (
None
if (
finished_sending is None
and finished_recving is None
and invalid_block_ids is None
)
else KVConnectorOutput(
finished_sending=finished_sending,
finished_recving=finished_recving,
invalid_block_ids=invalid_block_ids or set(),
)
)
# Make output data structure.
return ModelRunnerOutput(
req_ids=req_ids,
req_id_to_index=req_id_to_index,
sampled_token_ids=sampled_token_ids,
logprobs=None,
prompt_logprobs_dict={},
pooler_output=None,
kv_connector_output=kv_connector_output,
)
class TestExampleConnector(ExampleConnector):
def __init__(self, config: VllmConfig, role, kv_cache_config):
self.name = config.kv_transfer_config.kv_connector_extra_config["name"]
self._connector = ExampleConnector(config, role)
self.call_record: dict[str, int] = defaultdict(int)
# Use a unique temp file per connector
self._event_file = (
tempfile.gettempdir()
+ f"/connector_{self.name}-{self.role.name}_events.log"
)
# Start with an empty file
with open(self._event_file, "w") as _:
pass
def __getattribute__(self, name):
if name in (
"_connector",
"call_record",
"name",
"_event_file",
"__class__",
"__dict__",
"__getattribute__",
"__init__",
): # avoid recursion
return object.__getattribute__(self, name)
if not hasattr(self._connector, name):
return object.__getattribute__(self, name)
attr = getattr(self._connector, name)
# Intercept calls to the connector interface and write an event
# for each one to a file, which can be read back in the main test proc.
if callable(attr):
def wrapper(*args, **kwargs):
self.call_record[name] += 1
# Include args that we're interested in
to_log = [name]
for arg in args:
if isinstance(arg, int):
to_log.append(str(arg))
elif isinstance(arg, KVCacheBlocks):
to_log.append(f"num_blocks={[len(b) for b in arg.blocks]}")
# Log the event as a line to the file
try:
with open(self._event_file, "a") as f:
f.write(" ".join(to_log) + "\n")
except Exception as e:
print(f"[ERROR] Could not log event {name} for {self.name}: {e}")
return attr(*args, **kwargs)
return wrapper
return attr
@dataclass(frozen=True)
class MockKVConfig:
matched_tokens: int = 0
is_async: bool = False
class MockKVConnectorMetadata(KVConnectorMetadata):
def __init__(self):
# Scheduler tests check metadata.requests
self.requests: list = []
class MockKVConnector(KVConnectorBase_V1):
"""Mock KV connector for scheduler tests, supporting both sync and async mode."""
def __init__(
self,
vllm_config: VllmConfig,
role: KVConnectorRole,
kv_cache_config: KVCacheConfig | None = None,
):
super().__init__(vllm_config, role, kv_cache_config)
extra_config = self._kv_transfer_config.kv_connector_extra_config
self.config = MockKVConfig(
matched_tokens=extra_config["matched_tokens"],
is_async=extra_config["is_async"],
)
def get_num_new_matched_tokens(
self,
request: Request,
num_computed_tokens: int,
) -> tuple[int | None, bool]:
return (self.config.matched_tokens, self.config.is_async)
def update_state_after_alloc(
self,
request: Request,
blocks: KVCacheBlocks,
num_external_tokens: int,
):
pass
def build_connector_meta(
self, scheduler_output: SchedulerOutput
) -> KVConnectorMetadata:
metadata = MockKVConnectorMetadata()
cached_reqs = scheduler_output.scheduled_cached_reqs
for req_id in chain(
(req.req_id for req in scheduler_output.scheduled_new_reqs),
(
req_id
for req_id in cached_reqs.req_ids
if req_id in cached_reqs.resumed_req_ids
),
):
metadata.requests.append({"req_id": req_id})
return metadata
def start_load_kv(self, kv_caches, finished_req_ids):
pass
def wait_for_layer_load(self, layer_name):
pass
def save_kv_layer(self, layer_name, kv_layer, attn_metadata, **kwargs):
pass
def wait_for_save(self):
pass
KVConnectorFactory.register_connector(
"TestExampleConnector", __name__, TestExampleConnector.__name__
)
KVConnectorFactory.register_connector(
"MockKVConnector", __name__, MockKVConnector.__name__
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/__init__.py | tests/v1/kv_connector/unit/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_backwards_compatibility.py | tests/v1/kv_connector/unit/test_backwards_compatibility.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for backwards compatibility with external KV connector implementations.
This test ensures that external connectors (loaded via kv_connector_module_path)
implemented with the old signature continue to work:
- Old signature: __init__(self, vllm_config, role)
- New signature: __init__(self, vllm_config, role, kv_cache_config)
"""
from typing import TYPE_CHECKING
from unittest.mock import patch
import pytest
from vllm.attention.backends.abstract import AttentionMetadata
from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory
from vllm.distributed.kv_transfer.kv_connector.v1 import (
KVConnectorBase_V1,
KVConnectorRole,
)
from vllm.v1.core.sched.output import SchedulerOutput
from .utils import create_scheduler, create_vllm_config
if TYPE_CHECKING:
from vllm.config import VllmConfig
from vllm.forward_context import ForwardContext
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.request import Request
class OldStyleTestConnector(KVConnectorBase_V1):
"""
Test connector using the old signature with 2 required arguments.
This simulates external connectors that haven't been updated yet.
"""
def __init__(self, vllm_config: "VllmConfig", role: KVConnectorRole):
# Old-style call to super().__init__ with only 2 arguments
super().__init__(vllm_config=vllm_config, role=role)
def get_num_new_matched_tokens(
self, request: "Request", num_computed_tokens: int
) -> tuple[int | None, bool]:
return 0, False
def update_state_after_alloc(
self,
request: "Request",
blocks: "KVCacheBlocks",
num_external_tokens: int,
):
pass
def build_connector_meta(self, scheduler_output: SchedulerOutput):
return None
def start_load_kv(self, forward_context: "ForwardContext", **kwargs) -> None:
pass
def wait_for_layer_load(self, layer_name: str) -> None:
pass
def save_kv_layer(
self,
layer_name: str,
kv_layer,
attn_metadata: AttentionMetadata,
**kwargs,
) -> None:
pass
def wait_for_save(self):
pass
class NewStyleTestConnector(KVConnectorBase_V1):
"""
Test connector using the new signature with 3 required arguments.
"""
def __init__(
self,
vllm_config: "VllmConfig",
role: KVConnectorRole,
kv_cache_config: "KVCacheConfig",
):
# New-style call to super().__init__ with all 3 arguments
super().__init__(
vllm_config=vllm_config, role=role, kv_cache_config=kv_cache_config
)
def get_num_new_matched_tokens(
self, request: "Request", num_computed_tokens: int
) -> tuple[int | None, bool]:
return 0, False
def update_state_after_alloc(
self,
request: "Request",
blocks: "KVCacheBlocks",
num_external_tokens: int,
):
pass
def build_connector_meta(self, scheduler_output: SchedulerOutput):
return None
def start_load_kv(self, forward_context: "ForwardContext", **kwargs) -> None:
pass
def wait_for_layer_load(self, layer_name: str) -> None:
pass
def save_kv_layer(
self,
layer_name: str,
kv_layer,
attn_metadata: AttentionMetadata,
**kwargs,
) -> None:
pass
def wait_for_save(self):
pass
@pytest.mark.parametrize("role", [KVConnectorRole.SCHEDULER, KVConnectorRole.WORKER])
def test_external_old_signature_factory_instantiation(role):
"""
Test that external connectors with old signature (2 required args) loaded
via kv_connector_module_path are correctly instantiated with backwards
compatibility support.
"""
vllm_config = create_vllm_config()
vllm_config.kv_transfer_config.kv_connector = "OldStyleTestConnector"
vllm_config.kv_transfer_config.kv_connector_module_path = (
"tests.v1.kv_connector.unit.test_backwards_compatibility"
)
scheduler = create_scheduler(vllm_config)
kv_cache_config = scheduler.kv_cache_config
connector = KVConnectorFactory.create_connector(vllm_config, role, kv_cache_config)
assert connector is not None
assert isinstance(connector, OldStyleTestConnector)
assert connector.role == role
assert connector._kv_cache_config is None
@pytest.mark.parametrize("role", [KVConnectorRole.SCHEDULER, KVConnectorRole.WORKER])
def test_external_new_signature_factory_instantiation(role):
"""
Test that external connectors with new signature (3 required args) loaded
via kv_connector_module_path are correctly instantiated.
"""
vllm_config = create_vllm_config()
vllm_config.kv_transfer_config.kv_connector = "NewStyleTestConnector"
vllm_config.kv_transfer_config.kv_connector_module_path = (
"tests.v1.kv_connector.unit.test_backwards_compatibility"
)
scheduler = create_scheduler(vllm_config)
kv_cache_config = scheduler.kv_cache_config
connector = KVConnectorFactory.create_connector(vllm_config, role, kv_cache_config)
assert connector is not None
assert isinstance(connector, NewStyleTestConnector)
assert connector.role == role
assert connector._kv_cache_config is not None
assert connector._kv_cache_config == kv_cache_config
@pytest.mark.parametrize("role", [KVConnectorRole.SCHEDULER, KVConnectorRole.WORKER])
def test_old_signature_super_init(role):
"""
Test that old-style connectors can call super().__init__() without
kv_cache_config parameter.
"""
vllm_config = create_vllm_config()
connector = OldStyleTestConnector(vllm_config, role)
assert connector is not None
assert connector.role == role
assert connector._kv_cache_config is None
def test_old_signature_super_init_with_kwargs():
"""
Test that old-style connectors can call super().__init__() with keyword
arguments in different orders.
"""
vllm_config = create_vllm_config()
# Test with vllm_config= and role= kwargs
connector1 = OldStyleTestConnector(
vllm_config=vllm_config, role=KVConnectorRole.SCHEDULER
)
assert connector1 is not None
assert connector1._kv_cache_config is None
# Test with role= and vllm_config= in reversed order
connector2 = OldStyleTestConnector(
role=KVConnectorRole.WORKER, vllm_config=vllm_config
)
assert connector2 is not None
assert connector2._kv_cache_config is None
def test_internal_connector_uses_new_signature():
"""
Test that internal connectors (registered in factory) always use the new
signature and get kv_cache_config.
"""
from vllm.distributed.kv_transfer.kv_connector.v1.example_connector import (
ExampleConnector,
)
vllm_config = create_vllm_config()
vllm_config.kv_transfer_config.kv_connector = "ExampleConnector"
scheduler = create_scheduler(vllm_config)
kv_cache_config = scheduler.kv_cache_config
connector = KVConnectorFactory.create_connector(
vllm_config, KVConnectorRole.SCHEDULER, kv_cache_config
)
assert connector is not None
assert isinstance(connector, ExampleConnector)
assert connector._kv_cache_config is not None
assert connector._kv_cache_config == kv_cache_config
def test_signature_detection_with_mocking():
"""
Test that the factory correctly applies compat_sig flag returned from
_get_connector_class_with_compat.
"""
vllm_config = create_vllm_config()
scheduler = create_scheduler(vllm_config)
kv_cache_config = scheduler.kv_cache_config
# Mock _get_connector_class_with_compat to return old-style connector
with patch.object(
KVConnectorFactory,
"_get_connector_class_with_compat",
return_value=(OldStyleTestConnector, True),
):
old_connector = KVConnectorFactory.create_connector(
vllm_config, KVConnectorRole.SCHEDULER, kv_cache_config
)
assert old_connector is not None
assert isinstance(old_connector, OldStyleTestConnector)
assert old_connector._kv_cache_config is None
# Mock _get_connector_class_with_compat to return new-style connector
with patch.object(
KVConnectorFactory,
"_get_connector_class_with_compat",
return_value=(NewStyleTestConnector, False),
):
new_connector = KVConnectorFactory.create_connector(
vllm_config, KVConnectorRole.SCHEDULER, kv_cache_config
)
assert new_connector is not None
assert isinstance(new_connector, NewStyleTestConnector)
assert new_connector._kv_cache_config is not None
assert new_connector._kv_cache_config == kv_cache_config
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_nixl_connector.py | tests/v1/kv_connector/unit/test_nixl_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib
import inspect
import os
import tempfile
import textwrap
import time
import uuid
from collections import defaultdict
from typing import Any
from unittest.mock import MagicMock, patch
import msgspec
import pytest
import ray
import torch
from vllm import LLM
from vllm.config import KVTransferConfig
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
from vllm.distributed.kv_transfer.kv_connector.v1 import nixl_connector
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import KVConnectorStats
from vllm.distributed.kv_transfer.kv_connector.v1.multi_connector import (
MultiKVConnectorStats,
)
from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import (
KVConnectorRole,
NixlAgentMetadata,
NixlConnector,
NixlConnectorMetadata,
NixlConnectorScheduler,
NixlConnectorWorker,
NixlHandshakePayload,
NixlKVConnectorStats,
compute_nixl_compatibility_hash,
)
from vllm.distributed.kv_transfer.kv_transfer_state import (
ensure_kv_transfer_shutdown,
has_kv_transfer_group,
)
from vllm.forward_context import ForwardContext
from vllm.outputs import RequestOutput
from vllm.platforms import current_platform
from vllm.platforms.interface import Platform
from vllm.sampling_params import SamplingParams
from vllm.v1.attention.backends.flash_attn import FlashAttentionBackend
from vllm.v1.engine import EngineCoreRequest
from vllm.v1.engine.output_processor import OutputProcessor
from vllm.v1.outputs import KVConnectorOutput, ModelRunnerOutput
from vllm.v1.request import RequestStatus
from .utils import create_request, create_scheduler, create_vllm_config
@pytest.fixture(scope="module", autouse=True)
def clear_kv_transfer():
"""
The test cases in this file use `VLLM_ENABLE_V1_MULTIPROCESSING=0`,
causing the global variable `_KV_CONNECTOR_AGENT`
to be assigned but never deleted.
Since the current pytest process does not terminate and instead
continues running tests from other files,
this global variable remains in memory and interferes
with test cases in other modules.
So we use this fixture to ensure that the global variable
`_KV_CONNECTOR_AGENT` is properly cleaned up after each test.
"""
yield
if has_kv_transfer_group():
ensure_kv_transfer_shutdown()
def get_default_xfer_telemetry(
xferDurationS: float = 1,
postDurationS: float = 1,
totalBytes: int = 1,
descCount: int = 1,
) -> dict:
class AttributeDict(dict):
__slots__ = ()
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__ # type: ignore[assignment]
# We can't instantiate nixlXferTelemetry because it's read only and
# ray env does not have NIXL, so we must fake it
return AttributeDict(
xferDuration=xferDurationS * 1e6, # in us
postDuration=postDurationS * 1e6, # in us
totalBytes=totalBytes,
descCount=descCount,
)
class FakeNixlWrapper:
"""Mock implementation of NixlWrapper for testing.
We don't inherit from nixl._api.nixl_agent because nixl may not be
installed.
Note: The complete source of this class is also used in the
`_make_fake_nixl_pkg` function to create a fake nixl package
for Ray workers.
"""
AGENT_METADATA = b"fake_agent_metadata"
REMOTE_AGENT_NAME = "remote_agent"
def __init__(self, agent_name: str, *args, **kwargs):
self._cycles_before_xfer_done = 0
self._check_xfer_state_cycles: defaultdict[int, int] = defaultdict(lambda: 0)
def get_reg_descs(self, caches_data, memory_type: str) -> list:
return [str(uuid.uuid4()) for _ in caches_data]
def register_memory(self, descs, backends) -> None:
pass
def deregister_memory(self, descs) -> None:
pass
def get_xfer_descs(self, blocks_data, memory_type: str) -> list:
return [str(uuid.uuid4()) for _ in blocks_data]
def prep_xfer_dlist(self, agent_name: str, descs: list) -> int:
return uuid.uuid4().int
def get_agent_metadata(self) -> bytes:
return self.AGENT_METADATA
def add_remote_agent(self, agent_metadata: bytes) -> str:
return self.REMOTE_AGENT_NAME
def get_new_notifs(self) -> dict[str, list[bytes]]:
# Used to collect done_sending, which we don't test yet.
return {}
def check_xfer_state(self, handle: int) -> str:
if self._check_xfer_state_cycles[handle] >= self._cycles_before_xfer_done:
return "DONE"
self._check_xfer_state_cycles[handle] += 1
return "PROC"
def release_xfer_handle(self, handle: int) -> None:
pass
def release_dlist_handle(self, handle: int) -> None:
pass
def remove_remote_agent(self, agent: str) -> None:
pass
def send_notif(self, agent_name: str, notif_msg: bytes) -> None:
pass
def make_prepped_xfer(
self,
xfer_type: str,
local_xfer_side_handle: int,
local_block_descs_ids: list[int],
remote_xfer_side_handle: int,
remote_block_descs_ids: list[int],
notif_msg: bytes | None = None,
) -> int:
return uuid.uuid4().int
def transfer(self, handle: int) -> str:
return "PROC"
def get_xfer_telemetry(self, handle: int) -> dict:
return get_default_xfer_telemetry()
############################################################
# Follow are for changing the behavior during testing.
############################################################
def set_cycles_before_xfer_done(self, cycles: int):
"""Set the number of cycles before a transfer is considered done."""
@contextlib.contextmanager
def _make_fake_nixl_pkg():
"""Context manager that creates a temporary package making
`from nixl._api import nixl_agent` resolve to our FakeNixlWrapper.
Automatically cleans up the temporary directory when done.
"""
with tempfile.TemporaryDirectory() as td:
pkg_root = os.path.join(td, "nixl", "_api")
os.makedirs(pkg_root, exist_ok=True)
# Get the source code of FakeNixlWrapper class and dedent it
fake_nixl_source = inspect.getsource(FakeNixlWrapper)
fake_nixl_source = textwrap.dedent(fake_nixl_source)
stub = f"""\
# Copy of FakeNixlWrapper implementation for Ray workers
import uuid
from collections import defaultdict
{fake_nixl_source}
# Export as nixl_agent
nixl_agent = FakeNixlWrapper
"""
with open(os.path.join(pkg_root, "__init__.py"), "w") as f:
f.write(stub)
# Mock nixlXferTelemetry class
pkg_root2 = os.path.join(td, "nixl", "_bindings")
os.makedirs(pkg_root2, exist_ok=True)
with open(os.path.join(pkg_root2, "__init__.py"), "w") as f:
f.write("class nixlXferTelemetry: pass")
# touch parent package
open(os.path.join(td, "nixl", "__init__.py"), "w").close()
yield td
def test_basic_interface():
"""Unit test for basic NixlConnector interface functionality."""
vllm_config = create_vllm_config()
scheduler = create_scheduler(vllm_config)
# 2 Full Blocks and 1 Half Block.
BLOCK_SIZE = vllm_config.cache_config.block_size
NUM_EXTERNAL_FULL_BLOCKS = 2
NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
request = create_request(
request_id=1,
block_size=BLOCK_SIZE,
num_tokens=NUM_TOKENS,
do_remote_prefill=True,
)
request_id = request.request_id
scheduler.add_request(request)
# Remote Prefill, triggers NixlConnectorMetadata.
scheduler_output = scheduler.schedule()
kv_connector_metadata = scheduler_output.kv_connector_metadata
assert kv_connector_metadata is not None
assert isinstance(kv_connector_metadata, NixlConnectorMetadata)
assert len(kv_connector_metadata.reqs_to_recv) == 1
assert request_id in kv_connector_metadata.reqs_to_recv
req_meta = kv_connector_metadata.reqs_to_recv[request_id]
for block_id, block in zip(
req_meta.local_block_ids,
scheduler.kv_cache_manager.coordinator.single_type_managers[0].req_to_blocks[
request_id
],
):
assert block_id == block.block_id
def test_prompt_less_than_block_size():
"""
Test that we can handle case where prompt is < block.
In this case, the P worker will still send remote_block_ids of the
partial block. The D worker should schedule an async read
in this case.
"""
vllm_config = create_vllm_config()
scheduler = create_scheduler(vllm_config)
# Half of a block.
BLOCK_SIZE = vllm_config.cache_config.block_size
NUM_TOKENS = int(BLOCK_SIZE * 0.5)
# Request will have 1 partial remote block.
request = create_request(
request_id=1,
block_size=BLOCK_SIZE,
num_tokens=NUM_TOKENS,
do_remote_prefill=True,
num_remote_blocks=1,
)
scheduler.add_request(request)
scheduler_output = scheduler.schedule()
# This request will read async.
kv_connector_metadata = scheduler_output.kv_connector_metadata
assert kv_connector_metadata is not None
assert isinstance(kv_connector_metadata, NixlConnectorMetadata)
assert len(kv_connector_metadata.reqs_to_recv) == 1
assert len(scheduler_output.scheduled_new_reqs) == 0
@patch(
"vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
FakeNixlWrapper,
)
def test_kv_transfer_handshake(dist_init):
"""Unit test for basic NixlConnector interface functionality."""
# Test setup, we creates a scheduler that contains a NixlConnector
# of role SCHEDULER, and expect it to be serving NixlAgentMetadata from
# all workers of the instance.
vllm_config = create_vllm_config()
# in case the test runs on non-GPU machine
vllm_config.kv_transfer_config.kv_buffer_device = "cpu"
scheduler = create_scheduler(vllm_config)
# Create two NixlConnector of role WORKER, one is the worker of
# the scheduler (prefill), the other is a worker of decode instance.
# Prefill connector will register KV cache to populate proper handshake
# metadata.
prefill_connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
kv_cache_shape = FlashAttentionBackend.get_kv_cache_shape(
num_blocks=2, block_size=16, num_kv_heads=4, head_size=64
)
shared_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
unique_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
kv_caches = {
"layer0": shared_tensor,
"layer1": unique_tensor,
"layer2": shared_tensor,
}
prefill_connector.register_kv_caches(kv_caches)
# Simulate EngineCore initialization that would gather connector
# metadata from all workers
metadata = prefill_connector.get_handshake_metadata()
# metadata is a NixlHandshakePayload, decode it to get NixlAgentMetadata
decoder = msgspec.msgpack.Decoder(NixlAgentMetadata)
expected_agent_metadata = decoder.decode(metadata.agent_metadata_bytes)
# The scheduler connector expects metadata to be in
# dict[int, KVConnectorHandshakeMetadata], where the first key is
# the dp_rank, the second key is the tp_rank.
scheduler_connector = scheduler.get_kv_connector()
scheduler_connector.set_xfer_handshake_metadata({0: metadata})
# Simulate a request that finishes prefill, which returns
# corresponding NixlConnectorMetadata for decode instance.
BLOCK_SIZE = vllm_config.cache_config.block_size
NUM_EXTERNAL_FULL_BLOCKS = 2
NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
request = create_request(
request_id=1,
block_size=BLOCK_SIZE,
num_tokens=NUM_TOKENS,
do_remote_decode=True,
)
request.status = RequestStatus.FINISHED_LENGTH_CAPPED
delay, kv_connector_metadata = scheduler.get_kv_connector().request_finished(
request, [0, 1, 2]
)
assert delay
# Decode connector will be able to create handshake with the prefill connector.
decode_connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
# Here we are testing the retrieval of NIXLAgentMetadata.
# Knowing the implementation detail, we override the add_remote_agent
# to validate the metadata received is the same as the one in prefill_connector.
with patch.object(
decode_connector.connector_worker, "add_remote_agent"
) as mock_add_remote_agent:
mock_add_remote_agent.return_type = "remote_agent"
decode_connector.connector_worker._nixl_handshake(
kv_connector_metadata["remote_host"],
kv_connector_metadata["remote_port"],
kv_connector_metadata["tp_size"],
kv_connector_metadata["remote_engine_id"],
)
received_metadata = mock_add_remote_agent.call_args.args
assert received_metadata[0] == expected_agent_metadata
assert received_metadata[1] == 0 # remote_tp_rank
assert received_metadata[2] == 1 # remote_tp_size
# Need to shutdown the background thread to release NIXL side channel port
scheduler_connector.shutdown()
class FakeNixlConnectorWorker(NixlConnectorWorker):
REMOTE_ENGINE_ID = "remote_engine"
def __init__(
self, *args, hand_shake_latency: float = 1.8, kv_cache_layout="HND", **kwargs
):
super().__init__(*args, **kwargs)
self._hand_shake_latency = hand_shake_latency
self.kv_cache_layout = kv_cache_layout
# Mock register_kv_caches attribute needed for tests that do not call it.
self.src_xfer_handles_by_block_size = {self.block_size: 1}
def _nixl_handshake(
self, host: str, port: int, remote_tp_size: int, expected_engine_id: str
) -> dict[int, str]:
# Mimic slow _nixl_handshake, as well as bypass zmq communication.
time.sleep(self._hand_shake_latency)
# These should've been done in register_kv_caches(), called by
# gpu_model_runner. Here we just hardcode some dummy values.
slot_size_bytes = 4096
self.slot_size_per_layer = [slot_size_bytes]
self.block_len_per_layer = [slot_size_bytes * self.block_size]
self.num_blocks = 1
self.dst_num_blocks[self.engine_id] = self.num_blocks
assert expected_engine_id == self.REMOTE_ENGINE_ID
# Adjust remote block length metadata to satisfy heterogeneous TP
# invariants enforced during handshake validation.
remote_block_lens = list(self.block_len_per_layer)
tp_ratio = self.kv_topo.tp_ratio(remote_tp_size)
if remote_tp_size > self.world_size:
# P TP > D TP case, block_len of remote is smaller
remote_block_lens = [
block_len // (-tp_ratio) for block_len in remote_block_lens
]
elif remote_tp_size < self.world_size:
remote_block_lens = [
block_len * tp_ratio for block_len in remote_block_lens
]
# When remote tp_size > local tp_size, handshake with multiple
# remote ranks.
num_hanshakes = 1 if tp_ratio > 0 else -tp_ratio
remote_agents: dict[int, str] = {}
for remote_tp_rank in range(num_hanshakes):
remote_agent_name = self.add_remote_agent(
NixlAgentMetadata(
engine_id=self.REMOTE_ENGINE_ID,
agent_metadata=FakeNixlWrapper.AGENT_METADATA,
kv_caches_base_addr=[0],
device_id=remote_tp_rank,
num_blocks=1,
block_lens=remote_block_lens,
# `self.kv_cache_layout` is only forced to HND when vllm engine
# is started. We mock HND here.
kv_cache_layout="HND",
block_size=self.block_size,
),
remote_tp_rank=remote_tp_rank,
remote_tp_size=remote_tp_size,
)
remote_agents[remote_tp_rank] = remote_agent_name
return remote_agents
class TestNixlHandshake:
@patch(
"vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
FakeNixlWrapper,
)
def test_multi_xfer_one_engine(
self,
# dist_init is a fixture that initializes the distributed environment.
dist_init,
):
"""Test case where multiple xfers are initiated to the same engine.
This test triggers the connector to load remote KV for the same
`request_id`. The transfer is not done immediately due to
`set_cycles_before_xfer_done`, so there is a state where there are
multiple transfer states for the same `request_id`, and `get_finished`
should handle it correctly (wait for all transfers to be done).
"""
vllm_config = create_vllm_config()
request_id = "req_id"
# Test worker role in decode server.
connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
connector.connector_worker = FakeNixlConnectorWorker(
vllm_config, connector.engine_id, hand_shake_latency=0
)
assert isinstance(connector.connector_worker.nixl_wrapper, FakeNixlWrapper)
worker = connector.connector_worker
worker.nixl_wrapper.set_cycles_before_xfer_done(3)
# simulate handshake
worker.dst_xfer_side_handles = {
FakeNixlConnectorWorker.REMOTE_ENGINE_ID: {0: 1}
}
worker.kv_cache_layout = "HND"
num_xfers = 4
while True:
# For the same request_id, initiate multiple xfers across different
# round of `execute_model` calls.
metadata = NixlConnectorMetadata()
if num_xfers > 0:
num_xfers -= 1
metadata.add_new_req_to_recv(
request_id=request_id,
local_block_ids=[num_xfers + 1, num_xfers + 2, num_xfers + 3],
kv_transfer_params={
"remote_block_ids": [
num_xfers + 4,
num_xfers + 5,
num_xfers + 6,
],
"remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
"remote_request_id": f"prefill-{request_id}",
"remote_host": "localhost",
"remote_port": 1234,
"remote_tp_size": 1,
},
)
connector.bind_connector_metadata(metadata)
# Mimic maybe_setup_kv_connector in gpu_model_runner.
dummy_ctx = ForwardContext(
no_compile_layers={},
attn_metadata={},
virtual_engine=0,
)
_before_load = time.perf_counter()
connector.start_load_kv(dummy_ctx)
_after_load = time.perf_counter()
assert _after_load - _before_load < 0.1, (
f"start_load_kv took {_after_load - _before_load} seconds"
)
# Mimic get_finished_kv_transfers in gpu_model_runner.
_, done_recving = connector.get_finished(finished_req_ids=set())
if len(done_recving) > 0:
assert request_id in done_recving
break
connector.clear_connector_metadata()
@patch(
"vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
FakeNixlWrapper,
)
@pytest.mark.parametrize(
"decode_tp_size, prefill_tp_size",
[
(1, 1),
(2, 1),
(4, 2),
(4, 4),
],
)
def test_async_load_kv(
self,
# Fixture that initializes the distributed environment.
dist_init,
# Simulate consumer-producer TP sizes.
decode_tp_size,
prefill_tp_size,
):
"""Test that NixlConnector's start_load_kv should be non-blocking."""
vllm_config = create_vllm_config()
vllm_config.parallel_config.tensor_parallel_size = decode_tp_size
# Test worker role in decode server.
connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
connector.connector_worker = FakeNixlConnectorWorker(
vllm_config, connector.engine_id
)
metadata = NixlConnectorMetadata()
metadata.add_new_req_to_recv(
request_id="id",
local_block_ids=[1, 2, 3],
kv_transfer_params={
"remote_block_ids": [4, 5, 6],
"remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
"remote_request_id": "prefill-id",
"remote_host": "localhost",
"remote_port": 1234,
"remote_tp_size": prefill_tp_size,
},
)
connector.bind_connector_metadata(metadata)
timeout = 2.5
start = time.perf_counter()
while time.perf_counter() - start < timeout:
dummy_ctx = ForwardContext(
no_compile_layers={},
attn_metadata={},
virtual_engine=0,
)
_before_load = time.perf_counter()
connector.start_load_kv(dummy_ctx)
_after_load = time.perf_counter()
assert _after_load - _before_load < 0.1, (
f"start_load_kv took {_after_load - _before_load} seconds"
)
time.sleep(0.5) # backoff for the async handshake to complete.
connector.bind_connector_metadata(NixlConnectorMetadata())
_, done_recving = connector.get_finished(finished_req_ids=set())
if len(done_recving) > 0:
return
raise TimeoutError("Took too long to complete async handshake.")
@patch(
"vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
FakeNixlWrapper,
)
@pytest.mark.parametrize("local_tp_size", [1, 2])
def test_prefill_tp_size_greater_than_decode_tp_size(
self, local_tp_size: int, dist_init
):
"""
Verify remote TP > local TP handshake succeeds with different
remote configurations.
"""
vllm_config = create_vllm_config()
local_tp_size = 1
vllm_config.parallel_config.tensor_parallel_size = local_tp_size
connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
connector.connector_worker = FakeNixlConnectorWorker(
vllm_config, connector.engine_id, hand_shake_latency=0
)
worker = connector.connector_worker
# Minimal local registration params used by add_remote_agent
worker.slot_size_per_layer = [4096]
worker.block_len_per_layer = [4096 * worker.block_size]
worker.num_blocks = 1
worker.dst_num_blocks[worker.engine_id] = worker.num_blocks
worker.src_blocks_data = [(0, worker.block_len_per_layer[0], worker.tp_rank)]
def check_handshake(remote_tp_size: int):
tp_ratio = remote_tp_size // local_tp_size
assert set(remote_agents.keys()) == set(range(tp_ratio))
remote_engine_id = worker.REMOTE_ENGINE_ID
assert worker._tp_size[remote_engine_id] == remote_tp_size
assert -tp_ratio == worker.kv_topo.tp_ratio_from_engine_id(remote_engine_id)
# ensure src_xfer_handles_by_tp_ratio is populated with tpratio chunks
assert -tp_ratio in worker.src_xfer_handles_by_tp_ratio
assert len(worker.src_xfer_handles_by_tp_ratio[-tp_ratio]) == tp_ratio
assert remote_engine_id in worker.dst_xfer_side_handles
assert set(worker.dst_xfer_side_handles[remote_engine_id].keys()) == set(
range(tp_ratio)
)
remote_agents = worker._nixl_handshake(
host="localhost",
port=1234,
remote_tp_size=2,
expected_engine_id=worker.REMOTE_ENGINE_ID,
)
check_handshake(2)
# NOTE flexiblity: a second remote with higher number of ranks is
# discovered. This is not a scenario we actively support right now, but
# the connector allows it.
worker.REMOTE_ENGINE_ID = "remote_engine_2"
remote_agents = worker._nixl_handshake(
host="localhost",
port=1234,
remote_tp_size=6,
expected_engine_id=worker.REMOTE_ENGINE_ID,
)
check_handshake(6)
@patch(
"vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
FakeNixlWrapper,
)
@pytest.mark.parametrize("local_tp_size", [1, 2])
def test_prefill_tp_size_greater_than_decode_tp_size_mla(
self, local_tp_size: int, dist_init
):
"""
Verify remote TP > local TP handshake succeeds with different
remote configurations for an MLA model.
"""
vllm_config = create_vllm_config()
d_tp_size = 1
p_tp_size = 2
# Build two separate connectors/workers to emulate P TP=2 ranks.
conn_p0 = NixlConnector(vllm_config, KVConnectorRole.WORKER)
conn_p1 = NixlConnector(vllm_config, KVConnectorRole.WORKER)
conn_p0.connector_worker = FakeNixlConnectorWorker(
vllm_config, conn_p0.engine_id, hand_shake_latency=0
)
conn_p1.connector_worker = FakeNixlConnectorWorker(
vllm_config, conn_p1.engine_id, hand_shake_latency=0
)
# Force P world size to 2 for both workers and emulate distinct tp_ranks.
# Also enable MLA path so that expected_finished_count is updated.
for rank, worker in enumerate(
(conn_p0.connector_worker, conn_p1.connector_worker)
):
worker.world_size = p_tp_size
worker.kv_topo.remote_tp_size = {worker.engine_id: p_tp_size}
worker.tp_rank = rank
worker.use_mla = True
req_id = "req-ep-dp2-p0"
now = time.perf_counter()
# Register a request on P that is waiting for consumers to read
# (both workers track it).
conn_p0.connector_worker._reqs_to_send[req_id] = now + 10.0
conn_p0.connector_worker._reqs_to_process.add(req_id)
conn_p1.connector_worker._reqs_to_send[req_id] = now + 10.0
conn_p1.connector_worker._reqs_to_process.add(req_id)
# Simulate a read notification coming from D with (tp=1, dp=2).
notif = f"{req_id}:{d_tp_size}".encode()
# D0-0->P0 notif
conn_p0.connector_worker.nixl_wrapper.get_new_notifs = lambda: {
"agent": [notif]
} # type: ignore[method-assign]
conn_p1.connector_worker.nixl_wrapper.get_new_notifs = lambda: {
"agent": [notif]
} # type: ignore[method-assign]
# Trigger notification processing via get_finished().
done_sending0, _ = conn_p0.get_finished(finished_req_ids=set())
done_sending1, _ = conn_p1.get_finished(finished_req_ids=set())
assert req_id in done_sending0 and req_id in done_sending1
# E2E aggregation: ensure the aggregated output marks the request
# as finished using the connector's expected_finished_count.
from vllm.v1.outputs import KVConnectorOutput, ModelRunnerOutput
aggregator = KVOutputAggregator.from_connector(conn_p0, world_size=2)
out0 = ModelRunnerOutput(
req_ids=[req_id],
req_id_to_index={req_id: 0},
sampled_token_ids=[[0]],
logprobs=None,
prompt_logprobs_dict={},
pooler_output=[None],
kv_connector_output=KVConnectorOutput(
finished_sending=done_sending0,
finished_recving=None,
),
)
out1 = ModelRunnerOutput(
req_ids=[req_id],
req_id_to_index={req_id: 0},
sampled_token_ids=[[0]],
logprobs=None,
prompt_logprobs_dict={},
pooler_output=[None],
kv_connector_output=KVConnectorOutput(
finished_sending=done_sending1,
finished_recving=None,
),
)
aggregated = aggregator.aggregate([out0, out1], output_rank=0)
assert aggregated.kv_connector_output is not None
assert aggregated.kv_connector_output.finished_sending == {req_id}
# Producers cleaned up state for the finished request.
assert req_id not in conn_p0.connector_worker._reqs_to_send
assert req_id not in conn_p0.connector_worker._reqs_to_process
assert req_id not in conn_p1.connector_worker._reqs_to_send
assert req_id not in conn_p1.connector_worker._reqs_to_process
@patch(
"vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
FakeNixlWrapper,
)
def test_concurrent_load_kv(
self,
# dist_init is a fixture that initializes the distributed environment.
dist_init,
):
"""Test that multiple start_load_kv calls should occur concurrently."""
vllm_config = create_vllm_config()
# Test worker role in decode server.
connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
connector.connector_worker = FakeNixlConnectorWorker(
vllm_config, connector.engine_id
)
# Register (mocked) local xfer handler
# worker = connector.connector_worker
# worker.src_xfer_handles_by_block_size = {worker.block_size: 1}
metadata = NixlConnectorMetadata()
total_reqs = 5
for i in range(total_reqs):
metadata.add_new_req_to_recv(
request_id=f"id_{i}",
local_block_ids=[1, 2, 3],
kv_transfer_params={
"remote_block_ids": [4, 5, 6],
"remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
"remote_request_id": f"prefill-id-{i}",
"remote_host": "localhost",
"remote_port": 1234,
"remote_tp_size": 1,
},
)
connector.bind_connector_metadata(metadata)
timeout = 2.5 * total_reqs
cnt_finished_reqs = 0
start = time.perf_counter()
while time.perf_counter() - start < timeout:
dummy_ctx = ForwardContext(
no_compile_layers={},
attn_metadata={},
virtual_engine=0,
)
_before_load = time.perf_counter()
connector.start_load_kv(dummy_ctx)
_after_load = time.perf_counter()
assert _after_load - _before_load < 0.1, (
f"start_load_kv took {_after_load - _before_load} seconds"
)
time.sleep(0.5) # backoff for the async handshake to complete.
connector.bind_connector_metadata(NixlConnectorMetadata())
_, done_recving = connector.get_finished(finished_req_ids=set())
if len(done_recving) > 0:
cnt_finished_reqs += len(done_recving)
if cnt_finished_reqs == total_reqs:
return
raise TimeoutError("Took too long to complete async handshake.")
@patch(
"vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
FakeNixlWrapper,
)
def test_handshake_fails_on_kv_cache_layout_mismatch(self, dist_init):
"""
Verify that adding a remote agent fails if kv_cache_layout differs.
This test is only relevant for heterogeneous TP.
"""
vllm_config = create_vllm_config()
# Mock TP world size to 2 to force heterogeneous TP when
# remote_tp_size=1
with patch(
"vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.get_tensor_model_parallel_world_size", # noqa: E501
return_value=2,
):
# Initialize connector and worker (with fake NIXL wrapper)
connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
connector.connector_worker = FakeNixlConnectorWorker(
vllm_config, connector.engine_id, hand_shake_latency=0
)
worker = connector.connector_worker
# Minimal local registration params used by add_remote_agent
worker.slot_size_per_layer = [4096]
worker.block_len_per_layer = [4096 * worker.block_size]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | true |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/unit/test_example_connector.py | tests/v1/kv_connector/unit/test_example_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import asdict
from typing import NamedTuple
import pytest
from PIL import Image
from vllm import LLM, EngineArgs, SamplingParams
from vllm.assets.image import ImageAsset
from vllm.config import KVTransferConfig
from vllm.multimodal.utils import encode_image_url
from vllm.platforms import current_platform
MODEL_NAME = "RedHatAI/Qwen2.5-VL-3B-Instruct-quantized.w8a8"
SAMPLING_PARAMS = SamplingParams(temperature=0.0, top_k=1, max_tokens=128)
TEXT_PROMPTS = [
"What's in the image(s)? Around 30 words. What's special in 2nd image?",
"The future of AI is",
]
class InputCase(NamedTuple):
text: str
img: list[Image]
expected_len: int
info: str
def _check_path_len(path):
"""Return the latest length in path"""
return len(list(path.iterdir()))
def _list_path(path):
"""Return the list of foldername (hashes generated) under the path"""
return list(path.iterdir())
def run_test(
tmp_path,
processor,
llm: LLM,
question: str,
image_urls: list[Image],
expected_len: int,
info: str,
):
"""
One individual test to process the prompt and output base on 1 set of input
Then check if the length in the storage path matches the expected length
`info` introduces details or purpose of the individual test
"""
print(f"***info: {info}***")
print(f"**Expected storage path length after llm generate: {expected_len}**")
process_prompt(processor, llm, question, image_urls)
print(f"Path matched expected length: {_check_path_len(tmp_path)}")
print(f"Hashes under the storage path: {_list_path(tmp_path)}")
assert _check_path_len(tmp_path) == expected_len, (
f"Expect storage path length {expected_len} ;",
f"but end up {_check_path_len(tmp_path)} instead. ",
f"Info: {info}",
)
def process_prompt(processor, llm: LLM, question: str, image_urls: list[Image]):
"""
Form the prompt based on the text and image input, then llm generate output
"""
placeholders = [
{
"type": "image_url",
"image_url": {"url": encode_image_url(image_pil)},
}
for image_pil in image_urls
]
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{
"role": "user",
"content": [
*placeholders,
{"type": "text", "text": question},
],
},
]
prompt = processor.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True
)
outputs = llm.generate(
{
"prompt": prompt,
**({"multi_modal_data": {"image": [*image_urls]}} if image_urls else {}),
},
sampling_params=SAMPLING_PARAMS,
)
print("-" * 50)
print("Output:")
for o in outputs:
generated_text = o.outputs[0].text
print(generated_text)
print("-" * 50)
@pytest.mark.skipif(
current_platform.is_rocm(),
reason=(
"hipErrorLaunchFailure when running this test, see issue:"
"https://github.com/ROCm/pytorch/issues/2822"
),
)
def test_shared_storage_connector_hashes(tmp_path):
"""
Tests that ExampleConnector saves KV to the storage locations
with proper hashes; that are unique for inputs with identical text but
different images (same size), or same multiple images but different orders.
"""
# Using tmp_path as the storage path to store KV
print(f"KV storage path at: {str(tmp_path)}")
# Configure the ExampleConnector
kv_transfer_config = KVTransferConfig(
kv_connector="ExampleConnector",
kv_role="kv_both",
kv_connector_extra_config={"shared_storage_path": str(tmp_path)},
)
engine_args = EngineArgs(
model=MODEL_NAME,
max_model_len=8192,
max_num_seqs=1,
gpu_memory_utilization=0.4,
enforce_eager=True,
kv_transfer_config=kv_transfer_config,
limit_mm_per_prompt={"image": 2},
)
# don't put this import at the top level
# it will call torch.cuda.device_count()
from transformers import AutoProcessor
# Create processor to handle the chat prompt
processor = AutoProcessor.from_pretrained(MODEL_NAME)
# Prepare images for the tests
# Resize to the same size to check hashes correctness
image_1 = ImageAsset("stop_sign").pil_image.resize((1280, 720))
image_2 = ImageAsset("cherry_blossom").pil_image.resize((1280, 720))
# Make sure that they are not the same picture
assert image_1 != image_2, "The images should not be identical"
# Create the LLM instance
engine_args = asdict(engine_args)
llm = LLM(**engine_args)
# Prepare the input cases
input_cases = [
InputCase(
text=TEXT_PROMPTS[0],
img=[image_1],
expected_len=1,
info="image_1 single input the first time.",
),
InputCase(
text=TEXT_PROMPTS[0],
img=[image_2],
expected_len=2,
info=(
"image_2 single input the first time. "
"It is in same pixel size with image_1, yet it "
"should be able to form a new unique hash."
),
),
InputCase(
text=TEXT_PROMPTS[0],
img=[image_1],
expected_len=2,
info=(
"image_1 single input the 2nd time. "
"It should not form another new hash."
),
),
InputCase(
text=TEXT_PROMPTS[0],
img=[image_2],
expected_len=2,
info=(
"image_2 single input the 2nd time. "
"It should not form another new hash."
),
),
InputCase(
text=TEXT_PROMPTS[0],
img=[image_1, image_2],
expected_len=3,
info="image_1 with image_2 input the first time.",
),
InputCase(
text=TEXT_PROMPTS[0],
img=[image_2, image_1],
expected_len=4,
info="The image order is swapped. Should form new hash.",
),
InputCase(
text=TEXT_PROMPTS[0],
img=[image_1, image_2],
expected_len=4,
info=(
"[image_1, image_2] input the 2nd time. "
"It should not form another new hash."
),
),
InputCase(
text=TEXT_PROMPTS[0],
img=[image_2, image_1],
expected_len=4,
info=(
"[image_2, image_1] input the 2nd time. "
"It should not form another new hash."
),
),
InputCase(
text=TEXT_PROMPTS[0],
img=[],
expected_len=5,
info="Pure text input test as a case-control",
),
InputCase(
text=TEXT_PROMPTS[0],
img=[],
expected_len=5,
info="Identical pure text input as a case-control",
),
InputCase(
text=TEXT_PROMPTS[1],
img=[],
expected_len=6,
info="Another pure text input as a case-control",
),
]
# Run tests
for case_id, (text, img, expected_len, info) in enumerate(input_cases):
print("\n", "=" * 25, f"Below running input case: {case_id}", "=" * 25)
run_test(tmp_path, processor, llm, text, img, expected_len, info)
print("All tests passed successfully!")
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/nixl_integration/test_edge_cases.py | tests/v1/kv_connector/nixl_integration/test_edge_cases.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import openai
PREFILL_HOST = os.getenv("PREFILL_HOST", "localhost")
PREFILL_PORT = os.getenv("PREFILL_PORT", None)
DECODE_HOST = os.getenv("DECODE_HOST", "localhost")
DECODE_PORT = os.getenv("DECODE_PORT", None)
PROXY_HOST = os.getenv("PROXY_HOST", "localhost")
PROXY_PORT = os.getenv("PROXY_PORT", None)
if PREFILL_PORT is None or DECODE_PORT is None or PROXY_PORT is None:
raise ValueError("Please set the PREFILL_PORT, DECODE_PORT, and PROXY_PORT.")
LONG_PROMPT = "Red Hat is the best company in the world to work for because it works on open source software, which means that all the contributions are delivered to the community. As a result, when working on projects like vLLM we are able to meet many amazing people from various organizations like AMD, Google, NVIDIA, " # noqa: E501
PROMPT = "Red Hat is the best company in the world to work for because it works on open source software, which means that all the contributions are delivered to the community. As a result," # noqa: E501
SHORT_PROMPT = "Red Hat is "
def test_edge_cases():
# Set the OpenAI API key and base URL
decode_client = openai.OpenAI(
api_key="MY_KEY",
base_url=f"http://{DECODE_HOST}:{DECODE_PORT}/v1",
)
prefill_client = openai.OpenAI(
api_key="MY_KEY",
base_url=f"http://{PREFILL_HOST}:{PREFILL_PORT}/v1",
)
proxy_client = openai.OpenAI(
api_key="MY_KEY",
base_url=f"http://{PROXY_HOST}:{PROXY_PORT}/v1",
)
# Get the list of models
models = decode_client.models.list()
MODEL = models.data[0].id
# (1) Check that we can handle a very short prompt,
# less than the length of the block size.
completion = proxy_client.completions.create(
model=MODEL, prompt=SHORT_PROMPT, temperature=0
)
proxy_response = completion.choices[0].text
completion = prefill_client.completions.create(
model=MODEL, prompt=SHORT_PROMPT, temperature=0
)
prefill_response = completion.choices[0].text
print(f"SMALL PROMPT: {proxy_response=}")
assert proxy_response == prefill_response
# (2) Check that we can handle a full prefix cache
# hit on the D worker but not on the P worker.
# (2a): prime the D worker.
completion = decode_client.completions.create(
model=MODEL, prompt=PROMPT, temperature=0
)
decode_response = completion.choices[0].text
# (2b): send via the P/D setup
completion = proxy_client.completions.create(
model=MODEL, prompt=PROMPT, temperature=0
)
proxy_response = completion.choices[0].text
print(f"FULL CACHE HIT: {proxy_response=}")
assert proxy_response == decode_response
# (3) Check that we can handle a partial prefix cache
# hit on the D worker.
completion = proxy_client.completions.create(
model=MODEL, prompt=LONG_PROMPT, temperature=0
)
proxy_response = completion.choices[0].text
completion = prefill_client.completions.create(
model=MODEL, prompt=LONG_PROMPT, temperature=0
)
prefill_response = completion.choices[0].text
print(f"PARTIAL CACHE HIT: {proxy_response=}")
assert proxy_response == prefill_response
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/nixl_integration/toy_proxy_server.py | tests/v1/kv_connector/nixl_integration/toy_proxy_server.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
import itertools
import logging
import os
import uuid
from contextlib import asynccontextmanager
import httpx
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@asynccontextmanager
async def lifespan(app: FastAPI):
"""
Lifespan context manager to handle startup and shutdown events.
"""
# Startup: Initialize client pools for prefiller and decoder services
app.state.prefill_clients = []
app.state.decode_clients = []
# Create prefill clients
for i, (host, port) in enumerate(global_args.prefiller_instances):
prefiller_base_url = f"http://{host}:{port}/v1"
app.state.prefill_clients.append(
{
"client": httpx.AsyncClient(
timeout=None,
base_url=prefiller_base_url,
limits=httpx.Limits(
max_connections=None,
max_keepalive_connections=None,
),
),
"host": host,
"port": port,
"id": i,
}
)
# Create decode clients
for i, (host, port) in enumerate(global_args.decoder_instances):
decoder_base_url = f"http://{host}:{port}/v1"
app.state.decode_clients.append(
{
"client": httpx.AsyncClient(
timeout=None,
base_url=decoder_base_url,
limits=httpx.Limits(
max_connections=None,
max_keepalive_connections=None,
),
),
"host": host,
"port": port,
"id": i,
}
)
# Initialize round-robin iterators
app.state.prefill_iterator = itertools.cycle(range(len(app.state.prefill_clients)))
app.state.decode_iterator = itertools.cycle(range(len(app.state.decode_clients)))
print(
f"Initialized {len(app.state.prefill_clients)} prefill clients "
f"and {len(app.state.decode_clients)} decode clients."
)
yield
# Shutdown: Close all clients
for client_info in app.state.prefill_clients:
await client_info["client"].aclose()
for client_info in app.state.decode_clients:
await client_info["client"].aclose()
# Update FastAPI app initialization to use lifespan
app = FastAPI(lifespan=lifespan)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=8000)
# Always use 127.0.0.1 as localhost binds to IPv6 which is blocked on CI
parser.add_argument("--host", type=str, default="127.0.0.1")
# For prefiller instances
parser.add_argument(
"--prefiller-hosts",
"--prefiller-host",
type=str,
nargs="+",
default=["localhost"],
)
parser.add_argument(
"--prefiller-ports", "--prefiller-port", type=int, nargs="+", default=[8100]
)
# For decoder instances
parser.add_argument(
"--decoder-hosts", "--decoder-host", type=str, nargs="+", default=["localhost"]
)
parser.add_argument(
"--decoder-ports", "--decoder-port", type=int, nargs="+", default=[8200]
)
args = parser.parse_args()
# Validate and pair hosts with ports
if len(args.prefiller_hosts) != len(args.prefiller_ports):
raise ValueError(
"Number of prefiller hosts must match number of prefiller ports"
)
if len(args.decoder_hosts) != len(args.decoder_ports):
raise ValueError("Number of decoder hosts must match number of decoder ports")
# Create tuples of (host, port) for each service type
args.prefiller_instances = list(zip(args.prefiller_hosts, args.prefiller_ports))
args.decoder_instances = list(zip(args.decoder_hosts, args.decoder_ports))
return args
def get_next_client(app, service_type: str):
"""
Get the next client in round-robin fashion.
Args:
app: The FastAPI app instance
service_type: Either 'prefill' or 'decode'
Returns:
The next client to use
"""
if service_type == "prefill":
client_idx = next(app.state.prefill_iterator)
return app.state.prefill_clients[client_idx]
elif service_type == "decode":
client_idx = next(app.state.decode_iterator)
return app.state.decode_clients[client_idx]
else:
raise ValueError(f"Unknown service type: {service_type}")
async def send_request_to_service(
client_info: dict, endpoint: str, req_data: dict, request_id: str
):
"""
Send a request to a service using a client from the pool.
"""
req_data = req_data.copy()
req_data["kv_transfer_params"] = {
"do_remote_decode": True,
"do_remote_prefill": False,
"remote_engine_id": None,
"remote_block_ids": None,
"remote_host": None,
"remote_port": None,
}
req_data["stream"] = False
req_data["max_tokens"] = 1
if "max_completion_tokens" in req_data:
req_data["max_completion_tokens"] = 1
if "stream_options" in req_data:
del req_data["stream_options"]
headers = {
"Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
"X-Request-Id": request_id,
}
response = await client_info["client"].post(
endpoint, json=req_data, headers=headers
)
response.raise_for_status()
# read/consume the response body to release the connection
# otherwise, it would http.ReadError
await response.aread()
return response
async def stream_service_response(
client_info: dict, endpoint: str, req_data: dict, request_id: str
):
"""
Asynchronously stream response from a service using a client from the pool.
"""
headers = {
"Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
"X-Request-Id": request_id,
}
async with client_info["client"].stream(
"POST", endpoint, json=req_data, headers=headers
) as response:
response.raise_for_status()
async for chunk in response.aiter_bytes():
yield chunk
async def _handle_completions(api: str, request: Request):
try:
req_data = await request.json()
request_id = str(uuid.uuid4())
# Get the next prefill client in round-robin fashion
prefill_client_info = get_next_client(request.app, "prefill")
# Send request to prefill service
response = await send_request_to_service(
prefill_client_info, api, req_data, request_id
)
# Extract the needed fields
response_json = response.json()
await response.aclose() # CRITICAL: Release connection back to pool
kv_transfer_params = response_json.get("kv_transfer_params", {})
if kv_transfer_params:
req_data["kv_transfer_params"] = kv_transfer_params
# Get the next decode client in round-robin fashion
decode_client_info = get_next_client(request.app, "decode")
logger.debug("Using %s %s", prefill_client_info, decode_client_info)
# Stream response from decode service
async def generate_stream():
async for chunk in stream_service_response(
decode_client_info, api, req_data, request_id=request_id
):
yield chunk
return StreamingResponse(generate_stream(), media_type="application/json")
except Exception as e:
import sys
import traceback
exc_info = sys.exc_info()
print(f"Error occurred in disagg prefill proxy server - {api} endpoint")
print(e)
print("".join(traceback.format_exception(*exc_info)))
raise
@app.post("/v1/completions")
async def handle_completions(request: Request):
return await _handle_completions("/completions", request)
@app.post("/v1/chat/completions")
async def handle_chat_completions(request: Request):
return await _handle_completions("/chat/completions", request)
@app.get("/healthcheck")
async def healthcheck():
"""Simple endpoint to check if the server is running."""
return {
"status": "ok",
"prefill_instances": len(app.state.prefill_clients),
"decode_instances": len(app.state.decode_clients),
}
if __name__ == "__main__":
global global_args
global_args = parse_args()
import uvicorn
uvicorn.run(app, host=global_args.host, port=global_args.port)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/nixl_integration/test_accuracy.py | tests/v1/kv_connector/nixl_integration/test_accuracy.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import lm_eval
import openai
BASE_URL = "http://localhost:8192/v1"
NUM_CONCURRENT = 100
TASK = "gsm8k"
FILTER = "exact_match,strict-match"
RTOL = 0.03
# Model-specific expected values
EXPECTED_VALUES = {
"Qwen/Qwen3-0.6B": 0.41,
"deepseek-ai/deepseek-vl2-small": 0.59,
"deepseek-ai/deepseek-vl2-tiny": 0.19,
"deepseek-ai/DeepSeek-V2-Lite-Chat": 0.65,
}
SIMPLE_PROMPT = (
"The best part about working on vLLM is that I got to meet so many people across "
"various different organizations like UCB, Google, and Meta which means",
)
# Get model name from environment variable
MODEL_NAME = os.environ.get("TEST_MODEL", "Qwen/Qwen3-0.6B")
def run_simple_prompt():
client = openai.OpenAI(api_key="EMPTY", base_url=BASE_URL)
completion = client.completions.create(model=MODEL_NAME, prompt=SIMPLE_PROMPT)
print("-" * 50)
print(f"Completion results for {MODEL_NAME}:")
print(completion)
print("-" * 50)
def test_accuracy():
"""Run the end to end accuracy test."""
run_simple_prompt()
model_args = (
f"model={MODEL_NAME},"
f"base_url={BASE_URL}/completions,"
f"num_concurrent={NUM_CONCURRENT},tokenized_requests=False"
)
results = lm_eval.simple_evaluate(
model="local-completions",
model_args=model_args,
tasks=TASK,
)
measured_value = results["results"][TASK][FILTER]
expected_value = EXPECTED_VALUES.get(MODEL_NAME)
if expected_value is None:
print(
f"Warning: No expected value found for {MODEL_NAME}. "
"Skipping accuracy check."
)
print(f"Measured value: {measured_value}")
return
assert (
measured_value - RTOL < expected_value
and measured_value + RTOL > expected_value
), f"Expected: {expected_value} | Measured: {measured_value}"
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/kv_connector/nixl_integration/test_disagg_accuracy.py | tests/v1/kv_connector/nixl_integration/test_disagg_accuracy.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
import json
import os
import time
import openai
import requests
MAX_OUTPUT_LEN = 30
SAMPLE_PROMPTS = (
"Red Hat is the best company in the world to work for because it works on "
"open source software, which means that all the contributions are "
"delivered to the community. As a result, when working on projects like "
"vLLM we are able to meet many amazing people from various organizations "
"like AMD, Google, NVIDIA, ",
"We hold these truths to be self-evident, that all men are created equal, "
"that they are endowed by their Creator with certain unalienable Rights, "
"that among these are Life, Liberty and the pursuit of Happiness.--That "
"to secure these rights, Governments are instituted among Men, deriving "
"their just powers from the consent of the governed, ",
)
def check_vllm_server(url: str, timeout=5, retries=3) -> bool:
"""
Checks if the vLLM server is ready by sending a GET request to the
/health endpoint.
Args:
url (str): The base URL of the vLLM server.
timeout (int): Timeout in seconds for the request.
retries (int): Number of retries if the server is not ready.
Returns:
bool: True if the server is ready, False otherwise.
"""
for attempt in range(retries):
try:
response = requests.get(url, timeout=timeout)
if response.status_code == 200:
return True
else:
print(
f"Attempt {attempt + 1}: Server returned status code "
"{response.status_code}"
)
except requests.exceptions.RequestException as e:
print(f"Attempt {attempt + 1}: Error connecting to server: {e}")
time.sleep(1) # Wait before retrying
return False
def run_simple_prompt(
base_url: str, model_name: str, input_prompt: str, use_chat_endpoint: bool
) -> str:
client = openai.OpenAI(api_key="EMPTY", base_url=base_url)
if use_chat_endpoint:
completion = client.chat.completions.create(
model=model_name,
messages=[
{"role": "user", "content": [{"type": "text", "text": input_prompt}]}
],
max_completion_tokens=MAX_OUTPUT_LEN,
temperature=0.0,
seed=42,
)
return completion.choices[0].message.content
else:
completion = client.completions.create(
model=model_name,
prompt=input_prompt,
max_tokens=MAX_OUTPUT_LEN,
temperature=0.0,
seed=42,
)
return completion.choices[0].text
def main():
"""
This script demonstrates how to accept two optional string arguments
("service_url" and "file_name") from the command line, each with a
default value of an empty string, using the argparse module.
"""
parser = argparse.ArgumentParser(description="vLLM client script")
parser.add_argument(
"--service_url", # Name of the first argument
type=str,
required=True,
help="The vLLM service URL.",
)
parser.add_argument(
"--model_name", # Name of the first argument
type=str,
required=True,
help="model_name",
)
parser.add_argument(
"--mode", # Name of the second argument
type=str,
default="baseline",
help="mode: baseline==non-disagg, or disagg",
)
parser.add_argument(
"--file_name", # Name of the second argument
type=str,
default=".vllm_output.txt",
help="the file that saves the output tokens ",
)
args = parser.parse_args()
for arg in vars(args):
print(f"{arg}: {getattr(args, arg)}")
if args.mode == "baseline":
# non-disagg
health_check_url = f"{args.service_url}/health"
else:
# disagg proxy
health_check_url = f"{args.service_url}/healthcheck"
if not os.path.exists(args.file_name):
raise ValueError(
f"In disagg mode, the output file {args.file_name} from "
"non-disagg. baseline does not exist."
)
service_url = f"{args.service_url}/v1"
if not check_vllm_server(health_check_url):
raise RuntimeError(f"vllm server: {args.service_url} is not ready yet!")
output_strs = dict()
for i, prompt in enumerate(SAMPLE_PROMPTS):
use_chat_endpoint = i % 2 == 1
output_str = run_simple_prompt(
base_url=service_url,
model_name=args.model_name,
input_prompt=prompt,
use_chat_endpoint=use_chat_endpoint,
)
print(f"Prompt: {prompt}, output: {output_str}")
output_strs[prompt] = output_str
if args.mode == "baseline":
# baseline: save outputs
try:
with open(args.file_name, "w") as json_file:
json.dump(output_strs, json_file, indent=4)
except OSError as e:
print(f"Error writing to file: {e}")
raise
else:
# disagg. verify outputs
baseline_outputs = None
try:
with open(args.file_name) as json_file:
baseline_outputs = json.load(json_file)
except OSError as e:
print(f"Error writing to file: {e}")
raise
assert isinstance(baseline_outputs, dict)
assert len(baseline_outputs) == len(output_strs)
for prompt, output in baseline_outputs.items():
assert prompt in output_strs, f"{prompt} not included"
assert output == output_strs[prompt], (
f"baseline_output: {output} != PD output: {output_strs[prompt]}"
)
if __name__ == "__main__":
main()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/worker/test_gpu_model_runner.py | tests/v1/worker/test_gpu_model_runner.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
import pytest
import torch
from vllm.attention.backends.abstract import MultipleOf
from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.attention.layer import Attention
from vllm.config import (
AttentionConfig,
CacheConfig,
ModelConfig,
ParallelConfig,
SchedulerConfig,
VllmConfig,
set_current_vllm_config,
)
from vllm.distributed.parallel_state import (
init_distributed_environment,
initialize_model_parallel,
)
from vllm.model_executor.layers.mamba.mamba_mixer2 import MambaMixer2
from vllm.platforms import current_platform
from vllm.sampling_params import SamplingParams
from vllm.utils.mem_constants import GiB_bytes
from vllm.utils.system_utils import update_environment_variables
from vllm.v1.core.kv_cache_utils import estimate_max_model_len, get_kv_cache_configs
from vllm.v1.core.sched.output import CachedRequestData, NewRequestData, SchedulerOutput
from vllm.v1.kv_cache_interface import (
FullAttentionSpec,
KVCacheConfig,
KVCacheGroupSpec,
KVCacheTensor,
)
from vllm.v1.sample.metadata import SamplingMetadata
from vllm.v1.worker.gpu_input_batch import InputBatch
from vllm.v1.worker.gpu_model_runner import GPUModelRunner
from vllm.v1.worker.utils import AttentionGroup
BLOCK_SIZE = 16
NUM_BLOCKS = 10
DEVICE = current_platform.device_type
def initialize_kv_cache(runner: GPUModelRunner):
"""
Only perform necessary steps in GPUModelRunner.initialize_kv_cache()
"""
attn_spec = FullAttentionSpec(
block_size=BLOCK_SIZE,
num_kv_heads=runner.model_config.get_num_kv_heads(runner.parallel_config),
head_size=runner.model_config.get_head_size(),
dtype=runner.kv_cache_dtype,
)
tensor_size = attn_spec.page_size_bytes * NUM_BLOCKS
kv_cache_config = KVCacheConfig(
num_blocks=NUM_BLOCKS,
kv_cache_tensors=[
KVCacheTensor(size=tensor_size, shared_by=["layer.0"]),
],
kv_cache_groups=[
KVCacheGroupSpec(layer_names=["layer.0"], kv_cache_spec=attn_spec)
],
)
runner.kv_cache_config = kv_cache_config
runner.input_batch = InputBatch(
max_num_reqs=runner.max_num_reqs,
max_model_len=runner.max_model_len,
max_num_batched_tokens=runner.max_num_tokens,
device=runner.device,
pin_memory=runner.pin_memory,
vocab_size=runner.model_config.get_vocab_size(),
block_sizes=[kv_cache_config.kv_cache_groups[0].kv_cache_spec.block_size],
kernel_block_sizes=[
kv_cache_config.kv_cache_groups[0].kv_cache_spec.block_size
],
)
runner.initialize_attn_backend(kv_cache_config)
def get_vllm_config():
model_config = ModelConfig(
model="facebook/opt-125m",
dtype="float16",
seed=42,
)
scheduler_config = SchedulerConfig(
max_num_seqs=10,
max_num_batched_tokens=512,
max_model_len=512,
is_encoder_decoder=model_config.is_encoder_decoder,
)
cache_config = CacheConfig(
block_size=BLOCK_SIZE,
gpu_memory_utilization=0.9,
swap_space=0,
cache_dtype="auto",
)
parallel_config = ParallelConfig()
vllm_config = VllmConfig(
model_config=model_config,
cache_config=cache_config,
scheduler_config=scheduler_config,
parallel_config=parallel_config,
)
return vllm_config
@pytest.fixture
def model_runner():
vllm_config = get_vllm_config()
model_config = vllm_config.model_config
num_heads = model_config.get_num_kv_heads(vllm_config.parallel_config)
head_size = model_config.get_head_size()
vllm_config.compilation_config.static_forward_context["layer.0"] = Attention(
num_heads, head_size, 0.1
)
runner = GPUModelRunner(vllm_config, DEVICE)
initialize_kv_cache(runner)
return runner
model_runner_2 = model_runner
def _schedule_new_request(*req_ids: str) -> SchedulerOutput:
    """Build a SchedulerOutput scheduling each req_id as a brand-new
    3-token prompt placed on block 0."""
    tokens_per_req = 3
    new_reqs = [
        NewRequestData(
            req_id=req_id,
            prompt_token_ids=[1, 2, 3],
            mm_features=[],
            sampling_params=SamplingParams(),
            pooling_params=None,
            block_ids=([0],),
            num_computed_tokens=0,
            lora_request=None,
        )
        for req_id in req_ids
    ]
    num_scheduled_tokens = {req_id: tokens_per_req for req_id in req_ids}
    return SchedulerOutput(
        scheduled_new_reqs=new_reqs,
        scheduled_cached_reqs=CachedRequestData.make_empty(),
        num_scheduled_tokens=num_scheduled_tokens,
        total_num_scheduled_tokens=sum(num_scheduled_tokens.values()),
        scheduled_spec_decode_tokens={},
        scheduled_encoder_inputs={},
        num_common_prefix_blocks=[],
        finished_req_ids=set(),
        free_encoder_mm_hashes=[],
    )
def _is_req_scheduled(model_runner, req_id: str) -> bool:
return req_id in model_runner.input_batch.req_id_to_index
def _is_req_added(model_runner, req_id: str) -> bool:
return req_id in model_runner.requests
def _is_sampling_metadata_changed(
    model_runner, sampling_metadata_before: SamplingMetadata
):
    """True iff the input batch rebuilt its sampling-metadata object.

    This is an identity (``is not``) check, not an equality check.
    """
    current = model_runner.input_batch.sampling_metadata
    return current is not sampling_metadata_before
def _is_req_state_block_table_match(model_runner, req_id: str) -> bool:
req_index = model_runner.input_batch.req_id_to_index[req_id]
block_table = model_runner.input_batch.block_table[0]
req_state = model_runner.requests[req_id]
if block_table.num_blocks_per_row[req_index] != len(req_state.block_ids[0]):
return False
num_blocks = block_table.num_blocks_per_row[req_index]
return (
block_table.block_table.np[req_index, :num_blocks] == req_state.block_ids[0]
).all()
def _make_mock_backend_for_kernel_block_size(
    supported_sizes: list[int | MultipleOf],
):
    """Return a stand-in attention backend that advertises exactly the given
    kernel block sizes (ints and/or MultipleOf constraints)."""

    class _StubBackend:
        @staticmethod
        def get_supported_kernel_block_sizes():
            # Hand back the caller's list unchanged.
            return supported_sizes

    return _StubBackend()
def _make_kv_cache_spec() -> FullAttentionSpec:
    """Minimal full-attention KV cache spec used by the block-size tests."""
    return FullAttentionSpec(
        block_size=1,
        num_kv_heads=1,
        head_size=1,
        dtype="float16",
    )
def test_select_common_block_size_prefers_manager_block_size():
    """If every backend can accept the KV manager's block size, it is kept."""
    backends = [
        _make_mock_backend_for_kernel_block_size([MultipleOf(32)]),
        _make_mock_backend_for_kernel_block_size([64, MultipleOf(16)]),
    ]
    attn_groups = [
        AttentionGroup(backend, [], [], _make_kv_cache_spec(), 0)
        for backend in backends
    ]
    # 128 satisfies MultipleOf(32) and MultipleOf(16), so it wins outright.
    assert GPUModelRunner.select_common_block_size(128, attn_groups) == 128
def test_select_common_block_size_uses_largest_shared_int():
    """When the manager block size is unsupported, fall back to the largest
    explicit block size that every backend supports (64 here)."""
    backends = [
        _make_mock_backend_for_kernel_block_size([128, 64]),
        _make_mock_backend_for_kernel_block_size([64, 32]),
    ]
    attn_groups = [
        AttentionGroup(backend, [], [], _make_kv_cache_spec(), 0)
        for backend in backends
    ]
    assert GPUModelRunner.select_common_block_size(256, attn_groups) == 64
def test_select_common_block_size_no_valid_option():
    """Raise when no size fits all backends: one backend accepts only
    exactly 64, so 48 cannot satisfy both."""
    backends = [
        _make_mock_backend_for_kernel_block_size([64]),
        _make_mock_backend_for_kernel_block_size([MultipleOf(16)]),
    ]
    attn_groups = [
        AttentionGroup(backend, [], [], _make_kv_cache_spec(), 0)
        for backend in backends
    ]
    with pytest.raises(ValueError):
        GPUModelRunner.select_common_block_size(48, attn_groups)
def test_update_states_new_request(model_runner, dist_init):
    """A freshly scheduled request must be registered in both the persistent
    state and the input batch, and force a sampling-metadata rebuild."""
    req_id = "req_0"
    metadata_before = model_runner.input_batch.sampling_metadata

    model_runner._update_states(_schedule_new_request(req_id))

    assert _is_sampling_metadata_changed(model_runner, metadata_before)
    assert _is_req_added(model_runner, req_id)
    assert _is_req_scheduled(model_runner, req_id)
    assert _is_req_state_block_table_match(model_runner, req_id)
def test_update_states_request_finished(model_runner, dist_init):
    """Finishing a request must evict it from both the persistent state and
    the input batch, rebuilding sampling metadata along the way."""
    req_id = "req_0"
    model_runner._update_states(_schedule_new_request(req_id))
    assert _is_req_added(model_runner, req_id)
    assert _is_req_scheduled(model_runner, req_id)

    # A step that schedules nothing and marks the request as finished.
    finish_output = SchedulerOutput(
        scheduled_new_reqs=[],
        scheduled_cached_reqs=CachedRequestData.make_empty(),
        num_scheduled_tokens={},
        total_num_scheduled_tokens=0,
        scheduled_spec_decode_tokens={},
        scheduled_encoder_inputs={},
        num_common_prefix_blocks=[],
        finished_req_ids={req_id},
        free_encoder_mm_hashes=[],
    )
    metadata_before = model_runner.input_batch.sampling_metadata
    model_runner._update_states(finish_output)

    assert _is_sampling_metadata_changed(model_runner, metadata_before)
    assert not _is_req_added(model_runner, req_id)
    assert not _is_req_scheduled(model_runner, req_id)
def test_update_states_request_resumed(model_runner, dist_init):
    """A request can sit out a step (unscheduled) and later come back as a
    cached request; resuming must restore its batch slot and block table."""
    req_id = "req_0"
    # new req
    scheduler_output = _schedule_new_request(req_id)
    model_runner._update_states(scheduler_output)
    assert _is_req_added(model_runner, req_id)
    assert _is_req_scheduled(model_runner, req_id)
    # unschedule req: an empty step that neither schedules nor finishes it
    scheduler_output = SchedulerOutput(
        scheduled_new_reqs=[],
        scheduled_cached_reqs=CachedRequestData.make_empty(),
        num_scheduled_tokens={},
        total_num_scheduled_tokens=0,
        scheduled_spec_decode_tokens={},
        scheduled_encoder_inputs={},
        num_common_prefix_blocks=[],
        finished_req_ids=set(),
        free_encoder_mm_hashes=[],
    )
    model_runner._update_states(scheduler_output)
    # Still tracked, but no longer occupies an input-batch slot.
    assert _is_req_added(model_runner, req_id)
    assert not _is_req_scheduled(model_runner, req_id)
    # resume req: comes back through the cached-request path on block 0
    cached_req_data = CachedRequestData(
        req_ids=[req_id],
        resumed_req_ids=set(),
        new_token_ids=[[]],
        all_token_ids={},
        new_block_ids=[([0],)],
        num_computed_tokens=[0],
        num_output_tokens=[0],
    )
    scheduler_output = SchedulerOutput(
        scheduled_new_reqs=[],
        scheduled_cached_reqs=cached_req_data,
        num_scheduled_tokens={req_id: 1},
        total_num_scheduled_tokens=1,
        scheduled_spec_decode_tokens={},
        scheduled_encoder_inputs={},
        num_common_prefix_blocks=[],
        finished_req_ids=set(),
        free_encoder_mm_hashes=[],
    )
    # Capture the metadata object before the update so the identity check
    # below actually observes the rebuild.
    metadata_before = model_runner.input_batch.sampling_metadata
    model_runner._update_states(scheduler_output)
    assert _is_sampling_metadata_changed(model_runner, metadata_before)
    assert _is_req_added(model_runner, req_id)
    assert _is_req_scheduled(model_runner, req_id)
    assert _is_req_state_block_table_match(model_runner, req_id)
def test_get_nans_in_logits(model_runner, dist_init):
    """_get_nans_in_logits returns a per-request NaN count, padding requests
    without a logits row with 0 and ignoring extra rows."""
    req_ids = ("req_0", "req_1")
    model_runner._update_states(_schedule_new_request(*req_ids))

    nan = float("nan")
    cases = [
        # (logit rows, expected per-request NaN counts)
        ([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]], {"req_0": 0, "req_1": 0}),
        ([[1.0, nan, 3.0], [4.0, nan, nan]], {"req_0": 1, "req_1": 2}),
        ([[1.0, 2.0, 3.0], [4.0, nan, nan]], {"req_0": 0, "req_1": 2}),
        # Fewer rows than requests: the missing request reports 0.
        ([[1.0, nan, 3.0]], {"req_0": 1, "req_1": 0}),
        # More rows than requests: the surplus row is ignored.
        (
            [[nan, nan, 2.0], [1.0, 2.0, 3.0], [nan, 2.0, 3.0]],
            {"req_0": 2, "req_1": 0},
        ),
    ]
    for rows, expected in cases:
        logits = torch.tensor(rows, device=DEVICE)
        assert model_runner._get_nans_in_logits(logits) == expected

    # No logits at all -> zero NaNs reported for every known request.
    assert model_runner._get_nans_in_logits(logits=None) == {"req_0": 0, "req_1": 0}
def test_update_states_no_changes(model_runner, dist_init):
    """Re-scheduling an already-tracked request with no new data must leave
    the sampling-metadata object untouched (identity-preserved)."""
    req_id = "req_0"
    model_runner._update_states(_schedule_new_request(req_id))
    assert _is_req_added(model_runner, req_id)
    assert _is_req_scheduled(model_runner, req_id)

    # Schedule the same request again without any new/cached request data.
    steady_state_output = SchedulerOutput(
        scheduled_new_reqs=[],
        scheduled_cached_reqs=CachedRequestData.make_empty(),
        num_scheduled_tokens={req_id: 1},
        total_num_scheduled_tokens=1,
        scheduled_spec_decode_tokens={},
        scheduled_encoder_inputs={},
        num_common_prefix_blocks=[],
        finished_req_ids=set(),
        free_encoder_mm_hashes=[],
    )
    metadata_before = model_runner.input_batch.sampling_metadata
    model_runner._update_states(steady_state_output)

    assert not _is_sampling_metadata_changed(model_runner, metadata_before)
    assert _is_req_added(model_runner, req_id)
    assert _is_req_scheduled(model_runner, req_id)
    assert _is_req_state_block_table_match(model_runner, req_id)
def test_update_states_request_unscheduled(model_runner, dist_init):
    """A request omitted from a step stays tracked but leaves the input batch.

    Bug fix: the original assigned ``metadata_before`` the return value of
    ``_update_states`` (None), so the ``_is_sampling_metadata_changed``
    assertion was vacuously true. Capture the metadata object *before* the
    update, matching the pattern used by the sibling ``_update_states`` tests.
    """
    req_ids = ("req_0", "req_1")
    # new reqs
    scheduler_output = _schedule_new_request(*req_ids)
    model_runner._update_states(scheduler_output)
    assert _is_req_added(model_runner, req_ids[0])
    assert _is_req_scheduled(model_runner, req_ids[0])
    assert _is_req_added(model_runner, req_ids[1])
    assert _is_req_scheduled(model_runner, req_ids[1])
    # unschedule req_1: only req_0 gets tokens this step
    scheduler_output = SchedulerOutput(
        scheduled_new_reqs=[],
        scheduled_cached_reqs=CachedRequestData.make_empty(),
        num_scheduled_tokens={req_ids[0]: 1},
        total_num_scheduled_tokens=1,
        scheduled_spec_decode_tokens={},
        scheduled_encoder_inputs={},
        num_common_prefix_blocks=[],
        finished_req_ids=set(),
        free_encoder_mm_hashes=[],
    )
    # Capture the metadata object first, then apply the update.
    metadata_before = model_runner.input_batch.sampling_metadata
    model_runner._update_states(scheduler_output)
    assert _is_sampling_metadata_changed(model_runner, metadata_before)
    assert _is_req_added(model_runner, req_ids[0])
    assert _is_req_scheduled(model_runner, req_ids[0])
    # req_1 is still tracked but no longer occupies a batch slot.
    assert _is_req_added(model_runner, req_ids[1])
    assert not _is_req_scheduled(model_runner, req_ids[1])
def test_kv_cache_stride_order(monkeypatch, model_runner):
    # This test checks if GPUModelRunner initializes correctly when an attention
    # backend enforces a non-default KV cache stride order.
    n_heads = model_runner.model_config.get_num_kv_heads(model_runner.parallel_config)
    head_size = model_runner.model_config.get_head_size()
    # Get the expected shape from the backend's get_kv_cache_shape method
    # to ensure compatibility with different backends (triton vs flexattention)
    attn_backend = None
    for attn_group in model_runner._attn_group_iterator():
        attn_backend = attn_group.backend
        break
    assert attn_backend is not None, "No attention backend found"
    expected_kv_cache_shape = list(
        attn_backend.get_kv_cache_shape(NUM_BLOCKS, BLOCK_SIZE, n_heads, head_size)
    )
    # TODO mla test
    default_stride = tuple(range(5))
    # Permutation that gets you back to expected kv shape
    for test_stride in ((1, 4, 0, 2, 3), (0, 1, 2, 3, 4)):

        def rnd_stride_order(
            include_num_layers_dimension: bool = False, test_stride=test_stride
        ):
            # test_stride is bound as a default argument on purpose: it pins
            # the loop variable's current value (late-binding closure pitfall).
            assert not include_num_layers_dimension
            return test_stride

        # Patch the attention backend class and re-trigger the KV cache creation
        for attn_group in model_runner._attn_group_iterator():
            attn_backend = attn_group.backend
            monkeypatch.setattr(
                attn_backend, "get_kv_cache_stride_order", rnd_stride_order
            )
        model_runner.attn_groups = []
        model_runner.kv_caches = []
        model_runner.initialize_kv_cache(model_runner.kv_cache_config)
        # Shape is unchanged, but layout may differ
        kv_cache_shape = model_runner.kv_caches[0].shape
        assert list(kv_cache_shape) == expected_kv_cache_shape
        if default_stride == test_stride:
            # Identity permutation -> storage should stay contiguous.
            assert all(kv.is_contiguous() for kv in model_runner.kv_caches)
        else:
            # Any non-identity permutation must yield a non-contiguous view.
            assert all(not kv.is_contiguous() for kv in model_runner.kv_caches)
def test_update_config(model_runner):
    """update_config applies nested overrides and rejects unknown sections."""
    model_runner.update_config({"load_config": {"load_format": "dummy"}})
    assert model_runner.load_config.load_format == "dummy"

    # Unknown top-level config names must be rejected outright.
    with pytest.raises(AssertionError):
        model_runner.update_config({"do_not_exist_config": "dummy"})
def test_load_model_weights_inplace(dist_init, model_runner, model_runner_2):
    """A dummy load followed by reload_weights must match a one-shot load.

    model_runner loads model + weights in one go; model_runner_2 first loads
    dummy weights, then swaps the real weights in place.
    """

    def state(runner) -> str:
        return str(runner.get_model().state_dict())

    model_runner.load_model()

    original_load_format = model_runner_2.load_config.load_format
    model_runner_2.update_config({"load_config": {"load_format": "dummy"}})
    model_runner_2.load_model()  # Initial model loading with dummy weights
    assert state(model_runner) != state(model_runner_2)

    model_runner_2.update_config({"load_config": {"load_format": original_load_format}})
    model_runner_2.reload_weights()  # Load real weights inplace
    assert state(model_runner) == state(model_runner_2)
def test_reload_weights_before_load_model(model_runner):
    """reload_weights requires load_model to have been called first."""
    with pytest.raises(AssertionError):
        model_runner.reload_weights()
def test_init_kv_cache_with_kv_sharing_invalid_target_layer_order():
    """KV sharing must target a layer that appears *earlier* in the model."""
    torch.set_default_dtype(torch.float16)
    layer_0 = "model.layers.0.self_attn.attn"
    layer_1 = "model.layers.1.self_attn.attn"

    def make_attn(prefix: str, **extra):
        # All layers in these tests share the same geometry.
        return Attention(num_heads=8, head_size=64, scale=1.0, prefix=prefix, **extra)

    with pytest.raises(
        ValueError, match=f"{layer_1} must come before the current layer"
    ):
        # layer_0 points at layer_1, which comes *after* it -> invalid.
        fwd_context = {
            layer_0: make_attn(layer_0, kv_sharing_target_layer_name=layer_1),
            layer_1: make_attn(layer_1),
        }
        # suppress var not used error
        assert fwd_context is not None
def test_init_kv_cache_with_kv_sharing_target_layer_not_exist():
    """KV sharing must target a registered Attention layer."""
    torch.set_default_dtype(torch.float16)
    layer_0 = "model.layers.0.self_attn.attn"
    layer_1 = "model.layers.1.self_attn.attn"
    invalid_layer = "model.layers.0.cross_attn.attn"

    def make_attn(prefix: str, **extra):
        # All layers in these tests share the same geometry.
        return Attention(num_heads=8, head_size=64, scale=1.0, prefix=prefix, **extra)

    with pytest.raises(
        ValueError, match=f"{invalid_layer} is not a valid Attention layer in the model"
    ):
        # cross_attn.attn was never registered -> invalid sharing target.
        fwd_context = {
            layer_0: make_attn(layer_0),
            layer_1: make_attn(layer_1, kv_sharing_target_layer_name=invalid_layer),
        }
        # suppress var not used error
        assert fwd_context is not None
def test_init_kv_cache_with_kv_sharing_target_same_as_current():
    """A layer may not declare itself as its own KV-sharing target."""
    torch.set_default_dtype(torch.float16)
    layer_0 = "model.layers.0.self_attn.attn"
    layer_1 = "model.layers.1.self_attn.attn"

    def make_attn(prefix: str, **extra):
        # All layers in these tests share the same geometry.
        return Attention(num_heads=8, head_size=64, scale=1.0, prefix=prefix, **extra)

    with pytest.raises(
        ValueError, match=f"{layer_1} cannot be the same as the current layer"
    ):
        # layer_1 targets itself -> invalid.
        fwd_context = {
            layer_0: make_attn(layer_0),
            layer_1: make_attn(layer_1, kv_sharing_target_layer_name=layer_1),
        }
        # suppress var not used error
        assert fwd_context is not None
def test_init_kv_cache_without_kv_sharing():
    """Baseline KV cache setup: two independent attention layers, each with
    its own KV cache tensor and an even split of the available memory."""
    torch.set_default_dtype(torch.float16)
    layer_0 = "model.layers.0.self_attn.attn"
    layer_1 = "model.layers.1.self_attn.attn"
    vllm_config = get_vllm_config()
    with set_current_vllm_config(vllm_config):
        fwd_context = {
            layer_0: Attention(
                num_heads=8,
                head_size=64,
                scale=1.0,
                prefix=layer_0,
            ),
            layer_1: Attention(
                num_heads=8,
                head_size=64,
                scale=1.0,
                prefix=layer_1,
            ),
        }
        # suppress var not used error
        assert fwd_context is not None
    # Set high context length to test max context length estimation
    vllm_config.model_config.max_model_len = 3_000_000
    vllm_ctx = vllm_config.compilation_config.static_forward_context
    runner = GPUModelRunner(vllm_config, DEVICE)
    kv_cache_spec = runner.get_kv_cache_spec()
    # Both layers get their own spec and nothing is marked as shared.
    assert len(kv_cache_spec) == 2
    assert len(runner.shared_kv_cache_layers) == 0
    available_memory = 20 * GiB_bytes
    # page size for layer 0's kv_cache_spec is 32KB
    num_expected_blocks = 327680  # 20GB / 32KB / 2 (num layers)
    kv_cache_config = get_kv_cache_configs(
        vllm_config, [kv_cache_spec], [available_memory]
    )[0]
    assert kv_cache_config.num_blocks == num_expected_blocks
    assert len(kv_cache_config.kv_cache_tensors) == 2
    # Memory is split evenly between the two independent layers.
    assert kv_cache_config.kv_cache_tensors[0].size == available_memory // 2
    assert kv_cache_config.kv_cache_tensors[1].size == available_memory // 2
    max_context_len = estimate_max_model_len(vllm_config, kv_cache_spec, 5 * GiB_bytes)
    # Baseline max context len WITHOUT KV sharing; the KV-sharing variant of
    # this test expects exactly 2x this value.
    assert max_context_len == 1310720
    # important: override tensor size to prevent large mem alloc during test
    # this will only allocate 2 block worth of memory (2 * 32kb)
    kv_cache_config.num_blocks = 1
    for kv_cache_tensor in kv_cache_config.kv_cache_tensors:
        kv_cache_tensor.size = kv_cache_spec[
            kv_cache_tensor.shared_by[0]
        ].page_size_bytes
    runner.initialize_kv_cache(kv_cache_config)
    layer_0_kv = vllm_ctx[layer_0].kv_cache[0]
    layer_1_kv = vllm_ctx[layer_1].kv_cache[0]
    # check layer 1 kv cache does NOT share memory with layer 0
    assert id(layer_1_kv) != id(layer_0_kv)
    # check layer 1 added to kv cache group's layer names
    assert len(kv_cache_config.kv_cache_groups) == 1
    assert len(kv_cache_config.kv_cache_groups[0].layer_names) == 2
    assert kv_cache_config.kv_cache_groups[0].layer_names[0] == layer_0
    assert kv_cache_config.kv_cache_groups[0].layer_names[1] == layer_1
def test_init_kv_cache_with_kv_sharing_valid():
    """KV sharing: layer 1 reuses layer 0's KV cache, doubling both the
    number of allocatable blocks and the estimated max context length."""
    torch.set_default_dtype(torch.float16)
    layer_0 = "model.layers.0.self_attn.attn"
    layer_1 = "model.layers.1.self_attn.attn"
    vllm_config = get_vllm_config()
    with set_current_vllm_config(vllm_config):
        fwd_context = {
            layer_0: Attention(
                num_heads=8,
                head_size=64,
                scale=1.0,
                prefix=layer_0,
            ),
            layer_1: Attention(
                num_heads=8,
                head_size=64,
                scale=1.0,
                prefix=layer_1,
                kv_sharing_target_layer_name="model.layers.0.self_attn.attn",
            ),
        }
        # suppress var not used error
        assert fwd_context is not None
    # Set high context length to test max context length estimation
    vllm_config.model_config.max_model_len = 3_000_000
    vllm_ctx = vllm_config.compilation_config.static_forward_context
    runner = GPUModelRunner(vllm_config, DEVICE)
    kv_cache_spec = runner.get_kv_cache_spec()
    # Only layer 0 needs its own spec; layer 1 is recorded as sharing it.
    assert len(kv_cache_spec) == 1
    assert layer_0 in kv_cache_spec
    assert runner.shared_kv_cache_layers[layer_1] == layer_0
    available_memory = 20 * GiB_bytes
    # page size for layer 0's kv_cache_spec is 32KB
    # with KV sharing, we can allocate (available_mem//page_size//1) blocks
    # which is twice as many as without KV sharing
    num_expected_blocks = 655360  # 20GB / 32KB
    kv_cache_config = get_kv_cache_configs(
        vllm_config, [kv_cache_spec], [available_memory]
    )[0]
    assert kv_cache_config.num_blocks == num_expected_blocks
    assert len(kv_cache_config.kv_cache_tensors) == 1
    # Each layer now has twice the available memory for KV cache
    # compared to no KV sharing
    assert kv_cache_config.kv_cache_tensors[0].size == available_memory
    max_context_len = estimate_max_model_len(vllm_config, kv_cache_spec, 5 * GiB_bytes)
    # max context len with KV sharing should be 2x as large as without
    assert max_context_len == 2 * 1310720
    # important: override tensor size to prevent large mem alloc during test
    # this will only allocate 1 block worth of memory (32kb)
    kv_cache_config.num_blocks = 1
    kv_cache_config.kv_cache_tensors[0].size = kv_cache_spec[layer_0].page_size_bytes
    runner.initialize_kv_cache(kv_cache_config)
    kv_cache_config_after_init = runner.kv_cache_config
    layer_0_kv = vllm_ctx[layer_0].kv_cache[0]
    layer_1_kv = vllm_ctx[layer_1].kv_cache[0]
    # check layer 1 kv cache shares memory with layer 0
    assert id(layer_1_kv) == id(layer_0_kv)
    # check layer 1 added to kv cache group's layer names
    assert len(kv_cache_config_after_init.kv_cache_groups) == 1
    assert len(kv_cache_config_after_init.kv_cache_groups[0].layer_names) == 2
    assert kv_cache_config_after_init.kv_cache_groups[0].layer_names[0] == layer_0
    assert kv_cache_config_after_init.kv_cache_groups[0].layer_names[1] == layer_1
@pytest.mark.skipif(
current_platform.is_rocm(),
reason="Attention backend FLASHINFER is not supported on ROCm.",
)
def test_hybrid_attention_mamba_tensor_shapes():
"""
The GPU model runner creates different views into the
KVCacheTensors for the attention and mamba layers
(via _reshape_kv_cache_tensors function). This test verifies
that the views are compatible: writing a mamba block
will not corrupt an attention block and vice versa
"""
current_platform.seed_everything(42)
update_environment_variables(
{
"RANK": "0",
"LOCAL_RANK": "0",
"WORLD_SIZE": "1",
"MASTER_ADDR": "localhost",
"MASTER_PORT": "12345",
}
)
init_distributed_environment()
initialize_model_parallel(tensor_model_parallel_size=1)
torch.set_default_dtype(torch.float16)
model_config = ModelConfig(
model="ibm-granite/granite-4.0-tiny-preview",
dtype="float16",
)
scheduler_config = SchedulerConfig(
max_num_seqs=10,
max_num_batched_tokens=512,
max_model_len=512,
is_encoder_decoder=model_config.is_encoder_decoder,
)
cache_config = CacheConfig(
block_size=BLOCK_SIZE,
gpu_memory_utilization=0.9,
swap_space=0,
cache_dtype="auto",
)
parallel_config = ParallelConfig()
attention_config = AttentionConfig(backend=AttentionBackendEnum.FLASHINFER)
vllm_config = VllmConfig(
model_config=model_config,
cache_config=cache_config,
scheduler_config=scheduler_config,
parallel_config=parallel_config,
attention_config=attention_config,
)
layer_0 = "model.layers.0.self_attn.attn"
layer_1 = "model.layers.1.self_attn.attn"
layer_2 = "model.layers.2.mixer"
layer_3 = "model.layers.3.mixer"
layer_4 = "model.layers.4.mixer"
layer_5 = "model.layers.5.mixer"
with set_current_vllm_config(vllm_config):
hf_config = vllm_config.model_config.hf_config
fwd_context = {}
for key in [layer_0, layer_1]:
fwd_context[key] = Attention(
num_heads=model_config.get_num_attention_heads(parallel_config),
num_kv_heads=model_config.get_num_kv_heads(parallel_config),
head_size=model_config.get_head_size(),
scale=1.0,
prefix=key,
)
for key in [layer_2, layer_3, layer_4, layer_5]:
fwd_context[key] = MambaMixer2(
hidden_size=hf_config.hidden_size,
ssm_state_size=hf_config.mamba_d_state,
conv_kernel_size=hf_config.mamba_d_conv,
intermediate_size=hf_config.mamba_expand * hf_config.hidden_size,
use_conv_bias=hf_config.mamba_conv_bias,
use_bias=hf_config.mamba_proj_bias,
n_groups=hf_config.mamba_n_groups,
num_heads=hf_config.mamba_n_heads,
head_dim=hf_config.mamba_d_head,
rms_norm_eps=hf_config.rms_norm_eps,
activation=hf_config.hidden_act,
cache_config=cache_config,
model_config=model_config,
prefix=key,
)
# suppress var not used error
assert fwd_context is not None
vllm_ctx = vllm_config.compilation_config.static_forward_context
runner = GPUModelRunner(vllm_config, DEVICE)
kv_cache_spec = runner.get_kv_cache_spec()
available_memory = 5 * GiB_bytes
kv_cache_config = get_kv_cache_configs(
vllm_config, [kv_cache_spec], [available_memory]
)[0]
runner.initialize_kv_cache(kv_cache_config)
# random partition of blocks
# blocks0 will be assigned to attention layers
# blocks1 will be assigned to mamba layers
num_blocks = kv_cache_config.num_blocks
ind = np.arange(num_blocks)
np.random.shuffle(ind)
blocks0, blocks1 = ind[: (num_blocks // 2)], ind[(num_blocks // 2) :]
attn_shape = vllm_ctx[layer_0].kv_cache[0].shape
conv_shape = vllm_ctx[layer_2].kv_cache[0][0].shape
ssm_shape = vllm_ctx[layer_2].kv_cache[0][1].shape
# assert we are using FlashInfer
assert attn_shape[0] % num_blocks == 0
block_split_ratio = attn_shape[0] // num_blocks
# use small blocks for testing to avoid memory issues
test_block_size = min(2, len(blocks0), len(blocks1))
# use non-overlapping blocks to avoid data contamination
# Split kernel blocks: first half for attention, second half for mamba
mid_point = num_blocks // 2
# attention uses kernel blocks from first half (mapped to logical blocks)
kv_blocks_for_attention = np.array([0, 1])[:test_block_size]
# mamba uses kernel blocks from second half
kv_blocks_for_mamba = np.array([mid_point, mid_point + 1])[:test_block_size]
# create small constant tensors for testing with corrected shapes
# attention: [block_size, ...] starting from dimension 2
attn_constant_shape = attn_shape[2:]
conv_constant_shape = conv_shape[1:]
ssm_constant_shape = ssm_shape[1:]
attn_blocks_constant = torch.full(
(test_block_size, *attn_constant_shape), device=DEVICE, fill_value=3.33
)
conv_blocks_constant = torch.full(
(test_block_size, *conv_constant_shape), device=DEVICE, fill_value=6.66
)
ssm_blocks_constant = torch.full(
(test_block_size, *ssm_constant_shape), device=DEVICE, fill_value=9.99
)
# Fill attention blocks with constants using kv block indices
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | true |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/worker/test_utils.py | tests/v1/worker/test_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from vllm.v1.worker.utils import bind_kv_cache
def test_bind_kv_cache():
    """bind_kv_cache wires each layer's tensor into its Attention module and
    mirrors the per-layer mapping into the flat runner list, in layer order."""
    from vllm.attention.layer import Attention

    layer_names = [f"layers.{i}.self_attn" for i in range(4)]
    ctx = {name: Attention(32, 128, 0.1) for name in layer_names}
    kv_cache = {name: torch.zeros((1,)) for name in layer_names}
    runner_kv_caches: list[torch.Tensor] = []

    bind_kv_cache(kv_cache, ctx, runner_kv_caches)

    for i, name in enumerate(layer_names):
        assert ctx[name].kv_cache[0] is kv_cache[name]
        assert runner_kv_caches[i] is kv_cache[name]
def test_bind_kv_cache_non_attention():
    """Layer indices need not be contiguous (Jamba-style PP=2 shard): the
    runner list is ordered by binding order, not by layer index."""
    from vllm.attention.layer import Attention

    # example from Jamba PP=2
    layer_names = ["model.layers.20.attn", "model.layers.28.attn"]
    ctx = {name: Attention(32, 128, 0.1) for name in layer_names}
    kv_cache = {name: torch.zeros((1,)) for name in layer_names}
    runner_kv_caches: list[torch.Tensor] = []

    bind_kv_cache(kv_cache, ctx, runner_kv_caches)

    for i, name in enumerate(layer_names):
        assert ctx[name].kv_cache[0] is kv_cache[name]
        assert runner_kv_caches[i] is kv_cache[name]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/worker/test_worker_memory_snapshot.py | tests/v1/worker/test_worker_memory_snapshot.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import multiprocessing as mp
import os
import tempfile
from multiprocessing.queues import Queue
from unittest.mock import patch
import pytest
import torch
from vllm.engine.arg_utils import EngineArgs
from vllm.utils.mem_utils import MemorySnapshot
from vllm.v1.worker.gpu_worker import Worker, init_worker_distributed_environment
# Global queue to track operation order across processes.
# None in the parent process; worker_process() assigns the shared queue
# in each spawned child before any tracked call runs.
_QUEUE: Queue | None = None
def track_operation(operation: str, rank: int):
    """Record an (operation, rank) tuple on the cross-process queue, if set."""
    queue = _QUEUE
    if queue is None:
        return
    queue.put((operation, rank))
def make_operation_tracker(operation_name: str, original_func):
    """Create a mock function that tracks when an operation is called.

    Args:
        operation_name: Name to use when tracking this operation
        original_func: The original function to wrap

    Returns:
        A wrapper function that tracks the operation and calls the original
    """

    def tracked(*args, **kwargs):
        current_rank = int(os.environ.get("RANK", "-1"))
        track_operation(operation_name, current_rank)
        return original_func(*args, **kwargs)

    return tracked
def worker_process(
    rank: int,
    world_size: int,
    distributed_init_method: str,
    queue: Queue,
    error_queue: Queue,
):
    """Worker process that initializes a GPU worker with proper tracking.

    Args:
        rank: This process's rank (also used as local rank / GPU index).
        world_size: Total number of tensor-parallel workers.
        distributed_init_method: torch.distributed init-method URL.
        queue: Receives ("operation", rank) tuples recording call order.
        error_queue: Receives (rank, message, exception type name) on failure.
    """
    global _QUEUE
    _QUEUE = queue
    try:
        # Set environment variables
        os.environ["RANK"] = str(rank)
        os.environ["LOCAL_RANK"] = str(rank)
        os.environ["WORLD_SIZE"] = str(world_size)
        # Create vLLM config with small model
        vllm_config = EngineArgs(
            model="facebook/opt-125m", tensor_parallel_size=2, load_format="dummy"
        ).create_engine_config()
        # Create worker
        worker = Worker(
            vllm_config=vllm_config,
            local_rank=rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
        )
        # Get original functions before patching, so the wrappers can still
        # delegate to the real implementations.
        original_init_worker = init_worker_distributed_environment
        original_memory_snapshot_init = MemorySnapshot.__init__
        original_all_reduce = torch.distributed.all_reduce
        # Apply minimal patches to track operation order
        init_patch = patch(
            "vllm.v1.worker.gpu_worker.init_worker_distributed_environment",
            side_effect=make_operation_tracker(
                "init_distributed", original_init_worker
            ),
        )
        memory_patch = patch.object(
            MemorySnapshot,
            "__init__",
            make_operation_tracker("memory_snapshot", original_memory_snapshot_init),
        )
        all_reduce_patch = patch(
            "torch.distributed.all_reduce",
            side_effect=make_operation_tracker("nccl_all_reduce", original_all_reduce),
        )
        with init_patch, memory_patch, all_reduce_patch:
            # Initialize device (this is where we test the order)
            worker.init_device()
            # Load model to ensure everything works
            worker.load_model()
        # Signal success
        queue.put(("success", rank))
    except Exception as e:
        error_queue.put((rank, str(e), type(e).__name__))
        raise
@pytest.mark.skipif(
    torch.cuda.device_count() < 2, reason="Need at least 2 GPUs for tensor parallelism"
)
def test_init_distributed_is_called_before_memory_snapshot():
    """Test that distributed env is setup before memory snapshot.

    This test makes sure during worker initialization, the initial memory
    snapshot is taken after distributed env is setup to include all the buffers
    allocated by distributed env.
    """
    world_size = 2
    # Create a temporary file for distributed init
    with tempfile.NamedTemporaryFile(delete=False) as f:
        distributed_init_method = f"file://{f.name}"
    # Create queues for inter-process communication
    ctx = mp.get_context("spawn")
    operation_queue = ctx.Queue()
    error_queue = ctx.Queue()
    # Start worker processes
    processes = []
    for rank in range(world_size):
        p = ctx.Process(
            target=worker_process,
            args=(
                rank,
                world_size,
                distributed_init_method,
                operation_queue,
                error_queue,
            ),
        )
        p.start()
        processes.append(p)
    # Wait for all processes to complete
    for p in processes:
        p.join(timeout=60)  # 60 second timeout
    # NOTE(review): a timed-out join is not checked here; a hung worker would
    # surface below as missing operations rather than an explicit failure.
    # Check for errors
    errors = []
    while not error_queue.empty():
        rank, error_msg, error_type = error_queue.get()
        errors.append(f"Rank {rank}: {error_type}: {error_msg}")
    if errors:
        pytest.fail("Worker processes failed:\n" + "\n".join(errors))
    # Collect all operations from the queue
    operations = []
    while not operation_queue.empty():
        operations.append(operation_queue.get())
    # Verify we got operations from both ranks
    print(f"Collected operations: {operations}")
    # Check operations for each rank
    for rank in range(world_size):
        rank_ops = [op for op, r in operations if r == rank]
        print(f"\nRank {rank} operations: {rank_ops}")
        # Raises ValueError if the operation is not found
        init_distributed = rank_ops.index("init_distributed")
        nccl_all_reduce = rank_ops.index("nccl_all_reduce")
        memory_snapshot = rank_ops.index("memory_snapshot")
        # Verify order: init_distributed should happen before memory_snapshot
        assert init_distributed < nccl_all_reduce < memory_snapshot, (
            f"Rank {rank}: init_distributed (index {init_distributed}) "
            f"must happen before nccl_all_reduce (index {nccl_all_reduce}) "
            f"and memory_snapshot (index {memory_snapshot})"
        )
    # Clean up
    os.unlink(distributed_init_method.replace("file://", ""))
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/worker/test_gpu_profiler.py | tests/v1/worker/test_gpu_profiler.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from vllm.config import ProfilerConfig
from vllm.profiler.wrapper import WorkerProfiler
class ConcreteWorkerProfiler(WorkerProfiler):
    """
    A basic implementation of a worker profiler for testing purposes.

    Counts `_start`/`_stop` invocations so tests can assert how many times
    the base class actually drove the underlying profiler hooks, and can
    simulate a failing start via `should_fail_start`.
    """

    def __init__(self, profiler_config: ProfilerConfig):
        # Counters are initialized before super().__init__ in case the base
        # initializer exercises the hooks — TODO confirm against WorkerProfiler.
        self.start_call_count = 0
        self.stop_call_count = 0
        self.should_fail_start = False
        super().__init__(profiler_config)

    def _start(self) -> None:
        # Failure path used by test_start_failure.
        if self.should_fail_start:
            raise RuntimeError("Simulated start failure")
        self.start_call_count += 1

    def _stop(self) -> None:
        self.stop_call_count += 1
@pytest.fixture
def default_profiler_config():
    """Baseline torch-profiler config: no start delay, no iteration cap."""
    config_kwargs = {
        "profiler": "torch",
        "torch_profiler_dir": "/tmp/mock",
        "delay_iterations": 0,
        "max_iterations": 0,
    }
    return ProfilerConfig(**config_kwargs)
def test_immediate_start_stop(default_profiler_config) -> None:
    """Test standard start without delay."""
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    # delay_iterations=0: the underlying profiler must start immediately.
    profiler.start()
    assert profiler._running is True
    assert profiler._active is True
    assert profiler.start_call_count == 1
    profiler.stop()
    assert profiler._running is False
    assert profiler._active is False
    assert profiler.stop_call_count == 1
def test_delayed_start(default_profiler_config) -> None:
    """Test that profiler waits for N steps before actually starting."""
    default_profiler_config.delay_iterations = 2
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    # User requests start
    profiler.start()
    # Should be active (request accepted) but not running (waiting for delay)
    assert profiler._active is True
    assert profiler._running is False
    assert profiler.start_call_count == 0
    # Step 1
    profiler.step()
    assert profiler._running is False
    # Step 2 (Threshold reached)
    profiler.step()
    assert profiler._running is True
    assert profiler.start_call_count == 1
def test_max_iterations(default_profiler_config) -> None:
    """Test that profiler stops automatically after max iterations."""
    default_profiler_config.max_iterations = 2
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    profiler.start()
    assert profiler._running is True
    # Iteration 1
    profiler.step()  # profiling_count becomes 1
    assert profiler._running is True
    # Iteration 2
    profiler.step()  # profiling_count becomes 2
    assert profiler._running is True
    # Iteration 3 (Exceeds max)
    profiler.step()  # profiling_count becomes 3
    # Should have stopped now
    assert profiler._running is False
    assert profiler.stop_call_count == 1
def test_delayed_start_and_max_iters(default_profiler_config) -> None:
    """Test combined delayed start and max iterations."""
    default_profiler_config.delay_iterations = 2
    default_profiler_config.max_iterations = 2
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    profiler.start()
    # Step 1 (still within the start delay)
    profiler.step()
    assert profiler._running is False
    assert profiler._active is True
    # Step 2 (Starts now; counts as profiling iteration 1)
    profiler.step()
    assert profiler._profiling_for_iters == 1
    assert profiler._running is True
    assert profiler._active is True
    # Profiling iteration 2 (still within max_iterations=2)
    profiler.step()
    assert profiler._profiling_for_iters == 2
    assert profiler._running is True
    # Profiling iteration 3 exceeds max_iterations=2
    profiler.step()
    # Should have stopped now
    assert profiler._running is False
    assert profiler.stop_call_count == 1
def test_idempotency(default_profiler_config) -> None:
    """Test that calling start/stop multiple times doesn't break logic."""
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    # Double Start
    profiler.start()
    profiler.start()
    assert profiler.start_call_count == 1  # Should only start once
    # Double Stop
    profiler.stop()
    profiler.stop()
    assert profiler.stop_call_count == 1  # Should only stop once
def test_step_inactive(default_profiler_config) -> None:
    """Test that stepping while inactive does nothing."""
    default_profiler_config.delay_iterations = 2
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    # Not started yet
    profiler.step()
    profiler.step()
    # Even though we stepped 2 times, start shouldn't happen because active=False
    assert profiler.start_call_count == 0
def test_start_failure(default_profiler_config) -> None:
    """Test behavior when the underlying _start method raises exception."""
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    profiler.should_fail_start = True
    profiler.start()
    # Exception caught in _call_start
    assert profiler._running is False  # Should not mark as running
    assert profiler._active is True  # Request is still considered active
    assert profiler.start_call_count == 0  # Logic failed inside start
def test_shutdown(default_profiler_config) -> None:
    """Test that shutdown calls stop only if running."""
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    # Case 1: Not running
    profiler.shutdown()
    assert profiler.stop_call_count == 0
    # Case 2: Running
    profiler.start()
    profiler.shutdown()
    assert profiler.stop_call_count == 1
def test_mixed_delay_and_stop(default_profiler_config) -> None:
    """Test manual stop during the delay period."""
    default_profiler_config.delay_iterations = 5
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    profiler.start()
    profiler.step()
    profiler.step()
    # User cancels before delay finishes
    profiler.stop()
    assert profiler._active is False
    # Further steps should not trigger start
    profiler.step()
    profiler.step()
    profiler.step()
    assert profiler.start_call_count == 0
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/worker/test_gpu_input_batch.py | tests/v1/worker/test_gpu_input_batch.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import inspect
from collections.abc import Sequence
import numpy as np
import pytest
import torch
from vllm.platforms import current_platform
from vllm.sampling_params import SamplingParams
from vllm.utils.platform_utils import is_pin_memory_available
from vllm.utils.torch_utils import make_tensor_with_pad
from vllm.v1.pool.metadata import PoolingMetadata
from vllm.v1.sample.logits_processor import LogitsProcessors
from vllm.v1.sample.metadata import SamplingMetadata
from vllm.v1.utils import CpuGpuBuffer
from vllm.v1.worker.block_table import BlockTable, MultiGroupBlockTable
from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
# Vocabulary size shared by all fake requests/logits in this module.
VOCAB_SIZE = 1024
# Upper bound on randomly generated output-token counts per request.
NUM_OUTPUT_TOKENS = 20
# Upper bound on randomly generated prompt lengths.
MAX_PROMPT_SIZE = 100
# Parametrize device tests over at most two available accelerator devices.
CUDA_DEVICES = [
    f"{current_platform.device_type}:{i}"
    for i in range(min(current_platform.device_count(), 2))
]
MAX_NUM_PROMPT_TOKENS = 64
def _compare_objs(obj1, obj2, skip: Sequence = ("logitsprocs", "batch_update_builder")):
attrs = inspect.getmembers(obj1, lambda a: not (inspect.isroutine(a)))
attr_names = set(
[a[0] for a in attrs if not (a[0].startswith("__") and a[0].endswith("__"))]
)
for attr_name in attr_names:
if attr_name in skip:
continue
a = getattr(obj1, attr_name)
b = getattr(obj2, attr_name)
is_same = False
if isinstance(a, torch.Tensor):
if a.numel() == 0 or b.numel() == 0:
is_same = a.numel() == 0 and b.numel() == 0
elif torch.allclose(a, b):
is_same = True
elif isinstance(a, np.ndarray):
if np.allclose(a, b):
is_same = True
elif isinstance(a, MultiGroupBlockTable):
for a_i, b_i in zip(a.block_tables, b.block_tables):
_compare_objs(a_i, b_i)
is_same = True
elif isinstance(a, (BlockTable, SamplingMetadata, PoolingMetadata)):
_compare_objs(a, b)
is_same = True # if we make it here must be same
elif a == b:
is_same = True
elif isinstance(a, CpuGpuBuffer):
is_same = np.allclose(a.np, b.np) and torch.allclose(a.gpu, b.gpu)
assert is_same, (
f"Attribute {attr_name} is different in {obj1} and {obj2}: {a} != {b}"
)
def _remove_requests(
    input_batch: InputBatch, batch_size: int, reqs: list[CachedRequestState]
) -> set[str]:
    """
    Randomly remove a subset of requests from the batch and return the
    set of request ids that were removed.
    """
    # Draw a random number of removal attempts, then a random index for each.
    # Duplicate indices collapse in the set, so the actual number removed may
    # be smaller than num_reqs_to_remove.
    num_reqs_to_remove = np.random.randint(0, batch_size)
    indices_to_remove: set[int] = set()
    for _ in range(num_reqs_to_remove):
        indices_to_remove.add(np.random.randint(0, batch_size))

    removed_req_ids: set[str] = set()
    for idx in indices_to_remove:
        removed_id = reqs[idx].req_id
        input_batch.remove_request(removed_id)
        removed_req_ids.add(removed_id)
    return removed_req_ids
def _construct_expected_sampling_metadata(
    reqs: list[CachedRequestState],
    req_ids_retained: set[str],
    req_id_index_in_input_batch: dict[str, int],
    device: torch.device,
) -> SamplingMetadata:
    """
    Constructs and returns the expected SamplingMetadata for this
    batch.

    Args:
        reqs: all requests originally added to the batch.
        req_ids_retained: ids of requests still present after removals.
        req_id_index_in_input_batch: mapping from request id to its slot in
            the condensed input batch.
        device: device on which the expected tensors are allocated.
    """
    num_reqs = len(req_ids_retained)
    # Slot-indexed defaults; retained requests overwrite their own slot below.
    output_token_ids: list[list[int]] = [list() for _ in range(num_reqs)]
    prompt_token_ids: list[list[int]] = [list() for _ in range(num_reqs)]
    presence_penalties = [0.0 for _ in range(num_reqs)]
    frequency_penalties = [0.0 for _ in range(num_reqs)]
    repetition_penalties = [1.0 for _ in range(num_reqs)]
    top_k = [0 for _ in range(num_reqs)]
    top_p = [0.0 for _ in range(num_reqs)]
    temperature = [0.0 for _ in range(num_reqs)]
    min_tokens = {}
    logit_bias = [None] * num_reqs
    allowed_token_ids_mask = torch.zeros(
        num_reqs, VOCAB_SIZE, dtype=torch.bool, device=device
    )
    bad_words_token_ids = {}
    for req in reqs:
        # Skip requests that were removed from the batch.
        if req.req_id not in req_ids_retained:
            continue
        index_in_input_batch = req_id_index_in_input_batch[req.req_id]
        output_token_ids[index_in_input_batch] = req.output_token_ids
        prompt_token_ids[index_in_input_batch] = req.prompt_token_ids
        presence_penalties[index_in_input_batch] = req.sampling_params.presence_penalty
        frequency_penalties[index_in_input_batch] = (
            req.sampling_params.frequency_penalty
        )
        repetition_penalties[index_in_input_batch] = (
            req.sampling_params.repetition_penalty
        )
        top_k[index_in_input_batch] = req.sampling_params.top_k
        top_p[index_in_input_batch] = req.sampling_params.top_p
        temperature[index_in_input_batch] = req.sampling_params.temperature
        min_tokens[index_in_input_batch] = (
            req.sampling_params.min_tokens,
            req.sampling_params.all_stop_token_ids,
        )
        logit_bias[index_in_input_batch] = req.sampling_params.logit_bias
        if req.sampling_params.allowed_token_ids:
            allowed_token_ids_mask[index_in_input_batch][
                req.sampling_params.allowed_token_ids
            ] = True
        if req.sampling_params.bad_words_token_ids:
            bad_words_token_ids[index_in_input_batch] = (
                req.sampling_params.bad_words_token_ids
            )
    return SamplingMetadata(
        temperature=torch.tensor(temperature, dtype=torch.float, device=device),
        all_greedy=False,
        all_random=True,
        # top_p/top_k collapse to None when no request constrains them.
        top_p=None
        if all(x == 1.0 for x in top_p)
        else torch.tensor(top_p, dtype=torch.float, device=device),
        top_k=None
        if all(x == 0 for x in top_k)
        else torch.tensor(top_k, dtype=torch.int, device=device),
        generators={},
        max_num_logprobs=0,
        prompt_token_ids=make_tensor_with_pad(
            prompt_token_ids,
            pad=VOCAB_SIZE,
            device=torch.device(device),
            dtype=torch.int64,
        ),
        frequency_penalties=torch.tensor(
            frequency_penalties, dtype=torch.float, device=device
        ),
        presence_penalties=torch.tensor(
            presence_penalties, dtype=torch.float, device=device
        ),
        repetition_penalties=torch.tensor(
            repetition_penalties, dtype=torch.float, device=device
        ),
        output_token_ids=output_token_ids,
        spec_token_ids=[[] for _ in range(len(output_token_ids))],
        no_penalties=(
            all(x == 0 for x in presence_penalties)
            and all(x == 0 for x in frequency_penalties)
            and all(x == 1 for x in repetition_penalties)
        ),
        allowed_token_ids_mask=allowed_token_ids_mask,
        bad_words_token_ids=bad_words_token_ids,
        logitsprocs=LogitsProcessors(),
    )
def _create_sampling_params():
    """Build SamplingParams with randomized values for every tested field."""
    top_k = np.random.randint(1, 10)
    top_p = np.random.uniform(0.0, 1.0)
    presence_penalty = np.random.uniform(-2.0, 2.0)
    repetition_penalty = np.random.uniform(0.0, 2.0)
    frequency_penalty = np.random.uniform(-2.0, 2.0)
    min_tokens = np.random.randint(1, 10)
    # Up to 9 random stop tokens drawn from the vocabulary.
    num_stop_tokens = np.random.randint(10)
    stop_token_ids = [np.random.randint(0, VOCAB_SIZE) for _ in range(num_stop_tokens)]
    logit_bias = {0: np.random.uniform(-3.0, 3.0)}
    return SamplingParams(
        top_k=top_k,
        top_p=top_p,
        presence_penalty=presence_penalty,
        repetition_penalty=repetition_penalty,
        frequency_penalty=frequency_penalty,
        min_tokens=min_tokens,
        stop_token_ids=stop_token_ids,
        logit_bias=logit_bias,
    )
def _construct_cached_request_state(req_id_suffix: int):
    """Create a CachedRequestState with random prompt/output tokens."""

    def random_token_ids(max_len: int) -> list[int]:
        # Random length in [0, max_len), each token drawn from the vocab.
        length = np.random.randint(0, max_len)
        return [np.random.randint(0, VOCAB_SIZE) for _ in range(length)]

    prompt_ids = random_token_ids(MAX_PROMPT_SIZE)
    output_ids = random_token_ids(NUM_OUTPUT_TOKENS)
    return CachedRequestState(
        req_id=f"req_id_{req_id_suffix}",
        prompt_token_ids=prompt_ids,
        sampling_params=_create_sampling_params(),
        pooling_params=None,
        mm_features=[],
        block_ids=([],),
        generator=None,
        num_computed_tokens=len(output_ids),
        output_token_ids=output_ids,
    )
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("batch_size", [1, 2, 32, 64])
def test_sampling_metadata_in_input_batch(device: str, batch_size: int) -> None:
    """
    Tests the logic for managing sampling metadata in the InputBatch.

    This test involves adding a set of requests to the InputBatch,
    followed by removing a subset of them. Afterward, the batch is compacted,
    and the `make_sampling_metadata` method is invoked on the batch. The
    output of `make_sampling_metadata` is then compared against the expected
    results to ensure correctness.

    Note: Ignore logits processor logic, which is tested separately
    """
    input_batch: InputBatch = InputBatch(
        max_num_reqs=batch_size,
        max_model_len=1024,
        max_num_batched_tokens=1024,
        device=torch.device(device),
        pin_memory=is_pin_memory_available(),
        vocab_size=1024,
        block_sizes=[1],
        kernel_block_sizes=[1],
    )
    reqs: list[CachedRequestState] = []
    req_id_reqs = {}
    req_id_output_token_ids = {}
    # Add requests
    for req_index in range(batch_size):
        req: CachedRequestState = _construct_cached_request_state(req_index)
        assigned_req_index = input_batch.add_request(req)
        assert req_index == assigned_req_index
        reqs.append(req)
        req_id_reqs[req.req_id] = req
        req_id_output_token_ids[req.req_id] = req.output_token_ids
    # Remove some requests
    req_ids_to_remove = _remove_requests(input_batch, batch_size, reqs)
    req_ids_retained = set(req_id_reqs.keys()) - req_ids_to_remove
    # Compact the input batch
    input_batch.condense()
    # Generate the sampling metadata
    sampling_metadata = input_batch._make_sampling_metadata()
    # Create expected output.
    expected_sampling_metadata = _construct_expected_sampling_metadata(
        reqs, req_ids_retained, input_batch.req_id_to_index, device=torch.device(device)
    )

    def same(t1: torch.Tensor | None, t2: torch.Tensor | None) -> bool:
        # Optional tensors are equal when both are None or both allclose.
        return (t1 is None and t2 is None) or (
            t1 is not None and t2 is not None and torch.allclose(t1, t2)
        )

    # Assert the actual and expected output.
    assert torch.allclose(
        expected_sampling_metadata.temperature, sampling_metadata.temperature
    )
    assert same(expected_sampling_metadata.top_p, sampling_metadata.top_p)
    assert same(expected_sampling_metadata.top_k, sampling_metadata.top_k)
    assert torch.allclose(
        expected_sampling_metadata.frequency_penalties,
        sampling_metadata.frequency_penalties,
    )
    assert torch.allclose(
        expected_sampling_metadata.presence_penalties,
        sampling_metadata.presence_penalties,
    )
    assert torch.allclose(
        expected_sampling_metadata.repetition_penalties,
        sampling_metadata.repetition_penalties,
    )
    assert torch.allclose(
        expected_sampling_metadata.prompt_token_ids, sampling_metadata.prompt_token_ids
    )
    assert (
        expected_sampling_metadata.output_token_ids
        == sampling_metadata.output_token_ids
    )
    assert expected_sampling_metadata.no_penalties == sampling_metadata.no_penalties
    if sampling_metadata.allowed_token_ids_mask:
        assert torch.allclose(
            expected_sampling_metadata.allowed_token_ids_mask,
            sampling_metadata.allowed_token_ids_mask,
        )
    assert (
        expected_sampling_metadata.bad_words_token_ids
        == sampling_metadata.bad_words_token_ids
    )
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("batch_size", [32])
@pytest.mark.parametrize("swap_list", [((0, 1),)])
def test_swap_states_in_input_batch(device: str, batch_size: int, swap_list: list) -> None:
    """
    Tests InputBatch.swap_states.

    Two batches are built from the same requests: in one, pairs of request
    slots are swapped in place via `swap_states`; the reference batch is
    populated directly in the already-swapped order. After refreshing
    metadata, the two batches must be attribute-for-attribute identical.
    """
    input_batch: InputBatch = InputBatch(
        max_num_reqs=batch_size,
        max_model_len=1024,
        max_num_batched_tokens=1024,
        device=torch.device(device),
        pin_memory=is_pin_memory_available(),
        vocab_size=1024,
        block_sizes=[1],
        kernel_block_sizes=[1],
    )
    ref_input_batch: InputBatch = InputBatch(
        max_num_reqs=batch_size,
        max_model_len=1024,
        max_num_batched_tokens=1024,
        device=torch.device(device),
        pin_memory=is_pin_memory_available(),
        vocab_size=1024,
        block_sizes=[1],
        kernel_block_sizes=[1],
    )
    reqs: list[CachedRequestState] = []
    req_id_reqs = {}
    req_id_output_token_ids = {}
    # Add requests
    for req_index in range(batch_size):
        req: CachedRequestState = _construct_cached_request_state(req_index)
        assigned_req_index = input_batch.add_request(req)
        assert assigned_req_index == req_index
        reqs.append(req)
        req_id_reqs[req.req_id] = req
        req_id_output_token_ids[req.req_id] = req.output_token_ids

    # Apply each swap to both the local request list and the batch under test.
    reordered_reqs = reqs.copy()
    for swap_pair in swap_list:
        reordered_reqs[swap_pair[0]], reordered_reqs[swap_pair[1]] = (
            reordered_reqs[swap_pair[1]],
            reordered_reqs[swap_pair[0]],
        )
        input_batch.swap_states(swap_pair[0], swap_pair[1])
    # Build the reference batch directly in the swapped order.
    for req_index in range(batch_size):
        req = reordered_reqs[req_index]
        assigned_req_index = ref_input_batch.add_request(req)
        assert assigned_req_index == req_index
    input_batch.refresh_metadata()
    ref_input_batch.refresh_metadata()

    _compare_objs(input_batch, ref_input_batch)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/worker/__init__.py | tests/v1/worker/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/logits_processors/test_correctness.py | tests/v1/logits_processors/test_correctness.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import random
from collections.abc import Callable
from typing import NamedTuple, TypeAlias
import numpy as np
import pytest
import torch
from tests.utils import create_new_process_for_each_test
from tests.v1.sample.utils import (
LogitsprocsTestFakes,
create_fake_logits,
create_penalty_tensor,
create_prompt_tokens_tensor,
fake_apply_logitsprocs,
fake_update_logitsprocs_state,
)
from vllm.config import VllmConfig
from vllm.platforms import current_platform
from vllm.sampling_params import SamplingParams
from vllm.utils.platform_utils import is_pin_memory_available
from vllm.v1.sample.logits_processor import (
BatchUpdate,
BatchUpdateBuilder,
LogitBiasLogitsProcessor,
LogitsProcessor,
MinPLogitsProcessor,
MinTokensLogitsProcessor,
MoveDirectionality,
build_logitsprocs,
)
from vllm.v1.sample.metadata import SamplingMetadata
# Cached once at import time; passed to build_logitsprocs in the fakes.
PIN_MEMORY_AVAILABLE = is_pin_memory_available()
MAX_NUM_REQS = 256
# Vocabulary size shared by all fake logits in this module.
VOCAB_SIZE = 1024
NUM_OUTPUT_TOKENS = 20
# Parametrize device tests over at most two available accelerator devices.
CUDA_DEVICES = [
    f"{current_platform.device_type}:{i}"
    for i in range(1 if current_platform.device_count() == 1 else 2)
]
MAX_NUM_PROMPT_TOKENS = 64
# Requests below this output length must still have stop tokens masked.
MIN_TOKENS_LEN_THRESHOLD = 5
REQS_PER_LOGITPROC = 50
# Sentinel meaning "request enables no logits processor".
STR_NO_LOGITPROC = "none"
# LogitsProcessor subclass or "none"
LogitprocType: TypeAlias = type[LogitsProcessor] | str
class LogitsProcsRequestParams:
    """Encapsulates key params for a single request in a batch.

    Params can be customized based on the enabled logitproc.
    """

    workload_index: int
    logitproc_type: LogitprocType  # Logitproc enabled, specified by str id
    out_tokens: list[int]  # Output tokens required for min tokens test
    prompt_tokens: list[int]  # Dummy prompt tokens placeholder
    params: SamplingParams  # Settings customized for logitproc

    def __init__(self, workload_index: int, logitproc_type: LogitprocType):
        self.workload_index = workload_index
        self.logitproc_type = logitproc_type
        # Number of output tokens is randomly 0x, 1x or 2x the min-tokens
        # threshold which will be used in testing. Output token values
        # don't matter *for these tests* so use 0 as a dummy value
        self.out_tokens = [0] * (MIN_TOKENS_LEN_THRESHOLD * random.randint(0, 2))
        self.prompt_tokens = []
        self.params = _sampling_params_from_logitproc(logitproc_type)

    def __str__(self):
        """For debugging"""
        summ = ", ".join(f"{k}={v}" for k, v in vars(self).items())
        # Bug fix: report the real class name, not the "MyClass" placeholder.
        return f"{type(self).__name__}({summ})"
def _generate_fake_sampling_metadata(
    num_output_tokens: int,
    batch_size: int,
    vocab_size: int,
    device: torch.device,
) -> SamplingMetadata:
    """Generate fake sampling metadata with fake logitsprocs.

    Args:
        num_output_tokens: number of random output tokens per request.
        batch_size: number of requests in the fake batch.
        vocab_size: vocabulary size used when drawing random token ids.
        device: device for the constructed tensors.
    """
    output_token_ids: list[list[int]] = []
    prompt_token_ids: list[list[int]] = []
    for _ in range(batch_size):
        output_token_ids.append(
            np.random.randint(0, vocab_size, size=num_output_tokens).tolist()
        )
        # Prompt length is random but always at least 1 token.
        prompt_token_ids.append(
            np.random.randint(
                0, vocab_size, size=np.random.randint(1, MAX_NUM_PROMPT_TOKENS)
            ).tolist()
        )
    logitsprocs = build_logitsprocs(
        vllm_config=VllmConfig(),
        device=device,
        is_pin_memory=PIN_MEMORY_AVAILABLE,
        is_pooling_model=False,
    )
    # Greedy defaults: temperature 0, no top-p/top-k, no penalties.
    fake_sampling_metadata = SamplingMetadata(
        temperature=torch.full((batch_size,), 0.0),
        all_greedy=True,
        all_random=False,
        top_p=None,
        top_k=None,
        generators={},
        max_num_logprobs=0,
        prompt_token_ids=create_prompt_tokens_tensor(
            prompt_token_ids, vocab_size, device
        ),
        output_token_ids=output_token_ids,
        frequency_penalties=create_penalty_tensor(batch_size, 0.0, device),
        presence_penalties=create_penalty_tensor(batch_size, 0.0, device),
        repetition_penalties=create_penalty_tensor(batch_size, 1.0, device),
        no_penalties=True,
        allowed_token_ids_mask=None,
        bad_words_token_ids={},
        logitsprocs=logitsprocs,
    )
    return fake_sampling_metadata
def _generate_test_fakes(batch_size: int, device: str) -> LogitsprocsTestFakes:
    """Build fake logits plus matching sampling metadata for a test batch."""
    logits = create_fake_logits(batch_size, VOCAB_SIZE)
    # Give every row a single dominant token (index 0) so the min-p test has
    # a clear keep/mask split.
    logits[:, 0] = 10.0  # High logit for first token
    logits[:, 1:] = 1e-2  # Others remain low
    metadata = _generate_fake_sampling_metadata(
        NUM_OUTPUT_TOKENS, batch_size, VOCAB_SIZE, torch.device(device)
    )
    return LogitsprocsTestFakes(logits=logits, sampling_metadata=metadata)
def _sampling_params_from_logitproc(logitproc_type: LogitprocType) -> SamplingParams:
    """Build SamplingParams tailored to the specified logits processor."""
    # Defaults correspond to "no logits processor enabled".
    request_kwargs = {"min_p": 0.0, "logit_bias": None, "min_tokens": 0}
    customize = logitsprocs_test_mapping[logitproc_type].gen_request_fxn
    if customize is not None:
        customize(request_kwargs)
    return SamplingParams(**request_kwargs)
def _generate_mixed_logitsprocs_batch_params(
    reqs_per_logitproc: int,
    logitsprocs_types: list[str],
) -> list[LogitsProcsRequestParams]:
    """Define key params for a batch of requests with a different
    logitproc enabled per request.

    The batch will have `reqs_per_logitproc` repeats for all
    `logitsprocs_types` under test, including the case where
    no logitsproc is enabled. The batch is randomly shuffled. The
    size of the batch is `reqs_per_logitproc` times
    `n = len(logitsprocs_types)`

    Args:
        reqs_per_logitproc: number of requests using each logitproc
        logitsprocs_types: logitsprocs under test

    Returns:
        List of per-request params which configure the engine for that request's
        enabled logitproc
    """
    batch_size = len(logitsprocs_types) * reqs_per_logitproc
    # Generate multiple repeats of key params for each logitproc;
    # apply random inverse permutation to the iteration
    # over logitsprocs, such that logitsprocs are shuffled.
    batch_perm = random.sample(range(batch_size), k=batch_size)
    # pdx // reqs_per_logitproc selects the logitproc for this slot; idx is
    # the request's stable position in the shuffled workload.
    return [
        LogitsProcsRequestParams(
            workload_index=idx,
            logitproc_type=logitsprocs_types[pdx // reqs_per_logitproc],
        )
        for idx, pdx in enumerate(batch_perm)
    ]
def _raise_error_invalid(
    msg_suffix: str,
    batch_index: int,
    request_params: LogitsProcsRequestParams,
    step_idx: int,
    err_cls: type[Exception] = ValueError,
) -> None:
    """Raise ``err_cls`` with a uniformly formatted validation-failure message."""
    message = (
        f"Validation failed for step={step_idx}, "
        f"batch_index={batch_index}, "
        f"workload_index={request_params.workload_index}, "
        f"req_params={request_params}. Reason: {msg_suffix}"
    )
    raise err_cls(message)
def _logit_bias_params(kwargs: dict) -> None:
    """Enable the logit-bias processor: bias one random token up or down."""
    token_id = random.randint(0, VOCAB_SIZE - 1)
    bias = random.choice([-0.1, 0.2])
    kwargs["logit_bias"] = {token_id: bias}
def _logit_bias_validate(
    test_fakes: LogitsprocsTestFakes,
    persistent_batch: list[LogitsProcsRequestParams],
    logits_new: torch.Tensor,
    batch_index: int,
    request_params: LogitsProcsRequestParams,
    step_idx: int,
) -> None:
    """Validate logit bias logitproc applied correctly.

    Each biased token must equal its original logit plus the configured
    bias; every other token must be unchanged.
    """
    logit_bias = request_params.params.logit_bias
    # Original (pre-logitsprocs) logits for this request, looked up by its
    # workload position rather than its current batch slot.
    logits_old = test_fakes.logits[persistent_batch[batch_index].workload_index].cpu()
    logits_new = logits_new[batch_index].cpu()
    for token_id in range(VOCAB_SIZE):
        logit_old_value = logits_old[token_id]
        logit_new_value = logits_new[token_id]
        if token_id in logit_bias:
            bias_value = logit_bias[token_id]
            exp_value = bias_value + logit_old_value
            if logit_new_value != pytest.approx(exp_value):
                _raise_error_invalid(
                    msg_suffix=(
                        f"Biased token {token_id} logit value {logit_new_value} "
                        f"does not match expected value {exp_value} "
                        f"given bias {bias_value}"
                    ),
                    batch_index=batch_index,
                    request_params=request_params,
                    step_idx=step_idx,
                )
        else:
            if logit_new_value != pytest.approx(logit_old_value):
                _raise_error_invalid(
                    msg_suffix=(
                        f"Unbiased token {token_id} logit value {logit_new_value} "
                        f"does not match expected value {logit_old_value}"
                    ),
                    batch_index=batch_index,
                    request_params=request_params,
                    step_idx=step_idx,
                )
def _min_p_params(kwargs: dict) -> None:
"""Min-p logitproc config"""
kwargs["min_p"] = 0.1
def _min_p_validate(
    test_fakes: LogitsprocsTestFakes,
    persistent_batch: list[LogitsProcsRequestParams],
    logits_new: torch.Tensor,
    batch_index: int,
    request_params: LogitsProcsRequestParams,
    step_idx: int,
) -> None:
    """Validate min-p logitproc applied correctly.

    Relies on the fake logits having one dominant token at index 0: with
    min_p > 0 every other token must be masked to -inf, and with min_p == 0
    nothing may be masked.
    """
    for token_id in range(VOCAB_SIZE):
        logits_for_token = logits_new[batch_index][token_id]
        if token_id == 0:
            # Dominant token should always be unmasked
            if logits_for_token == -float("inf"):
                _raise_error_invalid(
                    msg_suffix="Invalid: dominant token 0 masked (-inf)",
                    batch_index=batch_index,
                    request_params=request_params,
                    step_idx=step_idx,
                )
        else:
            if request_params.params.min_p > 0.0:
                # Non-dominant tokens should be masked when min_p > 0
                if logits_for_token != -float("inf"):
                    _raise_error_invalid(
                        msg_suffix=f"Invalid: non-dominant token {token_id} not masked",
                        batch_index=batch_index,
                        request_params=request_params,
                        step_idx=step_idx,
                    )
            else:
                # No masking when min_p is 0
                if logits_for_token == -float("inf"):
                    _raise_error_invalid(
                        msg_suffix=f"Invalid: token {token_id} masked when min_p=0.0",
                        batch_index=batch_index,
                        request_params=request_params,
                        step_idx=step_idx,
                    )
def _min_tokens_params(kwargs: dict) -> None:
    """Enable the min-tokens processor with a random set of stop tokens."""
    kwargs["min_tokens"] = MIN_TOKENS_LEN_THRESHOLD
    # Random number of stop tokens, each drawn from the vocabulary.
    num_stop_tokens = np.random.randint(0, VOCAB_SIZE)
    kwargs["stop_token_ids"] = [
        np.random.randint(0, VOCAB_SIZE - 1) for _ in range(num_stop_tokens)
    ]
def _min_tokens_validate(
    test_fakes: LogitsprocsTestFakes,
    persistent_batch: list[LogitsProcsRequestParams],
    logits_new: torch.Tensor,
    batch_index: int,
    request_params: LogitsProcsRequestParams,
    step_idx: int,
) -> None:
    """Validate min-tokens logitsproc applied correctly.

    Checks both the processor's internal per-request state (tracked output
    tokens and stop-token set) and the resulting logits: while a request is
    below its min length, all of its stop tokens must be masked to -inf.
    """
    ref_num_out_tokens = len(request_params.out_tokens)
    min_reached = ref_num_out_tokens >= MIN_TOKENS_LEN_THRESHOLD
    ref_all_stop_token_ids = request_params.params.all_stop_token_ids
    mt_lp: MinTokensLogitsProcessor = next(
        test_fakes.get_logitsprocs_by_cls(MinTokensLogitsProcessor)
    )
    assert isinstance(mt_lp, MinTokensLogitsProcessor)
    # The processor only tracks requests still below their min length.
    min_tok = mt_lp.min_toks.get(batch_index, None)
    # Validate min-token logits processor state
    if min_tok:
        (_, out_tok, all_stop_token_ids) = min_tok
        num_out_tokens = len(out_tok)
        if num_out_tokens != ref_num_out_tokens:
            _raise_error_invalid(
                msg_suffix=(
                    "Number of output tokens in min-token logit processor "
                    f"request metadata ({num_out_tokens}) does not match "
                    f"reference ({ref_num_out_tokens})."
                ),
                batch_index=batch_index,
                request_params=request_params,
                step_idx=step_idx,
            )
        if ref_all_stop_token_ids != all_stop_token_ids:
            _raise_error_invalid(
                msg_suffix=(
                    "Stop token ids do not match reference; all_stop_token_ids: "
                    f"{sorted(all_stop_token_ids)}, ref_all_stop_token_ids: "
                    f"{sorted(ref_all_stop_token_ids)}"
                ),
                batch_index=batch_index,
                request_params=request_params,
                step_idx=step_idx,
            )
        if min_reached:
            # Tracked despite having reached min length: internal state bug.
            _raise_error_invalid(
                msg_suffix=(
                    "Expected min-tokens request with min reached, but batch "
                    "index is recognized by min-tokens logits processor."
                ),
                batch_index=batch_index,
                request_params=request_params,
                step_idx=step_idx,
                err_cls=RuntimeError,
            )
    elif not min_reached:
        # Below min length but not tracked: internal state bug.
        _raise_error_invalid(
            msg_suffix=(
                "Expected min-tokens request with min not reached, but batch "
                "index is not recognized by min-tokens logits processor."
            ),
            batch_index=batch_index,
            request_params=request_params,
            step_idx=step_idx,
            err_cls=RuntimeError,
        )
    # Validate min-token logits
    for token_id in range(VOCAB_SIZE):
        logits_for_token = logits_new[batch_index][token_id]
        if token_id in ref_all_stop_token_ids and not min_reached:
            if logits_for_token != -float("inf"):
                _raise_error_invalid(
                    msg_suffix=(
                        f"Token {token_id} is a stop token and "
                        "the sequence has not reached min length, "
                        "but the token is not masked "
                        f"(logit={logits_for_token})"
                    ),
                    batch_index=batch_index,
                    request_params=request_params,
                    step_idx=step_idx,
                )
        else:
            if logits_for_token == -float("inf"):
                _raise_error_invalid(
                    msg_suffix=(
                        f"Token {token_id} should not be masked but "
                        f"is (output len={ref_num_out_tokens})"
                    ),
                    batch_index=batch_index,
                    request_params=request_params,
                    step_idx=step_idx,
                )
def _none_validate(
    test_fakes: LogitsprocsTestFakes,
    persistent_batch: list[LogitsProcsRequestParams],
    logits_new: torch.Tensor,
    batch_index: int,
    request_params: LogitsProcsRequestParams,
    step_idx: int,
) -> None:
    """Validate that no logits processors are applied.

    For a request with no logitproc enabled, its row of logits must be
    unchanged from the original fake logits.
    """
    logits = test_fakes.logits[persistent_batch[batch_index].workload_index].cpu()
    ref_logits = logits_new[batch_index]
    if not torch.all(ref_logits == logits):
        # Report exactly which token positions were unexpectedly modified.
        mismatch_toks = (ref_logits != logits).nonzero(as_tuple=True)[0].tolist()
        mismatch_strs = []
        for token in mismatch_toks:
            val = float(logits[token])
            ref_val = float(ref_logits[token])
            mismatch_strs.append(f"({token=},{val=},{ref_val=})")
        _raise_error_invalid(
            msg_suffix=(
                f"Unexpected modification of logits: {','.join(mismatch_strs)}"
            ),
            batch_index=batch_index,
            request_params=request_params,
            step_idx=step_idx,
        )
class LogitsprocTestHelpers(NamedTuple):
    """Supports setting up and validating logitsprocs unit tests.

    eval_fxn: validates logits for one request after processors ran.
    gen_request_fxn: mutates a SamplingParams kwargs dict to enable the
        processor; ``None`` means the request enables no logits processor.
    """

    eval_fxn: Callable
    gen_request_fxn: Callable | None = None
# Maps each logits processor under test (or STR_NO_LOGITPROC) to the helper
# functions used to build requests for it and to validate its effect on logits.
logitsprocs_test_mapping = {
    STR_NO_LOGITPROC: LogitsprocTestHelpers(eval_fxn=_none_validate),
    LogitBiasLogitsProcessor: LogitsprocTestHelpers(
        gen_request_fxn=_logit_bias_params, eval_fxn=_logit_bias_validate
    ),
    MinPLogitsProcessor: LogitsprocTestHelpers(
        gen_request_fxn=_min_p_params, eval_fxn=_min_p_validate
    ),
    MinTokensLogitsProcessor: LogitsprocTestHelpers(
        gen_request_fxn=_min_tokens_params, eval_fxn=_min_tokens_validate
    ),
}
def _get_test_cases() -> list[list[str]]:
    """Each test case is a set of logitsprocs to enable together."""
    all_types = list(logitsprocs_test_mapping.keys())
    # Case 1: no logitsprocs at all.
    cases: list[list[str]] = [[STR_NO_LOGITPROC]]
    # One case per logitproc, paired with requests that enable none.
    for lp_type in all_types:
        if lp_type != STR_NO_LOGITPROC:
            cases.append([lp_type, STR_NO_LOGITPROC])
    # Final case: every logitproc at once.
    cases.append(all_types)
    return cases
def _generate_fake_step_update(
    persistent_batch: list[LogitsProcsRequestParams],
    workload_params: list[LogitsProcsRequestParams],
    wdx: int,
    batch_update_builder: BatchUpdateBuilder,
) -> tuple[BatchUpdate | None, int, int]:
    """Randomly add/remove/condense/permute requests to emulate one engine step.

    Mutates `persistent_batch` in place and records the corresponding
    add/remove/move operations in `batch_update_builder`.

    Args:
        persistent_batch: current fake persistent batch state (mutated)
        workload_params: full workload of fake requests to draw adds from
        wdx: index into `workload_params` of the next request to add
        batch_update_builder: accumulates this step's batch-update ops

    Returns:
        Tuple of (batch update or None, updated workload offset, number of
        workload requests not yet added).
    """
    batch_size = len(persistent_batch)
    workload_size = len(workload_params)
    workload_reqs_remaining = workload_size - wdx
    max_add_remove_per_step = max(1, int(0.2 * workload_size))
    # 50% of steps: add no reqs
    # Other 50%: add a limited number of reqs (less than the number
    # of workload reqs remaining, less than an arbitrary max)
    # If no workload reqs remain: 100% of steps have 0 adds
    num_step_add = (
        random.choice(
            [
                0,
                random.randint(
                    1, min(max_add_remove_per_step, workload_reqs_remaining)
                ),
            ]
        )
        if workload_reqs_remaining
        else 0
    )
    # 50% of steps: remove no requests
    # Other 50%: remove a limited number of reqs (less than the number
    # persistent batch reqs remaining, less than an arbitrary max)
    # If persistent batch is empty: 100% of steps have 0 removals until
    # more requests are added. Assume that removed requests are always
    # drawn from the current batch, before new adds
    num_step_remove = (
        random.choice([0, random.randint(1, min(max_add_remove_per_step, batch_size))])
        if batch_size
        else 0
    )
    # Adds fill removal slots first; only the surplus appends to the end
    num_step_add_replace = min(num_step_add, num_step_remove)
    # Generate fake removed request indices drawn from persistent batch indices
    for removal in random.sample(range(batch_size), num_step_remove):
        batch_update_builder.removed_append(removal)
    # Get added requests from workload
    for add_req_params in workload_params[wdx : (wdx + num_step_add_replace)]:
        # Replace as many removed requests as possible with added requests
        add_remove_idx = batch_update_builder.pop_removed()
        batch_update_builder.added.append(
            (
                add_remove_idx,
                add_req_params.params,
                add_req_params.prompt_tokens,
                add_req_params.out_tokens,
            )
        )
        persistent_batch[add_remove_idx] = add_req_params
    # Append remaining added requests to end of batch
    add_reqs_append = workload_params[
        (wdx + num_step_add_replace) : (wdx + num_step_add)
    ]
    batch_update_builder.added.extend(
        [
            (
                adx + batch_size,
                add_req_params.params,
                add_req_params.prompt_tokens,
                add_req_params.out_tokens,
            )
            for adx, add_req_params in enumerate(add_reqs_append)
        ]
    )
    persistent_batch.extend(add_reqs_append)
    pre_condense_batch_size = len(persistent_batch)
    wdx += num_step_add  # Update workload offset
    # Simulate condensing persistent batch: fill each removed slot by moving
    # down the highest-index surviving request
    last_nonempty_index = pre_condense_batch_size - 1
    condensed_to_idxs = set()
    while batch_update_builder.removed:
        if (
            last_nonempty_index in batch_update_builder.removed
            or last_nonempty_index in condensed_to_idxs
        ):
            last_nonempty_index -= 1
            continue
        # last_nonempty_index is the highest persistent batch index that was
        # not removed
        first_empty_index = batch_update_builder.peek_removed()
        assert first_empty_index is not None
        if first_empty_index > last_nonempty_index:
            break
        # first_empty_index is the lowest removed persistent batch index
        # that is less than last_nonempty_index
        #
        # move last_nonempty_index -> first_empty_index
        batch_update_builder.pop_removed()
        condensed_to_idxs.add(first_empty_index)
        persistent_batch[first_empty_index] = persistent_batch[last_nonempty_index]
        batch_update_builder.moved.append(
            (last_nonempty_index, first_empty_index, MoveDirectionality.UNIDIRECTIONAL)
        )
        last_nonempty_index -= 1
    # Now removed requests & gaps left by non-removed requests that got
    # moved downward are grouped consecutively in the upper indices of
    # the persistent batch. Truncate them to get condensed persistent batch
    condensed_batch_size = batch_size + num_step_add - num_step_remove
    persistent_batch[:] = persistent_batch[0:condensed_batch_size]
    if condensed_batch_size > 1:
        # Simulate arbitrary batch ordering in the kernel backend
        # Generate a random number k of non-overlapping swap tuples
        k = random.randint(0, condensed_batch_size // 2)
        idxs = list(range(condensed_batch_size))
        random.shuffle(idxs)
        swaps = [tuple(sorted([idxs[2 * i], idxs[2 * i + 1]])) for i in range(k)]
        batch_update_builder.moved.extend(
            [(sw[0], sw[1], MoveDirectionality.SWAP) for sw in swaps]
        )
        for adx, bdx in swaps:
            persistent_batch[adx], persistent_batch[bdx] = (
                persistent_batch[bdx],
                persistent_batch[adx],
            )
    return (
        batch_update_builder.get_and_reset(condensed_batch_size),
        wdx,
        workload_size - wdx,
    )
def _assert_valid(
    batch_size: int,
    persistent_batch: list[LogitsProcsRequestParams],
    test_fakes: LogitsprocsTestFakes,
    slice_idxs: list[int],
    logits_w_lp: torch.Tensor,
    step_idx: int,
) -> None:
    """Check each request's logits row via its logitproc's validation fxn."""
    if not slice_idxs:
        # Trivial case: persistent batch is empty, so output must be too
        assert not persistent_batch
        if logits_w_lp.shape[0] != 0:
            raise ValueError(
                "Fake persistent batch is empty but logitsprocs "
                f"output batch has shape {logits_w_lp.shape}"
            )
        return
    for bdx in range(batch_size):
        req_params = persistent_batch[bdx]
        # Dispatch to the validation function registered for the logitproc
        # this request employs
        logitsprocs_test_mapping[req_params.logitproc_type].eval_fxn(
            test_fakes=test_fakes,
            persistent_batch=persistent_batch,
            logits_new=logits_w_lp,
            batch_index=bdx,
            request_params=req_params,
            step_idx=step_idx,
        )
@create_new_process_for_each_test()
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("reqs_per_logitproc", [REQS_PER_LOGITPROC])
@pytest.mark.parametrize("logitsprocs_under_test", _get_test_cases())
def test_logitsprocs(
    device: str, reqs_per_logitproc: int, logitsprocs_under_test: list[str]
):
    """Run a fake workload through the logitsprocs over many fake engine steps.

    Each step randomly adds/removes/permutes requests
    (`_generate_fake_step_update`), applies the resulting batch update to the
    logitsprocs, applies the logitsprocs to fake logits, then validates every
    request's logits row (`_assert_valid`). Loops until the whole workload
    has been added and the persistent batch has drained.

    Args:
        device: torch device to use
        reqs_per_logitproc: number of fake requests per logitproc type
        logitsprocs_under_test: logitproc types mixed into this workload
    """
    random.seed(40)
    torch.set_default_device(device)
    # Define a shuffled batch of requests which individually use a different
    # logitproc, or no logitproc at all
    workload_params = _generate_mixed_logitsprocs_batch_params(
        reqs_per_logitproc=reqs_per_logitproc, logitsprocs_types=logitsprocs_under_test
    )
    workload_size = len(workload_params)
    # Create fake test data structures for testing.
    test_fakes = _generate_test_fakes(workload_size, device)
    wdx = 0  # Next request index in workload to add
    persistent_batch: list[
        LogitsProcsRequestParams
    ] = []  # Persistent batch state, as list of workload indices
    # Generate fake removed request indices from current persistent
    # batch before adds
    batch_update_builder = BatchUpdateBuilder()
    # Break when entire workload has been added previously and persistent
    # batch is empty
    workload_reqs_remaining = workload_size
    batch_size = 0
    step_idx = 0
    while True:
        if not (workload_reqs_remaining or batch_size):
            break
        (
            batch_update,
            wdx,
            workload_reqs_remaining,
        ) = _generate_fake_step_update(
            persistent_batch=persistent_batch,
            workload_params=workload_params,
            wdx=wdx,
            batch_update_builder=batch_update_builder,
        )
        batch_size = len(persistent_batch)
        # Apply fake batch update to logitsprocs
        fake_update_logitsprocs_state(test_fakes, batch_update)
        # Emulate application of logits processors in engine
        slice_idxs = [req.workload_index for req in persistent_batch]
        logits_w_lp = fake_apply_logitsprocs(test_fakes, slice_idxs).cpu()
        _assert_valid(
            batch_size=batch_size,
            persistent_batch=persistent_batch,
            test_fakes=test_fakes,
            slice_idxs=slice_idxs,
            logits_w_lp=logits_w_lp,
            step_idx=step_idx,
        )
        step_idx += 1
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/logits_processors/utils.py | tests/v1/logits_processors/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import types
from enum import Enum, auto
from typing import Any
import torch
from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.sampling_params import SamplingParams
from vllm.v1.sample.logits_processor import (
LOGITSPROCS_GROUP,
AdapterLogitsProcessor,
BatchUpdate,
LogitsProcessor,
RequestLogitsProcessor,
)
from vllm.v1.sample.logits_processor.builtin import process_dict_updates
logger = init_logger(__name__)
# Decode model used by most logitsprocs tests
MODEL_NAME = "facebook/opt-125m"
# Pooling model used when testing that pooling rejects logitsprocs
POOLING_MODEL_NAME = "BAAI/bge-base-en-v1.5"
# SamplingParams.extra_args key consumed by the dummy logitproc
DUMMY_LOGITPROC_ARG = "target_token"
# Greedy decoding temperature
TEMP_GREEDY = 0.0
MAX_TOKENS = 20
# Identifiers for locating the dummy logitproc by entrypoint or by
# fully-qualified class name (FQCN)
DUMMY_LOGITPROC_ENTRYPOINT = "dummy_logitproc"
DUMMY_LOGITPROC_MODULE = "tests.v1.logits_processors.utils"
DUMMY_LOGITPROC_FQCN = f"{DUMMY_LOGITPROC_MODULE}:DummyLogitsProcessor"
class CustomLogitprocSource(Enum):
    """How to source a logitproc for testing purposes"""

    # No custom logitproc is loaded at all
    LOGITPROC_SOURCE_NONE = auto()
    # Loaded via a (mocked) package entrypoint
    LOGITPROC_SOURCE_ENTRYPOINT = auto()
    # Loaded via fully-qualified class name (FQCN) string
    LOGITPROC_SOURCE_FQCN = auto()
    # Loaded from a class object passed in directly
    LOGITPROC_SOURCE_CLASS = auto()
# Sample prompts.
# Shared workload for the logitsprocs tests; zipped positionally with each
# test's per-request sampling params.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
class DummyLogitsProcessor(LogitsProcessor):
    """Fake logit processor to support unit testing and examples.

    When a request supplies an integer ``target_token`` via
    ``SamplingParams.extra_args``, all logits except the one for
    ``target_token`` are masked to ``-inf``, forcing that token to decode.
    """

    @classmethod
    def validate_params(cls, params: SamplingParams):
        """Raise ValueError if ``target_token`` is present but not an int.

        Note: uses ``or {}`` rather than ``and`` so that a falsy-but-non-None
        ``extra_args`` (e.g. an empty dict) yields None instead of the dict
        itself, which would otherwise fail the int check spuriously.
        """
        target_token: int | None = (params.extra_args or {}).get("target_token")
        if target_token is not None and not isinstance(target_token, int):
            raise ValueError(
                f"target_token value {target_token} {type(target_token)} is not int"
            )

    def __init__(
        self, vllm_config: "VllmConfig", device: torch.device, is_pin_memory: bool
    ):
        # Maps persistent-batch row index -> target token id for that request
        self.req_info: dict[int, int] = {}

    def is_argmax_invariant(self) -> bool:
        """Masking all but one token can change the argmax, so this
        processor is NOT argmax-invariant (it impacts greedy sampling)."""
        return False

    def update_state(self, batch_update: BatchUpdate | None):
        """Sync per-request target_token state with the batch update."""

        def extract_extra_arg(params: SamplingParams) -> int | None:
            self.validate_params(params)
            # `or {}` for the same empty-extra_args reason as validate_params
            return (params.extra_args or {}).get("target_token")

        process_dict_updates(
            self.req_info,
            batch_update,
            lambda params, _, __: extract_extra_arg(params),
        )

    def apply(self, logits: torch.Tensor) -> torch.Tensor:
        """Mask all logits except each tracked request's target token.

        Mutates and returns `logits` in place.
        """
        if not self.req_info:
            return logits
        # Save target values before modification
        cols = torch.tensor(
            list(self.req_info.values()), dtype=torch.long, device=logits.device
        )
        rows = torch.tensor(
            list(self.req_info.keys()), dtype=torch.long, device=logits.device
        )
        values_to_keep = logits[rows, cols].clone()
        # Mask all but target tokens
        logits[rows] = float("-inf")
        logits[rows, cols] = values_to_keep
        return logits
"""Dummy module with dummy logitproc class"""
dummy_module = types.ModuleType(DUMMY_LOGITPROC_MODULE)
dummy_module.DummyLogitsProcessor = DummyLogitsProcessor # type: ignore
class EntryPoint:
    """Dummy entrypoint class for logitsprocs testing.

    Mimics the minimal `importlib.metadata.EntryPoint` surface used here:
    `name`, `value`, and `load()`.
    """

    def __init__(self):
        # Entrypoint name and its FQCN-style value
        self.name = DUMMY_LOGITPROC_ENTRYPOINT
        self.value = DUMMY_LOGITPROC_FQCN

    def load(self):
        # Loading the entrypoint yields the dummy logitproc class
        return DummyLogitsProcessor
class EntryPoints(list):
    """Dummy importlib EntryPoints stand-in for logitsprocs testing."""

    def __init__(self, group: str):
        # Only the logitsprocs group exposes the single dummy entrypoint;
        # any other group behaves as an empty collection
        if group == LOGITSPROCS_GROUP:
            eps = [EntryPoint()]
        else:
            eps = []
        # Emulate list-like functionality
        super().__init__(eps)
        # Real EntryPoints also exposes the entrypoint names
        self.names = [ep.name for ep in eps]
class DummyPerReqLogitsProcessor:
    """Request-level logits processor that masks out every logit except the
    token id identified by `target_token`."""

    def __init__(self, target_token: int) -> None:
        """Remember which token id must survive the masking."""
        self.target_token = target_token

    def __call__(
        self,
        output_ids: list[int],
        logits: torch.Tensor,
    ) -> torch.Tensor:
        # Preserve the target token's logit, flood everything else with -inf
        keep = logits[self.target_token].item()
        logits[:] = float("-inf")
        logits[self.target_token] = keep
        return logits
class WrappedPerReqLogitsProcessor(AdapterLogitsProcessor):
    """Example of wrapping a fake request-level logit processor to create a
    batch-level logits processor"""

    def is_argmax_invariant(self) -> bool:
        # Masking all but one token can change the argmax
        return False

    def new_req_logits_processor(
        self,
        params: SamplingParams,
    ) -> RequestLogitsProcessor | None:
        """This method returns a new request-level logits processor, customized
        to the `target_token` value associated with a particular request.

        Returns None if the logits processor should not be applied to the
        particular request. To use the logits processor the request must have
        a "target_token" custom argument with an integer value.

        Args:
            params: per-request sampling params

        Returns:
            `Callable` request logits processor, or None
        """
        # `or {}` (rather than `and`) ensures a falsy-but-non-None extra_args
        # (e.g. an empty dict) yields None here instead of the dict itself,
        # which would otherwise trigger the spurious not-an-int warning below.
        target_token: Any | None = (params.extra_args or {}).get("target_token")
        if target_token is None:
            return None
        if not isinstance(target_token, int):
            logger.warning(
                "target_token value %s is not int; not applying logits"
                " processor to request.",
                target_token,
            )
            return None
        return DummyPerReqLogitsProcessor(target_token)
"""Fake version of importlib.metadata.entry_points"""
entry_points = lambda group: EntryPoints(group)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/logits_processors/test_custom_offline.py | tests/v1/logits_processors/test_custom_offline.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import random
from typing import Any
import pytest
from tests.utils import create_new_process_for_each_test
from tests.v1.logits_processors.utils import (
DUMMY_LOGITPROC_ARG,
DUMMY_LOGITPROC_FQCN,
MAX_TOKENS,
MODEL_NAME,
POOLING_MODEL_NAME,
TEMP_GREEDY,
CustomLogitprocSource,
DummyLogitsProcessor,
WrappedPerReqLogitsProcessor,
prompts,
)
from tests.v1.logits_processors.utils import entry_points as fake_entry_points
from vllm import LLM, SamplingParams
from vllm.v1.sample.logits_processor import (
STR_POOLING_REJECTS_LOGITSPROCS,
STR_SPEC_DEC_REJECTS_LOGITSPROCS,
LogitsProcessor,
)
# Create a mixture of requests which do and don't utilize the dummy logitproc
# (alternating: requests 0 and 2 pass a target_token extra arg; 1 and 3 don't).
# Zipped positionally with `prompts` by _run_test.
sampling_params_list = [
    SamplingParams(
        temperature=TEMP_GREEDY,
        max_tokens=MAX_TOKENS,
        extra_args={DUMMY_LOGITPROC_ARG: 128},
    ),
    SamplingParams(temperature=TEMP_GREEDY, max_tokens=MAX_TOKENS),
    SamplingParams(
        temperature=TEMP_GREEDY,
        max_tokens=MAX_TOKENS,
        extra_args={DUMMY_LOGITPROC_ARG: 67},
    ),
    SamplingParams(temperature=TEMP_GREEDY, max_tokens=MAX_TOKENS),
]
def _run_test(kwargs: dict, logitproc_loaded: bool) -> None:
    """Compare `LLM` instance initialized with specified `kwargs` against
    reference `LLM` instance.

    Two scenarios:
    1. Server has loaded dummy logitproc; test that requests which specify
       dummy logitproc arg value behave as if logitproc is operating (output
       token value should repeat), while requests that don't specify dummy
       logitproc arg value should match reference `LLM` output.
    2. Server has *not* loaded dummy logitproc; test that all requests
       behave as if logitproc is *not* operating (output matches reference
       `LLM` output.)

    Args:
        kwargs: `LLM` constructor kwargs
        logitproc_loaded: server has loaded dummy logitproc if True
    """
    # Create a vLLM instance and load custom logitproc
    llm_logitproc = LLM(
        model=MODEL_NAME,
        gpu_memory_utilization=0.1,
        **kwargs,
    )
    # Create a reference vLLM instance without custom logitproc
    llm_ref = LLM(model=MODEL_NAME, gpu_memory_utilization=0.1)
    # Run inference with logitproc loaded
    outputs_logitproc = llm_logitproc.generate(prompts, sampling_params_list)
    # Reference run
    outputs_ref = llm_ref.generate(prompts, sampling_params_list)
    # Validate outputs: requests are zipped positionally with their params
    for bdx, (out_lp, out_ref, params) in enumerate(
        zip(outputs_logitproc, outputs_ref, sampling_params_list)
    ):
        lp_toks = out_lp.outputs[0].token_ids
        if logitproc_loaded and params.extra_args:
            # This request exercises custom logitproc; validate that logitproc
            # forces `target_token` to be decoded in each step
            target_token = params.extra_args[DUMMY_LOGITPROC_ARG]
            if not all(x == target_token for x in lp_toks):
                raise AssertionError(
                    f"Request {bdx} generated {lp_toks}, should all be {target_token}"
                )
        else:
            # This request does not exercise custom logitproc (or custom
            # logitproc is not enabled on this server); validate against
            # reference result
            ref_toks = out_ref.outputs[0].token_ids
            if lp_toks != ref_toks:
                raise AssertionError(
                    f"Request {bdx} generated {lp_toks}, should match {ref_toks}"
                )
@create_new_process_for_each_test()
@pytest.mark.parametrize("logitproc_source", list(CustomLogitprocSource))
def test_custom_logitsprocs(monkeypatch, logitproc_source: CustomLogitprocSource):
    """Test offline Python interface for passing custom logitsprocs.

    Construct an `LLM` instance which loads a custom logitproc that has a
    well-defined behavior (mask out all tokens except one `target_token`).
    Construct a reference `LLM` instance with no custom logitproc.
    Pass in a batch of requests, 50% of which pass a `target_token` value
    in through `SamplingParams.extra_args`, 50% of which do not.

    Validate that
    * Requests which do not activate the custom logitproc, yield the same
      results for both `LLM` instances
    * Requests which activate the custom logitproc, only output `target_token`

    Test four scenarios, corresponding to `logitproc_source` value
    * No logitsprocs loaded - test that generated tokens match reference `LLM`
      instance output
    * Logitproc passed in via {entrypoint, class object, fully-qualified class
      name (FQCN)} - test that dummy logitproc is utilized correctly when
      provided via any of these three possible sources

    Args:
        monkeypatch: for setting env vars
        logitproc_source: what source (entrypoint, fully-qualified class name
                          (FQCN), class object, or None) the user pulls the
                          logitproc from
    """
    # Test that logitproc info is passed to workers
    monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "1")
    random.seed(40)
    # Choose LLM args based on logitproc source
    if logitproc_source == CustomLogitprocSource.LOGITPROC_SOURCE_NONE:
        # Scenario: the server does not load any custom logitproc
        # Every other scenario is a different way of loading a custom logitproc
        _run_test({}, logitproc_loaded=False)
        return
    if logitproc_source == CustomLogitprocSource.LOGITPROC_SOURCE_ENTRYPOINT:
        # Scenario: vLLM loads a logitproc from a preconfigured entrypoint
        # To that end, mock a dummy logitproc entrypoint (patched before the
        # LLM is constructed inside _run_test)
        import importlib.metadata
        importlib.metadata.entry_points = fake_entry_points  # type: ignore
        # fork is required for workers to see entrypoint patch
        monkeypatch.setenv("VLLM_WORKER_MULTIPROC_METHOD", "fork")
        _run_test({}, logitproc_loaded=True)
        return
    kwargs: dict[str, list[str | type[LogitsProcessor]]] = {}
    if logitproc_source == CustomLogitprocSource.LOGITPROC_SOURCE_FQCN:
        # Scenario: load logitproc based on fully-qualified class name (FQCN)
        kwargs["logits_processors"] = [DUMMY_LOGITPROC_FQCN]
    elif logitproc_source == CustomLogitprocSource.LOGITPROC_SOURCE_CLASS:
        # Scenario: load logitproc from provided class object
        kwargs["logits_processors"] = [DummyLogitsProcessor]
    _run_test(kwargs, logitproc_loaded=True)
@create_new_process_for_each_test()
def test_custom_logitsprocs_req(monkeypatch):
    """Test passing request-level logits processor to offline Python interface.

    Wrap a request-level logits processor to create a batch level logits
    processor that has a well-defined behavior (mask out all tokens except one
    `target_token`).
    Construct an `LLM` instance which loads the wrapped logits processor. Pass
    the custom logitproc as a class object.
    Construct a reference `LLM` instance with no custom logitproc.
    Pass in a batch of requests, 50% of which pass a `target_token` value
    in through `SamplingParams.extra_args`, 50% of which do not.

    Validate that
    * Requests which do not activate the custom logitproc, yield the same
      results for both `LLM` instances
    * Requests which activate the custom logitproc, only output `target_token`

    Args:
        monkeypatch: for setting env vars
    """
    # Test that logitproc info is passed to workers
    monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "1")
    random.seed(40)
    # The wrapped per-request logitproc is passed as a class object
    _run_test(
        {"logits_processors": [WrappedPerReqLogitsProcessor]}, logitproc_loaded=True
    )
@create_new_process_for_each_test()
@pytest.mark.parametrize("model_scenario", ["pooling", "spec_dec"])
@pytest.mark.parametrize(
    "logitproc_source",
    [
        CustomLogitprocSource.LOGITPROC_SOURCE_ENTRYPOINT,
        CustomLogitprocSource.LOGITPROC_SOURCE_FQCN,
        CustomLogitprocSource.LOGITPROC_SOURCE_CLASS,
    ],
)
def test_rejects_custom_logitsprocs(
    monkeypatch, model_scenario: str, logitproc_source: CustomLogitprocSource
):
    """Validate that vLLM engine initialization properly rejects custom
    logitsprocs when the model is a pooling model or speculative decoding
    enabled.

    Use `LLM` entrypoint. We expect `LLM` initialization to fail before the
    logitproc is actually loaded.

    Scenario 1:
    * Mock a logitproc entrypoint
    * Validate that `LLM` does not load the logitproc

    Scenario 2:
    * Pass custom logitproc to `LLM` constructor
    * Scenario 2a: via FQCN
    * Scenario 2b: via class object
    * Validate that initialization fails with appropriate exception

    Args:
        monkeypatch: used to set environment variables
        model_scenario: "pooling" or "spec_dec" model configuration under test
        logitproc_source: what source (entrypoint, fully-qualified class name
                          (FQCN), or class object) the user pulls the
                          logitproc from
    """
    monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0")
    random.seed(40)
    # Per-scenario model/runner settings and the expected rejection message
    test_params: dict[str, dict[str, Any]] = {
        "pooling": {
            "runner": "pooling",
            "model": POOLING_MODEL_NAME,
            "error_message": STR_POOLING_REJECTS_LOGITSPROCS,
            "speculative_config": None,
        },
        "spec_dec": {
            "runner": "auto",
            "model": MODEL_NAME,
            "error_message": STR_SPEC_DEC_REJECTS_LOGITSPROCS,
            "speculative_config": {"model": "ngram", "num_speculative_tokens": 1},
        },
    }
    config = test_params[model_scenario]
    llm_kwargs: dict[str, Any] = {
        "runner": config["runner"],
        "model": config["model"],
        "gpu_memory_utilization": 0.1,
        "speculative_config": config["speculative_config"],
    }
    if logitproc_source == CustomLogitprocSource.LOGITPROC_SOURCE_ENTRYPOINT:
        # Scenario: vLLM loads a model and ignores a logitproc that is
        # available at a preconfigured entrypoint
        # Patch in dummy logitproc entrypoint
        import importlib.metadata
        importlib.metadata.entry_points = fake_entry_points  # type: ignore
        # fork is required for entrypoint patch to be visible to workers,
        # although they should ignore the entrypoint patch anyway
        monkeypatch.setenv("VLLM_WORKER_MULTIPROC_METHOD", "fork")
        llm = LLM(**llm_kwargs)
        # Require that no logitsprocs have been loaded
        worker = llm.llm_engine.model_executor.driver_worker.worker
        assert sum([1 for _ in worker.model_runner.input_batch.logitsprocs.all]) == 0
        return
    if logitproc_source == CustomLogitprocSource.LOGITPROC_SOURCE_FQCN:
        # Scenario: load logitproc based on fully-qualified class name (FQCN)
        llm_kwargs["logits_processors"] = [DUMMY_LOGITPROC_FQCN]
    elif logitproc_source == CustomLogitprocSource.LOGITPROC_SOURCE_CLASS:
        # Scenario: load logitproc from provided class object
        llm_kwargs["logits_processors"] = [DummyLogitsProcessor]
    with pytest.raises(ValueError, match=config["error_message"]):
        # Require that loading a model alongside the logitproc raises
        # the appropriate exception.
        LLM(**llm_kwargs)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/logits_processors/__init__.py | tests/v1/logits_processors/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/logits_processors/test_custom_online.py | tests/v1/logits_processors/test_custom_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import random
import sys
from typing import Any
import openai
import pytest
import pytest_asyncio
from tests.utils import RemoteOpenAIServerCustom, create_new_process_for_each_test
from tests.v1.logits_processors.utils import (
DUMMY_LOGITPROC_ARG,
DUMMY_LOGITPROC_FQCN,
MAX_TOKENS,
MODEL_NAME,
TEMP_GREEDY,
prompts,
)
from tests.v1.logits_processors.utils import entry_points as fake_entry_points
def _server_with_logitproc_entrypoint(
    env_dict: dict[str, str] | None,
    model: str,
    vllm_serve_args: list[str],
) -> None:
    """Start vLLM server, inject dummy logitproc entrypoint.

    Args:
        env_dict: extra environment variables to set, if any
        model: model name to serve
        vllm_serve_args: CLI args forwarded to `vllm serve`
    """
    # Patch `entry_points` to inject logitproc entrypoint
    import importlib.metadata
    importlib.metadata.entry_points = fake_entry_points  # type: ignore
    from vllm.entrypoints.cli import main
    # fork is required for workers to see entrypoint patch
    os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "fork"
    if env_dict is not None:
        os.environ.update(env_dict)
    # Emulate `vllm serve <model> <CLI args>`
    sys.argv = ["vllm", "serve", model] + vllm_serve_args
    main.main()
def _server_with_logitproc_fqcn(
    env_dict: dict[str, str] | None,
    model: str,
    vllm_serve_args: list[str],
) -> None:
    """Start vLLM server; the dummy logitproc is referenced by its
    fully-qualified class name (FQCN) via `vllm_serve_args`, so no
    patching is needed here.

    Args:
        env_dict: extra environment variables to set, if any
        model: model name to serve
        vllm_serve_args: CLI args forwarded to `vllm serve`
    """
    from vllm.entrypoints.cli import main
    if env_dict is not None:
        os.environ.update(env_dict)
    # Emulate `vllm serve <model> <CLI args>`
    sys.argv = ["vllm", "serve", model] + vllm_serve_args
    main.main()
@pytest.fixture(scope="module")
def default_server_args():
    """Baseline `vllm serve` CLI args shared by every server configuration."""
    return [
        # use half precision for speed and memory savings in CI environment
        "--dtype",
        "bfloat16",
        "--max-model-len",
        "2048",
        "--max-num-seqs",
        "128",
    ]
@pytest.fixture(
    scope="function", params=[[], ["--logits-processors", DUMMY_LOGITPROC_FQCN]]
)
def server(default_server_args, request, monkeypatch):
    """Consider two server configurations:

    (1) --logits-processors cli arg specifies dummy logits processor via fully-
        qualified class name (FQCN); patch in a dummy logits processor module
    (2) No --logits-processors cli arg; patch in a dummy logits processor
        entrypoint
    """
    # Test that logitproc info is passed to workers
    monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "1")
    if request.param:
        # Launch server, append FQCN argument, inject dummy logitproc module
        args = default_server_args + request.param
        _server_fxn = _server_with_logitproc_fqcn
    else:
        # Launch server, inject dummy logitproc entrypoint
        args = default_server_args
        _server_fxn = _server_with_logitproc_entrypoint
    # Server process is torn down when the context manager exits
    with RemoteOpenAIServerCustom(MODEL_NAME, args, _server_fxn) as remote_server:
        yield remote_server
@pytest_asyncio.fixture
async def client(server):
    """Yield an async OpenAI client bound to the running test server."""
    async with server.get_async_client() as async_client:
        yield async_client
# General request argument values for these tests
api_keyword_args = {
    # Greedy sampling ensures that requests which receive the `target_token`
    # arg will decode it in every step
    "temperature": TEMP_GREEDY,
    # Since EOS will never be decoded (unless `target_token` is EOS),
    # cap generation length explicitly
    "max_tokens": MAX_TOKENS,
    # Return decoded token logprobs (as a way of getting token id)
    "logprobs": 0,
}
@create_new_process_for_each_test()
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_custom_logitsprocs(client: openai.AsyncOpenAI, model_name: str):
    """Test custom logitsprocs when starting OpenAI server from CLI.

    Launch vLLM OpenAI-compatible server, configured to load a custom logitproc
    that has a well-defined behavior (mask out all tokens except one
    `target_token`).
    Pass in requests, 50% of which pass a `target_token` value
    in through `extra_body["vllm_xargs"]`, 50% of which do not.
    Validate that requests which activate the custom logitproc, repeat the same
    token.
    """
    use_dummy_logitproc = True
    for prompt in prompts:
        # Build request arguments
        request_keyword_args: dict[str, Any] = {
            **api_keyword_args,
        }
        if use_dummy_logitproc:
            # 50% of requests pass target_token custom arg
            target_token = random.choice([128, 67])
            # For requests which activate the dummy logitproc, choose one of
            # two `target_token` values which are known not to be EOS tokens
            request_keyword_args["extra_body"] = {
                "vllm_xargs": {DUMMY_LOGITPROC_ARG: target_token}
            }
        batch = await client.completions.create(
            model=model_name,
            prompt=prompt,
            **request_keyword_args,
        )
        if use_dummy_logitproc:
            # Only for requests which activate dummy logitproc - validate that
            # output token is repeated
            choices: list[openai.types.CompletionChoice] = batch.choices
            toks = choices[0].logprobs.tokens
            if not all([x == toks[0] for x in toks]):
                raise AssertionError(f"Generated {toks} should all be {toks[0]}")
        # Alternate whether to activate dummy logitproc for each request
        use_dummy_logitproc = not use_dummy_logitproc
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_invalid_custom_logitsproc_arg(
    client: openai.AsyncOpenAI, model_name: str
):
    """Test that request with invalid custom logitsproc is rejected"""
    prompt = "Hello, my name is"
    # Pass invalid (non-int) target_token value to dummy logits processor
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
        "extra_body": {
            "vllm_xargs": {DUMMY_LOGITPROC_ARG: "invalid_target_token_value"}
        },
    }
    # Server-side validation should reject the request with an OpenAI error
    with pytest.raises(openai.OpenAIError) as exc_info:
        await client.completions.create(
            model=model_name,
            prompt=prompt,
            **request_keyword_args,
        )
    assert "is not int" in str(exc_info.value)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/tpu/test_tpu_int8.py | tests/v1/tpu/test_tpu_int8.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests whether TPU Int8 computation is enabled correctly.
Run `pytest tests/v1/tpu/test_tpu_int8.py`.
"""
import pytest
from vllm.model_executor.layers.linear import LinearBase
from vllm.model_executor.layers.quantization.tpu_int8 import TPUInt8LinearMethod
from vllm.platforms import current_platform
from ...models.registry import HF_EXAMPLE_MODELS
# Models exercised by the TPU int8 test
MODELS = ["Qwen/Qwen2.5-0.5B-Instruct"]
@pytest.mark.skipif(
    not current_platform.is_tpu(), reason="TPU Int8 is only enabled for TPUs."
)
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [10])
@pytest.mark.parametrize(
    "hf_overrides",
    [
        # w8a8 dynamic activation
        {
            "quantization_config": {
                "quant_method": "tpu_int8",
                "activation_scheme": "dynamic",
            }
        }
    ],
)
def test_model_tpu_int8(
    vllm_runner,
    model: str,
    dtype: str,
    max_tokens: int,
    hf_overrides: dict,
    monkeypatch,
) -> None:
    """Check that tpu_int8 quantization is applied to every linear layer and
    that the quantized model still produces the expected continuation."""
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_transformers_version(on_fail="skip")
    activation_scheme = hf_overrides.get("quantization_config", {}).get(
        "activation_scheme"
    )
    # "dynamic" activation scheme implies activations are quantized too
    quantize_activation = activation_scheme == "dynamic"
    # Allows using apply_model
    monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0")
    # Prevent error from re-initializing cache
    monkeypatch.setenv("VLLM_XLA_CACHE_PATH", "")
    prompts = [
        "A robot may not injure a human being",
    ]
    answers = [
        "or kill a human being",
    ]
    with vllm_runner(model, dtype=dtype, hf_overrides=hf_overrides) as vllm:
        def check_model(model):
            # Every linear layer must use the TPU int8 quant method with the
            # expected activation-quantization setting
            for name, module in model.named_modules():
                if not isinstance(module, LinearBase):
                    continue
                quant_method = module.quant_method
                assert isinstance(quant_method, TPUInt8LinearMethod)
                assert quant_method.quantize_activation == quantize_activation
        vllm.apply_model(check_model)
        outputs = vllm.generate_greedy(prompts, max_tokens)
    for (_, output), answer in zip(outputs, answers):
        assert answer in output
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/tpu/test_pallas.py | tests/v1/tpu/test_pallas.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest.mock import ANY, patch
import torch
from vllm.attention.backends.abstract import AttentionType
from vllm.v1.attention.backends.pallas import PallasAttentionBackendImpl, PallasMetadata
def test_ragged_paged_attention():
    """Verify that PallasAttentionBackendImpl forwards its configuration
    (sliding_window, logits_soft_cap, sm_scale, k/v scales) unchanged to
    torch.ops.xla.ragged_paged_attention."""
    # We verify that the kernel inputs such as sliding_window, etc. are passed
    # in from the model correctly.
    # The correctness of the paged attention kernel is tested in the kernel
    # library.
    num_heads = 4
    head_size = 128
    scale = 1.0
    num_kv_heads = 4
    sliding_window = 128
    logits_soft_cap = 50.0
    attn_impl = PallasAttentionBackendImpl(
        num_heads=num_heads,
        head_size=head_size,
        scale=scale,
        num_kv_heads=num_kv_heads,
        alibi_slopes=None,
        sliding_window=sliding_window,
        kv_cache_dtype="auto",
        logits_soft_cap=logits_soft_cap,
        attn_type=AttentionType.DECODER,
    )
    class FakeAttentionLayer:
        # Minimal stand-in exposing only the scale attributes read by forward()
        _q_scale_float: float
        _k_scale_float: float
        _v_scale_float: float
    layer = FakeAttentionLayer()
    layer._q_scale_float = 1.0
    layer._k_scale_float = 1.0
    layer._v_scale_float = 1.0
    # Build zero-filled inputs of the right shapes; values don't matter since
    # the kernel itself is mocked below
    num_tokens = 16
    num_blocks = 1024
    block_size = 16
    query = torch.zeros(num_tokens, num_heads * head_size)
    key = torch.zeros(num_tokens, num_kv_heads * head_size)
    value = torch.zeros(num_tokens, num_kv_heads * head_size)
    kv_cache = torch.zeros(num_blocks, block_size, num_kv_heads * 2, head_size)
    slot_mapping = torch.zeros((3, num_tokens), dtype=torch.int64)
    max_num_reqs = 8
    max_num_blocks_per_req = 8
    num_kv_update_slices = torch.tensor([num_tokens], dtype=torch.int32)
    block_tables = torch.zeros(
        (max_num_reqs, max_num_blocks_per_req), dtype=torch.int32
    )
    context_lens = torch.ones((max_num_reqs,), dtype=torch.int32)
    query_lens = [1] * max_num_reqs
    query_start_loc = torch.cumsum(
        torch.tensor([0] + query_lens, dtype=torch.int32), dim=0, dtype=torch.int32
    )
    num_seqs = torch.tensor([max_num_reqs], dtype=torch.int32)
    attn_metadata = PallasMetadata(
        slot_mapping=slot_mapping,
        block_tables=block_tables,
        context_lens=context_lens,
        query_start_loc=query_start_loc,
        num_seqs=num_seqs,
        num_kv_update_slices=num_kv_update_slices,
        num_slices_per_kv_cache_update_block=8,
    )
    # Mock the XLA kernel so we can inspect the exact arguments it receives
    with patch("torch.ops.xla.ragged_paged_attention") as mock_ragged_paged_attention:
        attn_impl.forward(
            layer=layer,
            query=query,
            key=key,
            value=value,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )
    mock_ragged_paged_attention.assert_called_once_with(
        ANY,  # query
        ANY,  # kv_cache
        ANY,  # context_lens
        ANY,  # block_tables
        ANY,  # query_start_loc
        ANY,  # num_seqs
        num_kv_pages_per_block=None,
        num_queries_per_block=None,
        vmem_limit_bytes=None,
        use_kernel=True,
        sm_scale=scale,
        sliding_window=sliding_window,
        soft_cap=logits_soft_cap,
        k_scale=1.0,
        v_scale=1.0,
    )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/tpu/test_mha_attn.py | tests/v1/tpu/test_mha_attn.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Test:
* Tests for MMEncoderAttention layer
"""
import pytest
import torch
import torch_xla
import torch_xla.core
import torch_xla.core.xla_model
from vllm.attention.layers.mm_encoder_attention import MMEncoderAttention
from vllm.attention.selector import _cached_get_attn_backend
from vllm.platforms import current_platform
@pytest.fixture(autouse=True)
def clear_cache():
"""Clear lru cache to ensure each test case runs without caching."""
_cached_get_attn_backend.cache_clear()
def ref_attention(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
scale: float,
) -> torch.Tensor:
"""
Native implementation of scaled dot product attention without mask:
- query, key, value: [batch_size, seq_len, num_heads, head_size]
- attn_mask: [batch_size, seq_len, seq_len]
"""
query, key, value = (x.transpose(1, 2) for x in (query, key, value))
attn_weights = scale * torch.matmul(query, key.transpose(2, 3))
attn_weights = torch.softmax(attn_weights, dim=-1).to(value.dtype)
out = torch.matmul(attn_weights, value).transpose(1, 2)
return out
BATCH_SIZES = [1, 16]
SEQ_LENS = [1]
NUM_HEADS = [1, 16]
NUM_KV_HEADS = [1]
HEAD_SIZES = [64, 80]
@pytest.mark.skipif(not current_platform.is_tpu(), reason="This test needs a TPU")
@pytest.mark.parametrize("batch_size", BATCH_SIZES)
@pytest.mark.parametrize("seq_len", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("num_kv_heads", NUM_KV_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("device", [torch_xla.core.xla_model.xla_device()])
def test_mha_attn_forward(
batch_size: int,
seq_len: int,
num_heads: int,
num_kv_heads: int,
head_size: int,
device: str,
):
current_platform.seed_everything(0)
# These are expected to be f32
q = torch.randn(batch_size, seq_len, num_heads * head_size, device=device)
k = torch.randn(batch_size, seq_len, num_kv_heads * head_size, device=device)
v = torch.randn(batch_size, seq_len, num_kv_heads * head_size, device=device)
scale = 1.0 / head_size**0.5
attn = MMEncoderAttention(
num_heads, head_size, scale=scale, num_kv_heads=num_kv_heads
)
output = attn(q, k, v)
assert num_heads % num_kv_heads == 0
num_queries_per_kv = num_heads // num_kv_heads
q = q.reshape(batch_size, seq_len, num_heads, head_size)
k = k.reshape(batch_size, seq_len, num_kv_heads, head_size)
v = v.reshape(batch_size, seq_len, num_kv_heads, head_size)
if num_queries_per_kv > 1:
k = torch.repeat_interleave(k, num_queries_per_kv, dim=2)
v = torch.repeat_interleave(v, num_queries_per_kv, dim=2)
ref_output = ref_attention(
q,
k,
v,
scale=scale,
).reshape(batch_size, seq_len, num_heads * head_size)
# torch_xla flash_attn kernel is less accurate but much faster
torch.testing.assert_close(output, ref_output, atol=1e-2, rtol=1e-3)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/tpu/test_spmd_model_weight_loading.py | tests/v1/tpu/test_spmd_model_weight_loading.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import gc
import tempfile
import numpy as np
import pytest
import torch_xla.distributed.spmd as xs
import torch_xla.runtime as xr
from vllm.config import set_current_vllm_config
from vllm.distributed.parallel_state import (
ensure_model_parallel_initialized,
init_distributed_environment,
)
from vllm.engine.arg_utils import EngineArgs
from vllm.model_executor.model_loader.tpu import TPUModelLoader
def _setup_environment(model):
engine_args = EngineArgs(
model=model,
)
vllm_config = engine_args.create_engine_config()
with set_current_vllm_config(vllm_config):
temp_file = tempfile.mkstemp()[1]
init_distributed_environment(
1,
0,
local_rank=0,
distributed_init_method=f"file://{temp_file}",
backend="gloo",
)
# Under single worker mode, full model is init first and then
# partitioned using GSPMD.
ensure_model_parallel_initialized(1, 1)
return vllm_config
MESH = None
def _get_spmd_mesh():
global MESH
if MESH is None:
xr.use_spmd()
num_devices = xr.global_runtime_device_count()
mesh_shape = (num_devices, 1)
device_ids = np.array(range(num_devices))
MESH = xs.Mesh(device_ids, mesh_shape, ("x", "y"))
return MESH
@pytest.mark.parametrize(
"model",
[
"Qwen/Qwen2-1.5B-Instruct",
# Skip large models due to CI runner disk space limitations
# "meta-llama/Llama-3.1-8B-Instruct",
# "meta-llama/Llama-3.1-70B-Instruct",
],
)
def test_tpu_model_loader(model):
# Skip the 70B test if there are less than 8 chips
# TODO: Query using torch xla API, the query API is not working
# with SPMD now. However, This test is running under SPMD mode.
if "70B" in model and xr.global_runtime_device_count() < 8:
pytest.skip(
"Skipping 70B model if the TPU VM has less than 8 chips to \
avoid OOM."
)
vllm_config = _setup_environment(model)
loader = TPUModelLoader(load_config=vllm_config.load_config)
mesh = _get_spmd_mesh()
model = loader.load_model(vllm_config, vllm_config.model_config, mesh)
del model
gc.collect()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/tpu/test_tpu_qkv_linear.py | tests/v1/tpu/test_tpu_qkv_linear.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import tempfile
import numpy as np
import pytest
import torch
import torch_xla.distributed.spmd as xs
import torch_xla.runtime as xr
from vllm.config import set_current_vllm_config
from vllm.distributed.parallel_state import (
ensure_model_parallel_initialized,
init_distributed_environment,
)
from vllm.distributed.tpu_distributed_utils import XlaQKVParallelLinear
from vllm.engine.arg_utils import EngineArgs
from vllm.model_executor.layers.linear import QKVParallelLinear
@pytest.fixture(autouse=True)
def setup_environment():
# This is a fake config used for init dist env.
# QKVParallelLinear needs dist env to be initialized.
engine_args = EngineArgs(
model="Qwen/Qwen2-1.5B-Instruct",
max_model_len=64,
max_num_batched_tokens=64,
max_num_seqs=4,
)
vllm_config = engine_args.create_engine_config()
with set_current_vllm_config(vllm_config):
temp_file = tempfile.mkstemp()[1]
init_distributed_environment(
1,
0,
local_rank=0,
distributed_init_method=f"file://{temp_file}",
backend="gloo",
)
ensure_model_parallel_initialized(1, 1)
yield
MESH = None
def _get_spmd_mesh():
global MESH
if MESH is None:
xr.use_spmd()
num_devices = xr.global_runtime_device_count()
mesh_shape = (num_devices, 1)
device_ids = np.array(range(num_devices))
MESH = xs.Mesh(device_ids, mesh_shape, ("x", "y"))
return MESH
@pytest.mark.parametrize("bias", [False, True])
# `xr.use_spmd()` will set a global state, and this state is not reversible.
# Therefore, non-SPMD tests should be run before SPMD tests.
@pytest.mark.parametrize("mesh", [None, _get_spmd_mesh()])
@pytest.mark.parametrize("device", ["cpu", "xla"])
@torch.no_grad()
def test_xla_qkv_linear(bias, mesh, device):
torch.manual_seed(123)
qkv_linear = QKVParallelLinear(
hidden_size=4096,
head_size=128,
total_num_heads=32,
total_num_kv_heads=8,
bias=bias,
params_dtype=torch.bfloat16,
return_bias=False,
)
qkv_linear.weight.data = torch.rand_like(qkv_linear.weight.data) / 10
if bias:
qkv_linear.bias.data = torch.rand_like(qkv_linear.bias.data)
xla_qkv_linear = XlaQKVParallelLinear(qkv_linear, mesh=mesh)
qkv_linear = qkv_linear.to(device)
xla_qkv_linear = xla_qkv_linear.to(device)
input_tensor = torch.rand(10, 4096, dtype=torch.bfloat16) / 10
input_tensor = input_tensor.to(device)
output = qkv_linear(input_tensor)
xla_output = xla_qkv_linear(input_tensor)
assert torch.allclose(output.cpu(), xla_output.cpu())
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/tpu/__init__.py | tests/v1/tpu/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/tpu/test_perf.py | tests/v1/tpu/test_perf.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""A basic performance regression test for TPUs
Run `pytest tests/v1/tpu/test_perf.py`.
"""
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING
import numpy as np
import pytest
from vllm.platforms import current_platform
from vllm.sampling_params import SamplingParams
from vllm.tokenizers import get_tokenizer
if TYPE_CHECKING:
from tests.conftest import VllmRunner
else:
VllmRunner = object
@dataclass
class TestParams:
model: str
num_prompts: int
prefix_len: int
decode_len: int
expected_avg_time: float
err_tol: float
TEST_PARAMS = [
# TODO: Cannot run a series of tests because:
# RuntimeError: Bad StatusOr access: UNKNOWN: TPU initialization failed:
# open(/dev/vfio/0): Device or resource busy: Device or resource busy;
# Couldn't open iommu group /dev/vfio/0
# => Investigate
# TestParams(
# model="Qwen/Qwen2.5-1.5B-Instruct",
# num_prompts=1,
# prefix_len=10,
# decode_len=5,
# expected_avg_time=0.03,
# err_tol=0.01,
# ),
# TestParams(
# model="Qwen/Qwen2.5-1.5B-Instruct",
# num_prompts=10,
# prefix_len=100,
# decode_len=50,
# expected_avg_time=0.234,
# err_tol=0.020,
# ),
TestParams(
model="Qwen/Qwen2.5-1.5B-Instruct",
num_prompts=64,
prefix_len=500,
decode_len=50,
# commit id: ccb246776d93ef105904a8ec015b3587240a1183
# tpu: v5lite (old vllm CI/CD)
# expected_avg_time=1.4,
# err_tol=0.30,
# (This is the active CI/CD instance)
# commit id: ccb246776d93ef105904a8ec015b3587240a1183
# tpu: v6e (current vllm CI/CD)
expected_avg_time=1.7, # measured with VLLM_XLA_CACHE_PATH=
err_tol=0.20,
),
]
NUM_WARMUPS = 5
NUM_RUNS = 10
MAX_MODEL_LEN = 1024
MAX_NUM_SEQS = 32
GPU_UTIL = 0.9
@pytest.mark.skipif(
not current_platform.is_tpu(),
reason="This is a basic performance test for TPU only",
)
@pytest.mark.parametrize("params", TEST_PARAMS)
def test_perf(
vllm_runner: type[VllmRunner],
params: TestParams,
) -> None:
tokenizer = get_tokenizer(
params.model, tokenizer_mode="auto", trust_remote_code=True
)
prompts = []
for i in range(params.num_prompts):
prefix_token_ids = np.random.randint(
0, tokenizer.vocab_size, size=params.prefix_len
).tolist()
prompt = tokenizer.decode(prefix_token_ids)
prompts.append(prompt)
print(
"-- Running: num_prompts = {} prefix_len = {} decode_len = {}".format(
len(prompts), params.prefix_len, params.decode_len
)
)
sampling_params = SamplingParams(
max_tokens=params.decode_len, temperature=1.0, min_p=0.0
)
with vllm_runner(
params.model,
max_num_batched_tokens=MAX_MODEL_LEN,
max_model_len=MAX_MODEL_LEN,
max_num_seqs=MAX_NUM_SEQS,
gpu_memory_utilization=GPU_UTIL,
enforce_eager=False,
tensor_parallel_size=1,
) as vllm_model:
print(" -- Warmup / Compile")
for i in range(NUM_WARMUPS):
_ = vllm_model.generate(prompts, sampling_params)
print(" -- Benchmarking... ")
times = []
for i in range(NUM_RUNS):
start_time = time.time()
_ = vllm_model.generate(prompts, sampling_params)
times.append(time.time() - start_time)
avg_time = sum(times) / len(times)
print(" -- avg_time = {}".format(avg_time))
print(
" -- expected_avg_time = {} with err_tol = {}".format(
params.expected_avg_time, params.err_tol
)
)
diff = avg_time - params.expected_avg_time
ok = diff < params.err_tol
if diff < -params.err_tol:
print(
" !! WARNING !! Performance has improved by {}, "
"it may be necessary to fine-tune the "
"expected_avg_time = {}".format(-diff, params.expected_avg_time)
)
assert ok, " !! ERROR !! Regression detected"
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/v1/tpu/test_kv_cache_update_kernel.py | tests/v1/tpu/test_kv_cache_update_kernel.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
import pytest
import torch
import torch_xla
import vllm.v1.attention.backends.pallas # noqa: F401
from vllm.platforms import current_platform
@pytest.mark.skipif(not current_platform.is_tpu(), reason="This is a test for TPU only")
@pytest.mark.parametrize("page_size", [32, 33])
@pytest.mark.parametrize("combined_kv_head_num", [2, 16])
@pytest.mark.parametrize("head_dim", [128, 256])
@pytest.mark.parametrize("num_slices_per_block", [4, 8])
def test_kv_cache_update_kernel(
page_size: int, combined_kv_head_num: int, head_dim: int, num_slices_per_block: int
):
page_num = 1000
padded_num_tokens = 128
kv_cache_cpu = torch.zeros(
(page_num * page_size, combined_kv_head_num, head_dim),
dtype=torch.bfloat16,
device="cpu",
)
kv_cache_xla = kv_cache_cpu.to(torch_xla.device())
new_kv_cpu = torch.randn(
(padded_num_tokens, combined_kv_head_num, head_dim),
dtype=torch.bfloat16,
device="cpu",
)
new_kv_xla = new_kv_cpu.to(torch_xla.device())
slice_lens = np.array([7, page_size, page_size, 1, 1, 1, 9], dtype=np.int32)
num_kv_update_slices = len(slice_lens)
kv_cache_start_indices = np.array(
[
page_size * 2 - 7,
page_size * 2,
page_size * 3,
page_size * 4 + 6,
page_size * 5 + 7,
page_size * 6 + 8,
page_size * 15 + 3,
],
dtype=np.int32,
)
new_kv_cache_indices = np.concatenate(
[np.array([0], dtype=np.int32), np.cumsum(slice_lens[:-1])]
)
slot_mapping = np.stack(
[kv_cache_start_indices, new_kv_cache_indices, slice_lens], axis=1
)
slot_mapping = np.transpose(slot_mapping)
slot_mapping_cpu = torch.tensor(slot_mapping, device="cpu", dtype=torch.int32)
slot_mapping_xla = slot_mapping_cpu.to(torch_xla.device())
num_kv_update_slices_xla = torch.tensor(
[num_kv_update_slices], device=torch_xla.device(), dtype=torch.int32
)
torch_xla.sync()
torch.ops.xla.dynamo_set_buffer_donor_(kv_cache_xla, True)
new_kv_cache_xla = torch.ops.xla.kv_cache_update_op(
new_kv_xla,
slot_mapping_xla,
kv_cache_xla,
num_kv_update_slices_xla,
page_size,
num_slices_per_block,
)
kv_cache_xla.copy_(new_kv_cache_xla)
torch_xla.sync()
for ni, ci, sl in zip(new_kv_cache_indices, kv_cache_start_indices, slice_lens):
kv_cache_cpu[ci : ci + sl, :, :] = new_kv_cpu[ni : ni + sl, :, :]
assert torch.allclose(kv_cache_xla.cpu(), kv_cache_cpu, atol=1e-4, rtol=1e-4)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.