sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
vllm-project/vllm:vllm/entrypoints/openai/chat_completion/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from http import HTTPStatus
from fastapi import APIRouter, Depends, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from vllm.entrypoints.openai.chat_completion.protocol import (
ChatCompletionRequest,
ChatCompletionResponse,
)
from vllm.entrypoints.openai.chat_completion.serving import OpenAIServingChat
from vllm.entrypoints.openai.engine.protocol import ErrorResponse
from vllm.entrypoints.openai.orca_metrics import metrics_header
from vllm.entrypoints.openai.utils import validate_json_request
from vllm.entrypoints.utils import (
load_aware_call,
with_cancellation,
)
from vllm.logger import init_logger
logger = init_logger(__name__)
router = APIRouter()
ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL = "endpoint-load-metrics-format"
def chat(request: Request) -> OpenAIServingChat | None:
return request.app.state.openai_serving_chat
@router.post(
"/v1/chat/completions",
dependencies=[Depends(validate_json_request)],
responses={
HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
},
)
@with_cancellation
@load_aware_call
async def create_chat_completion(request: ChatCompletionRequest, raw_request: Request):
metrics_header_format = raw_request.headers.get(
ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL, ""
)
handler = chat(raw_request)
if handler is None:
base_server = raw_request.app.state.openai_serving_tokenization
return base_server.create_error_response(
message="The model does not support Chat Completions API"
)
try:
generator = await handler.create_chat_completion(request, raw_request)
except Exception as e:
generator = handler.create_error_response(e)
if isinstance(generator, ErrorResponse):
return JSONResponse(
content=generator.model_dump(), status_code=generator.error.code
)
elif isinstance(generator, ChatCompletionResponse):
return JSONResponse(
content=generator.model_dump(),
headers=metrics_header(metrics_header_format),
)
return StreamingResponse(content=generator, media_type="text/event-stream")
@router.post(
"/v1/chat/completions/render",
dependencies=[Depends(validate_json_request)],
response_model=list,
responses={
HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
},
)
async def render_chat_completion(request: ChatCompletionRequest, raw_request: Request):
"""Render chat completion request and return conversation and engine
prompts without generating."""
handler = chat(raw_request)
if handler is None:
base_server = raw_request.app.state.openai_serving_tokenization
return base_server.create_error_response(
message="The model does not support Chat Completions API"
)
try:
result = await handler.render_chat_request(request)
except Exception as e:
result = handler.create_error_response(e)
if isinstance(result, ErrorResponse):
return JSONResponse(content=result.model_dump(), status_code=result.error.code)
return JSONResponse(content=result)
def attach_router(app: FastAPI):
app.include_router(router)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/chat_completion/api_router.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/chat_completion/protocol.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py
import json
import time
from typing import Annotated, Any, ClassVar, Literal
import torch
from openai.types.chat.chat_completion_audio import (
ChatCompletionAudio as OpenAIChatCompletionAudio,
)
from openai.types.chat.chat_completion_message import Annotation as OpenAIAnnotation
from pydantic import Field, model_validator
from vllm.config import ModelConfig
from vllm.config.utils import replace
from vllm.entrypoints.chat_utils import (
ChatCompletionMessageParam,
ChatTemplateContentFormatOption,
)
from vllm.entrypoints.openai.engine.protocol import (
AnyResponseFormat,
DeltaMessage,
FunctionCall,
FunctionDefinition,
LegacyStructuralTagResponseFormat,
OpenAIBaseModel,
StreamOptions,
StructuralTagResponseFormat,
ToolCall,
UsageInfo,
)
from vllm.exceptions import VLLMValidationError
from vllm.logger import init_logger
from vllm.logprobs import Logprob
from vllm.renderers import ChatParams, TokenizeParams, merge_kwargs
from vllm.sampling_params import (
BeamSearchParams,
RequestOutputKind,
SamplingParams,
StructuredOutputsParams,
)
from vllm.utils import random_uuid
logger = init_logger(__name__)
_LONG_INFO = torch.iinfo(torch.long)
class ChatMessage(OpenAIBaseModel):
role: str
content: str | None = None
refusal: str | None = None
annotations: OpenAIAnnotation | None = None
audio: OpenAIChatCompletionAudio | None = None
function_call: FunctionCall | None = None
tool_calls: list[ToolCall] = Field(default_factory=list)
# vLLM-specific fields that are not in OpenAI spec
reasoning: str | None = None
class ChatCompletionLogProb(OpenAIBaseModel):
token: str
logprob: float = -9999.0
bytes: list[int] | None = None
class ChatCompletionLogProbsContent(ChatCompletionLogProb):
# Workaround: redefine fields name cache so that it's not
# shared with the super class.
field_names: ClassVar[set[str] | None] = None
top_logprobs: list[ChatCompletionLogProb] = Field(default_factory=list)
class ChatCompletionLogProbs(OpenAIBaseModel):
content: list[ChatCompletionLogProbsContent] | None = None
class ChatCompletionResponseChoice(OpenAIBaseModel):
index: int
message: ChatMessage
logprobs: ChatCompletionLogProbs | None = None
# per OpenAI spec this is the default
finish_reason: str | None = "stop"
# not part of the OpenAI spec but included in vLLM for legacy reasons
stop_reason: int | str | None = None
# not part of the OpenAI spec but is useful for tracing the tokens
# in agent scenarios
token_ids: list[int] | None = None
class ChatCompletionResponse(OpenAIBaseModel):
id: str = Field(default_factory=lambda: f"chatcmpl-{random_uuid()}")
object: Literal["chat.completion"] = "chat.completion"
created: int = Field(default_factory=lambda: int(time.time()))
model: str
choices: list[ChatCompletionResponseChoice]
service_tier: Literal["auto", "default", "flex", "scale", "priority"] | None = None
system_fingerprint: str | None = None
usage: UsageInfo
# vLLM-specific fields that are not in OpenAI spec
prompt_logprobs: list[dict[int, Logprob] | None] | None = None
prompt_token_ids: list[int] | None = None
kv_transfer_params: dict[str, Any] | None = Field(
default=None, description="KVTransfer parameters."
)
class ChatCompletionResponseStreamChoice(OpenAIBaseModel):
index: int
delta: DeltaMessage
logprobs: ChatCompletionLogProbs | None = None
finish_reason: str | None = None
stop_reason: int | str | None = None
# not part of the OpenAI spec but for tracing the tokens
token_ids: list[int] | None = None
class ChatCompletionStreamResponse(OpenAIBaseModel):
id: str = Field(default_factory=lambda: f"chatcmpl-{random_uuid()}")
object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
created: int = Field(default_factory=lambda: int(time.time()))
model: str
choices: list[ChatCompletionResponseStreamChoice]
usage: UsageInfo | None = Field(default=None)
# not part of the OpenAI spec but for tracing the tokens
prompt_token_ids: list[int] | None = None
class ChatCompletionToolsParam(OpenAIBaseModel):
type: Literal["function"] = "function"
function: FunctionDefinition
class ChatCompletionNamedFunction(OpenAIBaseModel):
name: str
class ChatCompletionNamedToolChoiceParam(OpenAIBaseModel):
function: ChatCompletionNamedFunction
type: Literal["function"] = "function"
class ChatCompletionRequest(OpenAIBaseModel):
# Ordered by official OpenAI API documentation
# https://platform.openai.com/docs/api-reference/chat/create
messages: list[ChatCompletionMessageParam]
model: str | None = None
frequency_penalty: float | None = 0.0
logit_bias: dict[str, float] | None = None
logprobs: bool | None = False
top_logprobs: int | None = 0
max_tokens: int | None = Field(
default=None,
deprecated="max_tokens is deprecated in favor of "
"the max_completion_tokens field",
)
max_completion_tokens: int | None = None
n: int | None = 1
presence_penalty: float | None = 0.0
response_format: AnyResponseFormat | None = None
seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
stop: str | list[str] | None = []
stream: bool | None = False
stream_options: StreamOptions | None = None
temperature: float | None = None
top_p: float | None = None
tools: list[ChatCompletionToolsParam] | None = None
tool_choice: (
Literal["none"]
| Literal["auto"]
| Literal["required"]
| ChatCompletionNamedToolChoiceParam
| None
) = "none"
reasoning_effort: Literal["low", "medium", "high"] | None = None
include_reasoning: bool = True
parallel_tool_calls: bool | None = True
# NOTE this will be ignored by vLLM
user: str | None = None
# --8<-- [start:chat-completion-sampling-params]
use_beam_search: bool = False
top_k: int | None = None
min_p: float | None = None
repetition_penalty: float | None = None
length_penalty: float = 1.0
stop_token_ids: list[int] | None = []
include_stop_str_in_output: bool = False
ignore_eos: bool = False
min_tokens: int = 0
skip_special_tokens: bool = True
spaces_between_special_tokens: bool = True
truncate_prompt_tokens: Annotated[int, Field(ge=-1, le=_LONG_INFO.max)] | None = (
None
)
prompt_logprobs: int | None = None
allowed_token_ids: list[int] | None = None
bad_words: list[str] = Field(default_factory=list)
# --8<-- [end:chat-completion-sampling-params]
# --8<-- [start:chat-completion-extra-params]
echo: bool = Field(
default=False,
description=(
"If true, the new message will be prepended with the last message "
"if they belong to the same role."
),
)
add_generation_prompt: bool = Field(
default=True,
description=(
"If true, the generation prompt will be added to the chat template. "
"This is a parameter used by chat template in tokenizer config of the "
"model."
),
)
continue_final_message: bool = Field(
default=False,
description=(
"If this is set, the chat will be formatted so that the final "
"message in the chat is open-ended, without any EOS tokens. The "
"model will continue this message rather than starting a new one. "
'This allows you to "prefill" part of the model\'s response for it. '
"Cannot be used at the same time as `add_generation_prompt`."
),
)
add_special_tokens: bool = Field(
default=False,
description=(
"If true, special tokens (e.g. BOS) will be added to the prompt "
"on top of what is added by the chat template. "
"For most models, the chat template takes care of adding the "
"special tokens so this should be set to false (as is the "
"default)."
),
)
documents: list[dict[str, str]] | None = Field(
default=None,
description=(
"A list of dicts representing documents that will be accessible to "
"the model if it is performing RAG (retrieval-augmented generation)."
" If the template does not support RAG, this argument will have no "
"effect. We recommend that each document should be a dict containing "
'"title" and "text" keys.'
),
)
chat_template: str | None = Field(
default=None,
description=(
"A Jinja template to use for this conversion. "
"As of transformers v4.44, default chat template is no longer "
"allowed, so you must provide a chat template if the tokenizer "
"does not define one."
),
)
chat_template_kwargs: dict[str, Any] | None = Field(
default=None,
description=(
"Additional keyword args to pass to the template renderer. "
"Will be accessible by the chat template."
),
)
mm_processor_kwargs: dict[str, Any] | None = Field(
default=None,
description=("Additional kwargs to pass to the HF processor."),
)
structured_outputs: StructuredOutputsParams | None = Field(
default=None,
description="Additional kwargs for structured outputs",
)
priority: int = Field(
default=0,
description=(
"The priority of the request (lower means earlier handling; "
"default: 0). Any priority other than 0 will raise an error "
"if the served model does not use priority scheduling."
),
)
request_id: str = Field(
default_factory=random_uuid,
description=(
"The request_id related to this request. If the caller does "
"not set it, a random_uuid will be generated. This id is used "
"through out the inference process and return in response."
),
)
return_tokens_as_token_ids: bool | None = Field(
default=None,
description=(
"If specified with 'logprobs', tokens are represented "
" as strings of the form 'token_id:{token_id}' so that tokens "
"that are not JSON-encodable can be identified."
),
)
return_token_ids: bool | None = Field(
default=None,
description=(
"If specified, the result will include token IDs alongside the "
"generated text. In streaming mode, prompt_token_ids is included "
"only in the first chunk, and token_ids contains the delta tokens "
"for each chunk. This is useful for debugging or when you "
"need to map generated text back to input tokens."
),
)
cache_salt: str | None = Field(
default=None,
description=(
"If specified, the prefix cache will be salted with the provided "
"string to prevent an attacker to guess prompts in multi-user "
"environments. The salt should be random, protected from "
"access by 3rd parties, and long enough to be "
"unpredictable (e.g., 43 characters base64-encoded, corresponding "
"to 256 bit)."
),
)
kv_transfer_params: dict[str, Any] | None = Field(
default=None,
description="KVTransfer parameters used for disaggregated serving.",
)
vllm_xargs: dict[str, str | int | float | list[str | int | float]] | None = Field(
default=None,
description=(
"Additional request parameters with (list of) string or "
"numeric values, used by custom extensions."
),
)
# --8<-- [end:chat-completion-extra-params]
def build_chat_params(
self,
default_template: str | None,
default_template_content_format: ChatTemplateContentFormatOption,
) -> ChatParams:
return ChatParams(
chat_template=self.chat_template or default_template,
chat_template_content_format=default_template_content_format,
chat_template_kwargs=merge_kwargs(
self.chat_template_kwargs,
dict(
add_generation_prompt=self.add_generation_prompt,
continue_final_message=self.continue_final_message,
documents=self.documents,
reasoning_effort=self.reasoning_effort,
),
),
)
def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
if self.max_completion_tokens is not None:
max_output_tokens: int | None = self.max_completion_tokens
max_output_tokens_param = "max_completion_tokens"
else:
max_output_tokens = self.max_tokens
max_output_tokens_param = "max_tokens"
return TokenizeParams(
max_total_tokens=model_config.max_model_len,
max_output_tokens=max_output_tokens or 0,
truncate_prompt_tokens=self.truncate_prompt_tokens,
add_special_tokens=self.add_special_tokens,
needs_detokenization=bool(self.echo and not self.return_token_ids),
max_total_tokens_param="max_model_len",
max_output_tokens_param=max_output_tokens_param,
)
# Default sampling parameters for chat completion requests
_DEFAULT_SAMPLING_PARAMS: dict = {
"repetition_penalty": 1.0,
"temperature": 1.0,
"top_p": 1.0,
"top_k": 0,
"min_p": 0.0,
}
def to_beam_search_params(
self, max_tokens: int, default_sampling_params: dict
) -> BeamSearchParams:
n = self.n if self.n is not None else 1
if (temperature := self.temperature) is None:
temperature = default_sampling_params.get(
"temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
)
return BeamSearchParams(
beam_width=n,
max_tokens=max_tokens,
ignore_eos=self.ignore_eos,
temperature=temperature,
length_penalty=self.length_penalty,
include_stop_str_in_output=self.include_stop_str_in_output,
)
def to_sampling_params(
self,
max_tokens: int,
default_sampling_params: dict,
) -> SamplingParams:
# Default parameters
if (repetition_penalty := self.repetition_penalty) is None:
repetition_penalty = default_sampling_params.get(
"repetition_penalty",
self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
)
if (temperature := self.temperature) is None:
temperature = default_sampling_params.get(
"temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
)
if (top_p := self.top_p) is None:
top_p = default_sampling_params.get(
"top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
)
if (top_k := self.top_k) is None:
top_k = default_sampling_params.get(
"top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
)
if (min_p := self.min_p) is None:
min_p = default_sampling_params.get(
"min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
)
prompt_logprobs = self.prompt_logprobs
if prompt_logprobs is None and self.echo:
prompt_logprobs = self.top_logprobs
response_format = self.response_format
if response_format is not None:
structured_outputs_kwargs = dict[str, Any]()
# Set structured output params for response format
if response_format.type == "json_object":
structured_outputs_kwargs["json_object"] = True
elif response_format.type == "json_schema":
json_schema = response_format.json_schema
assert json_schema is not None
structured_outputs_kwargs["json"] = json_schema.json_schema
elif response_format.type == "structural_tag":
structural_tag = response_format
assert structural_tag is not None and isinstance(
structural_tag,
(
LegacyStructuralTagResponseFormat,
StructuralTagResponseFormat,
),
)
s_tag_obj = structural_tag.model_dump(by_alias=True)
structured_outputs_kwargs["structural_tag"] = json.dumps(s_tag_obj)
# If structured outputs wasn't already enabled,
# we must enable it for these features to work
if len(structured_outputs_kwargs) > 0:
self.structured_outputs = (
StructuredOutputsParams(**structured_outputs_kwargs)
if self.structured_outputs is None
else replace(self.structured_outputs, **structured_outputs_kwargs)
)
extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {}
if self.kv_transfer_params:
# Pass in kv_transfer_params via extra_args
extra_args["kv_transfer_params"] = self.kv_transfer_params
return SamplingParams.from_optional(
n=self.n,
presence_penalty=self.presence_penalty,
frequency_penalty=self.frequency_penalty,
repetition_penalty=repetition_penalty,
temperature=temperature,
top_p=top_p,
top_k=top_k,
min_p=min_p,
seed=self.seed,
stop=self.stop,
stop_token_ids=self.stop_token_ids,
logprobs=self.top_logprobs if self.logprobs else None,
prompt_logprobs=prompt_logprobs,
ignore_eos=self.ignore_eos,
max_tokens=max_tokens,
min_tokens=self.min_tokens,
skip_special_tokens=self.skip_special_tokens,
spaces_between_special_tokens=self.spaces_between_special_tokens,
include_stop_str_in_output=self.include_stop_str_in_output,
output_kind=RequestOutputKind.DELTA
if self.stream
else RequestOutputKind.FINAL_ONLY,
structured_outputs=self.structured_outputs,
logit_bias=self.logit_bias,
bad_words=self.bad_words,
allowed_token_ids=self.allowed_token_ids,
extra_args=extra_args or None,
skip_clone=True, # Created fresh per request, safe to skip clone
)
@model_validator(mode="before")
@classmethod
def validate_response_format(cls, data):
response_format = data.get("response_format")
if response_format is None:
return data
rf_type = (
response_format.get("type")
if isinstance(response_format, dict)
else getattr(response_format, "type", None)
)
if rf_type == "json_schema":
json_schema = (
response_format.get("json_schema")
if isinstance(response_format, dict)
else getattr(response_format, "json_schema", None)
)
if json_schema is None:
raise VLLMValidationError(
"When response_format type is 'json_schema', the "
"'json_schema' field must be provided.",
parameter="response_format",
)
return data
@model_validator(mode="before")
@classmethod
def validate_stream_options(cls, data):
if data.get("stream_options") and not data.get("stream"):
raise VLLMValidationError(
"Stream options can only be defined when `stream=True`.",
parameter="stream_options",
)
return data
@model_validator(mode="before")
@classmethod
def check_logprobs(cls, data):
if (prompt_logprobs := data.get("prompt_logprobs")) is not None:
if data.get("stream") and (prompt_logprobs > 0 or prompt_logprobs == -1):
raise VLLMValidationError(
"`prompt_logprobs` are not available when `stream=True`.",
parameter="prompt_logprobs",
)
if prompt_logprobs < 0 and prompt_logprobs != -1:
raise VLLMValidationError(
"`prompt_logprobs` must be a positive value or -1.",
parameter="prompt_logprobs",
value=prompt_logprobs,
)
if (top_logprobs := data.get("top_logprobs")) is not None:
if top_logprobs < 0 and top_logprobs != -1:
raise VLLMValidationError(
"`top_logprobs` must be a positive value or -1.",
parameter="top_logprobs",
value=top_logprobs,
)
if (top_logprobs == -1 or top_logprobs > 0) and not data.get("logprobs"):
raise VLLMValidationError(
"when using `top_logprobs`, `logprobs` must be set to true.",
parameter="top_logprobs",
)
return data
@model_validator(mode="before")
@classmethod
def check_structured_outputs_count(cls, data):
if isinstance(data, ValueError):
raise data
if data.get("structured_outputs", None) is None:
return data
structured_outputs_kwargs = data["structured_outputs"]
# structured_outputs may arrive as a dict (from JSON/raw kwargs) or
# as a StructuredOutputsParams dataclass instance.
is_dataclass = isinstance(structured_outputs_kwargs, StructuredOutputsParams)
count = sum(
(
getattr(structured_outputs_kwargs, k, None)
if is_dataclass
else structured_outputs_kwargs.get(k)
)
is not None
for k in ("json", "regex", "choice")
)
# you can only use one kind of constraints for structured outputs
if count > 1:
raise ValueError(
"You can only use one kind of constraints for structured "
"outputs ('json', 'regex' or 'choice')."
)
# you can only either use structured outputs or tools, not both
if count > 1 and data.get("tool_choice", "none") not in (
"none",
"auto",
"required",
):
raise ValueError(
"You can only either use constraints for structured outputs "
"or tools, not both."
)
return data
@model_validator(mode="before")
@classmethod
def check_tool_usage(cls, data):
# if "tool_choice" is not specified but tools are provided,
# default to "auto" tool_choice
if "tool_choice" not in data and data.get("tools"):
data["tool_choice"] = "auto"
# if "tool_choice" is "none" -- no validation is needed for tools
if "tool_choice" in data and data["tool_choice"] == "none":
return data
# if "tool_choice" is specified -- validation
if "tool_choice" in data and data["tool_choice"] is not None:
# ensure that if "tool choice" is specified, tools are present
if "tools" not in data or data["tools"] is None:
raise ValueError("When using `tool_choice`, `tools` must be set.")
# make sure that tool choice is either a named tool
# OR that it's set to "auto" or "required"
if data["tool_choice"] not in ["auto", "required"] and not isinstance(
data["tool_choice"], dict
):
raise ValueError(
f"Invalid value for `tool_choice`: {data['tool_choice']}! "
'Only named tools, "none", "auto" or "required" '
"are supported."
)
# if tool_choice is "required" but the "tools" list is empty,
# override the data to behave like "none" to align with
# OpenAI’s behavior.
if (
data["tool_choice"] == "required"
and isinstance(data["tools"], list)
and len(data["tools"]) == 0
):
data["tool_choice"] = "none"
del data["tools"]
return data
# ensure that if "tool_choice" is specified as an object,
# it matches a valid tool
correct_usage_message = (
'Correct usage: `{"type": "function",'
' "function": {"name": "my_function"}}`'
)
if isinstance(data["tool_choice"], dict):
valid_tool = False
function = data["tool_choice"].get("function")
if not isinstance(function, dict):
raise ValueError(
f"Invalid value for `function`: `{function}` in "
f"`tool_choice`! {correct_usage_message}"
)
if "name" not in function:
raise ValueError(
f"Expected field `name` in `function` in "
f"`tool_choice`! {correct_usage_message}"
)
function_name = function["name"]
if not isinstance(function_name, str) or len(function_name) == 0:
raise ValueError(
f"Invalid `name` in `function`: `{function_name}`"
f" in `tool_choice`! {correct_usage_message}"
)
for tool in data["tools"]:
if tool["function"]["name"] == function_name:
valid_tool = True
break
if not valid_tool:
raise ValueError(
"The tool specified in `tool_choice` does not match any"
" of the specified `tools`"
)
return data
@model_validator(mode="before")
@classmethod
def check_generation_prompt(cls, data):
if data.get("continue_final_message") and data.get("add_generation_prompt"):
raise ValueError(
"Cannot set both `continue_final_message` and "
"`add_generation_prompt` to True."
)
return data
@model_validator(mode="before")
@classmethod
def check_cache_salt_support(cls, data):
if data.get("cache_salt") is not None and (
not isinstance(data["cache_salt"], str) or not data["cache_salt"]
):
raise ValueError(
"Parameter 'cache_salt' must be a non-empty string if provided."
)
return data
@model_validator(mode="before")
@classmethod
def check_system_message_content_type(cls, data):
"""Warn if system messages contain non-text content.
According to OpenAI API spec, system messages can only be of type
'text'. We log a warning instead of rejecting to avoid breaking
users who intentionally send multimodal system messages.
See: https://platform.openai.com/docs/api-reference/chat/create#chat_create-messages-system_message
"""
if not isinstance(data, dict):
return data
messages = data.get("messages", [])
for msg in messages:
# Check if this is a system message
if isinstance(msg, dict) and msg.get("role") == "system":
content = msg.get("content")
# If content is a list (multimodal format)
if isinstance(content, list):
for part in content:
if isinstance(part, dict):
part_type = part.get("type")
# Infer type when 'type' field is not explicit
if part_type is None:
if "image_url" in part or "image_pil" in part:
part_type = "image_url"
elif "image_embeds" in part:
part_type = "image_embeds"
elif "audio_url" in part:
part_type = "audio_url"
elif "input_audio" in part:
part_type = "input_audio"
elif "audio_embeds" in part:
part_type = "audio_embeds"
elif "video_url" in part:
part_type = "video_url"
# Warn about non-text content in system messages
if part_type and part_type != "text":
logger.warning_once(
"System messages should only contain text "
"content according to the OpenAI API spec. "
"Found content type: '%s'.",
part_type,
)
return data
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/chat_completion/protocol.py",
"license": "Apache License 2.0",
"lines": 682,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/serve/tokenize/protocol.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Annotated, Any, TypeAlias
from pydantic import ConfigDict, Field, model_validator
from vllm.config import ModelConfig
from vllm.entrypoints.chat_utils import (
ChatCompletionMessageParam,
ChatTemplateContentFormatOption,
)
from vllm.entrypoints.openai.chat_completion.protocol import (
ChatCompletionToolsParam,
)
from vllm.entrypoints.openai.engine.protocol import (
OpenAIBaseModel,
)
from vllm.renderers import ChatParams, TokenizeParams, merge_kwargs
class TokenizeCompletionRequest(OpenAIBaseModel):
model: str | None = None
prompt: str
add_special_tokens: bool = Field(
default=True,
description=(
"If true (the default), special tokens (e.g. BOS) will be added to "
"the prompt."
),
)
return_token_strs: bool | None = Field(
default=False,
description=(
"If true, also return the token strings corresponding to the token ids."
),
)
def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
return TokenizeParams(
max_total_tokens=None,
max_output_tokens=0,
add_special_tokens=self.add_special_tokens,
)
class TokenizeChatRequest(OpenAIBaseModel):
model: str | None = None
messages: list[ChatCompletionMessageParam]
add_generation_prompt: bool = Field(
default=True,
description=(
"If true, the generation prompt will be added to the chat template. "
"This is a parameter used by chat template in tokenizer config of the "
"model."
),
)
return_token_strs: bool | None = Field(
default=False,
description=(
"If true, also return the token strings corresponding to the token ids."
),
)
continue_final_message: bool = Field(
default=False,
description=(
"If this is set, the chat will be formatted so that the final "
"message in the chat is open-ended, without any EOS tokens. The "
"model will continue this message rather than starting a new one. "
'This allows you to "prefill" part of the model\'s response for it. '
"Cannot be used at the same time as `add_generation_prompt`."
),
)
add_special_tokens: bool = Field(
default=False,
description=(
"If true, special tokens (e.g. BOS) will be added to the prompt "
"on top of what is added by the chat template. "
"For most models, the chat template takes care of adding the "
"special tokens so this should be set to false (as is the "
"default)."
),
)
chat_template: str | None = Field(
default=None,
description=(
"A Jinja template to use for this conversion. "
"As of transformers v4.44, default chat template is no longer "
"allowed, so you must provide a chat template if the tokenizer "
"does not define one."
),
)
chat_template_kwargs: dict[str, Any] | None = Field(
default=None,
description=(
"Additional keyword args to pass to the template renderer. "
"Will be accessible by the chat template."
),
)
mm_processor_kwargs: dict[str, Any] | None = Field(
default=None,
description="Additional kwargs to pass to the HF processor.",
)
tools: list[ChatCompletionToolsParam] | None = Field(
default=None,
description="A list of tools the model may call.",
)
@model_validator(mode="before")
@classmethod
def check_generation_prompt(cls, data):
if data.get("continue_final_message") and data.get("add_generation_prompt"):
raise ValueError(
"Cannot set both `continue_final_message` and "
"`add_generation_prompt` to True."
)
return data
def build_chat_params(
self,
default_template: str | None,
default_template_content_format: ChatTemplateContentFormatOption,
) -> ChatParams:
return ChatParams(
chat_template=self.chat_template or default_template,
chat_template_content_format=default_template_content_format,
chat_template_kwargs=merge_kwargs(
self.chat_template_kwargs,
dict(
add_generation_prompt=self.add_generation_prompt,
continue_final_message=self.continue_final_message,
),
),
)
def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
return TokenizeParams(
max_total_tokens=None,
max_output_tokens=0,
add_special_tokens=self.add_special_tokens,
)
TokenizeRequest: TypeAlias = TokenizeCompletionRequest | TokenizeChatRequest
class TokenizeResponse(OpenAIBaseModel):
count: int
max_model_len: int
tokens: list[int]
token_strs: list[str] | None = None
class DetokenizeRequest(OpenAIBaseModel):
model: str | None = None
# TODO: Factor `torch.iinfo` out. `torch.iinfo` pulls torch into a
# Pydantic protocol file that currently has no torch dependency.
# See: https://github.com/vllm-project/vllm/pull/34468#discussion_r2801173630
tokens: list[Annotated[int, Field(ge=0, le=2**63 - 1)]]
def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
return TokenizeParams(
max_total_tokens=None,
max_output_tokens=0,
needs_detokenization=True,
)
class DetokenizeResponse(OpenAIBaseModel):
prompt: str
class TokenizerInfoResponse(OpenAIBaseModel):
    """
    Response containing tokenizer configuration
    equivalent to tokenizer_config.json
    """

    # Allow arbitrary extra fields so the full tokenizer_config.json payload
    # can round-trip through this model unchanged.
    model_config = ConfigDict(extra="allow")
    # Tokenizer class name as reported by the tokenizer config.
    tokenizer_class: str
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/serve/tokenize/protocol.py",
"license": "Apache License 2.0",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/mm/mrope_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from vllm.model_executor.models.interfaces import SupportsMRoPE
from vllm.triton_utils import tl, triton
from vllm.v1.worker.gpu.buffer_utils import StagedWriteTensor, UvaBackedTensor
class MRopeState:
    """Per-request state for M-RoPE (multimodal rotary) position IDs.

    Holds the pre-computed 3-plane prefill positions of each request plus a
    per-request scalar delta used to extrapolate positions after the prefill.
    """

    def __init__(
        self,
        max_num_reqs: int,
        max_num_tokens: int,
        max_model_len: int,
        device: torch.device,
    ):
        self.max_num_reqs = max_num_reqs
        self.max_num_tokens = max_num_tokens
        self.max_model_len = max_model_len
        self.device = device
        # NOTE(woosuk): This tensor can be extremely large (e.g., several GBs)
        # wasting a lot of CPU memory.
        # Layout: rows 3*i .. 3*i+2 hold the three position planes of request i.
        self.prefill_mrope_positions = StagedWriteTensor(
            (max_num_reqs * 3, max_model_len),
            dtype=torch.int32,
            device=device,
            uva_instead_of_gpu=True,
        )
        # Per-request scalar delta applied to positions beyond the prefill.
        self.prefill_mrope_delta = UvaBackedTensor(max_num_reqs, dtype=torch.int32)
        # NOTE: `mrope_positions` is implemented with one additional dummy
        # position on purpose to make it non-contiguous so that it can work
        # with torch compile.
        # See detailed explanation in https://github.com/vllm-project/vllm/pull/12128#discussion_r1926431923
        # NOTE: When M-RoPE is enabled, position ids are 3D regardless of
        # the modality of inputs. For text-only inputs, each dimension has
        # identical position IDs, making M-RoPE functionally equivalent to
        # 1D-RoPE.
        # See page 5 of https://arxiv.org/abs/2409.12191
        self.mrope_positions = torch.zeros(
            (3, max_num_tokens + 1), dtype=torch.int64, device=device
        )

    def init_prefill_mrope_positions(
        self,
        req_idx: int,
        mrope_model: SupportsMRoPE,
        prefill_token_ids: list[int],
        mm_features: list,
    ) -> None:
        """Compute and stage the full prefill positions for one request."""
        prefill_mrope_positions, prefill_mrope_delta = (
            mrope_model.get_mrope_input_positions(prefill_token_ids, mm_features)
        )
        # Stage each of the three position planes into its dedicated row.
        for i in range(3):
            pos = prefill_mrope_positions[i].tolist()
            self.prefill_mrope_positions.stage_write(3 * req_idx + i, 0, pos)
        self.prefill_mrope_delta.np[req_idx] = prefill_mrope_delta

    def apply_staged_writes(self) -> None:
        """Flush staged host-side writes so the GPU kernel observes them."""
        self.prefill_mrope_positions.apply_write()
        self.prefill_mrope_delta.copy_to_uva()

    def prepare_mrope_positions(
        self,
        idx_mapping: torch.Tensor,
        query_start_loc: torch.Tensor,
        prefill_lens: torch.Tensor,
        num_computed_tokens: torch.Tensor,
    ) -> None:
        """Fill `mrope_positions` for the current batch via a Triton kernel.

        Launches one program per request; see
        `_prepare_mrope_positions_kernel` for the per-token logic.
        """
        num_reqs = idx_mapping.shape[0]
        _prepare_mrope_positions_kernel[(num_reqs,)](
            self.mrope_positions,
            self.mrope_positions.stride(0),
            self.prefill_mrope_positions.gpu,
            # Strides of the (max_num_reqs * 3, max_model_len) buffer when
            # viewed as (req, plane, pos).
            3 * self.max_model_len,
            self.max_model_len,
            self.prefill_mrope_delta.gpu,
            idx_mapping,
            query_start_loc,
            prefill_lens,
            num_computed_tokens,
            BLOCK_SIZE=1024,
        )
@triton.jit
def _prepare_mrope_positions_kernel(
    mrope_positions_ptr,
    mrope_positions_stride,
    prefill_mrope_positions_ptr,
    prefill_mrope_positions_stride0,
    prefill_mrope_positions_stride1,
    prefill_mrope_delta_ptr,
    idx_mapping_ptr,
    query_start_loc_ptr,
    prefill_lens_ptr,
    num_computed_tokens_ptr,
    BLOCK_SIZE: tl.constexpr,
):
    """Write the 3-plane M-RoPE positions for one request (one program).

    Prefill requests copy their pre-computed staged positions; otherwise the
    position is derived as `token_index + mrope_delta`.
    """
    batch_idx = tl.program_id(0)
    # Map the batch slot to the persistent request-state index.
    req_state_idx = tl.load(idx_mapping_ptr + batch_idx)
    prefill_len = tl.load(prefill_lens_ptr + req_state_idx)
    num_computed = tl.load(num_computed_tokens_ptr + req_state_idx)
    # Still inside the prompt -> read staged prefill positions.
    is_prefill = num_computed < prefill_len
    query_start = tl.load(query_start_loc_ptr + batch_idx)
    query_end = tl.load(query_start_loc_ptr + batch_idx + 1)
    query_len = query_end - query_start
    mrope_delta = tl.load(prefill_mrope_delta_ptr + req_state_idx)
    # Process the query in BLOCK_SIZE chunks; j iterates the 3 planes.
    for i in range(0, query_len, BLOCK_SIZE):
        block = i + tl.arange(0, BLOCK_SIZE)
        mask = block < query_len
        # Absolute token index within the request.
        orig_pos = num_computed + block
        for j in tl.static_range(3):
            if is_prefill:
                # Read from pre-computed M-RoPE positions.
                pos = tl.load(
                    prefill_mrope_positions_ptr
                    + req_state_idx * prefill_mrope_positions_stride0
                    + j * prefill_mrope_positions_stride1
                    + orig_pos,
                    mask=mask,
                )
            else:
                # Apply M-RoPE delta.
                pos = orig_pos + mrope_delta
            tl.store(
                mrope_positions_ptr + j * mrope_positions_stride + query_start + block,
                pos,
                mask=mask,
            )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/mm/mrope_utils.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/sample/logit_bias.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
import torch
from vllm.sampling_params import SamplingParams
from vllm.triton_utils import tl, triton
from vllm.v1.worker.gpu.buffer_utils import StagedWriteTensor, UvaBackedTensor
# Fixed per-request capacities of the staged buffers below.
# `LogitBiasState.add_request` raises ValueError when a request exceeds them.
MAX_NUM_ALLOWED_TOKEN_IDS = 1024
MAX_NUM_LOGIT_BIAS_TOKENS = 1024
MAX_NUM_STOP_TOKEN_IDS = 128
class LogitBiasState:
    """Per-request state for sampling features that edit logits in place.

    Covers three features: `allowed_token_ids` (mask everything else),
    `logit_bias` (additive per-token bias), and min-tokens stop-token
    suppression. Writes are staged on the host and flushed in bulk via
    `apply_staged_writes`.
    """

    def __init__(self, max_num_reqs: int, device: torch.device):
        self.max_num_reqs = max_num_reqs
        # Allowed token IDs.
        self.num_allowed_token_ids = UvaBackedTensor(
            self.max_num_reqs, dtype=torch.int32
        )
        self.allowed_token_ids = StagedWriteTensor(
            (self.max_num_reqs, MAX_NUM_ALLOWED_TOKEN_IDS),
            dtype=torch.int32,
            device=device,
        )
        # Logit bias.
        self.num_logit_bias = UvaBackedTensor(self.max_num_reqs, dtype=torch.int32)
        self.logit_bias_token_ids = StagedWriteTensor(
            (self.max_num_reqs, MAX_NUM_LOGIT_BIAS_TOKENS),
            dtype=torch.int32,
            device=device,
        )
        self.logit_bias = StagedWriteTensor(
            (self.max_num_reqs, MAX_NUM_LOGIT_BIAS_TOKENS),
            dtype=torch.float32,
            device=device,
        )
        # Min tokens.
        self.min_lens = UvaBackedTensor(self.max_num_reqs, dtype=torch.int32)
        self.num_stop_token_ids = UvaBackedTensor(self.max_num_reqs, dtype=torch.int32)
        self.stop_token_ids = StagedWriteTensor(
            (self.max_num_reqs, MAX_NUM_STOP_TOKEN_IDS),
            dtype=torch.int32,
            device=device,
        )
        # Using any of the above.
        self.use_logit_bias = np.zeros(max_num_reqs, dtype=bool)

    def add_request(
        self, req_idx: int, prompt_len: int, sampling_params: SamplingParams
    ) -> None:
        """Stage logit-bias state for the request occupying slot `req_idx`.

        Raises:
            ValueError: if any per-request list exceeds its fixed capacity.
        """
        # Using any logit bias.
        use_logit_bias = False
        # Allowed token IDs.
        allowed_token_ids = sampling_params.allowed_token_ids
        if allowed_token_ids:
            num_allowed_token_ids = len(allowed_token_ids)
            if num_allowed_token_ids > MAX_NUM_ALLOWED_TOKEN_IDS:
                raise ValueError(
                    f"Too many allowed token IDs: {num_allowed_token_ids}. "
                    f"The max size is {MAX_NUM_ALLOWED_TOKEN_IDS}."
                )
            self.num_allowed_token_ids.np[req_idx] = num_allowed_token_ids
            self.allowed_token_ids.stage_write(req_idx, 0, allowed_token_ids)
            use_logit_bias = True
        else:
            self.num_allowed_token_ids.np[req_idx] = 0
        # Logit bias.
        logit_bias = sampling_params.logit_bias
        if logit_bias:
            num_logit_bias = len(logit_bias)
            if num_logit_bias > MAX_NUM_LOGIT_BIAS_TOKENS:
                raise ValueError(
                    f"Too many logit bias tokens: {num_logit_bias}. "
                    f"The max size is {MAX_NUM_LOGIT_BIAS_TOKENS}."
                )
            self.num_logit_bias.np[req_idx] = num_logit_bias
            # Token IDs and bias values are staged in matching order.
            self.logit_bias_token_ids.stage_write(req_idx, 0, logit_bias.keys())
            self.logit_bias.stage_write(req_idx, 0, logit_bias.values())
            use_logit_bias = True
        else:
            self.num_logit_bias.np[req_idx] = 0
        # Min tokens.
        min_tokens = sampling_params.min_tokens
        min_len = prompt_len + min_tokens
        self.min_lens.np[req_idx] = min_len
        stop_token_ids = sampling_params.all_stop_token_ids
        # Stop tokens only need suppression when a minimum length is enforced.
        if min_tokens > 0 and stop_token_ids:
            num_stop_token_ids = len(stop_token_ids)
            if num_stop_token_ids > MAX_NUM_STOP_TOKEN_IDS:
                raise ValueError(
                    f"Too many stop tokens: {num_stop_token_ids}. "
                    f"The max size is {MAX_NUM_STOP_TOKEN_IDS}."
                )
            self.num_stop_token_ids.np[req_idx] = num_stop_token_ids
            self.stop_token_ids.stage_write(req_idx, 0, stop_token_ids)
            use_logit_bias = True
        else:
            self.num_stop_token_ids.np[req_idx] = 0
        self.use_logit_bias[req_idx] = use_logit_bias

    def apply_staged_writes(self) -> None:
        """Flush all staged host-side writes so the GPU kernel observes them."""
        self.num_allowed_token_ids.copy_to_uva()
        self.allowed_token_ids.apply_write()
        self.num_logit_bias.copy_to_uva()
        self.logit_bias_token_ids.apply_write()
        self.logit_bias.apply_write()
        self.min_lens.copy_to_uva()
        self.num_stop_token_ids.copy_to_uva()
        self.stop_token_ids.apply_write()

    def apply_logit_bias(
        self,
        logits: torch.Tensor,
        idx_mapping: torch.Tensor,
        idx_mapping_np: np.ndarray,
        pos: torch.Tensor,
    ) -> None:
        """Apply the staged features in place on `logits` for the batch."""
        if not np.any(self.use_logit_bias[idx_mapping_np]):
            # No request uses logit bias. Skip the kernel launch.
            return
        apply_logit_bias(
            logits,
            idx_mapping,
            pos,
            self.num_allowed_token_ids.gpu,
            self.allowed_token_ids.gpu,
            self.num_logit_bias.gpu,
            self.logit_bias_token_ids.gpu,
            self.logit_bias.gpu,
            self.min_lens.gpu,
            self.num_stop_token_ids.gpu,
            self.stop_token_ids.gpu,
        )
@triton.jit
def _bias_kernel(
    logits_ptr,
    logits_stride,
    vocab_size,
    idx_mapping_ptr,
    # Allowed token IDs.
    num_allowed_token_ids_ptr,
    allowed_token_ids_ptr,
    allowed_token_ids_stride,
    # Logit bias.
    num_logit_bias_ptr,
    bias_token_ids_ptr,
    bias_token_ids_stride,
    bias_ptr,
    bias_stride,
    # Min tokens.
    pos_ptr,
    min_lens_ptr,
    num_stop_token_ids_ptr,
    stop_token_ids_ptr,
    stop_token_ids_stride,
    BLOCK_SIZE: tl.constexpr,
    LOGITS_BLOCK_SIZE: tl.constexpr,
):
    """In-place logits postprocessing for one batch row (one program).

    Applies, in order: allowed-token masking, additive logit bias, and
    stop-token suppression while below the request's minimum length.
    The caller sizes BLOCK_SIZE to cover the widest per-request ID list.
    """
    batch_idx = tl.program_id(0)
    # Map the batch slot to the persistent request-state index.
    req_state_idx = tl.load(idx_mapping_ptr + batch_idx)
    block = tl.arange(0, BLOCK_SIZE)
    # Allowed token IDs.
    num_allowed_token_ids = tl.load(num_allowed_token_ids_ptr + req_state_idx)
    if num_allowed_token_ids > 0:
        # NOTE(review): `block` is recomputed here; identical to the value above.
        block = tl.arange(0, BLOCK_SIZE)
        mask = block < num_allowed_token_ids
        # Save logits for allowed token IDs.
        allowed_token_ids = tl.load(
            allowed_token_ids_ptr + req_state_idx * allowed_token_ids_stride + block,
            mask=mask,
        )
        logits = tl.load(
            logits_ptr + batch_idx * logits_stride + allowed_token_ids, mask=mask
        )
        # Set logits to -inf for all tokens.
        for i in range(0, vocab_size, LOGITS_BLOCK_SIZE):
            offset = i + tl.arange(0, LOGITS_BLOCK_SIZE)
            tl.store(
                logits_ptr + batch_idx * logits_stride + offset,
                -float("inf"),
                mask=offset < vocab_size,
            )
        # Restore logits for allowed token IDs.
        tl.store(
            logits_ptr + batch_idx * logits_stride + allowed_token_ids,
            logits,
            mask=mask,
        )
    # Logit bias.
    num_logit_bias = tl.load(num_logit_bias_ptr + req_state_idx)
    if num_logit_bias > 0:
        mask = block < num_logit_bias
        token_ids = tl.load(
            bias_token_ids_ptr + req_state_idx * bias_token_ids_stride + block,
            mask=mask,
        )
        bias = tl.load(bias_ptr + req_state_idx * bias_stride + block, mask=mask)
        # Read-modify-write the biased entries only.
        logits = tl.load(logits_ptr + batch_idx * logits_stride + token_ids, mask=mask)
        logits += bias
        tl.store(logits_ptr + batch_idx * logits_stride + token_ids, logits, mask=mask)
    # Apply min tokens.
    num_stop_token_ids = tl.load(num_stop_token_ids_ptr + req_state_idx)
    pos = tl.load(pos_ptr + batch_idx)
    min_len = tl.load(min_lens_ptr + req_state_idx)
    # Suppress stop tokens until the request reaches its minimum length.
    if num_stop_token_ids > 0 and pos < min_len:
        mask = block < num_stop_token_ids
        stop_token_ids = tl.load(
            stop_token_ids_ptr + req_state_idx * stop_token_ids_stride + block,
            mask=mask,
        )
        tl.store(
            logits_ptr + batch_idx * logits_stride + stop_token_ids,
            -float("inf"),
            mask=mask,
        )
def apply_logit_bias(
    logits: torch.Tensor,
    idx_mapping: torch.Tensor,
    pos: torch.Tensor,
    num_allowed_token_ids: torch.Tensor,
    allowed_token_ids: torch.Tensor,
    num_logit_bias: torch.Tensor,
    logit_bias_token_ids: torch.Tensor,
    logit_bias: torch.Tensor,
    min_lens: torch.Tensor,
    num_stop_token_ids: torch.Tensor,
    stop_token_ids: torch.Tensor,
) -> None:
    """Launch `_bias_kernel` to apply allowed-token masking, additive logit
    bias, and min-tokens stop-token suppression in place on `logits`."""
    num_reqs, vocab_size = logits.shape
    # One block must cover the widest of the three per-request ID lists.
    widest_list = max(
        allowed_token_ids.shape[-1],
        logit_bias_token_ids.shape[-1],
        stop_token_ids.shape[-1],
    )
    block_size = triton.next_power_of_2(widest_list)
    # Chunk size used when sweeping the whole vocabulary to -inf.
    logits_block_size = 8192
    _bias_kernel[(num_reqs,)](
        logits,
        logits.stride(0),
        vocab_size,
        idx_mapping,
        num_allowed_token_ids,
        allowed_token_ids,
        allowed_token_ids.stride(0),
        num_logit_bias,
        logit_bias_token_ids,
        logit_bias_token_ids.stride(0),
        logit_bias,
        logit_bias.stride(0),
        pos,
        min_lens,
        num_stop_token_ids,
        stop_token_ids,
        stop_token_ids.stride(0),
        BLOCK_SIZE=block_size,
        LOGITS_BLOCK_SIZE=logits_block_size,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/sample/logit_bias.py",
"license": "Apache License 2.0",
"lines": 253,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_triton_moe_no_act_mul.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for MoE with non-gated activations (*_no_mul).
These tests verify that MoE layers work correctly with activations like
silu_no_mul, gelu_no_mul, relu2_no_mul where the activation output dimension
equals N (not N // 2 like gated activations).
"""
import pytest
import torch
from tests.kernels.moe.utils import make_dummy_moe_config
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.model_executor.layers.fused_moe.config import (
FUSED_MOE_UNQUANTIZED_CONFIG,
)
from vllm.model_executor.layers.fused_moe.fused_moe import TritonExperts
from vllm.platforms import current_platform
# Test parameters
M_SIZES = [1, 16, 64]  # number of input rows (tokens)
N_SIZES = [128, 256]  # intermediate (w1 output) dimension
K_SIZES = [64, 128]  # hidden (input/output) dimension
TOPK_VALUES = [1, 2]  # experts selected per token
NUM_EXPERTS = 8
# Activations whose output dim equals N (no gate/up split, unlike SILU/GELU).
NO_MUL_ACTIVATIONS = [
    MoEActivation.SILU_NO_MUL,
    MoEActivation.GELU_NO_MUL,
    MoEActivation.RELU2_NO_MUL,
]
def make_test_tensors(
    m: int,
    n: int,
    k: int,
    num_experts: int,
    topk: int,
    dtype: torch.dtype = torch.bfloat16,
    device: str = "cuda",
):
    """Create test tensors for MoE with non-gated activation.

    For non-gated activations (*_no_mul):
    - w1: (E, N, K) - projects from K to N
    - w2: (E, K, N) - projects from N back to K (note: N, not N // 2)

    Returns (hidden_states, w1, w2, topk_weights, topk_ids).
    """
    hidden_states = torch.randn(m, k, dtype=dtype, device=device)
    # For non-gated: w1 projects K -> N, w2 projects N -> K.
    # Scale weights down to keep activations well-conditioned.
    w1 = 0.1 * torch.randn(num_experts, n, k, dtype=dtype, device=device)
    w2 = 0.1 * torch.randn(num_experts, k, n, dtype=dtype, device=device)
    # Uniform routing weights over the selected experts.
    topk_weights = torch.ones(m, topk, dtype=torch.float32, device=device) / topk
    topk_ids = torch.randint(0, num_experts, (m, topk), device=device)
    return hidden_states, w1, w2, topk_weights, topk_ids
@pytest.mark.skipif(
    not current_platform.has_device_capability(80),
    reason="Requires compute capability >= 8.0",
)
@pytest.mark.parametrize("m", M_SIZES)
@pytest.mark.parametrize("n", N_SIZES)
@pytest.mark.parametrize("k", K_SIZES)
@pytest.mark.parametrize("topk", TOPK_VALUES)
@pytest.mark.parametrize("activation", NO_MUL_ACTIVATIONS)
@torch.inference_mode()
def test_triton_experts_no_mul_activation(
    m: int,
    n: int,
    k: int,
    topk: int,
    activation: MoEActivation,
):
    """End-to-end TritonExperts.apply check for non-gated activations.

    Verifies workspace shapes account for activation_out_dim == N and that
    the kernel produces finite, non-zero output of the expected shape.
    """
    hidden_states, w1, w2, topk_weights, topk_ids = make_test_tensors(
        m, n, k, NUM_EXPERTS, topk
    )
    experts = TritonExperts(
        moe_config=make_dummy_moe_config(),
        quant_config=FUSED_MOE_UNQUANTIZED_CONFIG,
    )
    ws1_shape, ws2_shape, out_shape = experts.workspace_shapes(
        M=m,
        N=n,
        K=k,
        topk=topk,
        global_num_experts=NUM_EXPERTS,
        local_num_experts=NUM_EXPERTS,
        expert_tokens_meta=None,
        activation=activation,
    )
    # Verify workspace shapes are correct for no_mul activation
    # workspace1 should handle activation_out_dim = N (not N//2)
    assert ws1_shape == (m, topk, max(n, k)), (
        f"workspace1 shape mismatch: expected {(m, topk, max(n, k))}, got {ws1_shape}"
    )
    # workspace2 should handle max(N, K) for intermediate_cache1/cache3
    assert ws2_shape == (m, topk, max(n, k)), (
        f"workspace2 shape mismatch: expected {(m, topk, max(n, k))}, got {ws2_shape}"
    )
    assert out_shape == (m, k), (
        f"output shape mismatch: expected {(m, k)}, got {out_shape}"
    )
    # Workspaces are allocated flat; the kernel reshapes them internally.
    workspace1 = torch.empty(
        ws1_shape[0] * ws1_shape[1] * ws1_shape[2],
        dtype=hidden_states.dtype,
        device=hidden_states.device,
    )
    workspace2 = torch.empty(
        ws2_shape[0] * ws2_shape[1] * ws2_shape[2],
        dtype=hidden_states.dtype,
        device=hidden_states.device,
    )
    output = torch.zeros(m, k, dtype=hidden_states.dtype, device=hidden_states.device)
    experts.apply(
        output=output,
        hidden_states=hidden_states,
        w1=w1,
        w2=w2,
        topk_weights=topk_weights,
        topk_ids=topk_ids,
        activation=activation,
        global_num_experts=NUM_EXPERTS,
        expert_map=None,
        a1q_scale=None,
        a2_scale=None,
        workspace13=workspace1,
        workspace2=workspace2,
        expert_tokens_meta=None,
        apply_router_weight_on_input=False,
    )
    # Sanity checks: correct shape, finite values, and non-trivial output.
    assert output.shape == (m, k), f"Expected shape {(m, k)}, got {output.shape}"
    assert not torch.isnan(output).any(), "Output contains NaN"
    assert not torch.isinf(output).any(), "Output contains Inf"
    assert output.abs().sum() > 0, "Output is all zeros"
@pytest.mark.skipif(
    not current_platform.has_device_capability(80),
    reason="Requires compute capability >= 8.0",
)
@torch.inference_mode()
def test_workspace_shapes_no_mul_vs_gated():
    """Test that workspace shapes differ correctly between gated and non-gated.

    For no_mul activations activation_out_dim == N, while for gated ones it
    is N // 2; workspace1's last dim must be max(activation_out_dim, K).
    """
    # NOTE: the redundant function-local re-import of TritonExperts was
    # removed; it is already imported at module level.
    M, N, K, topk = 64, 256, 128, 2
    experts = TritonExperts(
        moe_config=make_dummy_moe_config(),
        quant_config=FUSED_MOE_UNQUANTIZED_CONFIG,
    )
    ws1_no_mul, _, out_no_mul = experts.workspace_shapes(
        M, N, K, topk, 8, 8, None, MoEActivation.SILU_NO_MUL
    )
    ws1_gated, _, out_gated = experts.workspace_shapes(
        M, N, K, topk, 8, 8, None, MoEActivation.SILU
    )
    # For no_mul: activation_out_dim = N
    # For gated: activation_out_dim = N // 2
    # workspace1 should use max(activation_out_dim, K)
    activation_out_dim_no_mul = N
    activation_out_dim_gated = N // 2
    assert ws1_no_mul[2] == max(activation_out_dim_no_mul, K), (
        f"no_mul workspace1 last dim should be max({activation_out_dim_no_mul}, {K})"
    )
    assert ws1_gated[2] == max(activation_out_dim_gated, K), (
        f"gated workspace1 last dim should be max({activation_out_dim_gated}, {K})"
    )
    # Output shapes should be the same
    assert out_no_mul == out_gated == (M, K)
@pytest.mark.skipif(
    not current_platform.has_device_capability(80),
    reason="Requires compute capability >= 8.0",
)
@torch.inference_mode()
def test_adjust_n_for_activation():
    """Test the adjust_N_for_activation method.

    Gated activations halve N (gate/up split); non-gated keep N unchanged.
    """
    # NOTE: the redundant function-local re-import of TritonExperts was
    # removed; it is already imported at module level.
    experts = TritonExperts(
        moe_config=make_dummy_moe_config(),
        quant_config=FUSED_MOE_UNQUANTIZED_CONFIG,
    )
    N = 256
    # Gated activations should return N // 2
    assert experts.adjust_N_for_activation(N, MoEActivation.SILU) == N // 2
    assert experts.adjust_N_for_activation(N, MoEActivation.GELU) == N // 2
    # Non-gated activations should return N
    assert experts.adjust_N_for_activation(N, MoEActivation.SILU_NO_MUL) == N
    assert experts.adjust_N_for_activation(N, MoEActivation.GELU_NO_MUL) == N
    assert experts.adjust_N_for_activation(N, MoEActivation.RELU2_NO_MUL) == N
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_triton_moe_no_act_mul.py",
"license": "Apache License 2.0",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/kanana_v.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable, Mapping, Sequence
from functools import partial
from typing import Annotated, Literal, TypeAlias
import numpy as np
import regex as re
import torch
from einops import rearrange
from PIL import Image
from timm.layers import LayerNorm2d
from timm.layers.pos_embed import resample_abs_pos_embed
from timm.models.regnet import RegStage
from torch import nn
from transformers import BatchFeature
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLVisionConfig
from vllm.config import VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.logger import init_logger
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import ImageSize, MultiModalDataItems
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
PromptUpdate,
)
from vllm.sequence import IntermediateTensors
from vllm.utils.import_utils import resolve_obj_by_qualname
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP
from .qwen2_vl import Qwen2VisionTransformer
from .utils import AutoWeightsLoader, init_vllm_registered_model, maybe_prefix
logger = init_logger(__name__)
class KananaVImagePixelInputs(TensorSchema):
    """
    Dimensions:
        - np: The total number of patches over all images in the batch
        - cps: Number of channels * patch_size * patch_size
        - ni: Number of images
    """

    # Discriminator for the image-input union (single variant today).
    type: Literal["pixel_values"]
    # Flattened patches of all batch images: one row per patch.
    pixel_values: Annotated[
        torch.Tensor,
        TensorShape("np", "cps"),
    ]
    # Per-image (temporal, height, width) grid sizes.
    vision_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("ni", 3),
    ]
# Currently the only supported image-input variant is raw pixel values.
KananaVImageInputs: TypeAlias = KananaVImagePixelInputs
def build_pos_embeds(
    config: Qwen2VLVisionConfig,
    num_input_tokens: int,
    vision_hidden_size: int,
) -> nn.Parameter | None:
    """Build positional embeddings for the visual encoder output.

    Returns a truncated-normal-initialized parameter of shape
    (1, num_input_tokens, vision_hidden_size), or None when `config.pos_emb`
    is disabled.
    """
    if not config.pos_emb:
        return None
    pos_emb = nn.Parameter(torch.zeros(1, num_input_tokens, vision_hidden_size))
    nn.init.trunc_normal_(pos_emb, mean=0.0, std=0.02)
    return pos_emb
def build_mlp(
    depth: int,
    hidden_size: int,
    output_hidden_size: int,
) -> nn.Sequential:
    """Simple SiLU-activated MLP used as a projector readout.

    Layer pattern: Linear(hidden -> out), followed by (depth - 1)
    repetitions of SiLU + Linear(out -> out).
    """
    modules: list[nn.Module] = [nn.Linear(hidden_size, output_hidden_size)]
    for _ in range(depth - 1):
        modules.extend(
            (nn.SiLU(), nn.Linear(output_hidden_size, output_hidden_size))
        )
    return nn.Sequential(*modules)
class PatchMerge(nn.Module):
    """Fold `merge_size x merge_size` patch neighborhoods into channels,
    reducing spatial resolution by `merge_size` per axis."""

    def __init__(self, merge_size: int) -> None:
        super().__init__()
        self.merge_size = merge_size

    def forward(
        self,
        x: torch.Tensor,
        channel_last: bool = False,
    ) -> torch.Tensor:
        """Merge patches by `merge_size x merge_size`."""
        if channel_last:
            # Bring channels to the front: (B, H, W, D) -> (B, D, H, W).
            x = rearrange(x, "B H W D -> B D H W")
        # Unpack to assert a 4D input (H/W themselves are inferred by einops).
        _, _, H, W = x.shape
        return rearrange(
            x,
            "B D (H h2) (W w2) -> B (D h2 w2) H W",
            h2=self.merge_size,
            w2=self.merge_size,
        )
class DynamicCAbstractor(nn.Module):
    """Dynamic C-Abstractor based on RegNet blocks."""

    def __init__(
        self,
        config: Qwen2VLVisionConfig,
        num_input_tokens: int,
    ) -> None:
        """Set up positional embeddings and the RegNet-based abstractor net.

        Args:
            config: Vision config; must also carry `merge_size`, and is read
                for `pos_emb_size`, `encoder_hidden_size`, `hidden_size`,
                `output_hidden_size`, `depth` and `mlp_depth`.
            num_input_tokens: Number of visual tokens; -1 means "use
                `config.pos_emb_size`".
        """
        super().__init__()
        assert hasattr(config, "merge_size"), "merge_size must be provided."
        self.config = config
        self.merge_size = config.merge_size
        self.pos_emb_size = config.pos_emb_size
        if num_input_tokens == -1:
            num_input_tokens = config.pos_emb_size
        self.num_input_tokens = num_input_tokens
        self.pos_emb = build_pos_embeds(
            config, num_input_tokens, config.encoder_hidden_size
        )
        self.build_net()

    def _load_from_state_dict(self, state_dict, *args, **kwargs) -> None:
        """Checkpoint-compat hook: trim an obsolete leading pos-emb entry."""
        if not state_dict:
            return
        if self.pos_emb is not None:
            # Find the abstractor's pos_emb key regardless of its prefix.
            key_re = re.compile(r"[\w,.]*abstractor[\w,.]*pos_emb")
            pos_emb_key = None
            for key in state_dict:
                if key_re.match(key):
                    pos_emb_key = key
                    break
            assert pos_emb_key is not None
            # update old ckpt compatible with current code
            pos_emb = state_dict[pos_emb_key]
            if pos_emb.size(1) == self.pos_emb.size(1) + 1:
                # remove obsolete first pos emb (for cls token originally)
                state_dict[pos_emb_key] = pos_emb[:, 1:]
        super()._load_from_state_dict(state_dict, *args, **kwargs)

    def build_net(self) -> None:
        """Construct the abstractor net and readout MLP.

        With depth > 0 the net is [RegStage, PatchMerge, RegStage]; with
        depth == 0 it degenerates to a single PatchMerge and the readout
        consumes encoder features directly.
        """
        encoder_hidden_size = self.config.encoder_hidden_size
        hidden_size = self.config.hidden_size
        output_hidden_size = self.config.output_hidden_size
        depth = self.config.depth
        mlp_depth = self.config.mlp_depth
        RegBlock = partial(
            RegStage,
            stride=1,
            dilation=1,
            act_layer=nn.SiLU,
            norm_layer=LayerNorm2d,
        )
        s1 = RegBlock(
            depth,
            encoder_hidden_size,
            hidden_size,
        )
        sampler = PatchMerge(merge_size=self.merge_size)
        # PatchMerge multiplies channels by merge_size**2, hence s2's input dim.
        s2 = RegBlock(
            depth,
            self.merge_size**2 * hidden_size,
            hidden_size,
        )
        if depth:
            self.net = nn.ModuleList([s1, sampler, s2])
            self.readout = build_mlp(mlp_depth, hidden_size, output_hidden_size)
        else:
            self.net = sampler
            self.readout = build_mlp(mlp_depth, encoder_hidden_size, output_hidden_size)

    def forward(
        self,
        flattened_visual_embeds: torch.Tensor,
        grid_thw: torch.Tensor,
        **unused_kwargs: object,
    ) -> BaseModelOutput:
        """Apply the dynamic abstractor over flattened visual embeddings.

        Splits the flat token sequence per image (T * H * W tokens each),
        processes each image independently, and re-concatenates the results.
        """
        n_token_loc = torch.prod(grid_thw, dim=1)
        split_visual_embeds = torch.split(flattened_visual_embeds, n_token_loc.tolist())
        flattened_visual_embeds = []
        for _visual_embeds, _grid_thw in zip(split_visual_embeds, grid_thw):
            T, H, W = _grid_thw
            assert T == 1, "T must be 1. Video is not supported yet."
            reshaped_visual_embeds = rearrange(
                _visual_embeds, "(t h w) d -> 1 t h w d", t=T, h=H, w=W
            )
            # remove temporal dim
            reshaped_visual_embeds = reshaped_visual_embeds[:, 0]
            if self.pos_emb is not None:
                # interpolate pos emb and add to visual embeds
                _local_pos_emb = resample_abs_pos_embed(
                    posemb=self.pos_emb,
                    old_size=tuple([int(self.pos_emb_size**0.5)] * 2),
                    new_size=(H, W),
                    num_prefix_tokens=0,
                )
                _local_pos_emb = rearrange(
                    _local_pos_emb,
                    "1 (h w) d -> 1 h w d",
                    h=H,
                    w=W,
                )
                reshaped_visual_embeds = reshaped_visual_embeds + _local_pos_emb
            reshaped_visual_embeds = self._forward(
                reshaped_visual_embeds,
                input_size=(H, W),
            )
            flattened_visual_embeds.append(reshaped_visual_embeds)
        reshaped_visual_embeds = torch.cat(flattened_visual_embeds, dim=0)
        return BaseModelOutput(last_hidden_state=reshaped_visual_embeds)

    def _forward(
        self,
        x: torch.Tensor,
        input_size: tuple[int, int],
    ) -> torch.Tensor:
        """Run one image's (1, h, w, d) embeddings through net + readout,
        returning a flattened (h' * w', output_hidden_size) token sequence."""
        h, w = input_size
        x = rearrange(x, "1 h w d -> 1 d h w", h=h, w=w)
        if self.config.depth:
            x = self.net[0](x)
            x = self.net[1](x)
            x = self.net[2](x)
        else:
            # When depth=0, self.net is a single PatchMerge module
            x = self.net(x)
        x = rearrange(x, "1 d h w -> (h w) d")
        x = self.readout(x)
        return x
class CustomQwen2VLVE(Qwen2VisionTransformer):
    """Thin wrapper around the Qwen2-VL used as a vision encoder.

    This mirrors the original HF-based vision encoder used in Kanana-V, but
    reuses vLLM's optimized `Qwen2VisionTransformer` building blocks.
    """

    def __init__(self, config: Qwen2VLVisionConfig) -> None:
        """Initialize the base vision transformer without its patch merger."""
        super().__init__(
            vision_config=config,
            norm_eps=getattr(config, "rms_norm_eps", 1e-6),
            quant_config=None,
            prefix="",
        )
        # Kanana-V uses its own projector/abstractor instead of the Qwen2
        # built-in patch merger, so we drop the merger module to keep the
        # parameter set compatible with the original checkpoint.
        if hasattr(self, "merger"):
            del self.merger

    @classmethod
    def _from_config(cls, config: Qwen2VLVisionConfig) -> "CustomQwen2VLVE":
        """Drop-in replacement for the HF `_from_config` constructor."""
        return cls(config)

    def forward(
        self,
        pixel_values: torch.Tensor,
        grid_thw: torch.Tensor,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
    ) -> tuple | BaseModelOutput:
        """Run the vision transformer and optionally return intermediate states.

        Unlike the base `Qwen2VisionTransformer`, this wrapper exposes the
        pre-merger patch-level representations and a HF-style `BaseModelOutput`
        so that the existing projector / abstractor code can be reused.
        """
        assert return_dict, "Only return_dict=True is supported."
        # Patchify
        x = pixel_values.to(device=self.device, dtype=self.dtype)
        x = self.patch_embed(x)  # (num_patches, embed_dim)
        # Prepare grid and rotary embeddings – mirror base implementation.
        if isinstance(grid_thw, list):
            grid_thw_list = grid_thw
            grid_thw_np = np.array(grid_thw, dtype=np.int32)
        else:
            grid_thw_list = grid_thw.tolist()
            grid_thw_np = grid_thw.cpu().numpy()
        rotary_pos_emb_cos, rotary_pos_emb_sin = self.rot_pos_emb(grid_thw_list)
        # Compute cu_seqlens in numpy then move to device, same as base model.
        cu_seqlens = np.repeat(
            grid_thw_np[:, 1] * grid_thw_np[:, 2],
            grid_thw_np[:, 0],
        ).cumsum(axis=0, dtype=np.int32)
        cu_seqlens = np.concatenate([np.zeros(1, dtype=np.int32), cu_seqlens])
        cu_seqlens = torch.from_numpy(cu_seqlens).to(
            self.device,
            non_blocking=True,
        )
        # Shape to (S, B, D) with batch dimension 1 as expected by the blocks.
        x = x.unsqueeze(1)
        # Pre-compute seqlens for attention backend.
        max_seqlen = self.compute_attn_mask_seqlen(cu_seqlens)
        encoder_states = () if output_hidden_states else None
        for blk in self.blocks:
            if output_hidden_states:
                # Store patch-level states (S, D).
                encoder_states = encoder_states + (x.squeeze(1),)
            x = blk(
                x,
                cu_seqlens=cu_seqlens,
                rotary_pos_emb_cos=rotary_pos_emb_cos,
                rotary_pos_emb_sin=rotary_pos_emb_sin,
                max_seqlen=max_seqlen,
            )
        # Final hidden state at patch level (S, D).
        hidden_states = x.squeeze(1)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            # NOTE(review): unreachable unless assertions are disabled (-O);
            # the assert above already requires return_dict to be truthy.
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_states,
        )

    def get_num_tokens(self) -> int:
        # Not used in the current Kanana-V pipeline, kept for API compatibility.
        return -1
class KananaVProcessingInfo(BaseProcessingInfo):
    """Processing metadata for Kanana-V multimodal (image) inputs."""

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # No hard cap on the number of images per prompt.
        return {"image": None}

    def get_image_size_with_most_features(self) -> ImageSize:
        """Image size that yields the maximum number of vision tokens.

        An oversized 9999x9999 request lets `smart_resize` (bounded by the
        processor's `max_pixels`) pick the largest supported size.
        """
        max_image_size, _ = self._get_vision_info(
            image_width=9999,
            image_height=9999,
            num_frames=1,
        )
        return max_image_size

    def _get_vision_info(
        self,
        *,
        image_width: int,
        image_height: int,
        num_frames: int = 1,
        do_resize: bool = True,
    ) -> tuple[ImageSize, int]:
        """Return the preprocessed image size and resulting vision-token count."""
        image_processor = self.ctx.get_hf_processor().image_processor
        # Resolve `smart_resize` from the image processor's own module.
        smart_resize = resolve_obj_by_qualname(
            f"{type(image_processor).__module__}.smart_resize"
        )
        hf_config = self.get_hf_config()
        vision_config = hf_config.vision_config
        patch_size = vision_config.patch_size
        merge_size = vision_config.spatial_merge_size
        temporal_patch_size = vision_config.temporal_patch_size
        if do_resize:
            resized_height, resized_width = smart_resize(
                height=image_height,
                width=image_width,
                factor=patch_size * merge_size,
                min_pixels=image_processor.min_pixels,
                max_pixels=image_processor.max_pixels,
            )
            preprocessed_size = ImageSize(width=resized_width, height=resized_height)
        else:
            preprocessed_size = ImageSize(width=image_width, height=image_height)
        # NOTE: Frames are padded to be divisible by `temporal_patch_size`
        # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py#L294
        padded_num_frames = num_frames + num_frames % temporal_patch_size
        grid_t = max(padded_num_frames // temporal_patch_size, 1)
        grid_h = preprocessed_size.height // patch_size
        grid_w = preprocessed_size.width // patch_size
        num_patches = grid_t * grid_h * grid_w
        # Each merge_size x merge_size group of patches becomes one token.
        num_vision_tokens = num_patches // (merge_size**2)
        return preprocessed_size, num_vision_tokens

    def get_mm_max_tokens_per_item(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> Mapping[str, int]:
        """Worst-case vision-token count per image (seq_len/mm_counts unused)."""
        target_width, target_height = self.get_image_size_with_most_features()
        num_vision_tokens = self._get_vision_info(
            image_width=target_width,
            image_height=target_height,
            num_frames=1,
        )[1]
        return {"image": num_vision_tokens}
class KananaVDummyInputsBuilder(BaseDummyInputsBuilder[KananaVProcessingInfo]):
    """Builds dummy text and image inputs used for memory profiling."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        """One `<image>` placeholder per requested image."""
        return "<image>" * mm_counts.get("image", 0)

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        """Create oversized dummy images (9999x9999) for each image slot."""
        num_images = mm_counts.get("image", 0)
        dummy_images = self._get_dummy_images(
            width=9999, height=9999, num_images=num_images
        )
        return {"image": dummy_images}
class KananaVMultiModalProcessor(BaseMultiModalProcessor[KananaVProcessingInfo]):
    """vLLM multimodal processor for Kanana-V (text + image)."""

    @property
    def media_token_id(self) -> int:
        # The image placeholder token is taken to be the id immediately after
        # the text model's EOS token — assumption based on this offset only;
        # TODO confirm against the tokenizer/vocab config.
        return self.info.get_hf_config().text_config.eos_token_id + 1

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        """Run the underlying HF processor on text and image data."""
        # Text-only input is handled as a special case here.
        if not mm_data or not mm_data.get("images", []):
            prompt_ids = self.info.get_tokenizer().encode(prompt)
            return BatchFeature(dict(input_ids=[prompt_ids]), tensor_type="pt")
        # Images
        image_inputs = mm_data.get("images", [])
        pixel_sizes = []
        # Convert raw arrays to PIL images if the caller passed ndarrays.
        if not isinstance(image_inputs[0], Image.Image):
            image_inputs = [Image.fromarray(image) for image in image_inputs]
        image_processor = self.info.get_hf_processor().image_processor
        processor_output = [image_processor(image) for image in image_inputs]
        pixel_values = [o["pixel_values"] for o in processor_output]
        image_meta = [o["image_meta"] for o in processor_output]
        # list of dict -> dict of list
        image_meta = {k: [d[k] for d in image_meta] for k in image_meta[0]}
        # Record each image's patch count so the flattened pixel tensor can
        # be split back per image in _get_mm_fields_config.
        for pixel_value in pixel_values:
            pixel_sizes.append(pixel_value.shape[0])
        # flattened pixel_values for single example (already includes batch dim)
        pixel_values = torch.concat(pixel_values, dim=0)
        tokenizer = self.info.get_tokenizer()
        media_token = tokenizer.convert_ids_to_tokens([self.media_token_id])[0]
        prompt_replaced = prompt.replace("<image>", media_token)
        input_ids = tokenizer.encode(prompt_replaced)
        input_ids = torch.tensor(input_ids)
        # Ensure HF output is consistent with vLLM prompt-update expectations:
        # if the HF tokenizer emits exactly 1 placeholder token per image, expand
        # it to `T*H*W` placeholder tokens per image so placeholder detection works.
        num_images = len(image_inputs)
        image_token_thw = torch.tensor(image_meta["image_token_thw"])
        per_image_token_counts = image_token_thw.prod(dim=1).tolist()
        expected_total = int(sum(int(x) for x in per_image_token_counts))
        n_placeholders = int((input_ids == self.media_token_id).sum().item())
        if n_placeholders == num_images and expected_total != num_images:
            expanded: list[int] = []
            img_i = 0
            for tok in input_ids.tolist():
                if tok == self.media_token_id and img_i < num_images:
                    # Replace the single placeholder with T*H*W copies for
                    # the next image in order.
                    expanded.extend(
                        [self.media_token_id] * int(per_image_token_counts[img_i])
                    )
                    img_i += 1
                else:
                    expanded.append(tok)
            input_ids = input_ids.new_tensor(expanded)
        combined_outputs = dict(
            # Add batch dimension to input_ids.
            input_ids=input_ids.unsqueeze(0),
            pixel_values=pixel_values,
            vision_grid_thw=torch.tensor(image_meta["vision_grid_thw"]),
            image_token_thw=torch.tensor(image_meta["image_token_thw"]),
            pixel_sizes=torch.tensor(pixel_sizes),
        )
        return BatchFeature(combined_outputs, tensor_type="pt")

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Replace each ``<image>`` with T*H*W media placeholder tokens."""

        def get_replacement(idx: int) -> Sequence[int]:
            # Token count for image `idx` comes from the processor output.
            out_item = out_mm_kwargs["image"][idx]
            image_token_thw = out_item["image_token_thw"].data
            assert isinstance(image_token_thw, torch.Tensor)
            num_tokens = int(image_token_thw.prod().item())
            return [self.media_token_id] * num_tokens

        return [
            PromptReplacement(
                modality="image",
                target="<image>",
                replacement=get_replacement,
            ),
        ]

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        """Describe how each HF output tensor maps onto multimodal items."""
        pixel_sizes = hf_inputs.get("pixel_sizes", torch.empty(0))
        mm_fields_config = dict(
            # pixel_values is flattened across images; split by pixel_sizes.
            pixel_values=MultiModalFieldConfig.flat_from_sizes("image", pixel_sizes),
            vision_grid_thw=MultiModalFieldConfig.batched("image"),
            image_token_thw=MultiModalFieldConfig.batched("image"),
        )
        return mm_fields_config
@MULTIMODAL_REGISTRY.register_processor(
    KananaVMultiModalProcessor,
    info=KananaVProcessingInfo,
    dummy_inputs=KananaVDummyInputsBuilder,
)
class KananaVForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP):
    """Kanana-V: a Qwen2-VL-style vision encoder plus a C-Abstractor
    projector feeding a Llama language model."""

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Return the prompt placeholder string for the given modality."""
        if modality.startswith("image"):
            return "<image>"
        else:
            raise ValueError(f"Unsupported modality: {modality}")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.config = config
        # Vision tower + projector are marked as the "image" tower model.
        with self._mark_tower_model(vllm_config, "image"):
            self.vision_model = CustomQwen2VLVE._from_config(config.vision_config)
            self.abstractor = DynamicCAbstractor(
                config.projector_config,
                num_input_tokens=self.vision_model.get_num_tokens(),
            )
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.text_config,
                prefix=maybe_prefix(prefix, "model"),
                architectures=["LlamaForCausalLM"],
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def _parse_and_validate_image_input(
        self, **kwargs: object
    ) -> KananaVImageInputs | None:
        """Extract and normalize image tensors from the model kwargs.

        Returns None when the request carries no image data; raises
        ValueError on shape/consistency problems.
        """
        pixel_values = kwargs.pop("pixel_values", None)
        vision_grid_thw = kwargs.pop("vision_grid_thw", None)
        if pixel_values is None:
            return None
        if vision_grid_thw is None:
            raise ValueError(
                "vision_grid_thw is required when pixel_values is provided"
            )
        # Normalize pixel_values to 2D tensor (num_patches, channels*patch*patch)
        if isinstance(pixel_values, torch.Tensor):
            if pixel_values.ndim == 2:
                pass  # Already in expected shape
            elif pixel_values.ndim == 3:
                pixel_values = pixel_values.flatten(0, 1)
            else:
                raise ValueError(
                    f"pixel_values should be 2D or batched 3D tensor. "
                    f"Got ndim: {pixel_values.ndim} "
                    f"(shape={pixel_values.shape})"
                )
        else:
            # List of per-image tensors -> single flattened tensor.
            pixel_values = torch.concat(pixel_values)
        # Normalize vision_grid_thw to 2D tensor (num_images, 3)
        if isinstance(vision_grid_thw, torch.Tensor):
            if vision_grid_thw.ndim == 3:
                vision_grid_thw = vision_grid_thw.flatten(0, 1)
        else:
            vision_grid_thw = torch.concat(vision_grid_thw)
        return KananaVImagePixelInputs(
            type="pixel_values",
            pixel_values=pixel_values,
            vision_grid_thw=vision_grid_thw,
        )

    def _process_image_input(
        self, image_input: KananaVImageInputs
    ) -> tuple[torch.Tensor, ...]:
        """Encode pixels and return one embedding tensor per image."""
        pixel_values = image_input["pixel_values"]
        vision_grid_thw = image_input["vision_grid_thw"]
        image_metas = {"vision_grid_thw": vision_grid_thw}
        visual_embeds = self.forward_and_project_vision(pixel_values, image_metas)
        merge_size = self.abstractor.merge_size
        batch_size = vision_grid_thw.size(0)
        multi_modal_embeddings: tuple[torch.Tensor, ...] = ()
        sample_index = 0
        # Slice the flattened projector output back into per-image chunks;
        # image i contributes t * (h // merge) * (w // merge) tokens.
        for i in range(batch_size):
            t, h, w = (
                vision_grid_thw[i][0],
                vision_grid_thw[i][1] // merge_size,
                vision_grid_thw[i][2] // merge_size,
            )
            num_tokens = t * h * w
            visual_embed = visual_embeds[sample_index : sample_index + num_tokens]
            multi_modal_embeddings += (visual_embed,)
            sample_index += num_tokens
        return multi_modal_embeddings

    def _get_visual_feature_at(
        self,
        v_output: Sequence[torch.Tensor],
        layer_index: int | Sequence[int],
    ) -> torch.Tensor:
        """Select the hidden state(s) at the configured vision layer index
        (single layer) or indices (multi-scale)."""
        if isinstance(layer_index, (list, tuple)):
            visual_features = torch.stack(v_output, dim=1)[
                :, layer_index
            ]  # [B, n_scales, L, dim]
        else:
            visual_features = v_output[layer_index]  # [B, L, dim]
        return visual_features

    def forward_vision(
        self,
        pixel_values: torch.Tensor,
        image_metas: dict | None = None,
    ) -> torch.Tensor:
        """Run the vision tower and pick the configured hidden-state layer.

        NOTE(review): `image_metas` must contain "vision_grid_thw" despite
        the Optional annotation — callers always pass it.
        """
        vision_model_args = {
            "pixel_values": pixel_values,
            "return_dict": True,
            "output_hidden_states": True,
            "grid_thw": image_metas["vision_grid_thw"],
        }
        v_outputs = self.vision_model(**vision_model_args)
        layer_index = self.config.projector_config.feature_layer_index
        visual_features = self._get_visual_feature_at(
            v_outputs.hidden_states, layer_index
        )
        return visual_features

    def forward_projector(
        self,
        visual_features: torch.Tensor,
        image_metas: dict | None = None,
    ) -> torch.Tensor:
        """Project vision features into the language embedding space."""
        visual_embeds = self.abstractor(
            visual_features,
            grid_thw=image_metas["vision_grid_thw"],
        )["last_hidden_state"]
        return visual_embeds

    def forward_and_project_vision(
        self,
        pixel_values: torch.Tensor,
        image_metas: dict | None = None,
    ) -> torch.Tensor:
        """Convenience wrapper: vision tower followed by the projector."""
        assert pixel_values is not None
        visual_features = self.forward_vision(pixel_values, image_metas=image_metas)
        visual_embeds = self.forward_projector(visual_features, image_metas=image_metas)
        return visual_embeds

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
        """Return per-image embeddings, or [] when no image was provided."""
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return []
        return self._process_image_input(image_input)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs,
    ):
        """Delegate to the language model (multimodal embeddings are merged
        into `inputs_embeds` upstream by the vLLM runner)."""
        # On non-first PP ranks the hidden states arrive via
        # intermediate_tensors, so local embeddings must be discarded.
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Compute vocabulary logits via the language model head."""
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights; returns the set of loaded param names."""
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/kanana_v.py",
"license": "Apache License 2.0",
"lines": 644,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/exaone_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only K-EXAONE-236B-A22B model compatible with HuggingFace weights."""
import typing
from collections.abc import Callable, Iterable
from itertools import islice
import torch
from torch import nn
from transformers import PretrainedConfig
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig, get_current_vllm_config
from vllm.distributed import (
get_ep_group,
get_pp_group,
get_tensor_model_parallel_world_size,
)
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import ReplicatedLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
DEFAULT_VOCAB_PADDING_SIZE,
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.sequence import IntermediateTensors
from .exaone4 import Exaone4Attention as ExaoneMoeAttention
from .exaone4 import Exaone4GatedMLP as ExaoneMoeGatedMLP
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (
AutoWeightsLoader,
PPMissingLayer,
extract_layer_index,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
class ExaoneMoe(nn.Module):
    """Mixture-of-Experts FFN block: a replicated sigmoid-scored router,
    fused routed experts, and optional shared experts."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        enable_eplb: bool = False,
    ):
        super().__init__()
        self.tp_size = get_tensor_model_parallel_world_size()
        self.routed_scaling_factor = config.routed_scaling_factor
        self.ep_group = get_ep_group().device_group
        self.ep_rank = self.ep_group.rank()
        self.ep_size = self.ep_group.size()
        self.n_routed_experts = config.num_experts
        # Each TP rank must own at least one expert shard.
        if self.tp_size > config.num_experts:
            raise ValueError(
                f"Tensor parallel size {self.tp_size} is greater than "
                f"the number of experts {config.num_experts}."
            )
        # Router: never quantized, replicated on every rank.
        self.gate = ReplicatedLinear(
            config.hidden_size,
            config.num_experts,
            bias=False,
            quant_config=None,
            prefix=f"{prefix}.gate",
        )
        # Per-expert score-correction bias (loaded from the checkpoint).
        self.e_score_correction_bias = nn.Parameter(
            torch.empty(config.num_experts, dtype=torch.float32)
        )
        # Load balancing settings.
        vllm_config = get_current_vllm_config()
        eplb_config = vllm_config.parallel_config.eplb_config
        self.enable_eplb = enable_eplb
        self.n_logical_experts = self.n_routed_experts
        # Normalize an unset redundant-expert count to 0.
        eplb_config.num_redundant_experts = (
            eplb_config.num_redundant_experts
            if eplb_config.num_redundant_experts is not None
            else 0
        )
        self.n_redundant_experts = eplb_config.num_redundant_experts
        self.n_physical_experts = self.n_logical_experts + self.n_redundant_experts
        self.n_local_physical_experts = self.n_physical_experts // self.ep_size
        self.physical_expert_start = self.ep_rank * self.n_local_physical_experts
        self.physical_expert_end = (
            self.physical_expert_start + self.n_local_physical_experts
        )
        self.experts = FusedMoE(
            num_experts=self.n_routed_experts,
            top_k=config.num_experts_per_tok,
            hidden_size=config.hidden_size,
            intermediate_size=config.moe_intermediate_size,
            # Reduction is deferred so shared-expert output can be added first.
            reduce_results=False,
            renormalize=config.norm_topk_prob,
            quant_config=quant_config,
            use_grouped_topk=True,
            num_expert_group=config.n_group,
            topk_group=config.topk_group,
            prefix=f"{prefix}.experts",
            scoring_func="sigmoid",
            routed_scaling_factor=self.routed_scaling_factor,
            e_score_correction_bias=self.e_score_correction_bias,
            enable_eplb=self.enable_eplb,
            num_redundant_experts=self.n_redundant_experts,
        )
        if getattr(config, "num_shared_experts", 0) > 0:
            intermediate_size = config.moe_intermediate_size * config.num_shared_experts
            self.shared_experts = ExaoneMoeGatedMLP(
                hidden_size=config.hidden_size,
                intermediate_size=intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                reduce_results=self.experts.must_reduce_shared_expert_outputs(),
                prefix=f"{prefix}.shared_experts",
            )
        else:
            self.shared_experts = None

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Route tokens through the experts; output keeps the input shape."""
        # NOTE: hidden_states can have either 1D or 2D shape.
        orig_shape = hidden_states.shape
        hidden_dim = hidden_states.shape[-1]
        hidden_states = hidden_states.view(-1, hidden_dim)
        # router_logits: (num_tokens, n_experts)
        router_logits, _ = self.gate(hidden_states)
        final_hidden_states = self.experts(
            hidden_states=hidden_states, router_logits=router_logits
        )
        if self.shared_experts is not None:
            shared_output = self.shared_experts(hidden_states)
            final_hidden_states = final_hidden_states + shared_output
        if self.tp_size > 1:
            final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel(  # noqa E501
                final_hidden_states
            )
        return final_hidden_states.view(orig_shape)
class ExaoneMoeDecoderLayer(nn.Module):
    """One ExaoneMoe transformer decoder layer: self-attention followed by
    either a MoE block or a dense gated MLP, with pre-norm residuals.

    Args:
        config: HF model config for ExaoneMoe.
        cache_config: KV-cache configuration (optional).
        quant_config: quantization configuration (optional).
        mtp_layer: True when this layer belongs to the MTP head; MTP layers
            always use the dense MLP, never the MoE block.
        prefix: parameter-name prefix; its trailing index selects the layer.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        # Fixed: was annotated `bool` but defaulted to None; False has
        # identical truthiness and matches the annotation.
        mtp_layer: bool = False,
        prefix: str = "",
    ) -> None:
        super().__init__()
        layer_idx = extract_layer_index(prefix)
        self.hidden_size = config.hidden_size
        max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
        # Support abacusai/Smaug-72B-v0.1 with attention_bias
        # Support internlm/internlm-7b with bias
        attention_bias = getattr(config, "attention_bias", False) or getattr(
            config, "bias", False
        )
        self.self_attn = ExaoneMoeAttention(
            config=config,
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=getattr(
                config, "num_key_value_heads", config.num_attention_heads
            ),
            max_position_embeddings=max_position_embeddings,
            quant_config=quant_config,
            bias=attention_bias,
            cache_config=cache_config,
            prefix=f"{prefix}.self_attn",
        )
        # MoE only on layers flagged in the config, and never in the MTP head.
        if config.is_moe_layer[layer_idx] and not mtp_layer:
            self.mlp = ExaoneMoe(
                config=config, quant_config=quant_config, prefix=f"{prefix}.mlp"
            )
        else:
            self.mlp = ExaoneMoeGatedMLP(
                hidden_size=self.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                bias=getattr(config, "mlp_bias", False),
                prefix=f"{prefix}.mlp",
            )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run one decoder layer; returns (hidden_states, residual) for the
        fused-residual RMSNorm pattern used across vLLM models."""
        if residual is None:
            # First layer of the (sub)stack: seed the residual stream.
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        # Self Attention
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual
@support_torch_compile
class ExaoneMoeModel(nn.Module):
    """ExaoneMoe transformer backbone with pipeline-parallel support."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        lora_config = vllm_config.lora_config
        self.num_redundant_experts = (
            vllm_config.parallel_config.eplb_config.num_redundant_experts
        )
        self.config = config
        self.quant_config = quant_config
        # Extra vocab slots reserved for LoRA adapters, if enabled.
        lora_vocab = (
            (lora_config.lora_extra_vocab_size * (lora_config.max_loras or 1))
            if lora_config
            else 0
        )
        self.vocab_size = config.vocab_size + lora_vocab
        # Embeddings live on the first PP rank; the last rank also needs them
        # when the lm_head is tied to the input embeddings.
        if get_pp_group().is_first_rank or (
            config.tie_word_embeddings and get_pp_group().is_last_rank
        ):
            self.embed_tokens = VocabParallelEmbedding(
                self.vocab_size,
                config.hidden_size,
                org_num_embeddings=config.vocab_size,
                quant_config=quant_config,
            )
        else:
            self.embed_tokens = PPMissingLayer()
        # make_layers materializes only this rank's slice of the stack.
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: ExaoneMoeDecoderLayer(
                config=config,
                cache_config=cache_config,
                quant_config=quant_config,
                prefix=prefix,
            ),
            prefix=f"{prefix}.layers",
        )
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        else:
            self.norm = PPMissingLayer()
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up input token embeddings."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run this rank's layer slice; non-last PP ranks return
        IntermediateTensors for the next rank instead of hidden states."""
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        for layer in islice(self.layers, self.start_layer, self.end_layer):
            hidden_states, residual = layer(
                positions,
                hidden_states,
                residual,
            )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        return FusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.num_experts,
            num_redundant_experts=self.num_redundant_experts,
        )

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, handling stacked (qkv/gate_up), expert,
        and plain parameters; returns the set of loaded param names."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            (".qkv_proj", ".q_proj", "q"),
            (".qkv_proj", ".k_proj", "k"),
            (".qkv_proj", ".v_proj", "v"),
            (".gate_up_proj", ".gate_proj", 0),
            (".gate_up_proj", ".up_proj", 1),
        ]
        # Skip loading extra parameters for GPTQ/modelopt models.
        ignore_suffixes = (
            ".bias",
            "_bias",
            ".k_scale",
            "_k_scale",
            ".v_scale",
            "_v_scale",
            ".weight_scale",
            "_weight_scale",
            ".input_scale",
            "_input_scale",
        )
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        expert_params_mapping = self.get_expert_mapping()
        for name, loaded_weight in weights:
            # MTP weights are loaded by the separate MTP module.
            if name.startswith("mtp."):
                continue
            if "rotary_emb.inv_freq" in name:
                continue
            if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name:
                # Models trained using ColossalAI may include these tensors in
                # the checkpoint. Skip them.
                continue
            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(name)
            ):
                # Loading kv cache quantization scales
                param = params_dict[scale_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                loaded_weight = (
                    loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
                )
                weight_loader(param, loaded_weight)
                loaded_params.add(scale_name)
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                # Expert weights are handled by expert_params_mapping below.
                if "mlp.experts" in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                if name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                is_expert_weight = False
                for mapping in expert_params_mapping:
                    param_name, weight_name, expert_id, shard_id = mapping
                    if weight_name not in name:
                        continue
                    # Anyway, this is an expert weight and should not be
                    # attempted to load as other weights later
                    is_expert_weight = True
                    # Do not modify `name` since the loop may continue here
                    # Instead, create a new variable
                    name_mapped = name.replace(weight_name, param_name)
                    if is_pp_missing_parameter(name_mapped, self):
                        continue
                    # Skip loading extra parameters for GPTQ/modelopt models.
                    if (
                        name_mapped.endswith(ignore_suffixes)
                        and name_mapped not in params_dict
                    ):
                        continue
                    param = params_dict[name_mapped]
                    # We should ask the weight loader to return success or not
                    # here since otherwise we may skip experts with other
                    # available replicas.
                    weight_loader = typing.cast(
                        Callable[..., bool], param.weight_loader
                    )
                    success = weight_loader(
                        param,
                        loaded_weight,
                        name_mapped,
                        shard_id=shard_id,
                        expert_id=expert_id,
                        return_success=True,
                    )
                    if success:
                        name = name_mapped
                        break
                else:
                    if is_expert_weight:
                        # Expert weight with no local replica on this rank.
                        continue
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    # Skip loading extra parameters for GPTQ/modelopt models.
                    if name.endswith(ignore_suffixes) and name not in params_dict:
                        continue
                    # Remapping the name of FP8 kv-scale.
                    name = maybe_remap_kv_scale_name(name, params_dict)
                    if name is None:
                        continue
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class ExaoneMoeForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
    """Causal-LM wrapper around ExaoneMoeModel with LoRA and PP support."""

    # Maps fused parameters to the checkpoint shards they are built from.
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    # LoRA specific attributes
    embedding_modules = {
        "embed_tokens": "input_embeddings",
        "lm_head": "output_embeddings",
    }
    embedding_padding_modules = ["lm_head"]

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config.get_text_config()
        quant_config = vllm_config.quant_config
        lora_config = vllm_config.lora_config
        self.config = config
        self.lora_config = lora_config
        self.quant_config = quant_config
        self.model = ExaoneMoeModel(
            vllm_config=vllm_config,
            prefix=maybe_prefix(prefix, "model"),
        )
        # The LM head (and logits processor) exist only on the last PP rank.
        if get_pp_group().is_last_rank:
            self.unpadded_vocab_size = config.vocab_size
            if lora_config:
                self.unpadded_vocab_size += lora_config.lora_extra_vocab_size
            self.lm_head = ParallelLMHead(
                self.unpadded_vocab_size,
                config.hidden_size,
                org_num_embeddings=config.vocab_size,
                padding_size=DEFAULT_VOCAB_PADDING_SIZE
                # We need bigger padding if using lora for kernel
                # compatibility
                if not lora_config
                else lora_config.lora_vocab_padding_size,
                quant_config=quant_config,
            )
            if config.tie_word_embeddings:
                # Share weights with the input embedding table.
                self.lm_head.weight = self.model.embed_tokens.weight
            logit_scale = getattr(config, "logit_scale", 1.0)
            self.logits_processor = LogitsProcessor(
                self.unpadded_vocab_size, config.vocab_size, logit_scale
            )
        else:
            self.lm_head = PPMissingLayer()
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up input token embeddings."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Delegate to the backbone model."""
        model_output = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return model_output

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Compute vocabulary logits (last PP rank only)."""
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(
            self,
            # With tie_word_embeddings, we can skip lm_head.weight
            # The weight might appear unnecessarily in the files if the model is
            # processed with quantization, LoRA, fine-tuning, etc.
            skip_prefixes=(
                ["lm_head.", "mtp."] if self.config.tie_word_embeddings else ["mtp."]
            ),
        )
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/exaone_moe.py",
"license": "Apache License 2.0",
"lines": 516,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/exaone_moe_mtp.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inference-only ExaoneMoe MTP model."""
from collections.abc import Iterable
import torch
from torch import nn
from vllm.compilation.decorators import support_torch_compile
from vllm.config import VllmConfig
from vllm.distributed.parallel_state import get_pp_group
from vllm.logger import init_logger
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import ColumnParallelLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.exaone_moe import ExaoneMoeDecoderLayer
from vllm.sequence import IntermediateTensors
from .utils import (
AutoWeightsLoader,
is_pp_missing_parameter,
maybe_prefix,
)
logger = init_logger(__name__)
KVCache = tuple[torch.Tensor, torch.Tensor]
@support_torch_compile
class ExaoneMoeMultiTokenPredictor(nn.Module):
    """MTP backbone: fuses the previous hidden state with the next token's
    embedding, then runs one (of possibly several) extra decoder layers."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        model_config = vllm_config.model_config
        quant_config = vllm_config.quant_config
        lora_config = vllm_config.lora_config
        config = model_config.hf_config
        self.config = config
        # Extra vocab slots reserved for LoRA adapters, if enabled.
        lora_vocab = (
            (lora_config.lora_extra_vocab_size * (lora_config.max_loras or 1))
            if lora_config
            else 0
        )
        self.vocab_size = config.vocab_size + lora_vocab
        self.org_vocab_size = config.vocab_size
        # MTP layers are numbered after the main model's layers.
        self.mtp_start_layer_idx = config.num_hidden_layers
        self.num_mtp_layers = getattr(config, "num_nextn_predict_layers", 1)
        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
        )
        # Projects cat(embedding, hidden) (2*H) back down to hidden size H.
        self.fc = ColumnParallelLinear(
            self.config.hidden_size * 2,
            self.config.hidden_size,
            gather_output=True,
            bias=False,
            return_bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.fc",
        )
        # mtp_layer=True forces the dense MLP (no MoE) in these layers.
        self.layers = nn.ModuleList(
            ExaoneMoeDecoderLayer(
                vllm_config.model_config.hf_config,
                quant_config=quant_config,
                prefix=f"{prefix}.layers.{idx}",
                mtp_layer=True,
            )
            for idx in range(self.num_mtp_layers)
        )
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Separate pre-fusion norms for the hidden-state and embedding inputs.
        self.pre_fc_norm_hidden = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.pre_fc_norm_embedding = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up input token embeddings."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Predict hidden states for the next speculative token position."""
        if get_pp_group().is_first_rank:
            if inputs_embeds is None:
                inputs_embeds = self.get_input_embeddings(input_ids)
            assert hidden_states.shape[-1] == inputs_embeds.shape[-1]
            # Normalize both inputs, then fuse via concatenation + linear.
            inputs_embeds = self.pre_fc_norm_embedding(inputs_embeds)
            hidden_states = self.pre_fc_norm_hidden(hidden_states)
            hidden_states = torch.cat([inputs_embeds, hidden_states], dim=-1)
            hidden_states = self.fc(hidden_states)
            residual = None
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        # One MTP layer per speculative step, cycling if steps exceed layers.
        current_step_idx = spec_step_idx % self.num_mtp_layers
        hidden_states, residual = self.layers[current_step_idx](
            positions=positions,
            hidden_states=hidden_states,
            residual=residual,
        )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load MTP weights, handling stacked qkv/gate_up shards."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                if "mlp.experts" in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Skip layers on other devices.
                if is_pp_missing_parameter(name, self):
                    continue
                if name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
@support_torch_compile
class ExaoneMoeMTP(nn.Module):
    """Multi-Token-Prediction head for ExaoneMoe speculative decoding."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        config = vllm_config.model_config.hf_config
        # NOTE: plain (non-module) attributes are safe to set before
        # super().__init__(); nn.Module registration happens after it.
        self.vllm_config = vllm_config
        cache_config = vllm_config.cache_config
        assert not cache_config.enable_prefix_caching, (
            "ExaoneMoeMTP currently does not support prefix caching"
        )
        self.quant_config = vllm_config.quant_config
        super().__init__()
        self.config = config
        self.model = ExaoneMoeMultiTokenPredictor(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "mtp")
        )
        self.unpadded_vocab_size = config.vocab_size
        self.lm_head = ParallelLMHead(
            self.unpadded_vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
            # padding_size=DEFAULT_VOCAB_PADDING_SIZE,
            prefix=maybe_prefix(prefix, "lm_head"),
        )
        if config.tie_word_embeddings:
            # Share weights with the MTP input embedding table.
            self.lm_head.weight = self.model.embed_tokens.weight
        self.logits_processor = LogitsProcessor(
            self.unpadded_vocab_size, config.vocab_size
        )

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up input token embeddings."""
        return self.model.get_input_embeddings(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
        **kwargs: object,
    ):
        """Delegate to the MTP predictor backbone."""
        hidden_states = self.model(
            input_ids,
            positions,
            hidden_states,
            intermediate_tensors,
            inputs_embeds,
            spec_step_idx,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor | None:
        """Compute vocabulary logits; `spec_step_idx` is accepted for
        interface compatibility but unused here."""
        return self.logits_processor(self.lm_head, hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load only MTP-prefixed and shared (embedding/lm_head) weights,
        remapping the checkpoint's `mtp.` prefix onto `model.`."""
        shared_weight_names = ["embed_tokens", "lm_head"]

        def remap_weight_names(weights):
            for name, weight in weights:
                if name.startswith("mtp."):
                    name = name.replace("mtp.", "model.")
                elif not any(key in name for key in shared_weight_names):
                    # Backbone weights are loaded by the main model; skip.
                    continue
                yield name, weight

        loader = AutoWeightsLoader(self)
        return loader.load_weights(remap_weight_names(weights))
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/exaone_moe_mtp.py",
"license": "Apache License 2.0",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/routed_experts_capturer.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/sgl-project/sglang/blob/bed301a5acaa9577c9aa706468bdf242f6a43051/python/sglang/srt/layers/moe/routed_experts_capturer.py
from __future__ import annotations
import fcntl
import logging
import os
import tempfile
from collections.abc import Generator
from contextlib import contextmanager
from multiprocessing import shared_memory
from unittest.mock import patch
import numpy as np
import torch
from vllm.config import VllmConfig
from vllm.distributed import get_tensor_model_parallel_rank
from vllm.forward_context import get_forward_context
from vllm.platforms import current_platform
logger = logging.getLogger(__name__)
# Constants
_TMP_DIR = tempfile.gettempdir()
_LOCK_FILE_PREFIX = os.path.join(_TMP_DIR, "vllm_routed_experts")
_BUFFER_PREFIX = "vllm_routed_experts_buffer"
# Global singleton instances
_global_experts_capturer: RoutedExpertsCapturer | None = None
_global_experts_reader: RoutedExpertsReader | None = None
@contextmanager
def _file_lock(lock_file: str, mode: str = "wb+") -> Generator[None, None, None]:
"""Context manager for file-based locking."""
with open(lock_file, mode) as fp:
fcntl.flock(fp, fcntl.LOCK_EX)
try:
yield
finally:
fcntl.flock(fp, fcntl.LOCK_UN)
def _create_or_attach_shared_memory(
    name: str, size: int, lock_file: str
) -> shared_memory.SharedMemory:
    """Create or attach to shared memory with proper locking."""
    # Ensure lock file exists before acquiring lock
    with open(lock_file, "wb"):
        pass
    with _file_lock(lock_file):
        try:
            # First process wins the create; later ones fall through to attach.
            shm = shared_memory.SharedMemory(name=name, create=True, size=size)
        except FileExistsError:
            shm = shared_memory.SharedMemory(name=name, create=False, size=size)
            # NOTE(review): SharedMemory.size can be rounded up to a page
            # multiple on some platforms, which may make this mismatch
            # trigger spuriously -- confirm before relying on it.
            if shm.size != size:
                logger.warning(
                    "Shared memory %s size mismatch; recreating",
                    name,
                )
                shm.close()
                shm.unlink()
                try:
                    shm = shared_memory.SharedMemory(name=name, create=True, size=size)
                    logger.info("Created shared memory %s", name)
                except FileExistsError:
                    # Another process recreated it between unlink and create.
                    shm = shared_memory.SharedMemory(name=name, create=False, size=size)
                    logger.info("Linked to existing shared memory %s", name)
    return shm
class RoutedExpertsCapturer:
    """
    Capturer for routed experts with device and optional shared memory buffer.

    This class captures expert routing decisions during model forward passes
    and optionally stores them in shared memory for cross-process access.
    """

    # NOTE(review): appears unused -- the singleton actually lives in the
    # module-level _global_experts_capturer; confirm and consider removing.
    _instance: RoutedExpertsCapturer | None = None

    def __init__(self) -> None:
        # Device-side staging buffer:
        # (max_num_batched_tokens, num_layers, num_experts_per_tok), int32.
        self._device_buffer: torch.Tensor | None = None
        # Shared-memory segment plus a numpy view over it (TP rank 0 only).
        self._shm: shared_memory.SharedMemory | None = None
        self._host_buffer_view: np.ndarray | None = None
        self._lock_file: str | None = None

    @classmethod
    def create(cls) -> RoutedExpertsCapturer:
        """Create a global singleton instance."""
        global _global_experts_capturer
        if _global_experts_capturer is not None:
            raise RuntimeError("Experts capturer already created.")
        _global_experts_capturer = cls()
        return _global_experts_capturer

    @staticmethod
    def get_instance() -> RoutedExpertsCapturer | None:
        """Get the global singleton instance."""
        return _global_experts_capturer

    def init_buffer(
        self,
        max_num_batched_tokens: int,
        max_num_kv_tokens: int,
        vllm_config: VllmConfig,
    ) -> None:
        """
        Initialize the device buffer and optionally shared memory buffer.

        Args:
            max_num_batched_tokens: Maximum number of tokens in a batch.
            max_num_kv_tokens: Maximum number of KV tokens for shared memory.
            vllm_config: vllm configuration containing layer and expert info.
        """
        if self._device_buffer is not None:
            raise RuntimeError("Device buffer has already been initialized")
        hf_config = vllm_config.model_config.hf_text_config
        num_layers = hf_config.num_hidden_layers
        num_experts_per_tok = hf_config.num_experts_per_tok
        # Initialize device buffer
        self._device_buffer = torch.zeros(
            (max_num_batched_tokens, num_layers, num_experts_per_tok),
            dtype=torch.int32,
            device=current_platform.device_type,
        )
        self.dp_rank = vllm_config.parallel_config.data_parallel_rank
        # Only TP rank 0 creates the host-visible shared-memory buffer.
        if get_tensor_model_parallel_rank() != 0:
            return
        # Initialize shared memory
        shape = (max_num_kv_tokens, num_layers, num_experts_per_tok)
        buffer_size = int(np.prod(shape)) * np.dtype(np.int32).itemsize
        instance_id = vllm_config.instance_id
        # Lock file and segment name are keyed by engine instance and DP
        # rank so concurrent engines do not collide.
        self._lock_file = f"{_LOCK_FILE_PREFIX}_{instance_id}_{self.dp_rank}.lock"
        shm_name = f"{_BUFFER_PREFIX}_{instance_id}_{self.dp_rank}"
        self._shm = _create_or_attach_shared_memory(
            shm_name, buffer_size, self._lock_file
        )
        self._host_buffer_view = np.ndarray(shape, dtype=np.int32, buffer=self._shm.buf)
        self._host_buffer_view.fill(0)
        logger.debug(
            "Created shared memory buffer '%s' with shape %s",
            shm_name,
            shape,
        )

    def capture(self, layer_id: int, topk_ids: torch.Tensor) -> None:
        """
        Capture expert routing decisions for a specific layer.

        Args:
            layer_id: The layer index.
            topk_ids: Tensor of shape (batch_size, num_routed_experts).
        """
        if self._device_buffer is None:
            raise RuntimeError("Buffer not initialized. Call init_buffer() first.")
        ctx = get_forward_context()
        if ctx.dp_metadata is None:  # single dp
            start_loc = 0
            end_loc = topk_ids.shape[0]
            token_num_per_dp = topk_ids.shape[0]
        else:  # multi dp
            # topk_ids covers tokens from every DP rank concatenated; slice
            # out this rank's contiguous segment via the per-rank cumsum.
            token_num_per_dp = ctx.dp_metadata.num_tokens_across_dp_cpu[self.dp_rank]
            cumsum = torch.cumsum(ctx.dp_metadata.num_tokens_across_dp_cpu, dim=0)
            assert cumsum[-1] == topk_ids.shape[0]
            end_loc = cumsum[self.dp_rank]
            start_loc = end_loc - token_num_per_dp
        # Silently ignore layer ids outside the captured layer range.
        if layer_id >= self._device_buffer.shape[1]:
            return
        self._device_buffer[:token_num_per_dp, layer_id, :] = topk_ids[
            start_loc:end_loc, :
        ]

    def clear_buffer(self) -> None:
        """Clear the device buffer."""
        if self._device_buffer is not None:
            self._device_buffer.zero_()

    def save_captured_experts(self, indices: np.ndarray) -> None:
        """
        Save captured experts from device buffer to shared memory.

        Args:
            indices: Array of indices indicating where to store the data.
        """
        # Non-zero TP ranks never own the shared memory (see init_buffer).
        if get_tensor_model_parallel_rank() != 0:
            return
        if self._lock_file is None:
            raise RuntimeError("Shared memory not initialized.")
        if self._host_buffer_view is None:
            return
        if self._device_buffer is None:
            raise RuntimeError("Device buffer not initialized.")
        num_tokens = len(indices)
        # Rows [0, num_tokens) of the device buffer are assumed to line up
        # one-to-one with `indices` -- TODO confirm against the caller.
        data = self._device_buffer[:num_tokens, :, :].cpu().numpy()
        with _file_lock(self._lock_file):
            self._host_buffer_view[indices, :, :] = data

    def cleanup(self) -> None:
        """Explicitly clean up shared memory resources."""
        # The capturer owns the segment, so it both closes and unlinks.
        if self._shm is not None:
            try:
                self._shm.close()
                self._shm.unlink()
            except Exception:
                logger.debug("Exception during cleanup for capturer", exc_info=True)
            finally:
                self._shm = None

    def __del__(self) -> None:
        """Clean up shared memory on destruction."""
        self.cleanup()
class RoutedExpertsReader:
    """
    Reader for routed experts from shared memory.

    This class attaches to shared memory created by RoutedExpertsCapturer
    and reads expert routing decisions.
    """

    # NOTE(review): appears unused -- the singleton actually lives in the
    # module-level _global_experts_reader; confirm and consider removing.
    _instance: RoutedExpertsReader | None = None

    def __init__(self) -> None:
        # Attached shared-memory segment and a numpy view over its buffer.
        self._shm: shared_memory.SharedMemory | None = None
        self._host_buffer_view: np.ndarray | None = None
        self._lock_file: str | None = None

    @classmethod
    def create(cls) -> RoutedExpertsReader:
        """Create a global singleton instance."""
        global _global_experts_reader
        if _global_experts_reader is not None:
            raise RuntimeError("Experts reader already created.")
        _global_experts_reader = cls()
        return _global_experts_reader

    @staticmethod
    def get_instance() -> RoutedExpertsReader | None:
        """Get the global singleton instance (None if not created yet)."""
        if _global_experts_reader is None:
            logger.info("Experts reader not initialized.")
        return _global_experts_reader

    def attach_buffer(
        self,
        max_num_kv_tokens: int,
        vllm_config: VllmConfig,
    ) -> None:
        """
        Attach to an existing shared memory buffer.

        Args:
            max_num_kv_tokens: Maximum number of KV tokens.
            vllm_config: vllm configuration.
        """
        if self._shm is not None:
            logger.warning("Already attached to shared memory buffer.")
            return  # Already attached
        hf_config = vllm_config.model_config.hf_text_config
        # Shape must match RoutedExpertsCapturer.init_buffer().
        shape = (
            max_num_kv_tokens,
            hf_config.num_hidden_layers,
            hf_config.num_experts_per_tok,
        )
        self.dp_rank = vllm_config.parallel_config.data_parallel_rank
        instance_id = vllm_config.instance_id
        # Same naming scheme as the capturer, keyed by instance and DP rank.
        self._lock_file = f"{_LOCK_FILE_PREFIX}_{instance_id}_{self.dp_rank}.lock"
        shm_name = f"{_BUFFER_PREFIX}_{instance_id}_{self.dp_rank}"
        with _file_lock(self._lock_file, mode="rb+"):
            # Avoid resource_tracker registering the shared memory
            # (the reader must not cause the capturer-owned segment to be
            # unlinked at interpreter exit).
            with patch(
                "multiprocessing.resource_tracker.register",
                lambda *args, **kwargs: None,
            ):
                self._shm = shared_memory.SharedMemory(name=shm_name)
        self._host_buffer_view = np.ndarray(
            shape, dtype=np.int32, buffer=self._shm.buf
        )

    def get_routed_experts(self, indices: np.ndarray) -> np.ndarray:
        """
        Read routed expert data from shared memory.

        Args:
            indices: Array of indices to read.

        Returns:
            Copy of the expert routing data for the given indices.
        """
        if self._host_buffer_view is None:
            raise RuntimeError("Buffer not attached. Call attach_buffer() first.")
        if self._lock_file is None:
            raise RuntimeError("Lock file not initialized.")
        # Copy under the lock so a concurrent writer cannot tear the rows.
        with _file_lock(self._lock_file, mode="rb+"):
            return self._host_buffer_view[indices, :, :].copy()

    def cleanup(self) -> None:
        """Explicitly clean up resources (close without unlink)."""
        # The capturer owns the segment; the reader only detaches.
        if self._shm is not None:
            try:
                self._shm.close()
            except Exception:
                logger.debug("Exception during cleanup for reader", exc_info=True)
            finally:
                self._shm = None

    def __del__(self) -> None:
        """Close shared memory on destruction (do not unlink)."""
        self.cleanup()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/routed_experts_capturer.py",
"license": "Apache License 2.0",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:benchmarks/kernels/cpu/benchmark_cpu_fused_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import sys
import time
import numpy as np
import torch
from vllm.utils.argparse_utils import FlexibleArgumentParser
from vllm.utils.torch_utils import set_random_seed
# Check if CPU MoE operations are available
try:
    from vllm._custom_ops import cpu_fused_moe, cpu_prepack_moe_weight
except (ImportError, AttributeError) as e:
    # Fail fast with a helpful message instead of a raw traceback when the
    # CPU extension was not compiled for this platform.
    print("ERROR: CPU fused MoE operations are not available on this platform.")
    print("This benchmark requires x86 CPU with proper vLLM CPU extensions compiled.")
    print(
        "The cpu_fused_moe kernel is typically available on Linux x86_64 "
        "with AVX2/AVX512."
    )
    print(f"Import error: {e}")
    sys.exit(1)

# ISA selection following test_cpu_fused_moe.py pattern
# "amx" is only offered when the CPU reports AMX tile support.
ISA_CHOICES = ["amx", "vec"] if torch._C._cpu._is_amx_tile_supported() else ["vec"]
@torch.inference_mode()
def main(
    batch_size: int,
    expert_num: int,
    hidden_size: int,
    intermediate_size: int,
    topk_num: int,
    use_bias: bool = False,
    dtype: torch.dtype = torch.bfloat16,
    activation: str = "silu",
    isa: str = "vec",
    seed: int = 0,
    iters: int = 20,
) -> None:
    """Generate random MoE inputs/weights, time `iters` runs of the CPU
    fused-MoE kernel, and print latency statistics plus estimated TFLOP/s."""
    set_random_seed(seed)

    # Gate and up projections are fused, so the first GEMM is twice as wide.
    up_dim = 2 * intermediate_size
    hidden_scale = 0.5 * hidden_size**0.5
    inter_scale = 0.5 * intermediate_size**0.5

    input_tensor = torch.randn((batch_size, hidden_size), dtype=dtype) / hidden_scale
    w13 = torch.randn((expert_num, up_dim, hidden_size), dtype=dtype) / hidden_scale
    w2 = (
        torch.randn((expert_num, hidden_size, intermediate_size), dtype=dtype)
        / inter_scale
    )
    if use_bias:
        w13_bias = torch.randn((expert_num, up_dim), dtype=dtype) / (0.5 * up_dim**0.5)
        w2_bias = torch.randn((expert_num, hidden_size), dtype=dtype) / hidden_scale
    else:
        w13_bias = None
        w2_bias = None

    # Softmax over router logits, then select the top-k experts per token.
    router_logits = torch.randn((batch_size, expert_num), dtype=dtype)
    score = torch.softmax(router_logits, dim=-1, dtype=torch.float32)
    topk_weights, topk_ids = torch.topk(score, topk_num)
    topk_ids = topk_ids.to(torch.int32)

    # Pre-pack weights into the layout expected by the selected ISA.
    packed_w13 = cpu_prepack_moe_weight(w13, isa)
    packed_w2 = cpu_prepack_moe_weight(w2, isa)

    def _timed_run() -> float:
        """Execute one kernel call and return its latency in milliseconds."""
        begin = time.perf_counter_ns()
        cpu_fused_moe(
            input_tensor,
            packed_w13,
            packed_w2,
            w13_bias,
            w2_bias,
            topk_weights,
            topk_ids,
            activation,
            isa,
        )
        return (time.perf_counter_ns() - begin) / 1e6

    # warmup
    for _ in range(5):
        _timed_run()
    # benchmark
    times = [_timed_run() for _ in range(iters)]
    if not times:
        print("No iterations to measure. Set --iters > 0.")
        return

    time_mean = np.mean(times)
    print("\tmin (ms) = ", min(times))
    print("\tmax (ms) = ", max(times))
    print("\tmean (ms) = ", time_mean)
    print("\tstd = ", np.std(times))
    print("\tmedian (ms) = ", np.median(times))

    # Calculate throughput metrics
    # FLOPs estimation: 2 * batch * topk * (hidden * up_dim + intermediate * hidden)
    flops_per_token = (
        2 * topk_num * (hidden_size * up_dim + intermediate_size * hidden_size)
    )
    total_flops = batch_size * flops_per_token
    tflops = total_flops / (time_mean * 1e-3) / 1e12
    print(f"\tthroughput (TFLOP/s) = {tflops:.4f}")
if __name__ == "__main__":
    # CLI for the benchmark; defaults roughly follow gpt-oss-sized MoE dims.
    parser = FlexibleArgumentParser(description="Benchmark the CPU fused MoE kernel.")
    parser.add_argument("--batch-size", type=int, default=64)
    parser.add_argument("--expert-num", type=int, default=8)
    parser.add_argument("--hidden-size", type=int, default=2880)
    parser.add_argument("--intermediate-size", type=int, default=2880)
    parser.add_argument(
        "--topk-num",
        type=int,
        default=None,
        help="Number of experts to route each token to (default: expert_num // 2)",
    )
    parser.add_argument("--use-bias", action="store_true")
    parser.add_argument(
        "--activation",
        type=str,
        choices=["silu", "swigluoai"],
        default="silu",
        help="Activation function",
    )
    parser.add_argument(
        "--isa",
        type=str,
        choices=ISA_CHOICES,
        default=ISA_CHOICES[0],
        help=f"ISA to use (available: {ISA_CHOICES})",
    )
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--iters", type=int, default=20)
    args = parser.parse_args()
    # Default topk_num to expert_num // 2, minimum 1
    topk_num = (
        args.topk_num if args.topk_num is not None else max(args.expert_num // 2, 1)
    )
    print(args)
    main(
        batch_size=args.batch_size,
        expert_num=args.expert_num,
        hidden_size=args.hidden_size,
        intermediate_size=args.intermediate_size,
        topk_num=topk_num,
        use_bias=args.use_bias,
        dtype=torch.bfloat16,  # Following test_cpu_fused_moe.py
        activation=args.activation,
        isa=args.isa,
        seed=args.seed,
        iters=args.iters,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "benchmarks/kernels/cpu/benchmark_cpu_fused_moe.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/pooling/embed/vision_embedding_offline.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""
This example shows how to use vLLM for running offline inference with
the correct prompt format on vision language models for multimodal embedding.
For most models, the prompt format should follow corresponding examples
on HuggingFace model repository.
"""
import argparse
from dataclasses import asdict
from pathlib import Path
from PIL.Image import Image
from vllm import LLM, EngineArgs
from vllm.multimodal.utils import fetch_image
from vllm.utils.print_utils import print_embeddings
ROOT_DIR = Path(__file__).parent.parent.parent
EMBED_TEMPLATE_DIR = ROOT_DIR / "pooling/embed/template/"
image_url = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/cat_snow.jpg"
text = "A cat standing in the snow."
multi_modal_data = {"image": fetch_image(image_url)}
def run_clip(seed: int):
    """Embed text and an image with CLIP via vLLM's offline pooling runner."""
    args = EngineArgs(
        model="openai/clip-vit-base-patch32",
        runner="pooling",
        limit_mm_per_prompt={"image": 1},
    )
    llm = LLM(**asdict(args) | {"seed": seed})

    print("Text embedding output:")
    text_out = llm.embed(text, use_tqdm=False)
    print_embeddings(text_out[0].outputs.embedding)

    print("Image embedding output:")
    # For image input, make sure that the prompt text is empty.
    image_request = {"prompt": "", "multi_modal_data": multi_modal_data}
    image_out = llm.embed(image_request, use_tqdm=False)
    print_embeddings(image_out[0].outputs.embedding)
def run_e5_v(seed: int):
    """Embed text and an image with e5-v via vLLM's offline pooling runner."""
    args = EngineArgs(
        model="royokong/e5-v",
        runner="pooling",
        max_model_len=4096,
        limit_mm_per_prompt={"image": 1},
    )
    llm = LLM(**asdict(args) | {"seed": seed})

    # Every input is wrapped in a Llama-3 chat turn.
    llama3_template = "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n \n"  # noqa: E501

    print("Text embedding output:")
    text_prompt = llama3_template.format(
        f"{text}\nSummary above sentence in one word: "
    )
    text_out = llm.embed(text_prompt, use_tqdm=False)
    print_embeddings(text_out[0].outputs.embedding)

    print("Image embedding output:")
    image_prompt = llama3_template.format("<image>\nSummary above image in one word: ")
    image_out = llm.embed(
        {"prompt": image_prompt, "multi_modal_data": multi_modal_data},
        use_tqdm=False,
    )
    print_embeddings(image_out[0].outputs.embedding)
def run_qwen3_vl(seed: int):
    """Embed text, image, and image+text with Qwen3-VL-Embedding-2B."""
    try:
        from qwen_vl_utils import smart_resize
    except ModuleNotFoundError:
        print(
            "WARNING: `qwen-vl-utils` not installed, input images will not "
            "be automatically resized. This can cause different results "
            "comparing with HF repo's example. "
            "You can enable this functionality by `pip install qwen-vl-utils`."
        )
        smart_resize = None

    if smart_resize is not None:

        def post_process_image(image: Image) -> Image:
            # Resize to dimensions aligned to factor 32, matching the HF
            # reference preprocessing.
            width, height = image.size
            resized_height, resized_width = smart_resize(
                height,
                width,
                factor=32,
            )
            return image.resize((resized_width, resized_height))

        # NOTE: mutates the module-level multi_modal_data dict in place.
        multi_modal_data["image"] = post_process_image(multi_modal_data["image"])

    engine_args = EngineArgs(
        model="Qwen/Qwen3-VL-Embedding-2B",
        runner="pooling",
        max_model_len=8192,
        limit_mm_per_prompt={"image": 1},
        # Resizing already happened above, so disable the processor's own.
        mm_processor_kwargs={"do_resize": False} if smart_resize is not None else None,
    )

    default_instruction = "Represent the user's input."
    image_placeholder = "<|vision_start|><|image_pad|><|vision_end|>"
    prompt_text = f"<|im_start|>system\n{default_instruction}<|im_end|>\n<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant\n"
    prompt_image = f"<|im_start|>system\n{default_instruction}<|im_end|>\n<|im_start|>user\n{image_placeholder}<|im_end|>\n<|im_start|>assistant\n"
    prompt_image_text = f"<|im_start|>system\n{default_instruction}<|im_end|>\n<|im_start|>user\n{image_placeholder}{text}<|im_end|>\n<|im_start|>assistant\n"

    llm = LLM(**asdict(engine_args) | {"seed": seed})

    print("Text embedding output:")
    outputs = llm.embed(prompt_text, use_tqdm=False)
    print_embeddings(outputs[0].outputs.embedding)

    print("Image embedding output:")
    outputs = llm.embed(
        {
            "prompt": prompt_image,
            "multi_modal_data": multi_modal_data,
        },
        use_tqdm=False,
    )
    print_embeddings(outputs[0].outputs.embedding)

    print("Image+Text embedding output:")
    outputs = llm.embed(
        {
            "prompt": prompt_image_text,
            "multi_modal_data": multi_modal_data,
        },
        use_tqdm=False,
    )
    print_embeddings(outputs[0].outputs.embedding)
def run_siglip(seed: int):
    """Embed text and an image with SigLIP via vLLM's offline pooling runner."""
    args = EngineArgs(
        model="google/siglip-base-patch16-224",
        runner="pooling",
        limit_mm_per_prompt={"image": 1},
    )
    llm = LLM(**asdict(args) | {"seed": seed})

    print("Text embedding output:")
    text_out = llm.embed(text, use_tqdm=False)
    print_embeddings(text_out[0].outputs.embedding)

    print("Image embedding output:")
    # For image input, make sure that the prompt text is empty.
    image_request = {"prompt": "", "multi_modal_data": multi_modal_data}
    image_out = llm.embed(image_request, use_tqdm=False)
    print_embeddings(image_out[0].outputs.embedding)
def run_vlm2vec_phi3v(seed: int):
    """Embed text, image, and image+text with VLM2Vec (Phi-3-V)."""
    args = EngineArgs(
        model="TIGER-Lab/VLM2Vec-Full",
        runner="pooling",
        max_model_len=4096,
        trust_remote_code=True,
        mm_processor_kwargs={"num_crops": 4},
        limit_mm_per_prompt={"image": 1},
    )
    llm = LLM(**asdict(args) | {"seed": seed})
    image_token = "<|image_1|>"

    # (label, prompt, multimodal payload or None) in the order they are run.
    cases = [
        (
            "Text embedding output:",
            f"Find me an everyday image that matches the given caption: {text}",
            None,
        ),
        (
            "Image embedding output:",
            f"{image_token} Find a day-to-day image that looks similar to the provided image.",  # noqa: E501
            multi_modal_data,
        ),
        (
            "Image+Text embedding output:",
            f"{image_token} Represent the given image with the following question: {text}",  # noqa: E501
            multi_modal_data,
        ),
    ]
    for label, prompt, mm_data in cases:
        print(label)
        if mm_data is None:
            outputs = llm.embed(prompt, use_tqdm=False)
        else:
            outputs = llm.embed(
                {"prompt": prompt, "multi_modal_data": mm_data},
                use_tqdm=False,
            )
        print_embeddings(outputs[0].outputs.embedding)
def run_vlm2vec_qwen2vl(seed: int):
    """Embed text, image, and image+text with VLM2Vec-Qwen2VL-2B.

    The model's LoRA adapter is first merged into the base model and the
    merged checkpoint saved to disk, then vLLM is started on that path.
    """
    # vLLM does not support LoRA adapters on multi-modal encoder,
    # so we merge the weights first
    from huggingface_hub.constants import HF_HUB_CACHE
    from peft import PeftConfig, PeftModel
    from transformers import AutoModelForImageTextToText, AutoProcessor

    from vllm.entrypoints.chat_utils import load_chat_template

    model_id = "TIGER-Lab/VLM2Vec-Qwen2VL-2B"
    base_model = AutoModelForImageTextToText.from_pretrained(model_id)
    lora_model = PeftModel.from_pretrained(
        base_model,
        model_id,
        config=PeftConfig.from_pretrained(model_id),
    )
    model = lora_model.merge_and_unload().to(dtype=base_model.dtype)
    model._hf_peft_config_loaded = False  # Needed to save the merged model
    processor = AutoProcessor.from_pretrained(
        model_id,
        # `min_pixels` and `max_pixels` are deprecated for
        # transformers `preprocessor_config.json`
        size={"shortest_edge": 3136, "longest_edge": 12845056},
    )
    processor.chat_template = load_chat_template(
        # The original chat template is not correct
        EMBED_TEMPLATE_DIR / "vlm2vec_qwen2vl.jinja",
    )
    # Save beside the HF hub cache with a "-vllm" suffix so reruns reuse it.
    merged_path = str(
        Path(HF_HUB_CACHE) / ("models--" + model_id.replace("/", "--") + "-vllm")
    )
    print(f"Saving merged model to {merged_path}...")
    print(
        "NOTE: This directory is not tracked by `huggingface_hub` "
        "so you have to delete this manually if you don't want it anymore."
    )
    model.save_pretrained(merged_path)
    processor.save_pretrained(merged_path)
    print("Done!")

    engine_args = EngineArgs(
        model=merged_path,
        runner="pooling",
        max_model_len=4096,
        mm_processor_kwargs={
            "min_pixels": 3136,
            "max_pixels": 12845056,
        },
        limit_mm_per_prompt={"image": 1},
    )
    llm = LLM(**asdict(engine_args) | {"seed": seed})
    image_token = "<|image_pad|>"

    print("Text embedding output:")
    prompt_text = f"Find me an everyday image that matches the given caption: {text}"
    outputs = llm.embed(prompt_text, use_tqdm=False)
    print_embeddings(outputs[0].outputs.embedding)

    print("Image embedding output:")
    prompt_image = f"{image_token} Find a day-to-day image that looks similar to the provided image."  # noqa: E501
    outputs = llm.embed(
        {
            "prompt": prompt_image,
            "multi_modal_data": multi_modal_data,
        },
        use_tqdm=False,
    )
    print_embeddings(outputs[0].outputs.embedding)

    print("Image+Text embedding output:")
    prompt_image_text = (
        f"{image_token} Represent the given image with the following question: {text}"  # noqa: E501
    )
    outputs = llm.embed(
        {
            "prompt": prompt_image_text,
            "multi_modal_data": multi_modal_data,
        },
        use_tqdm=False,
    )
    print_embeddings(outputs[0].outputs.embedding)
# Registry mapping the --model CLI choice to its example runner function.
model_example_map = {
    "clip": run_clip,
    "e5_v": run_e5_v,
    "qwen3_vl": run_qwen3_vl,
    "siglip": run_siglip,
    "vlm2vec_phi3v": run_vlm2vec_phi3v,
    "vlm2vec_qwen2vl": run_vlm2vec_qwen2vl,
}
def parse_args():
    """Build and parse the CLI choosing the example model and RNG seed."""
    cli = argparse.ArgumentParser(
        "Script to run a specified VLM through vLLM offline api."
    )
    cli.add_argument(
        "--model",
        "-m",
        type=str,
        default="vlm2vec_phi3v",
        choices=model_example_map.keys(),
        help="The name of the embedding model.",
    )
    cli.add_argument(
        "--seed",
        type=int,
        default=0,
        help="Set the seed when initializing `vllm.LLM`.",
    )
    return cli.parse_args()
def main(args):
    """Dispatch to the example runner selected by --model."""
    model_example_map[args.model](args.seed)


if __name__ == "__main__":
    args = parse_args()
    main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/embed/vision_embedding_offline.py",
"license": "Apache License 2.0",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/pooling/score/vision_rerank_api_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""
Example Python client for multimodal rerank API which is compatible with
Jina and Cohere https://jina.ai/reranker
Run `vllm serve <model> --runner pooling` to start up the server in vLLM.
e.g.
vllm serve jinaai/jina-reranker-m0 --runner pooling
vllm serve Qwen/Qwen3-VL-Reranker-2B \
--runner pooling \
--max-model-len 4096 \
--hf_overrides '{"architectures": ["Qwen3VLForSequenceClassification"],"classifier_from_token": ["no", "yes"],"is_original_qwen3_reranker": true}' \
--chat-template examples/pooling/score/template/qwen3_vl_reranker.jinja
"""
import argparse
import pprint
import requests
from vllm.multimodal.utils import encode_image_url, fetch_image
query = "A woman playing with her dog on a beach at sunset."
document = (
"A woman shares a joyful moment with her golden retriever on a sun-drenched beach at sunset, "
"as the dog offers its paw in a heartwarming display of companionship and trust."
)
image_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"
video_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-Omni/demo/draw.mp4"
documents = [
{
"type": "text",
"text": document,
},
{
"type": "image_url",
"image_url": {"url": image_url},
},
{
"type": "image_url",
"image_url": {"url": encode_image_url(fetch_image(image_url))},
},
{
"type": "video_url",
"video_url": {"url": video_url},
},
]
def parse_args():
    """Parse the host/port of the running vLLM server from the CLI."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--host", type=str, default="localhost")
    cli.add_argument("--port", type=int, default=8000)
    return cli.parse_args()
def main(args):
    """Exercise the /rerank endpoint with every supported document form.

    Sends one request per form (plain string, text part, image URL, base64
    image, video URL, text+image, and a mixed list) and pretty-prints each
    JSON response.
    """
    base_url = f"http://{args.host}:{args.port}"
    models_url = base_url + "/v1/models"
    rerank_url = base_url + "/rerank"

    # Use whichever model the server is currently serving.
    response = requests.get(models_url)
    model = response.json()["data"][0]["id"]

    print("Query: string & Document: list of string")
    prompt = {"model": model, "query": query, "documents": [document]}
    response = requests.post(rerank_url, json=prompt)
    pprint.pprint(response.json())

    print("Query: string & Document: text")
    prompt = {"model": model, "query": query, "documents": {"content": [documents[0]]}}
    response = requests.post(rerank_url, json=prompt)
    pprint.pprint(response.json())

    print("Query: string & Document: image url")
    prompt = {
        "model": model,
        "query": query,
        "documents": {"content": [documents[1]]},
    }
    response = requests.post(rerank_url, json=prompt)
    pprint.pprint(response.json())

    print("Query: string & Document: image base64")
    prompt = {
        "model": model,
        "query": query,
        "documents": {"content": [documents[2]]},
    }
    response = requests.post(rerank_url, json=prompt)
    pprint.pprint(response.json())

    print("Query: string & Document: video url")
    prompt = {
        "model": model,
        "query": query,
        "documents": {"content": [documents[3]]},
    }
    response = requests.post(rerank_url, json=prompt)
    pprint.pprint(response.json())

    print("Query: string & Document: text + image url")
    prompt = {
        "model": model,
        "query": query,
        "documents": {"content": [documents[0], documents[1]]},
    }
    response = requests.post(rerank_url, json=prompt)
    pprint.pprint(response.json())

    print("Query: string & Document: list")
    prompt = {
        "model": model,
        "query": query,
        "documents": [
            document,
            {"content": [documents[0]]},
            {"content": [documents[1]]},
            {"content": [documents[0], documents[1]]},
        ],
    }
    response = requests.post(rerank_url, json=prompt)
    pprint.pprint(response.json())


if __name__ == "__main__":
    args = parse_args()
    main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/score/vision_rerank_api_online.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/pooling/score/vision_score_api_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""
Example online usage of Score API.
Run `vllm serve <model> --runner pooling` to start up the server in vLLM.
e.g.
vllm serve jinaai/jina-reranker-m0 --runner pooling
vllm serve Qwen/Qwen3-VL-Reranker-2B \
--runner pooling \
--max-model-len 4096 \
--hf_overrides '{"architectures": ["Qwen3VLForSequenceClassification"],"classifier_from_token": ["no", "yes"],"is_original_qwen3_reranker": true}' \
--chat-template examples/pooling/score/template/qwen3_vl_reranker.jinja
"""
import argparse
import pprint
import requests
from vllm.multimodal.utils import encode_image_url, fetch_image
query = "A woman playing with her dog on a beach at sunset."
document = (
"A woman shares a joyful moment with her golden retriever on a sun-drenched beach at sunset, "
"as the dog offers its paw in a heartwarming display of companionship and trust."
)
image_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"
video_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-Omni/demo/draw.mp4"
documents = [
{
"type": "text",
"text": document,
},
{
"type": "image_url",
"image_url": {"url": image_url},
},
{
"type": "image_url",
"image_url": {"url": encode_image_url(fetch_image(image_url))},
},
{
"type": "video_url",
"video_url": {"url": video_url},
},
]
def parse_args():
    """Parse the host/port of the running vLLM server from the CLI."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--host", type=str, default="localhost")
    cli.add_argument("--port", type=int, default=8000)
    return cli.parse_args()
def _post_and_print(score_url: str, model: str, label: str, queries, docs) -> None:
    """POST one /score request and pretty-print the JSON response.

    Prints *label* first so the console output identifies each request.
    """
    print(label)
    prompt = {"model": model, "queries": queries, "documents": docs}
    response = requests.post(score_url, json=prompt)
    pprint.pprint(response.json())


def main(args):
    """Exercise the Score API with every supported document form.

    Discovers the served model id, then issues one scoring request per
    query/document combination: plain strings, structured text, image URL,
    base64 image, video URL, mixed text+image content, and list inputs.
    """
    base_url = f"http://{args.host}:{args.port}"
    models_url = base_url + "/v1/models"
    score_url = base_url + "/score"

    # Use the first (and typically only) model the server advertises.
    response = requests.get(models_url)
    model = response.json()["data"][0]["id"]

    _post_and_print(score_url, model, "Query: string & Document: string", query, document)
    _post_and_print(
        score_url,
        model,
        "Query: string & Document: text",
        query,
        {"content": [documents[0]]},
    )
    _post_and_print(
        score_url,
        model,
        "Query: string & Document: image url",
        query,
        {"content": [documents[1]]},
    )
    _post_and_print(
        score_url,
        model,
        "Query: string & Document: image base64",
        query,
        {"content": [documents[2]]},
    )
    _post_and_print(
        score_url,
        model,
        "Query: string & Document: video url",
        query,
        {"content": [documents[3]]},
    )
    _post_and_print(
        score_url,
        model,
        "Query: string & Document: text + image url",
        query,
        {"content": [documents[0], documents[1]]},
    )
    _post_and_print(
        score_url,
        model,
        "Query: string & Document: list",
        query,
        [
            document,
            {"content": [documents[0]]},
            {"content": [documents[1]]},
            {"content": [documents[0], documents[1]]},
        ],
    )
    # Multiple queries scored against multiple documents.
    data = [
        document,
        {"content": [documents[0]]},
        {"content": [documents[1]]},
        {"content": [documents[0], documents[1]]},
    ]
    _post_and_print(score_url, model, "Query: list & Document: list", data, data)
# Script entry point: parse CLI flags and run all demo requests.
if __name__ == "__main__":
    args = parse_args()
    main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/score/vision_score_api_online.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/buffer_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable, Sequence
from functools import partial
import numpy as np
import torch
from vllm.triton_utils import tl, triton
from vllm.utils.platform_utils import is_uva_available
from vllm.utils.torch_utils import (
async_tensor_h2d,
get_accelerator_view_from_cpu_tensor,
)
def async_copy_to_gpu(
    x: torch.Tensor | np.ndarray,
    out: torch.Tensor | None = None,
    device: torch.device | None = None,
) -> torch.Tensor:
    """Asynchronously copy a CPU tensor/array to the GPU via a pinned staging buffer.

    Exactly one of ``out`` (a preallocated device tensor) or ``device`` (to
    allocate a fresh one) must be provided. Returns ``out`` (or the new
    allocation) with the copy enqueued non-blocking.
    """
    if isinstance(x, np.ndarray):
        # Zero-copy wrap: shares memory with the numpy array.
        x = torch.from_numpy(x)
    assert x.is_cpu
    if out is None:
        assert device is not None
        out = torch.empty_like(x, device=device)
    # CPU-to-CPU copy into pinned (page-locked) memory so the H2D transfer
    # below can actually run asynchronously.
    tmp = x.pin_memory()
    # pin_memory() returns the same object when x is already pinned; the
    # staging buffer must be distinct for the async copy to be safe.
    assert tmp is not x
    # CPU-to-GPU copy
    return out.copy_(tmp, non_blocking=True)
class UvaBuffer:
    """A pinned CPU buffer with a GPU-visible (UVA) view of the same memory.

    Writes through ``cpu`` or ``np`` become visible to device code reading
    ``uva`` without an explicit host-to-device copy.
    """

    def __init__(self, size: int | Sequence[int], dtype: torch.dtype):
        if not is_uva_available():
            raise RuntimeError("UVA is not available")
        # Pinned host allocation; source of the UVA mapping below.
        self.cpu = torch.zeros(size, dtype=dtype, device="cpu", pin_memory=True)
        # Numpy view sharing storage with the pinned tensor.
        self.np = self.cpu.numpy()
        # Accelerator-side view of the same physical memory.
        self.uva = get_accelerator_view_from_cpu_tensor(self.cpu)
class UvaBufferPool:
    """A small ring of ``UvaBuffer``s used as staging space for device copies.

    Round-robining over ``max_concurrency`` buffers lets a new copy be staged
    while a previous (possibly still in-flight) transfer reads another buffer.
    """

    def __init__(
        self,
        size: int | Sequence[int],
        dtype: torch.dtype,
        max_concurrency: int = 2,
    ):
        self.size = size
        self.dtype = dtype
        self.max_concurrency = max_concurrency
        # UVA buffers for concurrency
        self._uva_bufs = [UvaBuffer(size, dtype) for _ in range(max_concurrency)]
        # Current buffer index
        self._curr = 0

    def copy_to_uva(self, x: torch.Tensor | np.ndarray | list) -> torch.Tensor:
        """Stage ``x`` into the next pinned buffer and return its UVA view.

        Only the first ``len(x)`` rows are written and returned; the view
        aliases pinned host memory, so it is only valid until this buffer is
        reused by a later call.
        """
        # Round robin to the next buffer.
        self._curr = (self._curr + 1) % self.max_concurrency
        buf = self._uva_bufs[self._curr]
        # CPU-to-CPU copy; lists and numpy arrays go through the numpy view.
        dst = buf.cpu if isinstance(x, torch.Tensor) else buf.np
        n = len(x)
        dst[:n] = x
        return buf.uva[:n]

    def copy_to_gpu(
        self,
        x: torch.Tensor | np.ndarray,
        out: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Stage ``x`` via UVA, then materialize it on the device.

        With ``out`` given, performs an async copy into it; otherwise clones
        the UVA view into fresh memory.
        """
        uva = self.copy_to_uva(x)
        # CPU-to-GPU copy
        return uva.clone() if out is None else out.copy_(uva, non_blocking=True)
class UvaBackedTensor:
    """A CPU-resident tensor (source of truth) with a UVA-staged device view.

    Mutate ``cpu``/``np`` on the host, then call ``copy_to_uva`` to refresh
    the ``gpu`` view consumed by device code.
    """

    def __init__(
        self, size: int | Sequence[int], dtype: torch.dtype, max_concurrency: int = 2
    ):
        self.dtype = dtype
        self.max_concurrency = max_concurrency
        # Source of truth
        self.cpu = torch.zeros(size, dtype=dtype, device="cpu", pin_memory=False)
        self.np = self.cpu.numpy()
        # Buffers for concurrency
        self.pool = UvaBufferPool(size, dtype, max_concurrency)
        self.gpu = self.pool.copy_to_uva(self.np)

    def copy_to_uva(self, n: int | None = None) -> torch.Tensor:
        """Re-stage the first ``n`` rows (or all rows) and return the UVA view."""
        # CPU-to-CPU copy
        self.gpu = self.pool.copy_to_uva(self.np[:n] if n is not None else self.np)
        return self.gpu
class StagedWriteTensor:
    """A device tensor whose sparse updates are batched on the host.

    Writes are accumulated via ``stage_write``/``stage_write_elem`` and then
    flushed in a single Triton kernel launch by ``apply_write``, instead of
    issuing one host-to-device copy per individual update.
    """

    def __init__(
        self,
        size: int | Sequence[int],
        dtype: torch.dtype,
        device: torch.device,
        max_concurrency: int = 2,
        uva_instead_of_gpu: bool = False,
    ):
        supported_dtypes = [torch.int32, torch.int64, torch.float32]
        if dtype not in supported_dtypes:
            raise ValueError(
                f"Unsupported dtype {dtype}: should be one of {supported_dtypes}"
            )
        self.num_rows = size if isinstance(size, int) else size[0]
        self.dtype = dtype
        self.device = device
        self.max_concurrency = max_concurrency
        if not uva_instead_of_gpu:
            # Create a GPU tensor (default)
            self.gpu = torch.zeros(size, dtype=dtype, device=device)
        else:
            # For a large but not-frequently-accessed tensor, we can use UVA instead of
            # GPU to save GPU memory
            self._uva_buf = UvaBuffer(size, dtype)
            self.gpu = self._uva_buf.uva
        # Pending writes, flattened: destination row index, column start,
        # values, and the cumulative value count after each staged write
        # (used by the kernel to delimit each write's slice).
        self._staged_write_indices: list[int] = []
        self._staged_write_starts: list[int] = []
        self._staged_write_contents: list[int | float] = []
        self._staged_write_cu_lens: list[int] = []
        # Pinned staging pools for the per-write metadata arrays.
        new_buffer = partial(UvaBufferPool, max_concurrency=max_concurrency)
        self.write_indices = new_buffer(self.num_rows, dtype=torch.int32)
        self.write_starts = new_buffer(self.num_rows, dtype=torch.int32)
        self.write_cu_lens = new_buffer(self.num_rows, dtype=torch.int32)

    def stage_write(
        self, index: int, start: int, x: Iterable[int] | Iterable[float]
    ) -> None:
        """Queue ``x`` for writing into row ``index`` starting at column ``start``.

        NOTE(review): the ``not x`` emptiness check assumes ``x`` is a sized
        container (list/tuple); a generator would always pass it — confirm
        callers never pass generators.
        """
        assert index >= 0
        assert start >= 0
        if not x:
            # Nothing to write; skip so cu_lens stays aligned with real writes.
            return
        self._staged_write_indices.append(index)
        self._staged_write_starts.append(start)
        self._staged_write_contents.extend(x)
        self._staged_write_cu_lens.append(len(self._staged_write_contents))

    def stage_write_elem(self, index: int, x: int) -> None:
        """Queue a single scalar write into column 0 of row ``index``."""
        assert index >= 0
        self._staged_write_indices.append(index)
        self._staged_write_starts.append(0)
        self._staged_write_contents.append(x)
        self._staged_write_cu_lens.append(len(self._staged_write_contents))

    def apply_write(self) -> None:
        """Flush all staged writes to the device tensor in one kernel launch."""
        n = len(self._staged_write_indices)
        if n == 0:
            return
        # Stage the per-write metadata through the pinned UVA pools.
        indices_uva = self.write_indices.copy_to_uva(self._staged_write_indices)
        starts_uva = self.write_starts.copy_to_uva(self._staged_write_starts)
        cu_lens_uva = self.write_cu_lens.copy_to_uva(self._staged_write_cu_lens)
        # Special handling for write_contents
        write_contents = async_tensor_h2d(
            self._staged_write_contents, self.dtype, self.device, pin_memory=True
        )
        # Write diffs to the GPU buffer; one program per staged write.
        _apply_write_kernel[(n,)](
            self.gpu,
            self.gpu.stride(0),
            indices_uva,
            starts_uva,
            write_contents,
            cu_lens_uva,
            BLOCK_SIZE=1024,
        )
        # Clear the staged writes
        self.clear_staged_writes()

    def clear_staged_writes(self) -> None:
        """Drop all pending (un-applied) staged writes."""
        self._staged_write_indices.clear()
        self._staged_write_starts.clear()
        self._staged_write_contents.clear()
        self._staged_write_cu_lens.clear()
@triton.jit
def _apply_write_kernel(
    output_ptr,  # base pointer of the destination 2D tensor
    output_stride,  # row stride of the destination
    write_indices_ptr,  # per-write destination row index
    write_starts_ptr,  # per-write destination column offset
    write_contents_ptr,  # all staged values, concatenated
    write_cu_lens_ptr,  # cumulative value counts delimiting each write
    BLOCK_SIZE: tl.constexpr,
):
    # One program instance applies exactly one staged write.
    pid = tl.program_id(0)
    row_idx = tl.load(write_indices_ptr + pid)
    start_idx = tl.load(write_starts_ptr + pid)
    # This write's slice of the contents array is [cu_start, cu_end).
    cu_start = tl.load(write_cu_lens_ptr + pid - 1) if pid > 0 else 0
    cu_end = tl.load(write_cu_lens_ptr + pid)
    content_len = cu_end - cu_start
    # Copy the slice in BLOCK_SIZE chunks into the destination row.
    for i in range(0, content_len, BLOCK_SIZE):
        block = i + tl.arange(0, BLOCK_SIZE)
        mask = block < content_len
        content = tl.load(write_contents_ptr + cu_start + block, mask=mask)
        tl.store(
            output_ptr + row_idx * output_stride + start_idx + block, content, mask=mask
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/buffer_utils.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_mxfp4.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import torch
from torch.nn.parameter import Parameter
from vllm.model_executor.layers.quantization.compressed_tensors.schemes import (
CompressedTensorsScheme,
)
from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
apply_fp4_marlin_linear,
prepare_fp4_layer_for_marlin,
)
from vllm.model_executor.parameter import (
GroupQuantScaleParameter,
ModelWeightParameter,
)
__all__ = ["CompressedTensorsW4A16Mxfp4"]
class CompressedTensorsW4A16Mxfp4(CompressedTensorsScheme):
    """
    Compressed tensors scheme for MXFP4 weight-only quantization.
    Supports models quantized with the compressed-tensors mxfp4-pack-quantized
    format.
    MXFP4 format:
    - 4-bit float weights (E2M1) packed into uint8
    - Per-group E8M0 scales with group_size=32
    - No global scale (unlike NVFP4)
    """

    def __init__(self):
        # MXFP4 fixes the quantization group at 32 elements per scale.
        self.group_size = 32

    @classmethod
    def get_min_capability(cls) -> int:
        # 80 == SM80 (Ampere); minimum compute capability for this scheme.
        return 80

    def create_weights(
        self,
        layer: torch.nn.Module,
        output_partition_sizes: list[int],
        input_size_per_partition: int,
        params_dtype: torch.dtype,
        weight_loader: Callable,
        **kwargs,
    ):
        """Register the packed-weight and per-group-scale parameters on ``layer``."""
        output_size_per_partition = sum(output_partition_sizes)
        layer.logical_widths = output_partition_sizes
        layer.input_size_per_partition = input_size_per_partition
        layer.output_size_per_partition = output_size_per_partition
        layer.params_dtype = params_dtype
        # Packed FP4 weights (2 values per byte)
        weight = ModelWeightParameter(
            data=torch.empty(
                output_size_per_partition,
                input_size_per_partition // 2,
                dtype=torch.uint8,
            ),
            input_dim=1,
            output_dim=0,
            weight_loader=weight_loader,
        )
        layer.register_parameter("weight_packed", weight)
        # Per-group E8M0 scales
        weight_scale = GroupQuantScaleParameter(
            data=torch.empty(
                output_size_per_partition,
                # One scale per group of group_size input elements.
                input_size_per_partition // self.group_size,
                dtype=torch.uint8,
            ),
            input_dim=1,
            output_dim=0,
            weight_loader=weight_loader,
        )
        layer.register_parameter("weight_scale", weight_scale)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        """Repack the loaded weights into the layout the Marlin kernel expects."""
        # Rename weight_packed to weight that marlin expects
        layer.weight = Parameter(layer.weight_packed.data, requires_grad=False)
        del layer.weight_packed
        prepare_fp4_layer_for_marlin(layer)

    def apply_weights(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Apply the quantized linear transform via the Marlin FP4 kernel."""
        return apply_fp4_marlin_linear(
            input=x,
            weight=layer.weight,
            weight_scale=layer.weight_scale,
            # MXFP4 has no global scale (unlike NVFP4).
            weight_global_scale=None,
            workspace=layer.workspace,
            size_n=layer.output_size_per_partition,
            size_k=layer.input_size_per_partition,
            bias=bias,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_mxfp4.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/gemma3n_audio_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Lightweight utility functions for Gemma3n audio processing.
This module is separate from gemma3n_mm.py to avoid heavy CUDA dependencies,
making it testable without a full vLLM build.
"""
import torch
def adjust_audio_features_to_expected_length(
    audio_features: torch.Tensor,
    expected_tokens: int,
    audio_padding_embs: torch.Tensor,
) -> tuple[torch.Tensor, int]:
    """Pad or truncate audio features so they span exactly ``expected_tokens``.

    The Gemma3nProcessor inserts a fixed number of audio soft tokens into the
    text, but the audio encoder may emit fewer tokens (short clips) or more
    (long clips, BOA/EOA specials). Shortfalls are filled with the supplied
    padding embeddings; overruns are cut off.

    Args:
        audio_features: (batch_size, seq_len, embed_dim) audio embeddings.
        expected_tokens: Target token count (e.g., 188).
        audio_padding_embs: (1, 1, embed_dim) padding embedding.

    Returns:
        A pair of (features adjusted to ``expected_tokens`` length,
        number of tokens truncated — 0 when padding or no change was needed).
    """
    batch_size, seq_len, embed_dim = audio_features.shape
    delta = expected_tokens - seq_len
    if delta > 0:
        # Shorter than expected: extend with the padding embedding.
        pad_block = audio_padding_embs.expand(batch_size, delta, embed_dim)
        return torch.cat((audio_features, pad_block), dim=1), 0
    if delta < 0:
        # Longer than expected: keep only the leading expected_tokens.
        return audio_features[:, :expected_tokens, :], -delta
    # Already the right length.
    return audio_features, 0
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/gemma3n_audio_utils.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/core/test_rotary_embedding_mla_cache_fused.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for fused MLA KV-cache write and RoPE fused kernel
"""
import random
import pytest
import torch
from tests.kernels.allclose_default import get_default_atol, get_default_rtol
from tests.kernels.utils import DEFAULT_OPCHECK_TEST_UTILS, opcheck
from vllm import _custom_ops as ops
from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding
from vllm.platforms import current_platform
from vllm.utils.torch_utils import set_random_seed
@pytest.mark.parametrize("dtype", [torch.half, torch.bfloat16, torch.float])
@pytest.mark.parametrize("is_neox_style", [False, True])
@pytest.mark.parametrize("seq_len", [11, 42])
@pytest.mark.parametrize("qk_rope_head_dim", [64, 128])
@pytest.mark.parametrize("num_q_heads", [128])
@pytest.mark.parametrize("kv_cache_dtype", ["auto", "fp8"])
@pytest.mark.parametrize("kv_lora_rank", [512])
@pytest.mark.parametrize("num_blocks", [64])
@pytest.mark.parametrize("block_size", [16, 64, 256])
@pytest.mark.parametrize("seed", [0])
@pytest.mark.parametrize(
    "device", [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)]
)
@torch.inference_mode()
def test_concat_and_cache_mla_rope_fused(
    default_vllm_config,
    dtype: torch.dtype,
    is_neox_style: bool,
    seq_len: int,
    qk_rope_head_dim: int,
    num_q_heads: int,
    kv_cache_dtype: str,
    kv_lora_rank: int,
    num_blocks: int,
    block_size: int,
    seed: int,
    device: str,
    max_position: int = 8192,
    base: float = 10000,
) -> None:
    """Check the fused RoPE + MLA KV-cache-write custom op against a reference.

    The reference path applies RoPE with the non-fused RotaryEmbedding
    implementation and scatters the concatenated (kv_c, rotated k_pe) entries
    into the cache in Python; the fused op must match it for both the "auto"
    and "fp8" cache dtypes.
    """
    set_random_seed(seed)
    torch.set_default_device(device)
    # Non-fused RoPE module used to produce the reference rotations.
    rope = RotaryEmbedding(
        qk_rope_head_dim,
        qk_rope_head_dim,
        max_position,
        base,
        is_neox_style,
        torch.float32,
    )
    rope = rope.to(dtype=dtype, device=torch.get_default_device())
    positions = torch.randint(0, max_position, (seq_len,))
    query = torch.randn(seq_len, num_q_heads, qk_rope_head_dim, dtype=dtype)
    # key packs the rope part and the latent (kv_c) part in one tensor.
    key = torch.randn(seq_len, 1, qk_rope_head_dim + kv_lora_rank, dtype=dtype)
    k_pe = torch.flatten(key[..., :qk_rope_head_dim], start_dim=1).to(device=device)
    kv_c = torch.flatten(key[..., qk_rope_head_dim:], start_dim=1).to(device=device)
    if current_platform.is_rocm():
        # We use forward_hip for the same numerics as the fused custom kernel on ROCm
        # when dtype is FP16. The torch-native implementation implicitly upcasts
        # FP16 x FP16 multiplications to FP32 before downcasting them, which leads
        # to notable output divergences.
        # Clone the tensors because the implementation modifies them in-place
        ref_q_pe, ref_k_pe = rope.forward_hip(positions, query.clone(), k_pe.clone())
    else:
        # NOTE(woosuk): The reference implementation should be executed first
        # because the custom kernel is in-place.
        ref_q_pe, ref_k_pe = rope.forward_native(positions, query, k_pe)
    assert ref_k_pe is not None
    ref_k_pe = torch.flatten(ref_k_pe, start_dim=1).to(device=device)
    ref_k_rope = ref_k_pe[..., :qk_rope_head_dim]
    # Scatter the sequence over randomly sampled (unique) cache slots.
    total_available_slots = num_blocks * block_size
    total_needed_slots = seq_len
    assert total_available_slots >= total_needed_slots, "Not enough kv slots!"
    slot_mapping_lst = random.sample(range(total_available_slots), total_needed_slots)
    slot_mapping = torch.tensor(slot_mapping_lst, dtype=torch.long, device=device)
    entry_size = kv_lora_rank + qk_rope_head_dim
    kv_cache_scale = torch.tensor([0.1], dtype=torch.float32, device=device)
    kv_cache = torch.zeros(
        num_blocks,
        block_size,
        entry_size,
        dtype=torch.uint8 if kv_cache_dtype == "fp8" else dtype,
        device=device,
    )
    # Build the reference cache contents in Python, then quantize if needed.
    ref_temp = torch.zeros(*kv_cache.shape, dtype=dtype, device=device)
    for i in range(seq_len):
        slot = slot_mapping[i].item()
        block_idx = slot // block_size
        block_offset = slot % block_size
        ref_temp[block_idx, block_offset] = torch.cat((kv_c[i], ref_k_rope[i]), -1)
    if kv_cache_dtype == "fp8":
        ref_kv_cache = torch.empty_like(ref_temp, dtype=kv_cache.dtype)
        ops.convert_fp8(
            ref_kv_cache, ref_temp, kv_cache_scale.item(), kv_dtype=kv_cache_dtype
        )
    else:
        ref_kv_cache = ref_temp
    # Schema/registration check for the custom op, then the actual run.
    opcheck(
        torch.ops._C_cache_ops.concat_and_cache_mla_rope_fused,
        (
            positions,
            query,
            k_pe,
            kv_c,
            rope.cos_sin_cache,
            is_neox_style,
            slot_mapping,
            kv_cache,
            kv_cache_dtype,
            kv_cache_scale,
        ),
        test_utils=DEFAULT_OPCHECK_TEST_UTILS,
    )
    ops.concat_and_cache_mla_rope_fused(
        positions,
        query,
        k_pe,
        kv_c,
        rope.cos_sin_cache,
        is_neox_style,
        slot_mapping,
        kv_cache,
        kv_cache_dtype,
        kv_cache_scale,
    )
    if kv_cache_dtype == "fp8":
        # Compare after dequantizing to FP16 rather than comparing raw uint8.
        result_temp = torch.empty_like(kv_cache, dtype=torch.float16)
        ops.convert_fp8(
            result_temp,
            kv_cache.contiguous(),
            kv_cache_scale.item(),
            kv_dtype=kv_cache_dtype,
        )
        expected_temp = torch.empty_like(ref_kv_cache, dtype=torch.float16)
        ops.convert_fp8(
            expected_temp, ref_kv_cache, kv_cache_scale.item(), kv_dtype=kv_cache_dtype
        )
        torch.testing.assert_close(result_temp, expected_temp, atol=0.001, rtol=0.1)
    else:
        torch.testing.assert_close(kv_cache, ref_kv_cache)
    # The fused kernel rotates the query in-place; verify it as well.
    torch.testing.assert_close(
        query, ref_q_pe, atol=get_default_atol(query), rtol=get_default_rtol(query)
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/core/test_rotary_embedding_mla_cache_fused.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/benchmarks/test_bench_startup.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import subprocess
import pytest
@pytest.mark.benchmark
def test_bench_startup():
    """Smoke-test that `vllm bench startup` runs and exits successfully."""
    completed = subprocess.run(
        ["vllm", "bench", "startup"],
        capture_output=True,
        text=True,
    )
    print(completed.stdout)
    print(completed.stderr)
    assert completed.returncode == 0, f"Benchmark failed: {completed.stderr}"
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/benchmarks/test_bench_startup.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_inspection.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Model inspection utilities for vLLM."""
import torch.nn as nn
def _get_module_info(module: nn.Module) -> str:
"""Get info string for a module."""
class_name = type(module).__name__
parts = []
# Add quant_method if present
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_name = type(quant_method).__name__
# For CompressedTensors, show the underlying scheme instead
scheme = getattr(module, "scheme", None)
if scheme is not None:
quant_name = type(scheme).__name__
# Skip unquantized methods
if "Unquantized" not in quant_name:
parts.append(f"quant={quant_name}")
# If module has extra_repr, use it
if hasattr(module, "extra_repr"):
parts.append(module.extra_repr().replace("\n", ""))
if parts:
return f"{class_name}({', '.join(parts)})"
# For unknown modules, use the default PyTorch repr
return str(module)
def _get_child_signature(child: nn.Module) -> str:
    """Build a structural fingerprint of *child* used to detect duplicate layers."""
    return "\n".join(
        f"{name}:{_get_module_info(submodule)}"
        for name, submodule in child.named_modules()
    )
def _format_index_ranges(indices: list[int]) -> str:
"""Format indices into range notation (e.g., [0,1,2,4,5,6] -> '0-2, 4-6')."""
indices = sorted(indices)
ranges = []
start = end = indices[0]
for idx in indices[1:]:
if idx == end + 1:
end = idx
else:
ranges.append(str(start) if start == end else f"{start}-{end}")
start = end = idx
ranges.append(str(start) if start == end else f"{start}-{end}")
return ", ".join(ranges)
def _format_module_tree(
    module: nn.Module,
    name: str = "",
    indent: int = 0,
) -> list[str]:
    """Format a module tree with indentation, grouping identical layers.

    Returns the formatted lines (one string per output line).

    Produces output like:
        (layers): ModuleList(
          (0-27, 29-47): 47 x LlamaDecoderLayer(
            ...
          )
          (28, 48): 2 x DifferentDecoderLayer(
            ...
          )
        )
    """
    lines = []
    prefix = "  " * indent
    children = list(module.named_children())
    # Leaf node - just output the module info
    if not children:
        info = _get_module_info(module)
        lines.append(f"{prefix}({name}): {info}" if name else f"{prefix}{info}")
        return lines
    # Non-leaf node - output opening line and recurse into children
    info = _get_module_info(module)
    lines.append(f"{prefix}({name}): {info}(" if name else f"{prefix}{info}(")
    # Separate numbered children (e.g., "0", "1") from named ones (e.g., "norm")
    numbered: list[tuple[int, nn.Module]] = []
    non_numbered: list[tuple[str, nn.Module]] = []
    for child_name, child_module in children:
        try:
            numbered.append((int(child_name), child_module))
        except ValueError:
            non_numbered.append((child_name, child_module))
    # Group numbered children by structure signature to collapse identical layers
    # e.g., layers 0-27 and 29-47 with same structure become "(0-27, 29-47): 47 x"
    if numbered:
        sig_to_group: dict[str, list[tuple[int, nn.Module]]] = {}
        for idx, child_module in numbered:
            sig = _get_child_signature(child_module)
            sig_to_group.setdefault(sig, []).append((idx, child_module))
        # Output groups sorted by first index
        for group in sorted(sig_to_group.values(), key=lambda g: g[0][0]):
            indices = [idx for idx, _ in group]
            # All members share the same structure, so format only one of them.
            representative = group[0][1]
            child_lines = _format_module_tree(representative, "", indent + 1)
            first_line = child_lines[0].lstrip()
            child_prefix = "  " * (indent + 1)
            if len(indices) > 1:
                range_str = _format_index_ranges(indices)
                child_lines[0] = (
                    f"{child_prefix}({range_str}): {len(indices)} x {first_line}"
                )
            else:
                child_lines[0] = f"{child_prefix}({indices[0]}): {first_line}"
            lines.extend(child_lines)
    # Output non-numbered children (e.g., "embed_tokens", "norm")
    for child_name, child_module in non_numbered:
        lines.extend(_format_module_tree(child_module, child_name, indent + 1))
    lines.append(f"{prefix})")
    return lines
def format_model_inspection(model: nn.Module) -> str:
    """Render *model* as a transformers-style hierarchical summary string."""
    tree_lines = _format_module_tree(model)
    return "\n".join(tree_lines)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_inspection.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/disaggregated_serving/moriio_toy_proxy_server.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import copy
import logging
import os
import socket
import threading
import uuid
import aiohttp
import msgpack
import regex as re
import zmq
from quart import Quart, make_response, request
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Registries of prefill/decode instances, populated by the ZMQ listener thread.
prefill_instances: list[dict] = []
decode_instances: list[dict] = []
# Monotonic request counter used for round-robin instance selection.
request_nums = 0
app = Quart(__name__)
# Extracts (ip, port) from an http://ip:port/... address.
IP_PORT_PATTERN = re.compile(r"//(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d+)")
# KV transfer mode shared by all registered instances; set on first registration.
TRANSFER_TYPE = None
def _append_whole_dict_unique(target_list, data_dict):
    """Register *data_dict* in *target_list* unless an equivalent entry exists.

    Entries are compared ignoring their "index" key. The first registration
    fixes the global TRANSFER_TYPE; later registrations must match it.

    Returns:
        True when the entry was appended, False when it was a duplicate.

    Raises:
        ValueError: on a transfer-mode mismatch (before mutating the list).
    """
    new_filtered = {k: v for k, v in data_dict.items() if k != "index"}
    for existed in target_list:
        existed_filtered = {k: v for k, v in existed.items() if k != "index"}
        if existed_filtered == new_filtered:
            return False
    # Validate the transfer mode BEFORE appending so a mismatch does not
    # leave a half-registered instance behind (the original appended first).
    transfer_mode = data_dict.get("transfer_mode", "unknown")
    global TRANSFER_TYPE
    if TRANSFER_TYPE is None:
        TRANSFER_TYPE = transfer_mode
        logger.info("SET TRANSFER TYPE TO %s", TRANSFER_TYPE)
    elif transfer_mode != TRANSFER_TYPE:
        raise ValueError(f"mismatched transfer mode {TRANSFER_TYPE} vs {transfer_mode}")
    logger.info("Registering new instance: %s", data_dict)
    target_list.append(data_dict)
    return True


# Guards the instance registries, shared between the ZMQ listener thread and
# the request handlers.
_list_lock = threading.RLock()
def _listen_for_register(hostname, port):
    """Blocking loop: accept registration messages from P/D instances over ZMQ.

    Runs on a daemon thread (see start_service_discovery) and appends newly
    seen prefill/decode instances to the module-level registries.
    """
    context = zmq.Context()
    router_socket = context.socket(zmq.ROUTER)
    router_socket.bind(f"tcp://{hostname}:{port}")
    poller = zmq.Poller()
    poller.register(router_socket, zmq.POLLIN)
    global prefill_instances
    global decode_instances
    while True:
        socks = dict(poller.poll())
        if router_socket in socks:
            remote_addr, msg = router_socket.recv_multipart()
            data = msgpack.loads(msg)
            if data["type"] == "HELLO":
                # Liveness ping; nothing to register.
                pass
            elif (
                data["type"] == "register"
                and data["role"] == "P"
                # NOTE(review): this compares a string against a list of
                # dicts, so it is always True; actual deduplication happens
                # inside _append_whole_dict_unique.
                and data["request_address"] not in prefill_instances
            ):
                with _list_lock:
                    _append_whole_dict_unique(prefill_instances, data)
            elif (
                data["type"] == "register"
                and data["role"] == "D"
                # NOTE(review): same always-True check as above.
                and data["request_address"] not in decode_instances
            ):
                with _list_lock:
                    _append_whole_dict_unique(decode_instances, data)
def start_service_discovery(hostname, port):
    """Spawn the daemon thread that listens for instance registrations.

    Falls back to the local hostname when none is given; a zero port is
    rejected. Returns the started listener thread.
    """
    if port == 0:
        raise ValueError("Port cannot be 0")
    listener = threading.Thread(
        target=_listen_for_register,
        args=(hostname or socket.gethostname(), port),
        daemon=True,
    )
    listener.start()
    return listener
async def send_request_to_prefill(
    endpoint, req_data, request_id, d_endpoint, dip, dport, selected_prefill_dp_rank
):
    """Send the prefill leg of a disaggregated request and await its result.

    Rewrites kv_transfer_params so the prefill instance targets the chosen
    decode instance, and forces a single-token generation since the decode
    instance produces the actual completion.

    Raises:
        RuntimeError: when the prefill instance responds with a non-200 status.
    """
    # NOTE(review): despite the name, this aliases (and mutates) the caller's
    # dict; the only caller passes a deepcopy, so this is currently safe.
    req_data_copy = req_data
    req_data_copy["kv_transfer_params"].update(
        {
            "do_remote_decode": True,
            "do_remote_prefill": False,
            "remote_handshake_port": d_endpoint["handshake_port"],
            "remote_notify_port": d_endpoint["notify_port"],
            "remote_engine_id": None,
            "remote_block_ids": None,
            "remote_host": dip,
            "remote_port": dport,
        }
    )
    # Prefill only needs to produce one token; decode streams the rest.
    req_data_copy["stream"] = False
    req_data_copy["max_tokens"] = 1
    if "max_completion_tokens" in req_data_copy:
        req_data_copy["max_completion_tokens"] = 1
    if "stream_options" in req_data_copy:
        del req_data_copy["stream_options"]
    async with aiohttp.ClientSession(
        timeout=aiohttp.ClientTimeout(total=6 * 6000 * 6000)
    ) as session:
        headers = {
            "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
            "X-Request-Id": request_id,
        }
        if selected_prefill_dp_rank is not None:
            headers["X-data-parallel-rank"] = str(selected_prefill_dp_rank)
        async with session.post(
            url=endpoint, json=req_data_copy, headers=headers
        ) as response:
            if response.status == 200:
                return await response.json()
            # Fix: the original passed two positional args to RuntimeError,
            # producing a garbled tuple-style error message.
            raise RuntimeError(
                f"send_request_to_prefill failed: response.status = {response.status}"
            )
async def start_decode_request(endpoint, req_data, request_id):
    """Open a streaming decode request; the caller owns both session and response."""
    headers = {
        "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
        "X-Request-Id": request_id,
    }
    timeout = aiohttp.ClientTimeout(total=6 * 6000 * 6000)
    session = aiohttp.ClientSession(timeout=timeout)
    response = await session.post(url=endpoint, json=req_data, headers=headers)
    return session, response
async def stream_decode_response(session, response, request_id):
    """Yield decode-response chunks, closing the session when done."""
    try:
        if response.status != 200:
            raise RuntimeError(
                f"decode response.status != 200, status = {response.status}"
            )
        async for chunk_bytes in response.content.iter_chunked(1024):
            yield chunk_bytes
    finally:
        # Always release the session the decode request opened.
        await session.close()
def example_round_robin_dp_loader(request_number, dp_size):
    """Pick a data-parallel rank by round-robin over the request number.

    Fix: the original ignored its ``request_number`` parameter and read the
    module-level ``request_nums`` counter instead, defeating the caller's
    explicit choice of counter value.
    """
    return request_number % dp_size
@app.route("/v1/completions", methods=["POST"])
@app.route("/v1/chat/completions", methods=["POST"])
async def handle_request():
try:
with _list_lock:
global request_nums
request_nums += 1
def extract_ip_port_fast(url):
match = IP_PORT_PATTERN.search(url)
if not match:
raise ValueError(f"Invalid URL format: {url}")
return match.groups()
req_data = await request.get_json()
request_id = str(uuid.uuid4())
prefill_instance_endpoint = None
decode_instance_endpoint = None
error_msg = (
"Service Unavailable: No prefill or decode instances are registered."
)
if not prefill_instances or not decode_instances:
return await make_response(
(
error_msg,
503,
)
)
pid = request_nums % len(prefill_instances)
did = request_nums % len(decode_instances)
prefill_instance_endpoint = prefill_instances[pid]
decode_instance_endpoint = decode_instances[did]
selected_prefill_dp_rank = None
if prefill_instance_endpoint["dp_size"] > 1:
selected_prefill_dp_rank = example_round_robin_dp_loader(
request_nums // len(prefill_instance_endpoint),
prefill_instance_endpoint["dp_size"],
)
dip, dport = extract_ip_port_fast(decode_instance_endpoint["request_address"])
req_data_to_prefill = copy.deepcopy(req_data)
req_data_to_prefill["kv_transfer_params"] = {}
req_data["kv_transfer_params"] = {}
req_data_to_prefill["kv_transfer_params"]["remote_dp_size"] = (
decode_instance_endpoint["dp_size"]
)
req_data_to_prefill["kv_transfer_params"]["remote_tp_size"] = (
decode_instance_endpoint["tp_size"]
)
send_prefill_task = asyncio.create_task(
send_request_to_prefill(
prefill_instance_endpoint["request_address"],
req_data_to_prefill,
request_id,
decode_instance_endpoint,
dip,
dport,
selected_prefill_dp_rank,
)
)
ip, port = extract_ip_port_fast(prefill_instance_endpoint["request_address"])
req_data["max_tokens"] -= 1
req_data["kv_transfer_params"] = {
"do_remote_decode": False,
"do_remote_prefill": True,
"remote_handshake_port": prefill_instance_endpoint["handshake_port"],
"remote_notify_port": prefill_instance_endpoint["notify_port"],
"remote_engine_id": None,
"remote_block_ids": None,
"remote_host": ip,
"remote_port": port,
}
if TRANSFER_TYPE == "READ":
# In read mode, prefill and decode are executed serially.
prefill_response = await send_prefill_task
req_data["kv_transfer_params"]["remote_engine_id"] = prefill_response[
"kv_transfer_params"
]["remote_engine_id"]
req_data["kv_transfer_params"]["remote_block_ids"] = prefill_response[
"kv_transfer_params"
]["remote_block_ids"]
req_data["kv_transfer_params"]["remote_dp_size"] = prefill_instance_endpoint[
"dp_size"
]
req_data["kv_transfer_params"]["remote_tp_size"] = prefill_instance_endpoint[
"tp_size"
]
if selected_prefill_dp_rank is not None:
req_data["kv_transfer_params"]["remote_dp_rank"] = selected_prefill_dp_rank
decode_request_task = asyncio.create_task(
start_decode_request(
decode_instance_endpoint["request_address"], req_data, request_id
)
)
session, decode_response = await decode_request_task
stream_generator = stream_decode_response(session, decode_response, request_id)
response = await make_response(stream_generator)
return response
except Exception as e:
logger.exception("An error occurred while handling the request: %s", e)
return await make_response(
(
f"Internal Server Error: {e!s}",
500,
)
)
# Script entry point: start the registration listener, then serve the proxy.
if __name__ == "__main__":
    t = start_service_discovery("0.0.0.0", 36367)
    app.debug = True
    # Generous timeouts: disaggregated requests can be long-running.
    app.config["BODY_TIMEOUT"] = 360000
    app.config["RESPONSE_TIMEOUT"] = 360000
    app.run(host="0.0.0.0", port=10001)
    t.join()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/disaggregated_serving/moriio_toy_proxy_server.py",
"license": "Apache License 2.0",
"lines": 252,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/kv_connector/unit/test_moriio_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import importlib.util
import os
import subprocess
from unittest.mock import MagicMock, patch
import msgspec
import pytest
import torch
import zmq
from tests.conftest import _find_free_port
from vllm.config import (
CacheConfig,
DeviceConfig,
KVTransferConfig,
ModelConfig,
SchedulerConfig,
VllmConfig,
set_current_vllm_config,
)
from vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_common import (
MoRIIOAgentMetadata,
MoRIIOConnectorMetadata,
MoRIIOConstants,
zmq_ctx,
)
from vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_connector import (
KVConnectorRole,
MoRIIOConnector,
MoRIIOConnectorWorker,
)
from vllm.platforms import current_platform
from vllm.utils.network_utils import (
get_ip,
make_zmq_path,
)
from .utils import create_request, create_scheduler
# Optional-dependency probes: aiter (ROCm attention backend) and mori
# (MoRIIO transport) gate which tests in this module can run.
aiter_available = importlib.util.find_spec("aiter") is not None
mori_available = importlib.util.find_spec("mori") is not None
def _rdma_available() -> bool:
"""Check if RDMA devices are available."""
try:
result = subprocess.run(["ibv_devinfo"], capture_output=True, text=True)
return "No IB devices found" not in result.stderr
except FileNotFoundError:
return False
# Computed once at import time so skip conditions are stable for the module.
rdma_available = _rdma_available()
# Module-wide skip: these tests exercise MoRIIO, which requires ROCm plus
# the `mori` package. (The previous reason string incorrectly referenced
# the aiter package, which is only needed by a subset of tests below.)
pytestmark = pytest.mark.skipif(
    not (current_platform.is_rocm() and mori_available),
    reason="MoRIIO tests require ROCm with the mori package installed",
)
@pytest.fixture
def mock_parallel_groups():
    """Mock tensor/data parallel group functions for single-rank tests."""
    # Fake group object served as both the world group and the TP group.
    mock_group = MagicMock()
    mock_group.rank = 0
    mock_group.local_rank = 0
    mock_group.world_size = 1
    with (
        patch.multiple(
            "vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_common",
            get_tensor_model_parallel_rank=MagicMock(return_value=0),
            # NOTE(review): world size is mocked as 0 here while
            # mock_group.world_size is 1 — confirm 0 is intentional.
            get_tensor_model_parallel_world_size=MagicMock(return_value=0),
        ),
        patch.multiple(
            "vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_connector",
            get_tensor_model_parallel_world_size=MagicMock(return_value=0),
            get_world_group=MagicMock(return_value=mock_group),
            get_tp_group=MagicMock(return_value=mock_group),
        ),
    ):
        yield mock_group
def _setup_kv_transfer_request(request, remote_host="127.0.0.1", fake_port=4789):
"""Setup KV transfer parameters for a request."""
request.kv_transfer_params.update(
{
"remote_notify_port": fake_port,
"remote_block_ids": None,
"remote_host": remote_host,
"remote_port": fake_port,
"remote_handshake_port": fake_port,
"remote_engine_id": "test_engine",
}
)
return request
class FakeMorIIOWrapper:
    """No-op stand-in for MoRIIOWrapper used in unit tests.

    Mirrors the real wrapper's surface; every method accepts the real
    arguments, does nothing, and returns None.
    """

    def __init__(self, *args, **kwargs):
        """Accept and ignore any constructor arguments."""

    def set_moriio_engine(self, moriio_engine):
        """Ignore the engine handle."""

    def set_backend_type(self, backend_type):
        """Ignore the backend type."""

    def get_agent_metadata(self):
        """Return no metadata."""

    def register_remote_engine(self, remote_packed_engine_metadata):
        """Ignore remote engine registration."""

    def register_local_tensor(self, tensor: torch.Tensor):
        """Ignore local tensor registration."""

    def get_unpack_memory_metadata(self, packed_memory_metadata):
        """Return no unpacked metadata."""

    def build_session(self, local_memory_metadata, remote_memory_metadata):
        """Do not build any session."""

    def read_remote_data(
        self, transfer_size_byte, local_offset=0, remote_offset=0, session=None
    ):
        """Pretend to read; transfers nothing."""

    def write_remote_data(
        self, transfer_size_byte, local_offset=0, remote_offset=0, session=None
    ):
        """Pretend to write; transfers nothing."""

    def write_remote_data_single(
        self, transfer_size_byte, local_offset=0, remote_offset=0, sess_idx=0
    ):
        """Pretend to write one session; transfers nothing."""

    def waiting_for_transfer_complete(self):
        """Return immediately; there is never a pending transfer."""

    def async_wait_reqid(self):
        """No background waiter is started."""

    def _handle_message(self, msg: bytes):
        """Drop raw messages."""

    def _handle_structured_message(self, data: dict):
        """Drop structured messages."""

    def _handle_completion_message(self, msg: str):
        """Drop completion messages."""

    def send_notify(self, req_ids, remote_ip, remote_port):
        """Send nothing."""

    def pop_finished_req_ids(self):
        """Report no finished receives."""

    def pop_finished_write_req_ids(self):
        """Report no finished writes."""

    def shutdown(self):
        """Nothing to shut down."""
class FakeMorIIOConnectorWorker(MoRIIOConnectorWorker):
    """Worker subclass for tests that swallows timing/layout kwargs."""

    # Define a fake remote engine id for testing
    REMOTE_ENGINE_ID = "remote_engine"

    def __init__(
        self, *args, hand_shake_latency: float = 1.8, kv_cache_layout="HND", **kwargs
    ):
        # hand_shake_latency and kv_cache_layout are accepted for caller
        # convenience but intentionally NOT forwarded to the real worker.
        super().__init__(*args, **kwargs)
def create_vllm_config(
    model: str = "facebook/opt-125m",
    max_num_seqs: int = 16,
    max_num_batched_tokens: int = 64,
    block_size: int = 16,
    max_model_len: int = 10000,
    enable_chunked_prefill: bool = True,
    enable_permute_local_kv: bool = False,
    role="kv_consumer",
) -> VllmConfig:
    """Build a VllmConfig wired up with a MoRIIOConnector for unit tests."""
    model_cfg = ModelConfig(
        model=model,
        trust_remote_code=True,
        dtype="bfloat16",
        seed=42,
    )
    sched_cfg = SchedulerConfig(
        max_num_seqs=max_num_seqs,
        max_num_batched_tokens=max_num_batched_tokens,
        max_model_len=max_model_len,
        enable_chunked_prefill=enable_chunked_prefill,
        is_encoder_decoder=False,
    )
    # Prefix caching (APC) is always on for these tests.
    cache_cfg = CacheConfig(
        block_size=block_size,
        gpu_memory_utilization=0.9,
        swap_space=0,
        cache_dtype="auto",
        enable_prefix_caching=True,
    )
    kvt_cfg = KVTransferConfig(
        kv_connector="MoRIIOConnector",
        kv_role=role,
        enable_permute_local_kv=enable_permute_local_kv,
    )
    return VllmConfig(
        scheduler_config=sched_cfg,
        model_config=model_cfg,
        cache_config=cache_cfg,
        kv_transfer_config=kvt_cfg,
        device_config=DeviceConfig("cpu"),
    )
@pytest.fixture
def moriio_read_mode():
    """Force the connector into read mode via env for tests.

    The env var is removed in a ``finally`` block so a failing test body
    (which raises through the ``yield``) cannot leak read mode into
    later tests — the previous version skipped cleanup on failure.
    """
    os.environ["VLLM_MORIIO_CONNECTOR_READ_MODE"] = "True"
    try:
        yield
    finally:
        # Cleanup after test, even when the test raised.
        os.environ.pop("VLLM_MORIIO_CONNECTOR_READ_MODE", None)
def test_write_mode_saves_local_block_ids():
    """Write mode records local block ids in MoRIIOConnectorMetadata.reqs_to_save."""
    # Setup Scheduler and Request
    vllm_config = create_vllm_config(role="kv_producer")
    scheduler = create_scheduler(vllm_config)
    # 2 Full Blocks and 1 Half Block.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_decode=True,
        do_remote_prefill=False,
    )
    request_id = request.request_id
    scheduler.add_request(request)
    # Fake Config: point the request at a dummy remote endpoint.
    request = _setup_kv_transfer_request(request)
    # Remote Prefill, triggers MoRIIOConnectorMetadata.
    scheduler_output = scheduler.schedule()
    kv_connector_metadata = scheduler_output.kv_connector_metadata
    assert kv_connector_metadata is not None, "kv_connector_metadata is None"
    assert isinstance(kv_connector_metadata, MoRIIOConnectorMetadata)
    # Producer in write mode: exactly one request queued for saving,
    # nothing to receive or send yet.
    assert len(kv_connector_metadata.reqs_to_save) == 1, (
        "Unexpected number of reqs_to_save"
    )
    assert len(kv_connector_metadata.reqs_to_recv) == 0, (
        "Unexpected number of reqs_to_recv"
    )
    assert len(kv_connector_metadata.reqs_to_send) == 0, (
        "Unexpected number of reqs_to_send"
    )
    assert request_id in kv_connector_metadata.reqs_to_save, (
        "Request ID not in reqs_to_save"
    )
    req_meta = kv_connector_metadata.reqs_to_save[request_id]
    # Saved ids must match the scheduler's allocated blocks, in order.
    for block_id, block in zip(
        req_meta.local_block_ids,
        scheduler.kv_cache_manager.coordinator.single_type_managers[0].req_to_blocks[
            request_id
        ],
    ):
        assert block_id == block.block_id, f"{block_id} != {block.block_id}"
def test_write_mode_with_chunked_prefill_saves_local_block_ids():
    """Write mode with chunked prefill still records correct local block ids.

    The prompt spans 2.5 batches, so three schedule() calls run; the
    request must appear in reqs_to_save only on the final chunk.
    """
    # Setup Scheduler and Request
    MAX_NUM_BATCHED_TOKENS = 64
    NUM_TOKENS = MAX_NUM_BATCHED_TOKENS * 2 + MAX_NUM_BATCHED_TOKENS // 2
    vllm_config = create_vllm_config(
        max_num_batched_tokens=MAX_NUM_BATCHED_TOKENS, role="kv_producer"
    )
    BLOCK_SIZE = vllm_config.cache_config.block_size
    scheduler = create_scheduler(vllm_config)
    # 2 Full Blocks and 1 Half Block.
    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_decode=True,
        do_remote_prefill=False,
    )
    request_id = request.request_id
    scheduler.add_request(request)
    # Fake Config: point the request at a dummy remote endpoint.
    request = _setup_kv_transfer_request(request)
    # Remote Prefill with chunked prefill, triggers multiple schedules.
    # Expected (save, recv, send) counts after each schedule step.
    # (The previous loop used enumerate() and discarded the index.)
    expected_counts = [(0, 0, 0), (0, 0, 0), (1, 0, 0)]
    kv_connector_metadata = None
    for expected_save, expected_recv, expected_send in expected_counts:
        scheduler_output = scheduler.schedule()
        kv_connector_metadata = scheduler_output.kv_connector_metadata
        assert len(kv_connector_metadata.reqs_to_save) == expected_save
        assert len(kv_connector_metadata.reqs_to_recv) == expected_recv
        assert len(kv_connector_metadata.reqs_to_send) == expected_send
    assert kv_connector_metadata is not None, "kv_connector_metadata is None"
    assert request_id in kv_connector_metadata.reqs_to_save, (
        "Request ID not in reqs_to_save"
    )
    req_meta = kv_connector_metadata.reqs_to_save[request_id]
    # Saved ids must match the scheduler's allocated blocks, in order.
    for block_id, block in zip(
        req_meta.local_block_ids,
        scheduler.kv_cache_manager.coordinator.single_type_managers[0].req_to_blocks[
            request_id
        ],
    ):
        assert block_id == block.block_id, f"{block_id} != {block.block_id}"
def test_read_mode_loads_remote_block_ids(moriio_read_mode):
    """Read mode loads remote block ids into local cache mapping."""
    # Setup Scheduler and Request
    vllm_config = create_vllm_config(role="kv_consumer")
    scheduler = create_scheduler(vllm_config)
    # 2 Full Blocks and 1 Half Block.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_decode=False,
        do_remote_prefill=True,
    )
    request_id = request.request_id
    scheduler.add_request(request)
    block_list = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_id]
    request = _setup_kv_transfer_request(request)
    # Set remote block ids to be fetched.
    # NOTE(review): block_list holds KVCacheBlock objects, not int ids —
    # the scheduler path below only compares lengths, but confirm whether
    # [b.block_id for b in block_list] was intended here.
    request.kv_transfer_params["remote_block_ids"] = block_list
    # Remote Prefill, triggers MorIIOConnectorMetadata.
    scheduler_output = scheduler.schedule()
    kv_connector_metadata = scheduler_output.kv_connector_metadata
    assert kv_connector_metadata is not None, "kv_connector_metadata is None"
    assert isinstance(kv_connector_metadata, MoRIIOConnectorMetadata), (
        "kv_connector_metadata is not MoRIIOConnectorMetadata"
    )
    # Consumer in read mode: exactly one request queued for receiving.
    assert len(kv_connector_metadata.reqs_to_save) == 0, (
        "Unexpected number of reqs_to_save"
    )
    assert len(kv_connector_metadata.reqs_to_recv) == 1, (
        "Unexpected number of reqs_to_recv"
    )
    assert len(kv_connector_metadata.reqs_to_send) == 0, (
        "Unexpected number of reqs_to_send"
    )
    assert request_id in kv_connector_metadata.reqs_to_recv, (
        "Request ID not in reqs_to_recv"
    )
    req_meta = kv_connector_metadata.reqs_to_recv[request_id]
    # Local ids must line up with the scheduler's allocated blocks.
    for block_id, block in zip(
        req_meta.local_block_ids,
        scheduler.kv_cache_manager.coordinator.single_type_managers[0].req_to_blocks[
            request_id
        ],
    ):
        assert block_id == block.block_id, f"{block_id} != {block.block_id}"
@pytest.mark.skipif(
    not aiter_available, reason="Requires aiter package for ROCm FlashAttention backend"
)
@pytest.mark.skipif(not rdma_available, reason="No RDMA devices available")
def test_register_kv_caches(mock_parallel_groups):
    """Test that MoRIIOConnector.register_kv_caches correctly registers kv caches."""
    ROLE = "kv_consumer"
    IP = get_ip()
    vllm_config = create_vllm_config(role=ROLE)
    DEFAULT_PORT = 6301
    TP_RANK = 0
    DP_RANK = 0
    # Imported lazily: the rocm backend import fails on non-ROCm hosts.
    from vllm.v1.attention.backends.rocm_aiter_fa import AiterFlashAttentionBackend

    backend_cls = AiterFlashAttentionBackend
    # Create test kv cache tensors using proper backend shape
    kv_cache_shape = backend_cls.get_kv_cache_shape(
        num_blocks=2, block_size=16, num_kv_heads=4, head_size=64
    )
    shared_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
    unique_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
    # layer0 and layer2 deliberately alias one tensor so registration of
    # shared storage is exercised.
    kv_caches = {
        "layer0": shared_tensor,
        "layer1": unique_tensor,
        "layer2": shared_tensor,
    }
    # Patch out the background handshake thread/event machinery.
    with (
        patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_connector.threading.Event"
        ),
        patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_connector.threading.Thread"
        ),
    ):
        # Create connector
        vllm_config.kv_transfer_config.kv_connector_extra_config.update(
            {
                "proxy_ip": "127.0.0.1",
                "proxy_ping_port": 12345,
                "http_port": 12346,
            }
        )
        with set_current_vllm_config(vllm_config):
            connector = MoRIIOConnector(vllm_config, KVConnectorRole.WORKER)
            connector.connector_worker = FakeMorIIOConnectorWorker(
                vllm_config, connector.engine_id, hand_shake_latency=0
            )
        from mori.io import (
            MemoryDesc,
        )

        # Execute register_kv_caches
        connector.register_kv_caches(kv_caches)
        # Verify that the MemoryDesc stored in layer_name_to_local_kv_cache_metadata
        # points at each layer's actual tensor storage.
        assert (
            shared_tensor.data_ptr()
            == MemoryDesc.unpack(
                connector.connector_worker.layer_name_to_local_kv_cache_metadata[
                    "layer0"
                ][0]
            ).data
        )
        assert (
            unique_tensor.data_ptr()
            == MemoryDesc.unpack(
                connector.connector_worker.layer_name_to_local_kv_cache_metadata[
                    "layer1"
                ][0]
            ).data
        )
        assert (
            shared_tensor.data_ptr()
            == MemoryDesc.unpack(
                connector.connector_worker.layer_name_to_local_kv_cache_metadata[
                    "layer2"
                ][0]
            ).data
        )
        # Verify engine keys
        expected_engine_key = f"{ROLE[3:]}:{IP}:{DEFAULT_PORT}:tp{TP_RANK}:dp{DP_RANK}"
        assert (
            MemoryDesc.unpack(
                connector.connector_worker.layer_name_to_local_kv_cache_metadata[
                    "layer0"
                ][0]
            ).engine_key
            == expected_engine_key
        )
@pytest.mark.skipif(
    not aiter_available, reason="Requires aiter package for ROCm FlashAttention backend"
)
@pytest.mark.skipif(not rdma_available, reason="No RDMA devices available")
def test_moriio_handshake_returns_metadata(mock_parallel_groups):
    """MoRIIO handshake socket returns valid agent metadata over ZMQ."""
    ROLE = "kv_consumer"
    vllm_config = create_vllm_config(role=ROLE)
    # Imported lazily: the rocm backend import fails on non-ROCm hosts.
    from vllm.v1.attention.backends.rocm_aiter_fa import AiterFlashAttentionBackend

    backend_cls = AiterFlashAttentionBackend
    # Create test kv cache tensors using proper backend shape
    kv_cache_shape = backend_cls.get_kv_cache_shape(
        num_blocks=2, block_size=16, num_kv_heads=4, head_size=64
    )
    shared_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
    unique_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
    kv_caches = {
        "layer0": shared_tensor,
        "layer1": unique_tensor,
        "layer2": shared_tensor,
    }
    # Swap the transport wrapper for a no-op fake; only the handshake
    # listener behavior is under test.
    with (
        patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_engine.MoRIIOWrapper",
            FakeMorIIOWrapper,
        ),
    ):
        handshake_port = _find_free_port()
        # Create connector
        vllm_config.kv_transfer_config.kv_connector_extra_config.update(
            {
                "proxy_ip": "127.0.0.1",
                "proxy_ping_port": 12345,
                "http_port": 12346,
                "handshake_port": handshake_port,
            }
        )
        with set_current_vllm_config(vllm_config):
            connector = MoRIIOConnector(vllm_config, KVConnectorRole.WORKER)
        # Execute register_kv_caches (also starts the handshake listener)
        connector.register_kv_caches(kv_caches)
        # Connect to handshake socket and request metadata
        path = make_zmq_path("tcp", "127.0.0.1", handshake_port)
        with zmq_ctx(zmq.DEALER, path) as sock:
            sock.send(MoRIIOConstants.GET_META_MSG)
            # ROUTER replies are multipart: [empty delimiter, payload].
            received_frame = sock.recv_multipart()
            if len(received_frame) != 2 or received_frame[0] != b"":
                raise ValueError(f"Unexpected frame! {received_frame = }")
            metadata_bytes = received_frame[1]
        decoder = msgspec.msgpack.Decoder(MoRIIOAgentMetadata)
        metadata = decoder.decode(metadata_bytes)
        assert isinstance(metadata, MoRIIOAgentMetadata), (
            "Decoded metadata is not MoRIIOAgentMetadata"
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/unit/test_moriio_connector.py",
"license": "Apache License 2.0",
"lines": 470,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_common.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib
import threading
import time
from collections.abc import Iterator
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
import msgspec
import torch
import zmq
from vllm import envs
from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
KVConnectorMetadata,
)
from vllm.distributed.parallel_state import (
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from vllm.logger import init_logger
from vllm.utils.network_utils import (
get_ip,
get_open_port,
make_zmq_socket,
)
if TYPE_CHECKING:
pass
from dataclasses import field
from enum import Enum
logger = init_logger(__name__)
# Type aliases used throughout the MoRIIO connector modules.
# NOTE(review): Transfer is not referenced in this file — presumably
# (handle, timestamp); confirm against the worker module before relying on it.
Transfer = tuple[int, float]
EngineId = str
ReqId = str
@dataclass
class WriteTask:
    """A queued write of one layer's KV blocks to a remote engine."""

    request_id: str
    dst_engine_id: str
    local_block_ids: list[int]
    # Destination block ids when already known; None until the remote
    # side reports its allocation.
    remote_block_ids_hint: list[int] | None
    layer_name: str
    # CUDA event associated with this write (completion of the layer's
    # KV data on-device — TODO confirm against writer usage).
    event: torch.cuda.Event
    remote_notify_port: int
    remote_ip: str
    # Enqueue timestamp (perf_counter) for queue-latency accounting.
    enqueue_time: float = field(default_factory=time.perf_counter)
    # Number of retry attempts made so far.
    retried: int = 0
@dataclass
class LayerTransferPlan:
    """Plan for transferring a single layer."""

    request_id: str
    layer_name: str
    # Index of the session to use for this transfer.
    sess_idx: int
    # Parallel lists: per-chunk local offsets, remote offsets, and sizes.
    transfer_local_offsets: list[int]
    transfer_remote_offsets: list[int]
    transfer_sizes: list[int]
    # Whether all chunks go out as one batched operation.
    use_batch: bool = True
@dataclass
class RemoteAllocInfo:
    """Information about remote block allocation."""

    block_ids: list[int]
    # Count of layer writes completed so far for this allocation.
    writes_done: int = 0
    decode_dp_rank: int = 0
    # Precomputed (local_offsets, remote_offsets, sizes), filled lazily.
    transfer_offset: tuple[list[int], list[int], list[int]] | None = None
class ROLE(Enum):
    """Which side of the KV transfer this process plays."""

    PRODUCER = "producer"  # prefill instance: source of KV blocks
    CONSUMER = "consumer"  # decode instance: sink of KV blocks
    NOTINIT = "notinit"  # role not yet configured
class MoRIIOAgentMetadata(
    msgspec.Struct,
    omit_defaults=True,  # type: ignore[call-arg]
    # required for @cached_property.
    dict=True,
):
    """Handshake payload describing one engine's registered KV memory."""

    engine_id: str
    # Opaque serialized descriptor of the mori engine.
    agent_metadata: bytes
    # Base addresses of the registered KV cache tensors.
    kv_caches_base_addr: list[int]
    num_blocks: int
    block_len: int
    attn_backend_name: str
class RoleManager:
    """Manages role state across the connector.

    Process-wide singleton recording whether this instance acts as KV
    producer (prefill) or consumer (decode).
    """

    _instance: "RoleManager | None" = None
    _lock = threading.Lock()

    def __init__(self) -> None:
        # Starts unset; set_role() is called once the kv_role is known.
        self._role: ROLE = ROLE.NOTINIT

    @classmethod
    def get_instance(cls) -> "RoleManager":
        # Double-checked locking; the unlocked first check is safe in
        # CPython thanks to the GIL.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = cls()
        return cls._instance

    def set_role(self, role: ROLE) -> None:
        """Set the current role."""
        with self._lock:
            self._role = role

    def get_role(self) -> ROLE:
        """Get the current role."""
        # NOTE(review): read is unlocked — a single reference read is
        # atomic in CPython, so this races only benignly with set_role.
        return self._role
def set_role(role: ROLE):
    """Record the process-wide MoRIIO role on the RoleManager singleton."""
    manager = RoleManager.get_instance()
    manager.set_role(role)
def get_role() -> ROLE:
    """Fetch the process-wide MoRIIO role from the RoleManager singleton."""
    manager = RoleManager.get_instance()
    return manager.get_role()
class MoRIIOMode(Enum):
    """Transfer direction: consumer pulls (READ) or producer pushes (WRITE)."""

    READ = "read"
    WRITE = "write"
class MoRIIOError(Exception):
    """Base exception for MoRIIO operations; all MoRIIO errors derive from it."""

    pass
class HandshakeError(MoRIIOError):
    """Exception raised when the engine-to-engine handshake fails."""

    pass
class TransferError(MoRIIOError):
    """Exception raised when a KV block transfer fails."""

    pass
def get_moriio_mode() -> MoRIIOMode:
    """Resolve the connector transfer mode from the environment flag."""
    read_mode = envs.VLLM_MORIIO_CONNECTOR_READ_MODE
    logger.debug("MoRIIO Connector read_mode: %s", read_mode)
    return MoRIIOMode.READ if read_mode else MoRIIOMode.WRITE
def get_port_offset(dp_rank: int, tp_rank: int, tp_size: int = 1) -> int:
    """Flatten (dp_rank, tp_rank) into a single port offset.

    Ranks are laid out dp-major: offset = dp_rank * tp_size + tp_rank.
    """
    dp_base = dp_rank * tp_size
    return dp_base + tp_rank
@dataclass
class MoRIIOConfig:
    """Resolved network/parallelism configuration for one MoRIIO instance."""

    local_ip: str
    local_kv_port: int
    proxy_ip: str
    local_ping_port: int
    proxy_ping_port: int
    http_port: int
    handshake_port: int
    notify_port: int
    tp_rank: int
    dp_rank: int
    dp_size: int
    tp_size: int

    @classmethod
    def from_vllm_config(cls, vllm_config: VllmConfig) -> "MoRIIOConfig":
        """Derive a MoRIIOConfig from the vLLM config's kv_connector_extra_config.

        Port Configuration:
        local_ping_port -> Outgoing heartbeat to proxy
        proxy_ping_port -> Remote proxy's heartbeat ingress port
        http_port -> Instance's HTTP service endpoint
        local_kv_port -> service port for mori engine
        notify_port -> For synchronizing stages between prefill and decode
        handshake_port -> For initial handshake between mori engine
        TODO : merge notify_port and handshake_port to simplify port management
        supports non-contiguous ports
        """
        assert vllm_config.kv_transfer_config is not None, (
            "kv_transfer_config must be set for MoRIIOConnector"
        )
        kv_transfer_config = vllm_config.kv_transfer_config
        extra_config = kv_transfer_config.kv_connector_extra_config
        tp_rank = get_tensor_model_parallel_rank()
        dp_rank = vllm_config.parallel_config.data_parallel_rank
        base_notify_port = int(extra_config["notify_port"])
        dp_size = vllm_config.parallel_config.data_parallel_size
        tp_size = get_tensor_model_parallel_world_size()
        # NOTE(review): tp_size is not forwarded here, so the offset is
        # dp_rank + tp_rank (tp_size defaults to 1) — confirm intended.
        port_offset = get_port_offset(dp_rank, tp_rank)
        return cls(
            local_ip=get_ip(),
            local_kv_port=get_open_port(),
            proxy_ip=extra_config["proxy_ip"],
            local_ping_port=get_open_port(),
            proxy_ping_port=int(extra_config["proxy_ping_port"]),
            http_port=int(extra_config["http_port"]),
            handshake_port=int(extra_config["handshake_port"]),
            notify_port=base_notify_port + port_offset,
            tp_rank=tp_rank,
            dp_rank=dp_rank,
            dp_size=dp_size,
            tp_size=tp_size,
        )
class MoRIIOConstants:
    """Constants for MoRIIO connector."""

    # ZMQ message types
    GET_META_MSG = b"get_meta_msg"  # request peer agent metadata
    POP_DONE_RECV = b"pop_done_recv"  # signal a completed receive
    OVER = b"OVER"  # end-of-stream marker
    # Prefix identifying completion messages.
    COMPLETION_PREFIX = "cmpl"
    # Proxy heartbeat cadence (seconds) and retry budget.
    PING_INTERVAL = 5
    MAX_PING_RETRIES = 100
    # Fallback ports when the deployment config omits them.
    DEFAULT_HANDSHAKE_PORT = "6301"
    DEFAULT_NOTIFY_PORT = "61005"
    # Seconds before an unclaimed read-mode request is abandoned.
    VLLM_MORI_READ_ABORT_REQUEST_TIMEOUT = 3600
@dataclass
class ReqMeta:
    """Metadata for a single request."""

    # KV block ids on this instance.
    local_block_ids: list[int]
    # KV block ids on the remote peer.
    remote_block_ids: list[int]
    remote_host: str
    remote_port: int
    remote_handshake_port: int
    remote_notify_port: int
    remote_engine_id: str
    # Remote tensor-parallel size (defaults to 1 when absent from params).
    tp_size: int
    # Remote data-parallel size (defaults to 1 when absent from params).
    remote_dp_size: int
class MoRIIOConnectorMetadata(KVConnectorMetadata):
    """Per-step connector metadata passed from the scheduler to the worker.

    Groups requests by the action the worker must take:
      * ``reqs_to_recv``: read-mode pulls (this instance fetches remote blocks).
      * ``reqs_to_save``: write-mode pushes (this instance writes blocks out).
      * ``reqs_to_send``: producer-side requests awaiting pickup, mapped to
        their expiration timestamps.
    """

    def __init__(self):
        self.reqs_to_recv: dict[ReqId, ReqMeta] = {}
        self.reqs_to_save: dict[ReqId, ReqMeta] = {}
        self.reqs_to_send: dict[ReqId, float] = {}

    def __repr__(self):
        # The previous implementation omitted reqs_to_save entirely and
        # nested the section prefixes into one garbled string; each group
        # now gets its own labelled segment.
        def _fmt_reqs(reqs: dict[ReqId, ReqMeta]) -> str:
            # One comma-separated run of debug fields per request.
            return "".join(
                f"{req_id = },{req_meta.local_block_ids = },"
                f"{req_meta.remote_host = },{req_meta.remote_port = },"
                f"{req_meta.remote_engine_id = },{req_meta.tp_size = }"
                for req_id, req_meta in reqs.items()
            )

        send_str = "".join(
            f"{req_id = },{expiry = }" for req_id, expiry in self.reqs_to_send.items()
        )
        return (
            f"MoRIIOConnectorMetadata:"
            f"reqs_to_recv:{_fmt_reqs(self.reqs_to_recv)},"
            f"reqs_to_save:{_fmt_reqs(self.reqs_to_save)},"
            f"reqs_to_send:{send_str},"
        )

    def add_new_req(
        self,
        request_id: ReqId,
        local_block_ids: list[int],
        kv_transfer_params: dict[str, Any],
        write_mode=False,
    ):
        """Queue a request for transfer.

        Args:
            request_id: id of the request to transfer.
            local_block_ids: KV block ids on this instance.
            kv_transfer_params: remote endpoint info; must contain
                remote_block_ids/engine_id/host/ports, and may contain
                tp_size and remote_dp_size (both default to 1).
            write_mode: True queues into ``reqs_to_save`` (producer push);
                False queues into ``reqs_to_recv`` (consumer pull).
        """
        _req = ReqMeta(
            local_block_ids=local_block_ids,
            remote_block_ids=kv_transfer_params["remote_block_ids"],
            remote_engine_id=kv_transfer_params["remote_engine_id"],
            remote_host=kv_transfer_params["remote_host"],
            remote_port=kv_transfer_params["remote_port"],
            remote_handshake_port=kv_transfer_params["remote_handshake_port"],
            remote_notify_port=kv_transfer_params["remote_notify_port"],
            tp_size=kv_transfer_params.get("tp_size", 1),
            remote_dp_size=kv_transfer_params.get("remote_dp_size", 1),
        )
        if write_mode:
            self.reqs_to_save[request_id] = _req
        else:
            self.reqs_to_recv[request_id] = _req
@contextlib.contextmanager
def zmq_ctx(socket_type: Any, addr: str) -> Iterator[zmq.Socket]:
    """Yield a ZMQ socket for *addr*, destroying its context on exit.

    Only ROUTER (which binds), REQ, and DEALER (which connect) sockets
    are supported.
    """
    if socket_type not in (zmq.ROUTER, zmq.REQ, zmq.DEALER):
        raise ValueError(f"Unexpected socket type: {socket_type}")
    ctx: zmq.Context | None = None
    try:
        ctx = zmq.Context()  # type: ignore[attr-defined]
        # ROUTER is the listening side; all other types dial out.
        should_bind = socket_type == zmq.ROUTER
        sock = make_zmq_socket(
            ctx=ctx, path=addr, socket_type=socket_type, bind=should_bind
        )
        yield sock
    finally:
        # linger=0 so undelivered messages never block teardown.
        if ctx is not None:
            ctx.destroy(linger=0)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_common.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import logging
import math
import queue
import threading
import time
from collections import defaultdict
from concurrent.futures import Future, ThreadPoolExecutor
from typing import TYPE_CHECKING, Any
import msgpack
import msgspec
import numpy as np
import torch
import zmq
from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
KVConnectorBase_V1,
KVConnectorMetadata,
KVConnectorRole,
)
from vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_common import (
ROLE,
EngineId,
HandshakeError,
MoRIIOAgentMetadata,
MoRIIOConfig,
MoRIIOConnectorMetadata,
MoRIIOConstants,
MoRIIOMode,
ReqId,
ReqMeta,
WriteTask,
get_moriio_mode,
get_port_offset,
get_role,
set_role,
zmq_ctx,
)
from vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_engine import (
MoRIIOWrapper,
MoRIIOWriter,
)
from vllm.distributed.parallel_state import (
get_tensor_model_parallel_world_size,
get_tp_group,
get_world_group,
)
from vllm.forward_context import ForwardContext
from vllm.logger import init_logger
from vllm.utils.network_utils import (
get_ip,
make_zmq_path,
make_zmq_socket,
)
from vllm.v1.attention.selector import get_attn_backend
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.request import RequestStatus
if TYPE_CHECKING:
from vllm.v1.attention.backend import AttentionMetadata
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.request import Request
logger = init_logger(__name__)
# Optional dependency: the MoRIIO transport comes from the `mori` package.
# Import failure is tolerated so this module can still be imported; callers
# check availability via is_moriio_available() / MoRIIO_enabled.
try:
    from mori.io import (
        BackendType,
        IOEngine,
        IOEngineConfig,
    )

    logger.info("MoRIIO is available")
    MoRIIO_enabled = True
except ImportError:
    logger.error("MoRIIO is not available")
    MoRIIO_enabled = False
def is_moriio_available() -> bool:
    """Report whether the optional ``mori.io`` package imported successfully."""
    return MoRIIO_enabled
class MoRIIOConnector(KVConnectorBase_V1):
    """KV connector moving KV-cache blocks between prefill (producer) and
    decode (consumer) instances over the MoRIIO transport.

    Each process hosts exactly one half, selected by ``role``: the
    scheduler half (``connector_scheduler``) or the worker half
    (``connector_worker``); the other attribute stays None and the public
    methods below delegate to whichever half exists.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        role: KVConnectorRole,
        kv_cache_config: "KVCacheConfig | None" = None,
    ):
        super().__init__(vllm_config, role)
        assert vllm_config.kv_transfer_config is not None, (
            "kv_transfer_config must be set for MoRIIOConnector"
        )
        self.kv_transfer_config = vllm_config.kv_transfer_config
        # Backfill handshake/notify ports if the deployment omitted them.
        self._set_port_defaults(vllm_config)
        # Engine id is "<ip>:<handshake_port>", unique per instance.
        self.engine_id = (
            str(get_ip())
            + ":"
            + str(self.kv_transfer_config.kv_connector_extra_config["handshake_port"])
        )
        self.mode = get_moriio_mode()
        if role == KVConnectorRole.SCHEDULER:
            self.connector_scheduler: MoRIIOConnectorScheduler | None = (
                MoRIIOConnectorScheduler(vllm_config, self.engine_id)
            )
            self.connector_worker: MoRIIOConnectorWorker | None = None
        elif role == KVConnectorRole.WORKER:
            self.connector_scheduler = None
            self.connector_worker = MoRIIOConnectorWorker(vllm_config, self.engine_id)
        logger.info(
            "Initialized MoRIIO Connector,engine_id:%s,role: %s",
            self.engine_id,
            role.value,
        )

    ############################################################
    # Scheduler Side Methods
    ############################################################
    def _set_port_defaults(self, vllm_config: VllmConfig):
        """Backfill default handshake/notify ports into the extra config."""
        assert vllm_config.kv_transfer_config is not None, (
            "kv_transfer_config must be set for MoRIIOConnector"
        )
        kv_transfer_config = vllm_config.kv_transfer_config
        extra_config = kv_transfer_config.kv_connector_extra_config
        if "handshake_port" not in extra_config or not extra_config["handshake_port"]:
            extra_config["handshake_port"] = MoRIIOConstants.DEFAULT_HANDSHAKE_PORT
        if "notify_port" not in extra_config or not extra_config["notify_port"]:
            extra_config["notify_port"] = MoRIIOConstants.DEFAULT_NOTIFY_PORT

    def get_num_new_matched_tokens(
        self, request: "Request", num_computed_tokens: int
    ) -> tuple[int, bool]:
        """Delegate to the scheduler half (see MoRIIOConnectorScheduler)."""
        assert self.connector_scheduler is not None
        return self.connector_scheduler.get_num_new_matched_tokens(
            request, num_computed_tokens
        )

    def update_state_after_alloc(
        self, request: "Request", blocks: "KVCacheBlocks", num_external_tokens: int
    ):
        """Record allocation results; also forwards the worker handle
        (None when this process hosts only the scheduler half)."""
        assert self.connector_scheduler is not None
        return self.connector_scheduler.update_state_after_alloc(
            request, blocks, num_external_tokens, self.connector_worker
        )

    def build_connector_meta(
        self,
        scheduler_output: SchedulerOutput,
    ) -> KVConnectorMetadata:
        """Build the per-step MoRIIOConnectorMetadata for the worker."""
        assert self.connector_scheduler is not None
        return self.connector_scheduler.build_connector_meta(scheduler_output)

    def request_finished(
        self,
        request: "Request",
        block_ids: list[int],
    ) -> tuple[bool, dict[str, Any] | None]:
        """Delegate request-finished bookkeeping to the scheduler half."""
        assert self.connector_scheduler is not None
        return self.connector_scheduler.request_finished(request, block_ids)

    ############################################################
    # Worker Side Methods
    ############################################################
    def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
        """Register the model's KV-cache tensors with the worker half."""
        assert self.connector_worker is not None
        self.connector_worker.register_kv_caches(kv_caches)

    def get_finished(self, finished_req_ids: set[str]) -> tuple[set[str], set[str]]:
        """Get the finished recving and sending requests."""
        # finished_req_ids is unused; the worker tracks completion itself.
        assert self.connector_worker is not None
        return self.connector_worker.get_finished()

    def start_load_kv(self, forward_context: "ForwardContext", **kwargs) -> None:
        """Kick off KV loading for the current step's connector metadata."""
        assert self.connector_worker is not None
        if self.mode == MoRIIOMode.WRITE and get_role() == ROLE.CONSUMER:
            # Write mode: the consumer only waits for producer notifications.
            self.connector_worker.moriio_wrapper.async_wait_reqid()
        assert isinstance(self._connector_metadata, MoRIIOConnectorMetadata)
        self.connector_worker.start_load_kv(self._connector_metadata)

    def wait_for_layer_load(self, layer_name: str) -> None:
        # Loads are awaited in bulk; nothing to do per layer.
        pass

    def save_kv_layer(
        self,
        layer_name: str,
        kv_layer: torch.Tensor,
        attn_metadata: "AttentionMetadata",
        **kwargs,
    ) -> None:
        """Producer side: push one layer's KV data to the remote peer."""
        # Only producer/prefill saves KV Cache
        if get_role() == ROLE.CONSUMER:
            return
        assert self.connector_worker is not None, (
            "save_kv_layer called on scheduler role"
        )
        assert isinstance(self._connector_metadata, MoRIIOConnectorMetadata), (
            "Connector metadata not initialized yet"
        )
        self.connector_worker.save_kv_layer(
            self._connector_metadata, layer_name, kv_layer, attn_metadata, **kwargs
        )
        return None

    def wait_for_save(self):
        # Saves complete asynchronously; completion surfaces via get_finished().
        pass

    def shutdown(self):
        """Shut down whichever half (worker/scheduler) this process hosts."""
        if self.connector_worker is not None:
            self.connector_worker.shutdown()
        if self.connector_scheduler is not None:
            self.connector_scheduler.shutdown()

    def has_connector_metadata(self) -> bool:
        """Check whether the connector metadata is currently set.

        Returns:
            bool: True if connector metadata exists, False otherwise.
        """
        try:
            return self._connector_metadata is not None
        except AttributeError:
            # Base class may not have created the attribute yet.
            return False
class MoRIIOConnectorScheduler:
"""Implementation of Scheduler side methods"""
    def __init__(self, vllm_config: VllmConfig, engine_id: str):
        """Initialize scheduler-side state for the MoRIIO connector.

        Reads ports and the kv_role from the kv_transfer_config and
        registers the process-wide role (producer vs consumer).
        """
        self.vllm_config = vllm_config
        assert vllm_config.kv_transfer_config is not None, (
            "kv_transfer_config must be set for MoRIIOConnector"
        )
        self.kv_transfer_config = vllm_config.kv_transfer_config
        self.block_size = vllm_config.cache_config.block_size
        self.engine_id: EngineId = engine_id
        self.mode = get_moriio_mode()
        self.host_ip = get_ip()
        self.handshake_port = self.kv_transfer_config.kv_connector_extra_config[
            "handshake_port"
        ]
        logger.info("Initializing MoRIIO Scheduler engine_id = %s", engine_id)
        self.side_notify_port = self.kv_transfer_config.kv_connector_extra_config[
            "notify_port"
        ]
        self.tp_size = self.vllm_config.parallel_config.tensor_parallel_size
        self.dp_rank = self.vllm_config.parallel_config.data_parallel_rank
        self.is_producer = self.kv_transfer_config.kv_role == "kv_producer"
        # Requests that need to start recv/send.
        # New requests are added by update_state_after_alloc in
        # the scheduler. Used to make metadata passed to Worker.
        self._reqs_need_recv: dict[ReqId, tuple[Request, list[int]]] = {}
        self._reqs_need_save: dict[ReqId, tuple[Request, list[int]]] = {}
        # For chunked prefill, we perform layer-wise access within the final chunk.
        # TODO: Perform transfer at end chunk.
        self._reqs_need_pending_save: dict[ReqId, tuple[Request, list[int]]] = {}
        # Publish this process's role for the module-level get_role() helper.
        if self.is_producer:
            set_role(ROLE.PRODUCER)
        else:
            set_role(ROLE.CONSUMER)
        # Reqs to send and their expiration time
        self._reqs_need_send: dict[ReqId, float] = {}
        # Cache of DEALER sockets keyed by tcp path (see send_notify_block).
        self.paths: dict[str, zmq.Socket] = {}
def get_num_new_matched_tokens(
self,
request: "Request",
num_computed_tokens: int,
) -> tuple[int, bool]:
"""
For remote prefill, pull all prompt blocks from remote
asynchronously relative to engine execution.
Args:
request (Request): the request object.
num_computed_tokens (int): the number of locally
computed tokens for this request
Returns:
* the number of tokens that can be loaded from the
external KV cache beyond what is already computed.
* true if the external KV cache tokens will be loaded
asynchronously (between scheduler steps).
"""
if self.is_producer:
return 0, False
token_ids = request.prompt_token_ids or []
if self.mode == MoRIIOMode.WRITE:
# MoriiO in write mode, no remote prefill
return len(token_ids) - num_computed_tokens, True
return len(token_ids) - 1 - num_computed_tokens, False
def send_notify_block(
self, req_id: str, block_notify_list: list[int], host=None, port=None
):
path = make_zmq_path("tcp", host, port)
if path not in self.paths:
ctx = zmq.Context.instance()
sock = make_zmq_socket(
ctx=ctx, path=path, socket_type=zmq.DEALER, bind=False
)
self.paths[path] = sock
data = {
"req_id": req_id,
"block_notify_list": block_notify_list or [],
"decode_rank": self.dp_rank,
"type": "remote_blocks",
}
serialized_data = msgpack.dumps(data)
self.paths[path].send(serialized_data)
def update_state_after_alloc(
self,
request: "Request",
blocks: "KVCacheBlocks",
num_external_tokens: int,
connector_worker: "MoRIIOConnectorWorker | None" = None,
):
params = request.kv_transfer_params
if not params:
return
if params.get("do_remote_decode"):
local_block_ids = blocks.get_block_ids()[0]
self._reqs_need_save[request.request_id] = (request, local_block_ids)
if params is not None and params.get("do_remote_prefill"):
if self.mode == MoRIIOMode.READ:
if remote_block_ids := params.get("remote_block_ids"):
if all(
p in params
for p in ("remote_engine_id", "remote_host", "remote_port")
):
# If remote_blocks and num_external_tokens = 0, we
# a full prefix cache hit on the D worker. We need to call
# send_notif in _read_blocks to free the memory on the P.
# Get unhashed blocks to pull from remote.
local_block_ids = blocks.get_block_ids()[0]
assert len(local_block_ids) <= len(remote_block_ids)
if len(local_block_ids) == len(remote_block_ids):
pass
else:
local_block_ids = remote_block_ids[-len(local_block_ids) :]
self._reqs_need_recv[request.request_id] = (
request,
local_block_ids,
)
else:
logger.warning(
"Got invalid KVTransferParams: %s. This "
"request will not utilize KVTransfer",
params,
)
else:
assert request.kv_transfer_params is not None, (
"kv_transfer_params should not be None"
)
remote_dp_rank = request.kv_transfer_params.get("remote_dp_rank", 0)
for tp_index in range(self.tp_size):
target_port = request.kv_transfer_params[
"remote_notify_port"
] + get_port_offset(remote_dp_rank, tp_index)
self.send_notify_block(
req_id=request.request_id,
block_notify_list=blocks.get_block_ids()[0],
host=params.get("remote_host"),
port=target_port,
)
# Only trigger 1 KV transfer per request.
params["do_remote_prefill"] = False
    def build_connector_meta(
        self,
        scheduler_output: SchedulerOutput,
    ) -> KVConnectorMetadata:
        """Assemble per-step metadata for the worker side.

        Drains the queues filled by update_state_after_alloc /
        request_finished (_reqs_need_recv/_reqs_need_save/_reqs_need_send)
        into a MoRIIOConnectorMetadata, handling WRITE-mode chunked
        prefill by accumulating block ids until a request's prompt is
        fully covered.
        """
        meta = MoRIIOConnectorMetadata()
        if self.mode == MoRIIOMode.WRITE:
            # when async_load_kv finished,
            # new reqs will be added to scheduler_output.scheduled_new_reqs
            if get_role() == ROLE.CONSUMER:
                for new_req in scheduler_output.scheduled_new_reqs:
                    red_id = new_req.req_id
                    # Only the first KV-cache group is used here
                    # (TODO hybrid attention).
                    local_block_ids = list(new_req.block_ids)[0]
                    assert new_req.sampling_params is not None, (
                        f"sampling_params is None for req {new_req.req_id}"
                    )
                    assert hasattr(new_req.sampling_params, "extra_args"), (
                        f"sampling_params missing extra_args for req {new_req.req_id}"
                    )
                    kv_transfer_params = (
                        new_req.sampling_params.extra_args.get("kv_transfer_params", {})
                        if new_req.sampling_params.extra_args
                        else {}
                    )
                    meta.add_new_req(
                        red_id,
                        local_block_ids,
                        kv_transfer_params,
                    )
            if get_role() == ROLE.PRODUCER:
                # This is the logic for checking against chunked prefill.
                # When the last chunk is identified,
                # It places the request metadata into the saving queue.
                for i, req_id in enumerate(
                    scheduler_output.scheduled_cached_reqs.req_ids
                ):
                    new_block_ids = (
                        scheduler_output.scheduled_cached_reqs.new_block_ids[i]
                    )
                    if new_block_ids is not None:
                        block_ids = new_block_ids[0]
                        # TODO : hybrid attn, etc
                        # NOTE(review): assumes req_id is already tracked in
                        # _reqs_need_pending_save (seeded by the
                        # _reqs_need_save loop below on an earlier step);
                        # a miss would raise KeyError.
                        req, existing_blocks = self._reqs_need_pending_save[req_id]
                        updated_blocks = list(existing_blocks) + (block_ids)
                        self._reqs_need_pending_save[req_id] = (req, updated_blocks)
                        # All prompt tokens now have blocks -> final chunk:
                        # queue the save and stop tracking the request.
                        if (
                            len(self._reqs_need_pending_save[req_id][1])
                            * self.block_size
                            >= req.num_prompt_tokens
                        ):
                            meta.add_new_req(
                                request_id=req_id,
                                local_block_ids=self._reqs_need_pending_save[req_id][1],
                                kv_transfer_params=req.kv_transfer_params or {},
                                write_mode=True,
                            )
                            del self._reqs_need_pending_save[req_id]
        # Loop through scheduled reqs and convert to ReqMeta.
        for req_id, (req, block_ids) in self._reqs_need_recv.items():
            assert req.kv_transfer_params is not None
            meta.add_new_req(
                request_id=req_id,
                local_block_ids=block_ids,
                kv_transfer_params=req.kv_transfer_params,
            )
        for req_id, (req, block_ids) in self._reqs_need_save.items():
            assert req.kv_transfer_params is not None
            if req.num_prompt_tokens > len(block_ids) * self.block_size:
                # not last chunk prefill; defer to the pending-save path.
                self._reqs_need_pending_save[req_id] = (req, block_ids)
                continue
            meta.add_new_req(
                request_id=req_id,
                local_block_ids=block_ids,
                kv_transfer_params=req.kv_transfer_params,
                write_mode=True,
            )
        # Clear the list once workers start the transfers
        meta.reqs_to_send = self._reqs_need_send
        self._reqs_need_recv.clear()
        self._reqs_need_save.clear()
        self._reqs_need_send = {}
        return meta
def shutdown(self):
for path, sock in self.paths.items():
try:
sock.close(linger=0)
logger.debug("Closed ZMQ socket for path: %s", path)
except Exception as e:
logger.warning("Error closing ZMQ socket for path %s: %s", path, e)
self.paths.clear()
    def request_finished(
        self,
        request: "Request",
        block_ids: list[int],
    ) -> tuple[bool, dict[str, Any] | None]:
        """
        Once a request is finished, determine whether request blocks
        should be freed now or will be sent asynchronously and freed later.

        Returns:
            * delay_free_blocks: True when the producer must keep the
              blocks alive until the decode side has pulled them.
            * kv_transfer_params to forward to the decode instance
              (how to reach this engine and which blocks to read), or
              None when there is nothing to transfer.
        """
        params = request.kv_transfer_params
        logger.debug(
            "MoriioConnector request_finished, request_status=%s, "
            "kv_transfer_params=%s",
            request.status,
            params,
        )
        if not params:
            return False, None
        if params.get("do_remote_prefill"):
            # If do_remote_prefill is still True when the request is finished,
            # update_state_after_alloc must not have been called (the request
            # must have been aborted before it was scheduled).
            # To avoid stranding the prefill blocks in the prefill instance,
            # we must add empty block_ids to _reqs_need_recv so that our
            # worker side will notify and free blocks in the prefill instance.
            self._reqs_need_recv[request.request_id] = (request, [])
            params["do_remote_prefill"] = False
            return False, None
        # Only producer requests that stopped at their length cap hand KV
        # over to a remote decoder; everything else frees immediately.
        if (
            not params.get("do_remote_decode")
            or request.status != RequestStatus.FINISHED_LENGTH_CAPPED
        ):
            return False, None
        # All blocks are treated as fully computed here (a partial final
        # block would otherwise be trimmed: block_ids[:-1]).
        computed_block_ids = block_ids
        # If prompt < block_size, no xfer so free blocks immediately.
        delay_free_blocks = len(computed_block_ids) > 0
        if delay_free_blocks:
            # Prefill request on remote. It will be read from D upon completion
            self._reqs_need_send[request.request_id] = (
                time.perf_counter()
                + MoRIIOConstants.VLLM_MORI_READ_ABORT_REQUEST_TIMEOUT
            )
        # If we execute in P-D serial mode, no notification port is needed.
        return delay_free_blocks, dict(
            do_remote_prefill=True,
            do_remote_decode=False,
            remote_block_ids=computed_block_ids,
            remote_engine_id=self.engine_id,
            remote_host=self.host_ip,
            remote_port=self.handshake_port,
            tp_size=self.vllm_config.parallel_config.tensor_parallel_size,
        )
class MoRIIOConnectorWorker:
"""Implementation of Worker side methods"""
    def __init__(self, vllm_config: VllmConfig, engine_id: str):
        """Set up the worker: the MoRIIO IO engine, side channels
        (handshake/notify/ping), and bookkeeping for KV-cache
        registration and in-flight transfers.

        Raises:
            RuntimeError: if the 'mori' package is not available.
        """
        if not is_moriio_available():
            raise RuntimeError(
                "MoRIIO is not available. Please ensure the 'mori' package "
                "is installed and properly configured."
            )
        self.moriio_config = MoRIIOConfig.from_vllm_config(vllm_config)
        self.mode = get_moriio_mode()
        logger.info("Initializing MoRIIO worker %s", engine_id)
        # Silence the noisy third-party 'aiter' logger.
        logging.getLogger("aiter").disabled = True
        # Config.
        self.vllm_config = vllm_config
        assert vllm_config.kv_transfer_config is not None, (
            "kv_transfer_config must be set for MoRIIOConnector"
        )
        self.kv_transfer_config = vllm_config.kv_transfer_config
        self.is_producer = self.kv_transfer_config.is_kv_producer
        if self.is_producer:
            set_role(ROLE.PRODUCER)
        else:
            set_role(ROLE.CONSUMER)
        # mori engine
        self._rank = get_world_group().rank
        self._local_rank = get_world_group().local_rank
        self.tp_rank = self.moriio_config.tp_rank
        self.dp_rank = self.moriio_config.dp_rank
        self.local_ip = self.moriio_config.local_ip
        self.local_kv_port = self.moriio_config.local_kv_port
        self.proxy_ip = self.moriio_config.proxy_ip
        self.local_ping_port = self.moriio_config.local_ping_port
        self.proxy_ping_port = self.moriio_config.proxy_ping_port
        self.http_port = self.moriio_config.http_port
        self.handshake_port = self.moriio_config.handshake_port
        self.notify_port = self.moriio_config.notify_port
        self.zmq_context = zmq.Context()
        self.metadata_address = (
            f"{self.moriio_config.local_ip}:{self.moriio_config.local_ping_port}"
        )
        self.request_address = (
            f"{self.moriio_config.local_ip}:{self.moriio_config.http_port}"
        )
        self.moriio_engine = None
        self._handle_request_thread = None
        self._ping_thread = None
        # Dedicated writer that drains WriteTasks (see schedule_write_blocks).
        self._writer = MoRIIOWriter(self)
        role = "producer" if self.is_producer else "consumer"
        engine_suffix = (
            f"{self.moriio_config.local_ip}:{self.moriio_config.handshake_port}:"
            f"tp{self.tp_rank}:dp{self.dp_rank}"
        )
        self.moriio_engine = IOEngine(
            f"{role}:{engine_suffix}",
            IOEngineConfig(
                self.moriio_config.local_ip, self.moriio_config.local_kv_port
            ),
        )
        logger.debug(
            "build MORI IOEngine %s (ip=%s port=%s)",
            f"{role}:{engine_suffix}",
            self.moriio_config.local_ip,
            self.moriio_config.local_kv_port,
        )
        # Only global rank 0 registers with the proxy (when one is configured).
        if self._rank == 0 and self.moriio_config.proxy_ip:
            self._ping_thread = threading.Thread(
                target=self._ping, args=(self.zmq_context,), daemon=True
            )
            self._ping_thread.start()
        logger.info(
            "Initializing MoRIIO Engine, engine = %s, role = %s",
            self.moriio_engine,
            "producer" if self.is_producer else "consumer",
        )
        # Agent.
        self.moriio_wrapper = MoRIIOWrapper(tp_rank=self.tp_rank, dp_rank=self.dp_rank)
        self.moriio_wrapper.set_moriio_engine(self.moriio_engine)
        self.moriio_wrapper.set_backend_type(BackendType.RDMA)
        self.moriio_wrapper.notify_port = self.moriio_config.notify_port
        # Local/remote registered-memory metadata, filled by
        # register_kv_caches and _moriio_handshake respectively.
        self.local_kv_cache_metadata: list[bytes] = []
        self.local_kv_cache_size: list[int] = []
        self.layer_name_to_local_kv_cache_metadata: dict[str, list[bytes]] = {}
        self.remote_kv_cache_metadata: list[bytes] = []
        self.remote_kv_cache_size: list[int] = []
        self.layer_name_to_remote_kv_cache_metadata: dict[str, dict[str, list[Any]]] = (
            dict()
        )
        self.remote_moriio_metadata: dict[EngineId, MoRIIOAgentMetadata] = {}
        self.slot_size_bytes = 0
        # Flags flipped by the handshake completion callback; polled by
        # start_load_kv / save_kv_layer.
        self.load_ready_flag: dict[str, bool] = {}
        self.write_ready_flags: dict[str, bool] = {}
        self.kv_cache_shape = None
        self.block_shape = None
        self.kv_element_size = 0
        # Map of engine_id -> {agent_name0, agent_name1..}.
        self._remote_agents: dict[EngineId, set[str]] = {}
        # Each (dp_rank, tp_rank) pair listens on its own offset port.
        self.side_channel_port: int = (
            self.moriio_config.handshake_port
            + get_port_offset(self.dp_rank, self.tp_rank)
        )
        self.engine_id: EngineId = engine_id
        self.world_size = get_tensor_model_parallel_world_size()
        self.tp_group = get_tp_group()
        # KV Caches and moriio tracking data.
        self.kv_caches: dict[str, torch.Tensor] = {}
        # Map of engine_id -> kv_caches_base_addr. For TP case, each local
        # rank will still only pull from a single remote TP worker.
        self.kv_caches_base_addr: dict[EngineId, list[int]] = {}
        # Number of MoRIIO regions. Currently one region per cache
        # (so 1 per layer for MLA, otherwise 2 per layer)
        self.num_regions = 0
        self.num_layers = 0
        # Map of engine_id -> num_blocks. All ranks in the same deployment will
        # have the same number of blocks.
        self.dst_num_blocks: dict[EngineId, int] = {}
        # In progress transfers.
        self._recving_transfers: defaultdict[ReqId, list] = defaultdict(list)
        self._recving_transfers_callback_addr: dict[ReqId, tuple[str, str]] = {}
        # Track the expiration time of requests that are waiting to be sent.
        self._reqs_to_send: dict[ReqId, float] = {}
        # Background thread for handling new handshake requests.
        self._moriio_handshake_listener_t: threading.Thread | None = None
        # Background thread for initializing new MoRIIO handshakes.
        self._handshake_initiation_executor = ThreadPoolExecutor(
            # MoRIIO is not guaranteed to be thread-safe, limit 1 worker.
            max_workers=1,
            thread_name_prefix="vllm-moriio-handshake-initiator",
        )
        self._ready_requests = queue.Queue[tuple[ReqId, ReqMeta]]()
        self._handshake_futures: dict[EngineId, Future[set[str]]] = {}
        # Protects _handshake_futures and _remote_agents.
        self._handshake_lock = threading.RLock()
        self.block_size = vllm_config.cache_config.block_size
        self.model_config = vllm_config.model_config
        self.cache_config = vllm_config.cache_config
        # Per-layer sliding-window widths, populated in register_kv_caches.
        self.block_window_per_layer: list[int | None] = []
        self.use_mla = self.model_config.use_mla
        self.built_session = False
        self.built_write_session: defaultdict[str, list] = defaultdict(list)
        backend = get_attn_backend(
            self.model_config.get_head_size(),
            self.model_config.dtype,
            self.cache_config.cache_dtype,
            self.block_size,
            use_mla=self.use_mla,
        )
        # TODO: consider the integration of flashinfer or other backends.
        self.backend_name = backend.get_name()
        logger.debug("Detected attention backend %s", self.backend_name)
def schedule_write_blocks(
self,
request_id: str,
dst_engine_id: str,
local_block_ids: list[int],
remote_block_ids: list[int] | None,
layer_name: str,
kv_layer: torch.Tensor,
remote_notify_port: int,
remote_ip: str,
) -> None:
"""Schedule a block write operation.
Args:
request_id: Unique identifier for the request
dst_engine_id: Destination engine ID
local_block_ids: Local block IDs to transfer
remote_block_ids: Hint for remote block IDs
layer_name: Name of the layer
kv_layer: KV cache tensor
remote_notify_port: Port for completion notification
remote_ip: IP address of remote node
"""
# synchronization to prevent dirty reads between
# transfer and attention operations
# we can consider removing this synchronization after ibgda is enabled.
# when mori-io supports ibgda functionality
stream = torch.cuda.current_stream()
event = torch.cuda.Event()
event.record(stream)
task = WriteTask(
request_id=request_id,
dst_engine_id=dst_engine_id,
local_block_ids=local_block_ids,
remote_block_ids_hint=remote_block_ids,
layer_name=layer_name,
event=event,
remote_notify_port=remote_notify_port,
remote_ip=remote_ip,
)
self._writer.schedule_write(task)
def _get_built_session(self, remote_engine_id):
if remote_engine_id not in self.built_write_session:
cur_remote_engine_sessions = []
for ln, local_meta in self.layer_name_to_local_kv_cache_metadata.items():
unpacked_local_memory_meta = (
self.moriio_wrapper.get_unpack_memory_metadata(local_meta[0])
)
unpacked_remote_memory_meta = (
self.moriio_wrapper.get_unpack_memory_metadata(
self.layer_name_to_remote_kv_cache_metadata[remote_engine_id][
ln
][0]
)
)
cur_remote_engine_sessions.append(
self.moriio_wrapper.build_session(
unpacked_local_memory_meta, unpacked_remote_memory_meta
)
)
self.built_write_session[remote_engine_id] = cur_remote_engine_sessions
return self.built_write_session[remote_engine_id], self.remote_moriio_metadata[
remote_engine_id
]
def _ping(self, zmq_context):
http_request_address = f"http://{self.request_address}/v1/completions"
role = "P" if self.is_producer else "D"
retry_count = 0
index = 1
with zmq_context.socket(zmq.DEALER) as sock:
sock.connect(f"tcp://{self.proxy_ip}:{self.proxy_ping_port}")
while True:
try:
data = {
"type": "register",
"role": role,
"index": str(index),
"request_address": http_request_address,
"handshake_port": self.handshake_port,
"notify_port": self.notify_port,
"dp_size": self.moriio_config.dp_size,
"tp_size": self.moriio_config.tp_size,
"transfer_mode": self.mode.name,
}
sock.send(msgpack.dumps(data))
# logger.debug(f"Successfully sent ping message #{index}")
retry_count = 0
except ConnectionRefusedError:
logger.info(
"Connection refused: %s:%s -> %s:%s",
self.local_ip,
self.local_ping_port,
self.proxy_ip,
self.proxy_ping_port,
)
retry_count += 1
except OSError as e:
logger.info("OS error when sending ping: %s", e)
retry_count += 1
except Exception as e:
logger.info("Unexpected error when sending ping: %s", e)
retry_count += 1
if retry_count >= MoRIIOConstants.MAX_PING_RETRIES:
logger.error(
"Max retries (%s) exceeded. Stopping ping loop.",
MoRIIOConstants.MAX_PING_RETRIES,
)
raise RuntimeError(
f"Ping failed after {retry_count} retries"
) from e
finally:
time.sleep(MoRIIOConstants.PING_INTERVAL)
index += 1
def shutdown(self):
if hasattr(self, "moriio_wrapper") and self.moriio_wrapper:
self.moriio_wrapper.shutdown()
if hasattr(self, "_handshake_initiation_executor"):
self._handshake_initiation_executor.shutdown(wait=False)
if (
hasattr(self, "_moriio_handshake_listener_t")
and self._moriio_handshake_listener_t
):
self._moriio_handshake_listener_t.join(timeout=0)
if hasattr(self, "zmq_context") and self.zmq_context:
self.zmq_context.destroy(linger=0)
self.zmq_context = None
    def __del__(self):
        # Best-effort cleanup on garbage collection. shutdown() is
        # idempotent and guards every attribute access, which matters here:
        # __del__ can run during interpreter teardown or on a partially
        # initialized instance.
        self.shutdown()
@staticmethod
def _moriio_handshake_listener(
metadata: MoRIIOAgentMetadata,
ready_event: threading.Event,
base_port: int,
tp_rank: int,
dp_rank: int,
layer_name_to_local_kv_cache_metadata: dict,
):
"""Background thread for getting new MoRIIO handshakes."""
encoder = msgspec.msgpack.Encoder()
encoded_data = encoder.encode(metadata)
size_in_bytes = len(encoded_data)
logger.debug(
"Size of encoded MoRIIOAgentMetadata: %s bytes", str(size_in_bytes)
)
# Listen for new requests for metadata.
host = "*"
path = make_zmq_path("tcp", host, base_port)
logger.debug("mori handshake starting listening on path: %s", path)
with zmq_ctx(zmq.ROUTER, path) as sock:
ready_event.set()
while True:
identity, msg = sock.recv_multipart()
if (
msg != MoRIIOConstants.GET_META_MSG
and msg != MoRIIOConstants.POP_DONE_RECV
):
logger.error("Connection listener got unexpected message")
raise HandshakeError("handshake failed, unexpected msg type")
elif msg == MoRIIOConstants.GET_META_MSG:
sock.send_multipart(
(identity, b"", encoded_data)
) # send local mori io engine meta data
logger.debug("MoRIIO handshake listener sent metadata")
# now we send tensor meta data for each block
buf = msgpack.dumps(layer_name_to_local_kv_cache_metadata)
sock.send_multipart((identity, b"", buf))
elif msg == MoRIIOConstants.POP_DONE_RECV:
_, req_id = sock.recv_multipart()
logger.debug(
"MoRIIO handshake listener received done recv for req",
req_id.decode(),
)
    def _moriio_handshake(
        self,
        host: str,
        port: int,
        remote_tp_size: int,
        expected_engine_id: str,
        remote_dp_rank: int = 0,
    ) -> set[str]:
        """Do a MoRIIO handshake with a remote instance.

        Queries the remote listener for its agent metadata, registers the
        remote engine locally, then receives the remote per-layer tensor
        metadata and caches both under *expected_engine_id*.

        Returns:
            A single-element set with the registered remote agent name.
        Raises:
            HandshakeError: on an unexpected ZMQ frame layout.
        """
        start_time = time.perf_counter()
        # NOTE(rob): we need each rank to have a unique port. This is
        # a hack to keep us moving. We will switch when moving to etcd
        # or where we have a single ZMQ socket in the scheduler.
        port_offset = get_port_offset(remote_dp_rank, self.tp_rank)
        path = make_zmq_path("tcp", host, port + port_offset)
        logger.debug("handshake Querying metadata on path: %s", path)
        # Send query for the request.
        with zmq_ctx(zmq.DEALER, path) as sock:
            logger.debug("prepare send msg INSTAZNCE: %s", path)
            sock.send(MoRIIOConstants.GET_META_MSG)
            # First reply: (empty delimiter, encoded MoRIIOAgentMetadata).
            received_frame = sock.recv_multipart()
            if len(received_frame) != 2 or received_frame[0] != b"":
                raise HandshakeError(f"Unexpected frame! {received_frame = }")
            metadata_bytes = received_frame[1]
            decoder = msgspec.msgpack.Decoder(MoRIIOAgentMetadata)
            metadata = decoder.decode(metadata_bytes)
            got_metadata_time = time.perf_counter()
            logger.info(
                "MoRIIO handshake: get metadata took: %s",
                got_metadata_time - start_time,
            )
            self.moriio_wrapper.remote_engine_ip = host
            remote_agent_name = self.moriio_wrapper.register_remote_engine(
                metadata.agent_metadata
            )
            logger.debug(
                "MoRIIO handshake: registered"
                "remote agent %s for engine ID %s, path = %s",
                remote_agent_name,
                expected_engine_id,
                path,
            )
            # These scratch buffers are expected to be empty between
            # handshakes; warn and reset if a previous exchange leaked.
            if len(self.local_kv_cache_metadata) > 0:
                logger.warning(
                    "len(self.local_kv_cache_metadata) = %s,"
                    "maybe you didnt clear this buffer correctly",
                    len(self.local_kv_cache_metadata),
                )
                self.local_kv_cache_metadata = []
            if len(self.remote_kv_cache_metadata) > 0:
                logger.warning(
                    "len(self.remote_kv_cache_metadata) = %s,"
                    "maybe you didnt clear this buffer correctly",
                    len(self.remote_kv_cache_metadata),
                )
                self.remote_kv_cache_metadata = []
            # Second reply: the remote's per-layer tensor metadata.
            received_frame = sock.recv_multipart()
            if len(received_frame) != 2 or received_frame[0] != b"":
                raise HandshakeError(f"unexpected frame! {received_frame = }")
            buf = received_frame[1]
            self.layer_name_to_remote_kv_cache_metadata[expected_engine_id] = (
                msgpack.loads(buf)
            )
            self.remote_moriio_metadata[expected_engine_id] = metadata
        setup_agent_time = time.perf_counter()
        logger.debug(
            "MoRIIO handshake: add agent took: %s",
            setup_agent_time - got_metadata_time,
        )
        return {remote_agent_name}
    def _background_moriio_handshake(
        self, req_id: str, remote_engine_id: EngineId, meta: ReqMeta
    ):
        # Do MoRIIO handshake in background and add to _ready_requests when done.
        # One handshake is submitted per remote DP rank; a final sentinel
        # future re-queues the request once all of them finish.
        fut = None
        if remote_engine_id is not None:
            fut = self._handshake_futures.get(remote_engine_id)
        # NOTE(review): when a future is already pending (fut is not None)
        # this request is NOT re-queued anywhere here — presumably the
        # caller retries; verify against the busy-wait loops in
        # start_load_kv / save_kv_layer.
        if fut is None:
            host = meta.remote_host
            port = int(meta.remote_handshake_port)
            tp_size = int(meta.tp_size)
            remote_dp_size = int(meta.remote_dp_size)
            def request_ready(_f: Future[Any], entry=(req_id, meta)):
                # Runs when every per-DP handshake has completed; makes the
                # request visible to the polling loops via _ready_requests
                # and flips the readiness flags they spin on.
                logger.info("MoRIIO handshake done for request %s", req_id)
                self._ready_requests.put(entry)
                self.load_ready_flag[remote_engine_id] = True
                self.write_ready_flags[remote_engine_id] = True
            fut_list = []
            # In dp(prefill)<->dp(decode) communication, we require an all-to-all handshake.
            for cur_dp_rank in range(remote_dp_size):
                dp_engine_id = self.get_engine_name_with_dp(remote_engine_id, cur_dp_rank)
                future = self._handshake_initiation_executor.submit(
                    self._moriio_handshake, host, port, tp_size, dp_engine_id, cur_dp_rank
                )
                fut_list.append(future)
                def done_callback(f: Future[set[str]], eid=dp_engine_id):
                    # eid is bound as a default arg to avoid the
                    # late-binding-closure pitfall inside this loop.
                    with self._handshake_lock:
                        self._handshake_futures.pop(eid, None)
                        try:
                            self._remote_agents[eid] = f.result()
                        except Exception:
                            logger.exception("Handshake with %s failed", eid)
                future.add_done_callback(done_callback)
                self._handshake_futures[dp_engine_id] = future
            # fut = fut_list
            def wait_all_dp():
                # Runs on the same single-worker executor; FIFO ordering
                # guarantees all handshake tasks above finish first, so the
                # result() calls below return immediately (no deadlock).
                for future in fut_list:
                    future.result()
                return True
            all_done_future = self._handshake_initiation_executor.submit(wait_all_dp)
            all_done_future.add_done_callback(request_ready)
def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
"""Register the KV Cache data in moriio."""
_, first_kv_cache = next(iter(kv_caches.items()))
kv_elem_size = first_kv_cache.element_size()
use_mla = len(first_kv_cache.shape) == 3
assert use_mla == self.use_mla
if use_mla:
# MLA case.
self.num_blocks = first_kv_cache.shape[0]
block_rank = 2 # [block_size, latent_dim]
block_shape = first_kv_cache.shape[-block_rank:]
block_size, kv_latent_dim = block_shape
self.slot_size_bytes = kv_elem_size * kv_latent_dim
else:
# [2 (k and v), num_blocks, ...]
self.num_blocks = first_kv_cache.shape[1]
block_rank = 3 # [block_size, kv_heads, head_dim]
block_shape = first_kv_cache.shape[-block_rank:]
block_size, n_kv_heads, head_dim = block_shape[-3:]
# head size in bytes.
self.slot_size_bytes = (
kv_elem_size * n_kv_heads * head_dim
) # 1 token 1 layer size , slot size
assert block_size == self.block_size
# TODO(tms): self.block_len needs to be per-layer for sliding window,
# hybrid attn, etc
# block size in bytes
self.block_len = kv_elem_size * math.prod(block_shape)
self.kv_cache_shape = first_kv_cache.shape
self.block_shape = block_shape
self.kv_element_size = kv_elem_size
self.dst_num_blocks[self.engine_id] = self.num_blocks
self.kv_caches = kv_caches # layer name to kv cache
kv_caches_base_addr = []
caches_data = []
for cache_or_caches in kv_caches.values():
cache_list = [cache_or_caches] if use_mla else cache_or_caches
for cache in cache_list:
base_addr = cache.data_ptr()
region_len = self.num_blocks * self.block_len
caches_data.append((base_addr, region_len, cache.device.index, ""))
kv_caches_base_addr.append(base_addr)
for layer_name, kv_cache in kv_caches.items():
if layer_name not in self.layer_name_to_local_kv_cache_metadata:
self.layer_name_to_local_kv_cache_metadata[layer_name] = []
moriio_mem_metadata = self.moriio_wrapper.register_local_tensor(kv_cache)
self.layer_name_to_local_kv_cache_metadata[layer_name].append(
moriio_mem_metadata
)
self.local_kv_cache_size.append(cache.nelement() * cache.element_size())
self.kv_caches_base_addr[self.engine_id] = kv_caches_base_addr
self.num_regions = len(caches_data)
self.num_layers = len(self.kv_caches.keys())
# Optimization for models with local attention (Llama 4)
if self.vllm_config.model_config.hf_config.model_type == "llama4":
from transformers import Llama4TextConfig
assert isinstance(
self.vllm_config.model_config.hf_text_config, Llama4TextConfig
)
llama4_config = self.vllm_config.model_config.hf_text_config
no_rope_layers = llama4_config.no_rope_layers
chunk_size = llama4_config.attention_chunk_size
chunk_block_size = math.ceil(chunk_size / self.block_size)
for layer_idx in range(self.num_layers):
# no_rope_layers[layer_idx] == 0 means NoPE (global)
# Any other value means RoPE (local chunked)
is_local_attention = no_rope_layers[layer_idx] != 0
block_window = chunk_block_size if is_local_attention else None
self.block_window_per_layer.append(block_window)
logger.debug(
"Llama 4 block window per layer mapping: %s",
self.block_window_per_layer,
)
assert len(self.block_window_per_layer) == self.num_layers
metadata = MoRIIOAgentMetadata(
engine_id=self.engine_id,
agent_metadata=self.moriio_wrapper.get_agent_metadata(),
kv_caches_base_addr=self.kv_caches_base_addr[self.engine_id],
num_blocks=self.num_blocks,
block_len=self.block_len,
attn_backend_name=self.backend_name,
)
ready_event = threading.Event()
self._moriio_handshake_listener_t = threading.Thread(
target=self._moriio_handshake_listener,
args=(
metadata,
ready_event,
self.side_channel_port,
self.tp_rank,
self.dp_rank,
self.layer_name_to_local_kv_cache_metadata,
),
daemon=True,
name="moriio_handshake_listener",
)
self._moriio_handshake_listener_t.start()
ready_event.wait() # Wait for listener ZMQ socket to be ready.
self.moriio_wrapper.async_wait_reqid()
def get_finished(self) -> tuple[set[str], set[str]]:
"""
Get requests that are done sending or recving on this specific worker.
The scheduler process (via the MultiprocExecutor) will use this output
to track which workers are done.
"""
done_sending, done_recving = set(), set()
if self.is_producer:
done_sending = self.moriio_wrapper.pop_finished_req_ids()
else:
if self.mode == MoRIIOMode.WRITE:
done_recving = self.moriio_wrapper.pop_finished_write_req_ids()
else:
done_recving = self._pop_done_transfers()
return done_sending, done_recving
def _pop_done_transfers(self) -> set[str]:
done_req_ids: set[str] = set()
with self.moriio_wrapper.lock:
to_remove = []
for req_id, status_list in self._recving_transfers.items():
if status_list[-1].Succeeded():
done_req_ids.add(req_id)
self.moriio_wrapper.send_notify(
req_id,
self._recving_transfers_callback_addr[req_id][0],
self._recving_transfers_callback_addr[req_id][1],
)
to_remove.append(req_id)
for req_id in to_remove:
del self._recving_transfers[req_id]
del self._recving_transfers_callback_addr[req_id]
return done_req_ids
    def save_kv_layer(
        self,
        metadata: MoRIIOConnectorMetadata,
        layer_name: str,
        kv_layer: torch.Tensor,
        attn_metadata: "AttentionMetadata",
        **kwargs,
    ):
        """Producer-side WRITE-mode push of one layer for every request in
        metadata.reqs_to_save; requests whose remote engine has not been
        handshaken yet are deferred until the handshake completes."""
        if not self.is_producer:
            return
        if self.mode == MoRIIOMode.READ:
            # In READ mode the consumer pulls; nothing to push here.
            return
        remote_engine_id = None
        for req_id, meta in metadata.reqs_to_save.items():
            # we only need to check if dp0 in rank
            remote_engine_id = (
                str(meta.remote_host) + ":" + str(meta.remote_handshake_port)
            )
            meta.remote_engine_id = remote_engine_id
            dp0_remote_engine_id = self.get_engine_name_with_dp(remote_engine_id, 0)
            if dp0_remote_engine_id not in self._remote_agents:
                # Initiate handshake with remote engine to exchange metadata.
                with self._handshake_lock:
                    if remote_engine_id not in self._remote_agents:
                        self._background_moriio_handshake(
                            req_id, remote_engine_id, meta
                        )
                        continue
            self._write_blocks_for_req(req_id, meta, layer_name, kv_layer)
        # Busy-wait until a deferred request's handshake finishes, then
        # issue its write. NOTE(review): this spins on queue/flag state
        # with no sleep, and appears reachable with remote_engine_id left
        # None when reqs_to_save is empty — verify callers only invoke
        # this with work queued, or the first branch loops indefinitely.
        while True:
            if (
                self._ready_requests.empty()
                and remote_engine_id not in self.write_ready_flags
            ):
                continue
            elif not self._ready_requests.empty() and (
                remote_engine_id in self.write_ready_flags
            ):
                self._write_blocks_for_req(
                    *self._ready_requests.get_nowait(), layer_name, kv_layer
                )
                break
            else:
                break
def get_engine_name_with_dp(self, engine_name, dp_rank):
return f"{engine_name}_dp{dp_rank}"
    def start_load_kv(self, metadata: MoRIIOConnectorMetadata):
        """
        Start loading by triggering non-blocking moriio transfers.
        We check for these transfers to complete in each step().
        """
        if self.is_producer:
            # Producer side only re-arms the notification listener.
            self.moriio_wrapper.async_wait_reqid()
            return
        if self.mode == MoRIIOMode.WRITE:
            # In WRITE mode the producer pushes; consumers have nothing
            # to pull here.
            return
        wait_handshake_readd_req = False
        remote_engine_id = None
        for req_id, meta in metadata.reqs_to_recv.items():
            remote_engine_id = (
                str(meta.remote_host) + ":" + str(meta.remote_handshake_port)
            )
            meta.remote_engine_id = remote_engine_id
            dp0_remote_engine_id = self.get_engine_name_with_dp(remote_engine_id, 0)
            if dp0_remote_engine_id not in self._remote_agents:
                # Initiate handshake with remote engine to exchange metadata.
                with self._handshake_lock:
                    if remote_engine_id not in self._remote_agents:
                        self._background_moriio_handshake(
                            req_id, remote_engine_id, meta
                        )
                        wait_handshake_readd_req = True
                        continue
            # Handshake already completed, start async read xfer.
            self._read_blocks_for_req(req_id, meta)
        # Start transfers for requests whose handshakes have now finished.
        # NOTE(review): this busy-waits (no sleep) on the ready queue and
        # the load_ready_flag set by the handshake callback; it only spins
        # when a handshake was initiated above (wait_handshake_readd_req).
        while True:
            if (
                self._ready_requests.empty()
                and remote_engine_id not in self.load_ready_flag
                and wait_handshake_readd_req
            ):
                continue
            elif (
                not self._ready_requests.empty()
                and remote_engine_id in self.load_ready_flag
            ):
                self._read_blocks_for_req(*self._ready_requests.get_nowait())
                break
            else:
                break
        # Remember producer-side expiry deadlines for pending sends.
        self._reqs_to_send.update(metadata.reqs_to_send)
def _read_blocks_for_req(self, req_id: str, meta: ReqMeta):
logger.debug(
"Remote agent %s available, calling _read_blocks for req %s",
meta.remote_engine_id,
req_id,
)
self._read_blocks(
request_id=req_id,
dst_engine_id=meta.remote_engine_id,
local_block_ids=meta.local_block_ids,
remote_block_ids=meta.remote_block_ids,
remote_host=meta.remote_host,
remote_notify_port=meta.remote_notify_port,
)
def _write_blocks_for_req(self, req_id: str, meta: ReqMeta, layer_name, kv_layer):
self.schedule_write_blocks(
request_id=req_id,
dst_engine_id=meta.remote_engine_id,
local_block_ids=meta.local_block_ids,
remote_block_ids=meta.remote_block_ids,
layer_name=layer_name,
kv_layer=kv_layer,
remote_notify_port=meta.remote_notify_port,
remote_ip=meta.remote_host,
)
def _is_last_layer(self, layer_name):
return layer_name == list(self.kv_caches.keys())[-1]
def merge_contiguous_blocks(
self,
offsets_local: list[int],
offsets_remote: list[int],
sizes: list[int],
assume_sorted: bool = False,
) -> tuple[list[int], list[int], list[int]]:
n = len(offsets_local)
if n == 0:
return [], [], []
if not (n == len(offsets_remote) == len(sizes)):
raise ValueError("Input list lengths mismatch")
local_arr = np.fromiter(offsets_local, dtype=np.int64, count=n)
remote_arr = np.fromiter(offsets_remote, dtype=np.int64, count=n)
sizes_arr = np.fromiter(sizes, dtype=np.int64, count=n)
if assume_sorted:
local_sorted = local_arr
remote_sorted = remote_arr
sizes_sorted = sizes_arr
else:
if np.all(local_arr[:-1] <= local_arr[1:]):
local_sorted = local_arr
remote_sorted = remote_arr
sizes_sorted = sizes_arr
else:
sort_idx = np.argsort(local_arr, kind="stable")
local_sorted = local_arr[sort_idx]
remote_sorted = remote_arr[sort_idx]
sizes_sorted = sizes_arr[sort_idx]
if n == 1:
return (
[int(local_sorted[0])],
[int(remote_sorted[0])],
[int(sizes_sorted[0])],
)
diff_local = local_sorted[1:] - local_sorted[:-1]
diff_remote = remote_sorted[1:] - remote_sorted[:-1]
prev_size = sizes_sorted[:-1]
contiguous = (diff_local == prev_size) & (diff_remote == prev_size)
if not contiguous.any():
return local_sorted.tolist(), remote_sorted.tolist(), sizes_sorted.tolist()
if contiguous.all():
total_size = int(sizes_sorted.sum())
return [int(local_sorted[0])], [int(remote_sorted[0])], [total_size]
break_positions = np.flatnonzero(~contiguous) + 1
segment_starts = np.concatenate(([0], break_positions))
segment_ends = np.concatenate((break_positions, [n]))
seg_count = len(segment_starts)
merged_local = [0] * seg_count
merged_remote = [0] * seg_count
merged_sizes = [0] * seg_count
for si in range(seg_count):
s = segment_starts[si]
e = segment_ends[si]
merged_local[si] = int(local_sorted[s])
merged_remote[si] = int(remote_sorted[s])
merged_sizes[si] = int(
local_sorted[e - 1] + sizes_sorted[e - 1] - local_sorted[s]
)
return merged_local, merged_remote, merged_sizes
    def _compute_block_transfer_offsets(
        self,
        layer_name: str,
        local_block_ids: list[int],
        remote_block_ids: list[int],
        remote_moriio_meta: MoRIIOAgentMetadata,
    ) -> tuple[list[int], list[int], list[int]]:
        """Compute transfer offsets for block data.

        Offsets are byte offsets into this layer's registered KV cache
        buffer; contiguous descriptors are merged before returning.

        Args:
            layer_name: Name of the layer to transfer
            local_block_ids: IDs of local blocks
            remote_block_ids: IDs of remote blocks (parallel to local_block_ids)
            remote_moriio_meta: Metadata of the remote MoRIIO agent
        Returns:
            Tuple of (local_offsets, remote_offsets, transfer_sizes)
        """
        assert self.kv_cache_shape is not None, "KV caches shape not initialized"
        # A 3-D cache shape is treated as MLA: one transfer per block,
        # no separate V plane.
        is_mla = len(self.kv_cache_shape) == 3
        stride = self.kv_caches[layer_name].stride()
        sz = self.kv_caches[layer_name].element_size()
        if is_mla:
            # MLA shape: (num_blocks, block_size, head_size); hn fixed to 1.
            blknum, blksize, hs = self.kv_cache_shape
            hn = 1
            block_stride = stride[0]
        else:
            # Non-MLA shape: (2, num_blocks, block_size, num_heads, head_size);
            # dim 0 separates the K (index 0) and V (index 1) planes.
            _, blknum, blksize, hn, hs = self.kv_cache_shape
            local_ktov_stride = stride[0]
            block_stride = stride[1]
            # The remote side may hold a different num_blocks, so its K->V
            # plane stride is derived from the remote block count.
            remote_ktov_stride = block_stride * remote_moriio_meta.num_blocks
        transfer_size_byte = blksize * hn * hs * sz
        # One descriptor per block for MLA, two (K and V) otherwise.
        per_block = 1 if is_mla else 2
        total = len(local_block_ids) * per_block
        offset_local = [0] * total
        offset_remote = [0] * total
        sizes = [transfer_size_byte] * total
        w = 0
        for i, lb in enumerate(local_block_ids):
            rb = remote_block_ids[i]
            # K
            offset_local[w] = sz * (lb * block_stride)
            offset_remote[w] = sz * (rb * block_stride)
            w += 1
            if not is_mla:
                # V
                # Handle num_block variations originating from PD (different kv strides)
                # TODO: address block_sz differences in heterogeneous TP scenarios
                # In MLA, we don't need to consider these two cases.
                offset_local[w] = sz * (1 * local_ktov_stride + lb * block_stride)
                offset_remote[w] = sz * (1 * remote_ktov_stride + rb * block_stride)
                w += 1
        # Coalesce adjacent descriptors to reduce the RDMA op count.
        merged_l, merged_r, merged_s = self.merge_contiguous_blocks(
            offset_local, offset_remote, sizes, assume_sorted=False
        )
        return merged_l, merged_r, merged_s
def _read_blocks(
self,
local_block_ids: list[int],
remote_block_ids: list[int],
dst_engine_id: str,
request_id: str,
remote_host: str,
remote_notify_port: int,
) -> None:
if self.mode == MoRIIOMode.WRITE:
return
dp0_engine_id = self.get_engine_name_with_dp(dst_engine_id, 0)
sessions, remote_moriio_meta = self._get_built_session(dp0_engine_id)
first_layer = list(self.layer_name_to_local_kv_cache_metadata.keys())[0]
offs = self._compute_block_transfer_offsets(
first_layer, local_block_ids, remote_block_ids, remote_moriio_meta
)
for layer_name in self.layer_name_to_local_kv_cache_metadata:
sess_idx = list(self.layer_name_to_local_kv_cache_metadata.keys()).index(
layer_name
)
# TODO : apply multi-session batch-read when moriio support it
transfer_status = self.moriio_wrapper.read_remote_data(
offs[2], offs[0], offs[1], sessions[sess_idx]
)
with self.moriio_wrapper.lock:
self._recving_transfers[request_id].append(transfer_status)
self._recving_transfers_callback_addr[request_id] = (
remote_host,
str(remote_notify_port + self.tp_rank),
)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_connector.py",
"license": "Apache License 2.0",
"lines": 1309,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import threading
from typing import TYPE_CHECKING, Any
from weakref import ref as weakref_ref
import msgpack
import torch
import zmq
from vllm import envs
from vllm.logger import init_logger
from vllm.utils.network_utils import (
make_zmq_path,
make_zmq_socket,
)
if TYPE_CHECKING:
pass
from queue import Empty, Queue
from vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_common import (
ROLE,
HandshakeError,
LayerTransferPlan,
MoRIIOAgentMetadata,
MoRIIOConstants,
MoRIIOError,
RemoteAllocInfo,
TransferError,
WriteTask,
get_port_offset,
get_role,
zmq_ctx,
)
if TYPE_CHECKING:
from vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_connector import (
MoRIIOConnectorWorker,
)
logger = init_logger(__name__)
try:
from mori.io import (
EngineDesc,
IOEngine,
MemoryDesc,
PollCqMode,
RdmaBackendConfig,
)
logger.info("MoRIIO is available")
except ImportError:
logger.error("MoRIIO is not available")
"""Write task execution logic for MoRIIO connector."""
class MoRIIOWriter:
    """Handles write operations for KV cache transfers.

    Implements distributed KV cache transfer using the MoRIIO library
    for RDMA-based communication between prefill and decode instances.
    """

    def __init__(self, worker: "MoRIIOConnectorWorker"):
        """Initialize the writer.

        Args:
            worker: Reference to the parent worker
        """
        # Weak reference: the writer must not keep the worker alive.
        self._worker_ref: weakref_ref[MoRIIOConnectorWorker] = weakref_ref(worker)
        self._write_task_q: Queue[WriteTask] = Queue()
        self._write_worker_started = False
        self._write_worker_lock = threading.Lock()
        # Tasks whose remote block allocation had not arrived when dequeued.
        self._deferred_tasks: list[WriteTask] = []

    @property
    def worker(self) -> "MoRIIOConnectorWorker":
        """Get the worker instance.

        Returns:
            The parent worker instance

        Raises:
            RuntimeError: If worker has been garbage collected
        """
        worker = self._worker_ref()
        if worker is None:
            raise RuntimeError("Parent worker has been garbage collected")
        return worker

    def ensure_worker_started(self) -> None:
        """Ensure the background write worker is running (thread-safe, idempotent)."""
        if self._write_worker_started:
            # Fast path: the flag only ever flips False -> True.
            return
        with self._write_worker_lock:
            # Re-check under the lock. The previous unlocked check-then-set
            # let two racing callers both observe False and spawn two
            # worker threads.
            if self._write_worker_started:
                return
            self._write_worker_started = True
            thread = threading.Thread(
                target=self._write_worker_loop, daemon=True, name="moriio-write-worker"
            )
            thread.start()
            logger.info("Started MoRIIO write worker thread")

    def schedule_write(self, task: WriteTask) -> None:
        """Schedule a write task.

        Args:
            task: The write task to schedule
        """
        self.ensure_worker_started()
        self._write_task_q.put(task)

    def _write_worker_loop(self) -> None:
        """Main loop for the write worker thread."""
        while True:
            # Process deferred tasks first so earlier requests are not starved.
            self._process_deferred_tasks()
            # Short timeout so deferred tasks keep being polled even when the
            # queue is idle.
            try:
                task = self._write_task_q.get(timeout=0.01)
            except Empty:
                continue
            # Defer tasks whose remote blocks are not allocated yet.
            if not self._is_remote_ready(task):
                self._deferred_tasks.append(task)
                continue
            # Execute the task
            self._execute_write_task(task)

    def _process_deferred_tasks(self) -> None:
        """Process tasks that were previously deferred."""
        if not self._deferred_tasks:
            return
        still_deferred: list[WriteTask] = []
        for task in self._deferred_tasks:
            if self._is_remote_ready(task):
                self._execute_write_task(task)
            else:
                still_deferred.append(task)
        self._deferred_tasks = still_deferred

    def _is_remote_ready(self, task: WriteTask) -> bool:
        """Check if remote blocks are allocated for this task.

        Args:
            task: The write task

        Returns:
            True if remote blocks are ready
        """
        return (
            task.request_id in self.worker.moriio_wrapper.done_remote_allocate_req_dict
        )

    def _get_remote_alloc_info(self, request_id: str) -> RemoteAllocInfo:
        """Get remote allocation info for a request.

        Args:
            request_id: The request ID

        Returns:
            Remote allocation information

        Raises:
            KeyError: If allocation info is missing
        """
        try:
            return self.worker.moriio_wrapper.done_remote_allocate_req_dict[request_id]
        except KeyError as e:
            raise KeyError(
                f"Remote allocation info missing for request {request_id}"
            ) from e

    def _execute_write_task(self, task: WriteTask) -> None:
        """Execute a single write task.

        Args:
            task: The write task to execute
        """
        # Get remote allocation info
        request_info = self._get_remote_alloc_info(task.request_id)
        if request_info.block_ids is None:
            logger.debug("Request %s remote block IDs not ready", task.request_id)
            return
        # Wait for CUDA event
        # The attention computation of the current layer cannot
        # overlap with the kv transfer task,
        # otherwise it will cause precision issues.
        # This event is used to synchronize the kv transfer and computation tasks.
        task.event.synchronize()
        # Update engine ID with DP rank
        task.dst_engine_id = self.worker.get_engine_name_with_dp(
            task.dst_engine_id, request_info.decode_dp_rank
        )
        # Get or create sessions
        sessions, remote_moriio_meta = self.worker._get_built_session(
            task.dst_engine_id
        )
        # Prepare transfer plan
        plan = self._prepare_transfer_plan(task, request_info, remote_moriio_meta)
        # Execute transfer
        self._do_layer_write(plan, sessions)
        # Finalize if all layers complete
        self._finalize_if_complete(task, request_info)

    def _prepare_transfer_plan(
        self,
        task: WriteTask,
        request_info: RemoteAllocInfo,
        remote_moriio_meta: MoRIIOAgentMetadata,
    ) -> LayerTransferPlan:
        """Prepare the transfer plan for a layer.

        Args:
            task: The write task
            request_info: Remote allocation information
            remote_moriio_meta: Metadata of the remote MoRIIO agent

        Returns:
            The transfer plan
        """
        # Offsets are identical for every layer of a request, so compute them
        # once and cache them on the allocation info.
        if request_info.transfer_offset is None:
            offsets = self.worker._compute_block_transfer_offsets(
                task.layer_name,
                task.local_block_ids,
                request_info.block_ids,
                remote_moriio_meta,
            )
            request_info.transfer_offset = offsets
        # Session index follows the layer order of the local KV metadata.
        layer_names = list(self.worker.layer_name_to_local_kv_cache_metadata.keys())
        sess_idx = layer_names.index(task.layer_name)
        local_off, remote_off, sizes = request_info.transfer_offset
        return LayerTransferPlan(
            request_id=task.request_id,
            layer_name=task.layer_name,
            sess_idx=sess_idx,
            transfer_local_offsets=local_off,
            transfer_remote_offsets=remote_off,
            transfer_sizes=sizes,
            use_batch=True,
        )

    def _do_layer_write(self, plan: LayerTransferPlan, sessions: list) -> None:
        """Perform the actual layer write.

        Args:
            plan: The transfer plan
            sessions: List of transfer sessions
        """
        if plan.use_batch:
            self.worker.moriio_wrapper.write_remote_data(
                plan.transfer_sizes,
                plan.transfer_local_offsets,
                plan.transfer_remote_offsets,
                sessions[plan.sess_idx],
            )
        else:
            # Fallback path: one RDMA write per descriptor.
            for i in range(len(plan.transfer_local_offsets)):
                self.worker.moriio_wrapper.write_remote_data_single(
                    plan.transfer_sizes[i],
                    plan.transfer_local_offsets[i],
                    plan.transfer_remote_offsets[i],
                    plan.sess_idx,
                )

    def _finalize_if_complete(
        self, task: WriteTask, request_info: RemoteAllocInfo
    ) -> None:
        """Finalize transfer if all layers are complete.

        Args:
            task: The write task
            request_info: Remote allocation information
        """
        request_info.writes_done += 1
        if request_info.writes_done >= self.worker.num_layers:
            # Wait for transfer to complete
            self.worker.moriio_wrapper.waiting_for_transfer_complete()
            remote_port = task.remote_notify_port + get_port_offset(
                request_info.decode_dp_rank, self.worker.tp_rank
            )
            # Consider using RDMA immediate data in decode side
            # to eliminate the need for this notification.
            # Consider including the first gen token from prefill in the notification
            # Send completion notification
            self.worker.moriio_wrapper.send_notify(
                task.request_id, task.remote_ip, remote_port
            )
            # mark request as done, then we can free the blocks
            with self.worker.moriio_wrapper.lock:
                self.worker.moriio_wrapper.done_req_ids.append(task.request_id)
                del self.worker.moriio_wrapper.done_remote_allocate_req_dict[
                    task.request_id
                ]
            logger.debug(
                "Completed transfer for request %s, notified port %d",
                task.request_id,
                remote_port,
            )
class MoRIIOWrapper:
    """Wrapper for MoRIIO engine operations.

    Handles both producer and consumer roles for KV cache transfers.

    Args:
        moriio_engine: MoRIIO engine instance
        tp_rank: Tensor parallel rank
        dp_rank: Data parallel rank
    """

    def __init__(
        self,
        moriio_engine: "IOEngine | None" = None,
        tp_rank: int = 0,
        dp_rank: int = 0,
    ):
        self.tp_rank = tp_rank
        self.dp_rank = dp_rank
        self.moriio_engine = moriio_engine
        self.remote_memory_metadata = None
        self.local_memory_registered = False
        self.local_memory_metadata = None
        # In-flight transfer handles; guarded by self.lock.
        self.transfer_status: list[Any] = []
        self.remote_engine_ip: str | None = None
        self.notify_port: int | None = None
        self.lock = threading.Lock()
        # Request IDs whose transfers completed (producer side).
        self.done_req_ids: list[str] = []
        # Remote allocation info keyed by request ID (producer side).
        self.done_remote_allocate_req_dict: dict[str, RemoteAllocInfo] = {}
        # Request IDs whose KV writes completed (consumer side).
        self.done_write_cache_req_ids: list[str] = []
        self.notify_thread: threading.Thread | None = None
        self.sessions: list[IOEngine.Session] = []
        # ZMQ DEALER sockets for notifications, keyed by tcp path.
        self.paths: dict[str, zmq.Socket] = {}

    def set_moriio_engine(self, moriio_engine):
        """Attach a (non-None) MoRIIO engine to this wrapper."""
        assert moriio_engine is not None, (
            "You Cannot pass None engine to MoRIIOWrapper!"
        )
        self.moriio_engine = moriio_engine

    def set_backend_type(self, backend_type):
        """Create the engine's RDMA backend, configured from VLLM_MORIIO_* envs."""
        assert self.moriio_engine is not None, "MoRIIO engine must be set first"
        qp_per_transfer = envs.VLLM_MORIIO_QP_PER_TRANSFER
        post_batch_size = envs.VLLM_MORIIO_POST_BATCH_SIZE
        num_worker_threads = envs.VLLM_MORIIO_NUM_WORKERS
        poll_mode = PollCqMode.POLLING
        rdma_cfg = RdmaBackendConfig(
            qp_per_transfer,
            post_batch_size,
            num_worker_threads,
            poll_mode,
        )
        self.moriio_engine.create_backend(backend_type, rdma_cfg)

    def get_agent_metadata(self):
        """Return this engine's descriptor, packed for handshake exchange."""
        assert self.moriio_engine is not None, "MoRIIO engine must be set first"
        engine_metadata = self.moriio_engine.get_engine_desc()
        engine_metadata_packed = engine_metadata.pack()
        return engine_metadata_packed

    def register_remote_engine(self, remote_packed_engine_metadata):
        """Register a remote engine from its packed descriptor; returns its key."""
        assert self.moriio_engine is not None, "MoRIIO engine must be set first"
        consumer_engine_metadata = EngineDesc.unpack(remote_packed_engine_metadata)
        self.moriio_engine.register_remote_engine(consumer_engine_metadata)
        return consumer_engine_metadata.key

    def register_local_tensor(self, tensor: torch.Tensor):
        """Register a local tensor for RDMA and return its packed memory descriptor.

        Raises:
            MoRIIOError: If registration fails.
        """
        assert self.moriio_engine is not None, "MoRIIO engine must be set first"
        try:
            self.local_memory_metadata = self.moriio_engine.register_torch_tensor(
                tensor
            )
            assert self.local_memory_metadata is not None, (
                "register_torch_tensor returned None"
            )
            local_memory_metadata_packed = self.local_memory_metadata.pack()
        except Exception as e:
            raise MoRIIOError(f"Failed to register local memory: {e}") from e
        self.local_memory_registered = True
        return local_memory_metadata_packed

    def get_unpack_memory_metadata(self, packed_memory_metadata):
        """Unpack a remote memory descriptor received during handshake."""
        return MemoryDesc.unpack(packed_memory_metadata)

    def build_session(self, local_memory_metadata, remote_memory_metadata):
        """Create a transfer session between a local and a remote memory region."""
        assert self.moriio_engine is not None, "MoRIIO engine must be set first"
        return self.moriio_engine.create_session(
            local_memory_metadata, remote_memory_metadata
        )

    def read_remote_data(
        self, transfer_size_byte, local_offset=0, remote_offset=0, session=None
    ):
        """Issue a batched RDMA read on *session*; returns the transfer handle."""
        assert self.local_memory_registered, "You have not register local memory data!"
        assert self.moriio_engine is not None, "MoRIIO engine must be set first"
        transfer_status = session.batch_read(
            local_offset,
            remote_offset,
            transfer_size_byte,
            self.moriio_engine.allocate_transfer_uid(),
        )
        return transfer_status

    def write_remote_data(
        self, transfer_size_byte, local_offset=0, remote_offset=0, session=None
    ):
        """Issue a batched RDMA write; the handle is tracked for later waiting."""
        assert self.local_memory_registered, "You have not register local memory data!"
        assert self.moriio_engine is not None, "MoRIIO engine must be set first"
        write_uid = self.moriio_engine.allocate_transfer_uid()
        transfer_status = session.batch_write(
            local_offset, remote_offset, transfer_size_byte, write_uid
        )
        with self.lock:
            self.transfer_status.append(transfer_status)

    def write_remote_data_single(
        self, transfer_size_byte, local_offset=0, remote_offset=0, sess_idx=0
    ):
        """Issue a single (non-batched) RDMA write on session *sess_idx*."""
        assert self.local_memory_registered, "You have not register local memory data!"
        assert self.moriio_engine is not None, "MoRIIO engine must be set first"
        transfer_status = self.sessions[sess_idx].write(
            local_offset,
            remote_offset,
            transfer_size_byte,
            self.moriio_engine.allocate_transfer_uid(),
        )
        with self.lock:
            self.transfer_status.append(transfer_status)

    def waiting_for_transfer_complete(self):
        """Block until every tracked transfer finishes.

        Raises:
            TransferError: If any transfer reports failure.
        """
        if not self.transfer_status:
            return
        transfers_to_wait = []
        with self.lock:
            # Snapshot and clear under the lock so concurrent writers can
            # keep appending new handles while we wait.
            transfers_to_wait = self.transfer_status[:]
            self.transfer_status.clear()
        for status in transfers_to_wait:
            try:
                status.Wait()
                if not status.Succeeded():
                    logger.error(
                        "Transfer failed: %s, Code: %s", status.Message(), status.Code()
                    )
                    raise TransferError("MoRIIO transfer failed!")
            except Exception as e:
                logger.error("Transfer %s failed: %s", status, e)
                raise

    def async_wait_reqid(self):
        """Start the (single) background ZMQ listener for remote notifications."""
        assert self.notify_port is not None, "Notify port cannot be None"
        if self.notify_thread is not None:
            return

        def _async_wait():
            host = "*"
            path = make_zmq_path("tcp", host, self.notify_port)
            logger.info("Node starting to listen notify from path = %s", path)
            with zmq_ctx(zmq.ROUTER, path) as sock:
                while True:
                    try:
                        identity, msg = sock.recv_multipart()
                        self._handle_message(msg)
                    except Exception as e:
                        logger.error("Error processing message: %s", e)
                        raise HandshakeError(f"Error processing message: {e}") from e

        self.notify_thread = threading.Thread(
            target=_async_wait, daemon=True, name="moriio-notify-listener"
        )
        self.notify_thread.start()

    def _handle_message(self, msg: bytes):
        """Handles incoming messages from remote nodes."""
        # Handles incoming remote messages:
        #   Prefill Role:
        #       [write] mode: receives block information (allocation)
        #       [read] mode: receives block release messages from decode side
        #   Decode Role:
        #       [write] mode: receives KV cache write completion notifications
        try:
            data = msgpack.loads(msg)
            if isinstance(data, dict) and "req_id" in data:
                self._handle_structured_message(data)
                return
        except (msgpack.exceptions.ExtraData, msgpack.exceptions.UnpackException):
            logger.debug("Failed to decode msgpack message, will try as string")
        try:
            msg_str = msg.decode("UTF-8")
        except UnicodeDecodeError:
            # Bug fix: this path previously logged and raised with `msg_str`,
            # which is unbound when decoding fails, producing a NameError
            # instead of the intended diagnostics. Report the raw bytes.
            logger.warning("Received non-UTF8 message: %r", msg)
            raise MoRIIOError(f"Unhandled message format: {msg!r}") from None
        if msg_str.startswith(MoRIIOConstants.COMPLETION_PREFIX):
            self._handle_completion_message(msg_str)
            return
        raise MoRIIOError(f"Unhandled message format: {msg_str}")

    def _handle_structured_message(self, data: dict):
        """Record a decode-side block allocation message (producer only)."""
        assert get_role() == ROLE.PRODUCER, "Only prefill can get block messages"
        req_id = data["req_id"]
        block_notify_list = data.get("block_notify_list", [])
        decode_dp_rank = data.get("decode_rank", 0)
        assert len(block_notify_list) > 0, (
            "block_notify_list cannot be empty in remote allocate message"
        )
        with self.lock:
            self.done_remote_allocate_req_dict[req_id] = RemoteAllocInfo(
                block_ids=block_notify_list, decode_dp_rank=decode_dp_rank
            )

    def _handle_completion_message(self, msg: str):
        """File a completion notification under the role-appropriate list."""
        with self.lock:
            if get_role() == ROLE.PRODUCER:
                self.done_req_ids.append(msg)
            else:
                self.done_write_cache_req_ids.append(msg)

    def send_notify(self, req_ids, remote_ip, remote_port):
        """Send request-ID notification(s) to a remote listener over ZMQ.

        Sockets are cached per path; a failed send evicts the cached socket.
        """
        if not remote_ip or not remote_port:
            logger.warning("Missing remote_ip or remote_port for notification")
            return
        path = make_zmq_path("tcp", remote_ip, remote_port)
        if path not in self.paths:
            ctx = zmq.Context.instance()
            sock = make_zmq_socket(
                ctx=ctx, path=path, socket_type=zmq.DEALER, bind=False
            )
            self.paths[path] = sock
        req_list = req_ids if isinstance(req_ids, list) else [req_ids]
        sock = self.paths[path]
        try:
            for req_id in req_list:
                if not isinstance(req_id, str):
                    logger.warning(
                        "Invalid req_id type: %s, expected str", type(req_id)
                    )
                    continue
                sock.send(req_id.encode("utf-8"))
        except Exception as e:
            logger.error("Failed to send notification to %s: %s", path, e)
            self.paths.pop(path, None)
            raise

    def pop_finished_req_ids(self):
        # producer invocation: get the set of completed requests at the decode
        with self.lock:
            done_send = set(self.done_req_ids)
            self.done_req_ids = []
        return done_send

    def pop_finished_write_req_ids(self):
        # Call the consumer in write mode to get the collection after write completion
        with self.lock:
            done_write_cache = set(self.done_write_cache_req_ids)
            self.done_write_cache_req_ids = []
        return done_write_cache

    def shutdown(self):
        """Close all cached notification sockets; safe to call repeatedly."""
        logger.debug("Closing MoRIIOWrapper and cleaning up ZMQ sockets")
        for path, sock in self.paths.items():
            try:
                sock.close(linger=0)
                logger.debug("Closed ZMQ socket for path: %s", path)
            except Exception as e:
                logger.warning("Error closing ZMQ socket for path %s: %s", path, e)
        self.paths.clear()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py",
"license": "Apache License 2.0",
"lines": 511,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/abstract.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Set
import torch
import torch.nn as nn
from vllm.tasks import PoolingTask
from vllm.v1.outputs import PoolerOutput
from vllm.v1.pool.metadata import PoolingMetadata
from .common import PoolingParamsUpdate
class Pooler(nn.Module, ABC):
    """The interface required for all poolers used in pooling models in vLLM."""
    @abstractmethod
    def get_supported_tasks(self) -> Set[PoolingTask]:
        """Determine which pooling tasks are supported."""
        raise NotImplementedError
    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        """
        Construct the updated pooling parameters to use for a supported task.
        """
        # Default: no parameter changes. Subclasses override when a task
        # needs extra inputs (e.g. prompt token IDs).
        return PoolingParamsUpdate()
    @abstractmethod
    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> PoolerOutput:
        """Pool hidden states into per-request outputs.

        NOTE(review): hidden_states is presumably the flattened
        (num_tokens, hidden_size) batch — confirm against implementations.
        """
        raise NotImplementedError
# Explicit public API of this module.
__all__ = ["Pooler"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/abstract.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/activations.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Callable
from typing import TypeVar
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig
from vllm.config import ModelConfig, get_current_vllm_config
from vllm.logger import init_logger
from vllm.utils.import_utils import resolve_obj_by_qualname
logger = init_logger(__name__)
def get_classification_act_fn(
    config: PretrainedConfig,
) -> "PoolerActivation":
    """Select the pooler activation matching the HF ``problem_type``.

    Mirrors transformers' ForSequenceClassificationLoss selection:
    https://github.com/huggingface/transformers/blob/57bb6db6ee4cfaccc45b8d474dfad5a17811ca60/src/transformers/loss/loss_utils.py#L92
    """
    problem_type = getattr(config, "problem_type", "")
    if problem_type == "regression":
        # Regression outputs are raw values; pass them through unchanged.
        return PoolerIdentity()
    if problem_type == "multi_label_classification":
        return PoolerMultiLabelClassify()
    # "single_label_classification" and any unrecognized value both use the
    # standard classification activation.
    return PoolerClassify()
def get_cross_encoder_act_fn(
    config: PretrainedConfig,
) -> "PoolerActivation":
    """Resolve the activation for a cross-encoder from its HF config.

    Looks for a Sentence-Transformers ``activation_fn`` first, then the
    legacy ``sbert_ce_default_activation_function``; falls back to the
    standard classification activation when neither is configured.
    """
    function_name: str | None = None
    if hasattr(config, "sentence_transformers"):
        st_config = config.sentence_transformers
        if "activation_fn" in st_config:
            function_name = st_config["activation_fn"]
    if function_name is None:
        default_fn = getattr(config, "sbert_ce_default_activation_function", None)
        if default_fn is not None:
            function_name = default_fn
    if function_name is None:
        return PoolerClassify()
    # Only allow loading callables from torch.nn.modules.
    assert function_name.startswith("torch.nn.modules."), (
        "Loading of activation functions is restricted to "
        "torch.nn.modules for security reasons"
    )
    fn = resolve_obj_by_qualname(function_name)()
    return PoolerActivation.wraps(fn)
def resolve_classifier_act_fn(
    model_config: ModelConfig,
    static_num_labels: bool = True,
    act_fn: "PoolerActivation | str | None" = None,
):
    """Normalize ``act_fn`` into a callable activation.

    Accepts a callable (returned as-is), one of the strings "classify" /
    "score", or None (default classification activation).
    """
    if act_fn is None:
        return PoolerClassify(static_num_labels=static_num_labels)
    if isinstance(act_fn, str):
        if act_fn == "classify":
            return get_classification_act_fn(model_config.hf_config)
        if act_fn == "score":
            return get_cross_encoder_act_fn(model_config.hf_config)
        raise ValueError(f"act_fn [{act_fn=}] not supported.")
    assert callable(act_fn)
    return act_fn
# Constrained to the two shapes pooled data takes in this module:
# a single stacked tensor or a list of per-request tensors.
_T = TypeVar("_T", torch.Tensor, list[torch.Tensor])
class PoolerActivation(nn.Module, ABC):
    """Base class for activations applied to pooled model outputs."""

    @staticmethod
    def wraps(module: nn.Module):
        """Adapt a plain ``nn.Module`` activation into a ``PoolerActivation``."""
        if isinstance(module, nn.Identity):
            return PoolerIdentity()
        if isinstance(module, (nn.Sigmoid, nn.Softmax)):
            return PoolerClassify()
        return LambdaPoolerActivation(module)

    @abstractmethod
    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        """Apply the activation to one tensor of pooled data."""
        raise NotImplementedError

    def forward(self, pooled_data: _T) -> _T:
        # shape:
        # classify (& score) -> (batch_size, num_classes)
        # embed -> (batch_size, embedding_dim) or list(embedding_dim)
        #          (batch_size, dimensions) or list(dimensions) if using MRL
        if not isinstance(pooled_data, list):
            return self.forward_chunk(pooled_data)
        return [self.forward_chunk(chunk) for chunk in pooled_data]
class PoolerIdentity(PoolerActivation):
    """No-op activation: returns pooled data unchanged."""
    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        return pooled_data
class PoolerNormalize(PoolerActivation):
    """L2-normalizes each pooled vector along the last dimension."""
    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        return F.normalize(pooled_data, p=2, dim=-1)
class PoolerMultiLabelClassify(PoolerActivation):
    """Element-wise sigmoid, giving independent per-label probabilities."""
    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        return F.sigmoid(pooled_data)
class PoolerClassify(PoolerActivation):
    """Softmax over labels, or sigmoid when fewer than two labels exist."""

    def __init__(self, *, static_num_labels: bool = True) -> None:
        super().__init__()
        num_labels: int | None = None
        if static_num_labels:
            # Resolve the label count once from the current model config;
            # otherwise it is inferred per tensor in forward_chunk.
            vllm_config = get_current_vllm_config()
            num_labels = getattr(
                vllm_config.model_config.hf_config, "num_labels", 0
            )
            if num_labels == 0:
                logger.warning(
                    "num_labels should be > 0 for classification "
                    "models, falling back to softmax. "
                    "Please check if the configuration is correct."
                )
        self.num_labels = num_labels

    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        labels = self.num_labels
        if labels is None:
            # Dynamic label count: take it from the last dimension.
            labels = pooled_data.shape[-1]
        if labels < 2:
            return F.sigmoid(pooled_data)
        return F.softmax(pooled_data, dim=-1)
class LambdaPoolerActivation(PoolerActivation):
    """Adapts an arbitrary tensor->tensor callable into a PoolerActivation."""
    def __init__(self, fn: Callable[[torch.Tensor], torch.Tensor]):
        super().__init__()
        # The wrapped activation callable, applied per chunk.
        self.fn = fn
    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        return self.fn(pooled_data)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/activations.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/common.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from dataclasses import dataclass
from typing import TypeVar
import torch
from vllm.pooling_params import PoolingParams
# Pooled data is either one stacked tensor or a list of per-request tensors.
_T = TypeVar("_T", bound=torch.Tensor | list[torch.Tensor])
# Callables applied to pooled hidden states at the various head stages.
ProjectorFn = Callable[[torch.Tensor], torch.Tensor]
ClassifierFn = Callable[[torch.Tensor], torch.Tensor]
ActivationFn = Callable[[_T], _T]
@dataclass(frozen=True)
class PoolingParamsUpdate:
    requires_token_ids: bool = False
    """Set this flag to enable `get_prompt_token_ids` for your pooler."""

    def __or__(self, other: "PoolingParamsUpdate") -> "PoolingParamsUpdate":
        """Union of two updates: a capability is required if either side needs it."""
        needs_token_ids = self.requires_token_ids or other.requires_token_ids
        return PoolingParamsUpdate(requires_token_ids=needs_token_ids)

    def apply(self, params: PoolingParams) -> None:
        """Write this update's settings onto *params* in place."""
        params.requires_token_ids = self.requires_token_ids
# Explicit public API of this module.
__all__ = ["ActivationFn", "ClassifierFn", "ProjectorFn", "PoolingParamsUpdate"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/common.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/seqwise/heads.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Set
from typing import TypeAlias
import torch
import torch.nn as nn
from vllm.model_executor.layers.pooler import ActivationFn, ClassifierFn, ProjectorFn
from vllm.tasks import PoolingTask
from vllm.v1.pool.metadata import PoolingMetadata
from .methods import SequencePoolingMethodOutput
# A head returns either one stacked tensor or a list of per-request tensors.
SequencePoolerHeadOutput: TypeAlias = torch.Tensor | list[torch.Tensor]
class SequencePoolerHead(nn.Module, ABC):
    """Post-processing head applied to sequence-pooled hidden states."""
    @abstractmethod
    def get_supported_tasks(self) -> Set[PoolingTask]:
        """Return the pooling tasks this head can serve."""
        raise NotImplementedError
    @abstractmethod
    def forward(
        self,
        pooled_data: SequencePoolingMethodOutput,
        pooling_metadata: PoolingMetadata,
    ) -> SequencePoolerHeadOutput:
        """Transform pooled representations into the task's final outputs."""
        raise NotImplementedError
class EmbeddingPoolerHead(SequencePoolerHead):
    """Head for embedding tasks: optional dtype cast, optional projection,
    Matryoshka dimension truncation, and optional per-request activation."""
    def __init__(
        self,
        projector: ProjectorFn | None = None,
        head_dtype: torch.dtype | str | None = None,
        activation: ActivationFn | None = None,
    ) -> None:
        super().__init__()
        # Optional Sentence-Transformers-style projector applied after pooling.
        self.projector = projector
        # Optional dtype to cast pooled data to before the head runs.
        self.head_dtype = head_dtype
        # Optional activation (e.g. normalization), gated per request by
        # each pooling param's `use_activation` flag.
        self.activation = activation
    def get_supported_tasks(self) -> Set[PoolingTask]:
        return {"embed"}
    def forward(
        self,
        pooled_data: SequencePoolingMethodOutput,
        pooling_metadata: PoolingMetadata,
    ) -> SequencePoolerHeadOutput:
        """Produce final embeddings honoring per-request dimensions/activation."""
        pooling_params = pooling_metadata.pooling_params
        assert len(pooled_data) == len(pooling_params)
        if isinstance(pooled_data, list):
            pooled_data = torch.stack(pooled_data)
        # pooled_data shape: [batchsize, hidden_dimension]
        if self.head_dtype is not None:
            pooled_data = pooled_data.to(self.head_dtype)
        # Apply ST projector
        if self.projector is not None:
            pooled_data = self.projector(pooled_data)
        # pooled_data shape: [batchsize, embedding_dimension]
        # for matryoshka representation
        dimensions_list = [pooling_param.dimensions for pooling_param in pooling_params]
        if any(d is not None for d in dimensions_list):
            # change the output dimension
            assert len(pooled_data) == len(dimensions_list)
            if len(set(dimensions_list)) == 1 and not isinstance(pooled_data, list):
                # if all dimensions are the same, truncate the batch at once
                d = dimensions_list[0]
                pooled_data = pooled_data[..., :d]
            else:
                # mixed dimensions: fall back to a list of truncated views
                pooled_data = [
                    vecs if d is None else vecs[..., :d]
                    for vecs, d in zip(pooled_data, dimensions_list)
                ]
        # for normalize
        if self.activation is not None:
            flags = [p.use_activation for p in pooling_params]
            if len(set(flags)) == 1:
                # homogeneous batch: apply (or skip) the activation once
                if flags[0]:
                    pooled_data = self.activation(pooled_data)
            else:
                pooled_data = [
                    self.activation(vecs) if f else vecs
                    for vecs, f in zip(pooled_data, flags)
                ]
        # pooled_data shape: [batchsize, embedding_dimension]
        return pooled_data
class ClassifierPoolerHead(SequencePoolerHead):
    """Head for classify/score tasks: optional dtype cast, classifier
    projection, logit bias, and per-request activation."""
    def __init__(
        self,
        classifier: ClassifierFn | None = None,
        logit_bias: float | None = None,
        head_dtype: torch.dtype | str | None = None,
        activation: ActivationFn | None = None,
    ) -> None:
        super().__init__()
        # Optional classifier mapping hidden states to label logits.
        self.classifier = classifier
        # Optional scalar subtracted from the logits before activation.
        self.logit_bias = logit_bias
        # Optional dtype to cast pooled data to before the head runs.
        self.head_dtype = head_dtype
        # Optional activation, gated per request by `use_activation`.
        self.activation = activation
    def get_supported_tasks(self) -> Set[PoolingTask]:
        return {"classify", "score"}
    def forward(
        self,
        pooled_data: SequencePoolingMethodOutput,
        pooling_metadata: PoolingMetadata,
    ) -> SequencePoolerHeadOutput:
        """Produce per-request label scores from pooled hidden states."""
        pooling_params = pooling_metadata.pooling_params
        assert len(pooled_data) == len(pooling_params)
        if isinstance(pooled_data, list):
            pooled_data = torch.stack(pooled_data)
        # pooled_data shape: [batchsize, hidden_size]
        if self.head_dtype is not None:
            pooled_data = pooled_data.to(self.head_dtype)
        if self.classifier is not None:
            pooled_data = self.classifier(pooled_data)
        # pooled_data shape: [batchsize, num_labels]
        if self.logit_bias is not None:
            pooled_data -= self.logit_bias
        if self.activation is not None:
            flags = [p.use_activation for p in pooling_params]
            if len(set(flags)) == 1:
                # homogeneous batch: one branch for all requests
                pooled_data = self.activation(pooled_data) if flags[0] else pooled_data
            else:
                pooled_data = [
                    self.activation(vecs) if f else vecs
                    for vecs, f in zip(pooled_data, flags)
                ]
        # pooled_data shape: [batchsize, num_labels]
        return pooled_data
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/seqwise/heads.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/seqwise/methods.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Set
from typing import TypeAlias
import torch
import torch.nn as nn
from vllm.config.pooler import SequencePoolingType
from vllm.model_executor.layers.pooler import PoolingParamsUpdate
from vllm.tasks import PoolingTask
from vllm.v1.pool.metadata import PoolingMetadata
# Sequence-level pooling result: one stacked tensor, or one tensor per request.
SequencePoolingMethodOutput: TypeAlias = torch.Tensor | list[torch.Tensor]
class SequencePoolingMethod(nn.Module, ABC):
    """Abstract base for sequence-level pooling (one vector per request)."""

    def get_supported_tasks(self) -> Set[PoolingTask]:
        # All pooling tasks are supported by default; subclasses may narrow this.
        return {"token_embed", "token_classify", "embed", "classify", "score"}

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        # No extra pooling-parameter requirements by default.
        return PoolingParamsUpdate()

    @abstractmethod
    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> SequencePoolingMethodOutput:
        """Pool flattened token hidden states into per-request outputs."""
        raise NotImplementedError
class CLSPool(SequencePoolingMethod):
    """Pool each sequence by taking its first (CLS) token's hidden state."""

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> SequencePoolingMethodOutput:
        cursor = pooling_metadata.get_pooling_cursor()
        # A partially prefilled request may not contain its first token
        # in this pass, so CLS pooling cannot proceed.
        assert not cursor.is_partial_prefill(), (
            "partial prefill not supported with CLS pooling"
        )
        first_tokens = cursor.first_token_indices_gpu
        return hidden_states[first_tokens]
class LastPool(SequencePoolingMethod):
    """Pool each sequence by taking its final token's hidden state."""

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> SequencePoolingMethodOutput:
        cursor = pooling_metadata.get_pooling_cursor()
        last_tokens = cursor.last_token_indices_gpu
        return hidden_states[last_tokens]
class MeanPool(SequencePoolingMethod):
    """Pool each sequence by averaging its token hidden states."""

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> SequencePoolingMethodOutput:
        pooling_cursor = pooling_metadata.get_pooling_cursor()
        # Averaging needs every token of the prompt in this pass.
        assert not pooling_cursor.is_partial_prefill(), (
            "partial prefill not supported with MEAN pooling"
        )
        prompt_lens = pooling_cursor.prompt_lens_cpu.to(
            hidden_states.device, non_blocking=True
        )
        # Use float32 for torch.cumsum in MeanPool,
        # otherwise precision will be lost significantly.
        cumsum = torch.cumsum(hidden_states, dim=0, dtype=torch.float32)
        start_indices = pooling_cursor.first_token_indices_gpu
        end_indices = pooling_cursor.last_token_indices_gpu
        # Per-request sum over [start, end] is cumsum[end] - cumsum[start];
        # the difference excludes the start row, so it is added back before
        # dividing by the prompt length to get the mean.
        return (
            cumsum[end_indices] - cumsum[start_indices] + hidden_states[start_indices]
        ) / prompt_lens.unsqueeze(1)
def get_seq_pooling_method(pooling_type: SequencePoolingType | str):
    """Resolve a sequence-level pooling type name to a method instance."""
    if pooling_type == "MEAN":
        return MeanPool()
    if pooling_type == "LAST":
        return LastPool()
    if pooling_type == "CLS":
        return CLSPool()
    raise NotImplementedError(f"Unknown sequence pooling type: {pooling_type!r}")
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/seqwise/methods.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/seqwise/poolers.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable, Set
from typing import TypeAlias
import torch
from vllm.config import PoolerConfig, get_current_vllm_config
from vllm.model_executor.layers.pooler import ClassifierFn, PoolingParamsUpdate
from vllm.model_executor.layers.pooler.abstract import Pooler
from vllm.model_executor.layers.pooler.activations import (
PoolerActivation,
PoolerNormalize,
resolve_classifier_act_fn,
)
from vllm.model_executor.models.adapters import _load_st_projector
from vllm.tasks import POOLING_TASKS, PoolingTask
from vllm.v1.pool.metadata import PoolingMetadata
from .heads import (
ClassifierPoolerHead,
EmbeddingPoolerHead,
SequencePoolerHead,
SequencePoolerHeadOutput,
)
from .methods import (
SequencePoolingMethod,
SequencePoolingMethodOutput,
get_seq_pooling_method,
)
# Callable form of a sequence pooling method: (hidden_states, metadata) -> pooled.
SequencePoolingFn: TypeAlias = Callable[
    [torch.Tensor, PoolingMetadata],
    SequencePoolingMethodOutput,
]
# Callable form of a sequence pooler head: (pooled, metadata) -> head output.
SequencePoolingHeadFn: TypeAlias = Callable[
    [SequencePoolingMethodOutput, PoolingMetadata],
    SequencePoolerHeadOutput,
]
# Final pooler output: one stacked tensor, or one tensor per request.
SequencePoolerOutput: TypeAlias = torch.Tensor | list[torch.Tensor]
class SequencePooler(Pooler):
    """
    Pools per-sequence information out of token hidden states.

    The work is split into two stages:
    1. ``pooling`` selects/aggregates tokens per request.
    2. ``head`` post-processes the pooled vectors.
    The result is returned as a `PoolerOutput`-compatible structure.
    """

    def __init__(
        self,
        pooling: SequencePoolingMethod | SequencePoolingFn,
        head: SequencePoolerHead | SequencePoolingHeadFn,
    ) -> None:
        super().__init__()
        self.pooling = pooling
        self.head = head

    def get_supported_tasks(self) -> Set[PoolingTask]:
        supported = set(POOLING_TASKS)
        # Plain callables cannot narrow the task set; only the module
        # variants expose their supported tasks.
        if isinstance(self.pooling, SequencePoolingMethod):
            supported &= self.pooling.get_supported_tasks()
        if isinstance(self.head, SequencePoolerHead):
            supported &= self.head.get_supported_tasks()
        return supported

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        combined = PoolingParamsUpdate()
        if isinstance(self.pooling, SequencePoolingMethod):
            combined |= self.pooling.get_pooling_updates(task)
        return combined

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> SequencePoolerOutput:
        pooled = self.pooling(hidden_states, pooling_metadata)
        return self.head(pooled, pooling_metadata)
def pooler_for_embed(pooler_config: PoolerConfig):
    """Build a `SequencePooler` configured for the embedding task."""
    pooling = get_seq_pooling_method(pooler_config.get_seq_pooling_type())
    model_config = get_current_vllm_config().model_config
    return SequencePooler(
        pooling=pooling,
        head=EmbeddingPoolerHead(
            head_dtype=model_config.head_dtype,
            projector=_load_st_projector(model_config),
            activation=PoolerNormalize(),
        ),
    )
def pooler_for_classify(
    pooler_config: PoolerConfig,
    *,
    pooling: SequencePoolingMethod | SequencePoolingFn | None = None,
    classifier: ClassifierFn | None = None,
    act_fn: PoolerActivation | str | None = None,
):
    """Build a `SequencePooler` for classification-style tasks.

    A caller-provided ``pooling`` overrides the config-derived one.
    """
    if pooling is None:
        pooling = get_seq_pooling_method(pooler_config.get_seq_pooling_type())
    model_config = get_current_vllm_config().model_config
    return SequencePooler(
        pooling=pooling,
        head=ClassifierPoolerHead(
            head_dtype=model_config.head_dtype,
            classifier=classifier,
            logit_bias=model_config.pooler_config.logit_bias,
            activation=resolve_classifier_act_fn(
                model_config, static_num_labels=True, act_fn=act_fn
            ),
        ),
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/seqwise/poolers.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/special.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Mapping, Set
from itertools import groupby
import torch
from vllm.config import PoolerConfig
from vllm.model_executor.layers.pooler import PoolingParamsUpdate
from vllm.tasks import PoolingTask
from vllm.v1.pool.metadata import PoolingMetadata
from .abstract import Pooler, PoolerOutput
from .common import ClassifierFn
from .seqwise import (
SequencePoolingFn,
SequencePoolingMethod,
pooler_for_classify,
pooler_for_embed,
)
from .tokwise import AllPool, pooler_for_token_classify, pooler_for_token_embed
class DispatchPooler(Pooler):
    """Dispatches calls to a sub-pooler based on the pooling task."""

    @classmethod
    def for_embedding(cls, pooler_config: PoolerConfig):
        """Build a dispatcher covering the token- and sequence-level
        embedding tasks."""
        return cls(
            {
                "token_embed": pooler_for_token_embed(pooler_config),
                "embed": pooler_for_embed(pooler_config),
            },
        )

    @classmethod
    def for_seq_cls(
        cls,
        pooler_config: PoolerConfig,
        *,
        pooling: SequencePoolingMethod | SequencePoolingFn | None = None,
        classifier: ClassifierFn | None = None,
    ):
        """Build a dispatcher covering token-level classification plus the
        sequence-level classify and score tasks."""
        return cls(
            {
                # Token-level classification always pools all tokens.
                "token_classify": pooler_for_token_classify(
                    pooler_config,
                    pooling=AllPool(),
                    classifier=classifier,
                ),
                "classify": pooler_for_classify(
                    pooler_config,
                    pooling=pooling,
                    classifier=classifier,
                    act_fn="classify",
                ),
                "score": pooler_for_classify(
                    pooler_config,
                    pooling=pooling,
                    classifier=classifier,
                    act_fn="score",
                ),
            }
        )

    def __init__(self, poolers_by_task: Mapping[PoolingTask, Pooler]) -> None:
        super().__init__()
        # Fail fast if a sub-pooler was registered under a task it cannot serve.
        for task, pooler in poolers_by_task.items():
            if task not in pooler.get_supported_tasks():
                raise ValueError(
                    f"{pooler=} does not support {task=}. "
                    f"Supported tasks: {pooler.get_supported_tasks()}"
                )
        self.poolers_by_task = poolers_by_task

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return set(self.poolers_by_task)

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        return self.poolers_by_task[task].get_pooling_updates(task)

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> PoolerOutput:
        poolers_by_task = self.poolers_by_task
        outputs = list[torch.Tensor | None]()
        # Consecutive requests with the same task are dispatched together;
        # each run receives a sliced view of the metadata covering
        # [offset, offset + num_items).
        offset = 0
        for task, group in groupby(pooling_metadata.tasks):
            if not (pooler := poolers_by_task.get(task)):
                raise ValueError(
                    f"Unsupported task: {task!r} "
                    f"Supported tasks: {self.get_supported_tasks()}"
                )
            num_items = len(list(group))
            group_output: PoolerOutput = pooler(
                hidden_states,
                pooling_metadata[offset : offset + num_items],
            )
            outputs.extend(group_output)
            offset += num_items
        return outputs

    def extra_repr(self) -> str:
        s = f"supported_task={self.get_supported_tasks()}"
        return s
class IdentityPooler(Pooler):
    """A no-op pooler that returns the hidden states unchanged."""

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return {"score", "plugin"}

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> PoolerOutput:
        # Pass the raw hidden states straight through.
        return hidden_states
class BOSEOSFilter(Pooler):
    """Filters the BOS and EOS token results from outputs."""

    def __init__(
        self,
        pooler: Pooler,
        bos_token_id: int = -1,  # -1 disables the filtering
        eos_token_id: int = -1,
    ) -> None:
        super().__init__()
        # Wrapped pooler whose per-token outputs are filtered.
        self.pooler = pooler
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return self.pooler.get_supported_tasks()

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        # Prompt token ids are required to recognize BOS/EOS positions.
        return PoolingParamsUpdate(requires_token_ids=True)

    def forward(
        self,
        hidden_states: torch.Tensor | list[torch.Tensor],
        pooling_metadata: PoolingMetadata,
    ) -> PoolerOutput:
        pooled_outputs = self.pooler(hidden_states, pooling_metadata)
        assert isinstance(pooled_outputs, list)
        for i, prompt_len in enumerate(pooling_metadata.prompt_lens):
            pooled_data = pooled_outputs[i]
            # The wrapped pooler must emit one row per prompt token.
            assert (
                isinstance(pooled_data, torch.Tensor)
                and pooled_data.shape[0] == prompt_len
            )
            token_ids = pooling_metadata.prompt_token_ids[i, :prompt_len]
            # Strip the leading BOS row and/or trailing EOS row when present.
            if token_ids[0] == self.bos_token_id:
                pooled_data = pooled_data[1:]
            if token_ids[-1] == self.eos_token_id:
                pooled_data = pooled_data[:-1]
            # Drop a trailing singleton dimension if present (no-op otherwise).
            pooled_outputs[i] = pooled_data.squeeze(-1)
        return pooled_outputs
# Public API of this module.
__all__ = ["BOSEOSFilter", "DispatchPooler", "IdentityPooler"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/special.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/tokwise/heads.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Set
from typing import TypeAlias
import torch
import torch.nn as nn
from vllm.model_executor.layers.pooler import ActivationFn, ClassifierFn, ProjectorFn
from vllm.pooling_params import PoolingParams
from vllm.tasks import PoolingTask
from vllm.v1.pool.metadata import PoolingMetadata
from .methods import TokenPoolingMethodOutputItem
# Head output for one request; None marks an unfinished chunked prefill.
TokenPoolerHeadOutputItem: TypeAlias = torch.Tensor | None
class TokenPoolerHead(nn.Module, ABC):
    """Abstract per-token head applied request-by-request to pooled tokens."""

    @abstractmethod
    def get_supported_tasks(self) -> Set[PoolingTask]:
        raise NotImplementedError

    @abstractmethod
    def forward_chunk(
        self,
        pooled_data: TokenPoolingMethodOutputItem,
        pooling_param: PoolingParams,
    ) -> TokenPoolerHeadOutputItem:
        """Process one request's pooled tokens with its pooling params."""
        raise NotImplementedError

    def forward(
        self,
        pooled_data: list[TokenPoolingMethodOutputItem],
        pooling_metadata: PoolingMetadata,
    ) -> list[TokenPoolerHeadOutputItem]:
        params = pooling_metadata.pooling_params
        # One pooled item per request is expected.
        assert len(pooled_data) == len(params)
        results: list[TokenPoolerHeadOutputItem] = []
        for item, param in zip(pooled_data, params):
            results.append(self.forward_chunk(item, param))
        return results
class TokenEmbeddingPoolerHead(TokenPoolerHead):
    """Per-token head producing (optionally projected, truncated, and
    normalized) token embeddings."""

    def __init__(
        self,
        head_dtype: torch.dtype | str | None = None,
        projector: ProjectorFn | None = None,
        activation: ActivationFn | None = None,
    ) -> None:
        super().__init__()
        # Dtype the pooled tokens are cast to before the head runs.
        self.head_dtype = head_dtype
        # Optional Sentence-Transformers-style projection.
        self.projector = projector
        # Activation (normalization) applied when the request asks for it.
        self.activation = activation

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return {"token_embed"}

    def forward_chunk(
        self,
        pooled_data: TokenPoolingMethodOutputItem,
        pooling_param: PoolingParams,
    ) -> TokenPoolerHeadOutputItem:
        # for unfinished chunked prefill
        if pooled_data is None:
            return None
        if self.head_dtype is not None:
            pooled_data = pooled_data.to(self.head_dtype)
        # pooled_data shape: [n_tokens, hidden_dimension]
        # Apply ST projector
        if self.projector is not None:
            pooled_data = self.projector(pooled_data)
        # pooled_data shape: [n_tokens, embedding_dimension]
        # for matryoshka representation
        pooled_data = pooled_data[..., : pooling_param.dimensions]
        # for normalize
        if self.activation is not None and pooling_param.use_activation:
            pooled_data = self.activation(pooled_data)
        # pooled_data shape: [n_tokens, embedding_dimension]
        return pooled_data
class TokenClassifierPoolerHead(TokenPoolerHead):
    """Per-token head producing classification logits for each token."""

    def __init__(
        self,
        classifier: ClassifierFn | None = None,
        logit_bias: float | None = None,
        head_dtype: torch.dtype | str | None = None,
        activation: ActivationFn | None = None,
    ) -> None:
        super().__init__()
        # Projection from hidden_size to num_labels (skipped when None).
        self.classifier = classifier
        # Constant subtracted from the logits (skipped when None).
        self.logit_bias = logit_bias
        # Dtype the pooled tokens are cast to before the head runs.
        self.head_dtype = head_dtype
        # Activation applied when the request's pooling params ask for it.
        self.activation = activation

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return {"token_classify"}

    def forward_chunk(
        self,
        pooled_data: TokenPoolingMethodOutputItem,
        pooling_param: PoolingParams,
    ) -> TokenPoolerHeadOutputItem:
        # for unfinished chunked prefill
        if pooled_data is None:
            return None
        if self.head_dtype is not None:
            pooled_data = pooled_data.to(self.head_dtype)
        # hidden_states shape: [n_token, hidden_size]
        if self.classifier is not None:
            scores = self.classifier(pooled_data)
        else:
            scores = pooled_data
        # scores shape: [n_token, num_labels]
        if self.logit_bias is not None:
            scores -= self.logit_bias
        if self.activation is not None and pooling_param.use_activation:
            scores = self.activation(scores)
        # scores shape: [n_token, num_labels]
        return scores
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/tokwise/heads.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/tokwise/methods.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Set
from typing import TypeAlias
import torch
import torch.nn as nn
from vllm.config import get_current_vllm_config
from vllm.config.pooler import TokenPoolingType
from vllm.model_executor.layers.pooler import PoolingParamsUpdate
from vllm.tasks import PoolingTask
from vllm.v1.pool.metadata import PoolingMetadata
# Pooled tokens for one request; None marks an unfinished chunked prefill.
TokenPoolingMethodOutputItem: TypeAlias = torch.Tensor | None
class TokenPoolingMethod(nn.Module, ABC):
    """Abstract base for token-level pooling (one output row per token)."""

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return {"token_embed", "token_classify"}

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        # No extra pooling-parameter requirements by default.
        return PoolingParamsUpdate()

    @abstractmethod
    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> list[TokenPoolingMethodOutputItem]:
        """Split token hidden states per request; a None entry marks a
        request whose chunked prefill is not yet finished."""
        raise NotImplementedError
class AllPool(TokenPoolingMethod):
    """Return every token's hidden state for each request.

    With chunked prefill enabled, per-step chunks are accumulated in each
    request's pooling state and only emitted once the prefill finishes;
    unfinished requests yield None.
    """

    def __init__(self):
        super().__init__()
        vllm_config = get_current_vllm_config()
        scheduler_config = vllm_config.scheduler_config
        # Determines whether forward must buffer chunks across steps.
        self.enable_chunked_prefill = scheduler_config.enable_chunked_prefill

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> list[TokenPoolingMethodOutputItem]:
        pooling_cursor = pooling_metadata.get_pooling_cursor()
        # Split the flattened token batch back into per-request chunks.
        hidden_states_all = hidden_states.split(
            pooling_cursor.num_scheduled_tokens_cpu.tolist()
        )
        # Reorder the chunks into the cursor's request order.
        hidden_states_lst = [hidden_states_all[i] for i in pooling_cursor.index]
        if not self.enable_chunked_prefill:
            return hidden_states_lst
        pooling_states = pooling_metadata.pooling_states
        # If chunked_prefill is enabled
        # 1. first store the chunked hidden_states in pooling_states.hidden_states_cache
        for p, hs_chunk in zip(pooling_states, hidden_states_lst):
            p.hidden_states_cache.append(hs_chunk)
        # 2. Once prefill is finished, send hidden_states_cache to PoolerHead
        output_list = list[TokenPoolingMethodOutputItem]()
        for p, finished in zip(pooling_states, pooling_cursor.is_finished()):
            if finished:
                hidden_states_cache = p.hidden_states_cache
                # Avoid a needless concat when the prefill was a single chunk.
                if len(hidden_states_cache) == 1:
                    output_list.append(hidden_states_cache[0])
                else:
                    output_list.append(torch.concat(hidden_states_cache, dim=0))
                p.clean()
            else:
                output_list.append(None)
        return output_list
class StepPool(AllPool):
    """Token pooling that keeps only selected "step" positions per request."""

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        # Token ids are needed to locate the step-tag positions.
        return PoolingParamsUpdate(requires_token_ids=True)

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> list[TokenPoolingMethodOutputItem]:
        all_pooled = super().forward(hidden_states, pooling_metadata)
        token_ids_per_req = pooling_metadata.get_prompt_token_ids()
        params_per_req = pooling_metadata.pooling_params

        outputs = list[torch.Tensor | None]()
        for chunk, token_ids, params in zip(
            all_pooled, token_ids_per_req, params_per_req
        ):
            # None marks an unfinished chunked prefill; pass it through.
            if chunk is not None:
                # Optionally restrict to a subset of label columns.
                returned_token_ids = params.returned_token_ids
                if returned_token_ids is not None and len(returned_token_ids) > 0:
                    chunk = chunk[:, returned_token_ids]
                # Optionally keep only positions tagged with step_tag_id.
                if params.step_tag_id is not None:
                    chunk = chunk[token_ids == params.step_tag_id]
            outputs.append(chunk)
        return outputs
def get_tok_pooling_method(pooling_type: TokenPoolingType | str):
    """Resolve a tokenwise pooling type name to a method instance."""
    if pooling_type == "STEP":
        return StepPool()
    if pooling_type == "ALL":
        return AllPool()
    raise NotImplementedError(f"Unknown tokenwise pooling type: {pooling_type!r}")
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/tokwise/methods.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/pooler/tokwise/poolers.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable, Set
from typing import TypeAlias
import torch
from vllm.config import PoolerConfig, get_current_vllm_config
from vllm.model_executor.layers.pooler import (
ClassifierFn,
PoolingParamsUpdate,
ProjectorFn,
)
from vllm.model_executor.layers.pooler.abstract import Pooler
from vllm.model_executor.layers.pooler.activations import (
PoolerActivation,
PoolerNormalize,
resolve_classifier_act_fn,
)
from vllm.model_executor.models.adapters import _load_st_projector
from vllm.tasks import POOLING_TASKS, PoolingTask
from vllm.v1.pool.metadata import PoolingMetadata
from .heads import (
TokenClassifierPoolerHead,
TokenEmbeddingPoolerHead,
TokenPoolerHead,
TokenPoolerHeadOutputItem,
)
from .methods import (
TokenPoolingMethod,
TokenPoolingMethodOutputItem,
get_tok_pooling_method,
)
# Callable form of a token pooling method: (hidden_states, metadata) -> per-request items.
TokenPoolingFn: TypeAlias = Callable[
    [torch.Tensor, PoolingMetadata],
    list[TokenPoolingMethodOutputItem],
]
# Callable form of a token pooler head: (per-request items, metadata) -> head outputs.
TokenPoolingHeadFn: TypeAlias = Callable[
    [list[TokenPoolingMethodOutputItem], PoolingMetadata],
    list[TokenPoolerHeadOutputItem],
]
# Final pooler output: one entry per request; None marks an unfinished prefill.
TokenPoolerOutput: TypeAlias = list[torch.Tensor | None]
class TokenPooler(Pooler):
    """
    Pools per-token information out of hidden states.

    The work is split into two stages:
    1. ``pooling`` extracts per-token data for each request.
    2. ``head`` post-processes each request's tokens.
    The result is returned as a `PoolerOutput`-compatible list.
    """

    def __init__(
        self,
        pooling: TokenPoolingMethod | TokenPoolingFn,
        head: TokenPoolerHead | TokenPoolingHeadFn,
    ) -> None:
        super().__init__()
        self.pooling = pooling
        self.head = head

    def get_supported_tasks(self) -> Set[PoolingTask]:
        supported = set(POOLING_TASKS)
        # Plain callables cannot narrow the task set; only the module
        # variants expose their supported tasks.
        if isinstance(self.pooling, TokenPoolingMethod):
            supported &= self.pooling.get_supported_tasks()
        if isinstance(self.head, TokenPoolerHead):
            supported &= self.head.get_supported_tasks()
        return supported

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        combined = PoolingParamsUpdate()
        if isinstance(self.pooling, TokenPoolingMethod):
            combined |= self.pooling.get_pooling_updates(task)
        return combined

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> TokenPoolerOutput:
        per_token = self.pooling(hidden_states, pooling_metadata)
        return self.head(per_token, pooling_metadata)
def pooler_for_token_embed(
    pooler_config: PoolerConfig, projector: ProjectorFn | None = None
) -> TokenPooler:
    """Build a `TokenPooler` for token-level embedding.

    A caller-provided ``projector`` overrides the ST projector loaded
    from the model config.
    """
    pooling = get_tok_pooling_method(pooler_config.get_tok_pooling_type())
    model_config = get_current_vllm_config().model_config
    if projector is None:
        projector = _load_st_projector(model_config)
    head = TokenEmbeddingPoolerHead(
        head_dtype=model_config.head_dtype,
        projector=projector,
        activation=PoolerNormalize(),
    )
    return TokenPooler(pooling=pooling, head=head)
def pooler_for_token_classify(
    pooler_config: PoolerConfig,
    *,
    pooling: TokenPoolingMethod | TokenPoolingFn | None = None,
    classifier: ClassifierFn | None = None,
    act_fn: PoolerActivation | str | None = None,
):
    """Build a `TokenPooler` for token-level classification.

    A caller-provided ``pooling`` overrides the config-derived one.
    """
    if pooling is None:
        pooling = get_tok_pooling_method(pooler_config.get_tok_pooling_type())
    model_config = get_current_vllm_config().model_config
    return TokenPooler(
        pooling=pooling,
        head=TokenClassifierPoolerHead(
            head_dtype=model_config.head_dtype,
            classifier=classifier,
            logit_bias=model_config.pooler_config.logit_bias,
            activation=resolve_classifier_act_fn(
                model_config, static_num_labels=False, act_fn=act_fn
            ),
        ),
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/pooler/tokwise/poolers.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/test_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.entrypoints.utils import get_max_tokens, sanitize_message
def test_sanitize_message():
    # The hex object address must be stripped from repr-style messages.
    raw = "<_io.BytesIO object at 0x7a95e299e750>"
    assert sanitize_message(raw) == "<_io.BytesIO object>"
class TestGetMaxTokens:
    """Tests for get_max_tokens(): the generation_config's max_tokens is a
    default when supplied by the model author, but acts as a ceiling when
    explicitly set by the user."""

    def test_default_sampling_params_used_when_no_request_max_tokens(self):
        """With no request-level max_tokens, the generation_config default
        applies."""
        got = get_max_tokens(
            max_model_len=24000,
            max_tokens=None,
            input_length=100,
            default_sampling_params={"max_tokens": 2048},
        )
        assert got == 2048

    def test_request_max_tokens_not_capped_by_default_sampling_params(self):
        """A request-level max_tokens must NOT be capped by the model
        author's generation_config max_tokens (fixes #34005)."""
        got = get_max_tokens(
            max_model_len=24000,
            max_tokens=5000,
            input_length=100,
            default_sampling_params={"max_tokens": 2048},
        )
        assert got == 5000

    def test_override_max_tokens_caps_request(self):
        """An explicitly user-set max_tokens acts as a ceiling."""
        got = get_max_tokens(
            max_model_len=24000,
            max_tokens=5000,
            input_length=100,
            default_sampling_params={"max_tokens": 2048},
            override_max_tokens=2048,
        )
        assert got == 2048

    def test_override_max_tokens_used_as_default(self):
        """With no request max_tokens, the override applies as a default."""
        got = get_max_tokens(
            max_model_len=24000,
            max_tokens=None,
            input_length=100,
            default_sampling_params={"max_tokens": 2048},
            override_max_tokens=2048,
        )
        assert got == 2048

    def test_max_model_len_still_caps_output(self):
        """max_model_len - input_length is always the hard ceiling."""
        got = get_max_tokens(
            max_model_len=3000,
            max_tokens=5000,
            input_length=100,
            default_sampling_params={"max_tokens": 2048},
        )
        assert got == 2900  # 3000 - 100

    def test_request_max_tokens_smaller_than_default(self):
        """A request for fewer tokens than the gen_config default must be
        respected."""
        got = get_max_tokens(
            max_model_len=24000,
            max_tokens=512,
            input_length=100,
            default_sampling_params={"max_tokens": 2048},
        )
        assert got == 512
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/test_utils.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/iquest_loopcoder.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only LoopCoder model compatible with HuggingFace weights."""
from __future__ import annotations
from collections.abc import Iterable
from dataclasses import replace
from typing import Any
import torch
from torch import nn
from transformers import PretrainedConfig
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.model_executor.models.llama import LlamaMLP
from vllm.sequence import IntermediateTensors
from vllm.v1.attention.backend import AttentionType
from .utils import (
AutoWeightsLoader,
extract_layer_index,
make_layers,
maybe_prefix,
)
class LoopCoderAttention(nn.Module):
    """Self-attention for LoopCoder, evaluated multiple times ("loops").

    Loop 0 runs ordinary attention with the regular KV cache. Later loops
    run a sliding-window local attention and combine it with a
    global-attention pass, mixed per head via a learned gate.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        max_position: int = 4096 * 32,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        attn_type: str = AttentionType.DECODER,
        dual_chunk_attention_config: dict[str, Any] | None = None,
        layer_idx: int = 0,
    ) -> None:
        super().__init__()
        self.layer_idx = layer_idx
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        # Standard scaled dot-product attention scaling factor.
        self.scaling = self.head_dim**-0.5
        self.dual_chunk_attention_config = dual_chunk_attention_config
        # Get loop_num from config, default to 2 if not specified
        self.loop_num = getattr(config, "loop_num", 2)
        self.loop_window_size = getattr(config, "loop_window_size", 64)
        # Use total number of hidden layers instead of hardcoded 24
        total_layers = config.num_hidden_layers
        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=max_position,
            rope_parameters=config.rope_parameters,
            dual_chunk_attention_config=dual_chunk_attention_config,
        )
        # One Attention module (and hence one KV cache) per loop. Loop 0
        # uses the regular cache config; later loops use a sliding-window
        # cache of size loop_window_size. Each loop's module is registered
        # under a unique synthetic layer index so the caches don't clash.
        self.attn = nn.ModuleList()
        base_cache_config = cache_config
        for loop_idx in range(self.loop_num):
            base_layer_idx = extract_layer_index(prefix)
            unique_layer_idx = loop_idx * total_layers + base_layer_idx
            unique_prefix = prefix.replace(
                f"layers.{base_layer_idx}", f"layers.{unique_layer_idx}"
            )
            if loop_idx == 0:
                loop_cache_config = cache_config
            else:
                if base_cache_config is not None:
                    loop_cache_config = replace(
                        base_cache_config,
                        sliding_window=self.loop_window_size,
                    )
                else:
                    loop_cache_config = CacheConfig(
                        sliding_window=self.loop_window_size,
                        cache_dtype="auto",
                    )
            self.attn.append(
                Attention(
                    self.num_heads,
                    self.head_dim,
                    self.scaling,
                    num_kv_heads=self.num_kv_heads,
                    cache_config=loop_cache_config,
                    quant_config=quant_config,
                    attn_type=attn_type,
                    prefix=f"{unique_prefix}.attn",
                    # Dual-chunk attention kwargs are only forwarded to the
                    # loop-0 (global) attention module.
                    **{
                        "layer_idx": unique_layer_idx,
                        "dual_chunk_attention_config": dual_chunk_attention_config,
                    }
                    if dual_chunk_attention_config and loop_idx == 0
                    else {},
                )
            )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        loop_idx: int,
        gate_proj: LoopGateProjection | None = None,
    ) -> torch.Tensor:
        if loop_idx == 0:
            # Loop 0: plain attention through the loop-0 module/cache.
            attn = self.attn[0]
            qkv, _ = self.qkv_proj(hidden_states)
            q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
            q, k = self.rotary_emb(positions, q, k)
            attn_output = attn(q, k, v)
            output, _ = self.o_proj(attn_output)
            return output
        else:
            global_attn = self.attn[0]
            local_attn = self.attn[loop_idx]
            qkv, _ = self.qkv_proj(hidden_states)
            q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
            q, k = self.rotary_emb(positions, q, k)
            num_tokens, _ = q.shape
            num_heads = self.num_heads
            head_dim = self.head_dim
            # Per-head view of the queries for the gate projection:
            # (num_heads, num_tokens, head_dim).
            q_reshaped = q.view(num_tokens, num_heads, head_dim).transpose(0, 1)
            # k/v are None here: presumably this attends against the KV
            # cache populated by loop 0 without writing new entries —
            # TODO(review): confirm against the Attention API.
            global_attn_output = global_attn(q, None, None)
            local_attn_output = local_attn(q, k, v)
            assert gate_proj is not None, "gate_proj must be provided for loop_idx > 0"
            # Per-head mix: gate is the global-attention share, (1 - gate)
            # the local sliding-window share.
            gate = gate_proj(q_reshaped)
            output = global_attn_output * gate + local_attn_output * (1 - gate)
            output, _ = self.o_proj(output)
            return output
class LoopCoderDecoderLayer(nn.Module):
    """One LoopCoder transformer block: looped self-attention plus MLP,
    both in the pre-norm residual style."""

    def __init__(
        self,
        config: PretrainedConfig,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        layer_idx: int = 0,
    ) -> None:
        super().__init__()
        hidden_size = config.hidden_size
        self.hidden_size = hidden_size
        self.layer_idx = layer_idx
        dual_chunk_attention_config = getattr(
            config, "dual_chunk_attention_config", None
        )
        # Bidirectional (encoder-only) attention when the config opts out
        # of causal masking.
        is_causal = getattr(config, "is_causal", True)
        attn_type = AttentionType.DECODER if is_causal else AttentionType.ENCODER_ONLY
        self.self_attn = LoopCoderAttention(
            config=config,
            hidden_size=hidden_size,
            num_heads=config.num_attention_heads,
            max_position=config.max_position_embeddings,
            num_kv_heads=config.num_key_value_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
            attn_type=attn_type,
            dual_chunk_attention_config=dual_chunk_attention_config,
            layer_idx=layer_idx,
        )
        self.mlp = LlamaMLP(
            hidden_size=hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        loop_idx: int,
        gate_proj: LoopGateProjection | None = None,
    ) -> torch.Tensor:
        # Pre-norm attention with residual connection.
        attn_out = self.self_attn(
            positions=positions,
            hidden_states=self.input_layernorm(hidden_states),
            loop_idx=loop_idx,
            gate_proj=gate_proj,
        )
        hidden_states = hidden_states + attn_out
        # Pre-norm MLP with residual connection.
        mlp_out = self.mlp(self.post_attention_layernorm(hidden_states))
        return hidden_states + mlp_out
class LoopGateProjection(nn.Module):
    """Gate projection for mixed attention in Loop 2+.

    Computes: g = sigmoid(linear(Q)) for each head independently.
    This gate determines how much to use Loop1's KV (global) vs current
    loop's KV (local).

    Supports tensor parallelism: each GPU handles a subset of heads.
    The weight matrix has shape [num_heads, head_dim] and is split along
    the head dimension.
    """
    def __init__(
        self,
        total_num_heads: int,
        head_dim: int,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.total_num_heads = total_num_heads
        self.head_dim = head_dim
        tp_size = get_tensor_model_parallel_world_size()
        # Heads must divide evenly across TP ranks.
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        # One scalar logit per head; ColumnParallelLinear shards the output
        # (head) dimension across TP ranks, gather_output=False keeps each
        # rank's local partition.
        self.gate_proj = ColumnParallelLinear(
            head_dim,
            self.total_num_heads,
            bias=True,
            gather_output=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_proj",
        )
    def forward(self, query: torch.Tensor) -> torch.Tensor:
        """Compute gate values from query tensor.
        Args:
            query: [num_heads, num_tokens, head_dim] (vLLM flattened format)
                where num_heads is the number of heads on this TP rank
                and num_tokens = batch * seq_len
        Returns:
            gate: [num_tokens, num_heads * head_dim] (flattened format matching q shape)
        """
        num_heads, num_tokens, head_dim = query.shape
        assert num_heads == self.num_heads, (
            f"Expected {self.num_heads} heads, got {num_heads}"
        )
        # Flatten all heads' queries into one batch; the linear produces a
        # logit for EVERY head from every query, so the output below is
        # [num_heads * num_tokens, num_heads] and only the diagonal
        # (head h's query against head h's weight row) is wanted.
        query_flat = query.reshape(-1, head_dim)
        gate_logits_flat, _ = self.gate_proj(query_flat)
        gate_logits = gate_logits_flat.reshape(
            num_heads, num_tokens, self.num_heads
        )  # [num_heads, num_tokens, num_heads]
        # Extract diagonal: each head h's query should use output column h
        # gate_logits[h, :, h] gives the output for head h at each token
        gate_logits = torch.diagonal(
            gate_logits, dim1=0, dim2=2
        )  # [num_tokens, num_heads]
        gate_logits = gate_logits.transpose(0, 1)  # [num_heads, num_tokens]
        gate_logits = gate_logits.unsqueeze(-1)  # [num_heads, num_tokens, 1]
        # Apply sigmoid
        gate = torch.sigmoid(gate_logits)  # [num_heads, num_tokens, 1]
        # Expand and reshape to match q shape: [num_tokens, num_heads * head_dim]
        gate = gate.transpose(0, 1)  # [num_tokens, num_heads, 1]
        gate = gate.expand(-1, -1, head_dim)  # [num_tokens, num_heads, head_dim]
        gate = gate.reshape(
            num_tokens, num_heads * head_dim
        )  # [num_tokens, num_heads * head_dim]
        return gate
@support_torch_compile(
    dynamic_arg_dims={
        "input_ids": 0,
        "positions": -1,
        "intermediate_tensors": 0,
        "inputs_embeds": 0,
    }
)
class IQuestLoopCoderModel(nn.Module):
    """Decoder stack that runs its layers ``loop_num`` times per forward.

    Loop 0 behaves like a standard decoder pass; later loops additionally
    blend loop-0 ("global") attention with sliding-window ("local")
    attention through per-layer gate projections.
    """

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
        decoder_layer_type: type[nn.Module] = LoopCoderDecoderLayer,
    ):
        super().__init__()

        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config

        # TODO (@robertgshaw2): see if this can be moved out
        if cache_config.sliding_window is not None and hasattr(
            config, "max_window_layers"
        ):
            assert config.max_window_layers == config.num_hidden_layers, (
                "Sliding window for some but not all layers is not supported. "
                "This model uses sliding window but `max_window_layers` = {} "
                "is less than `num_hidden_layers` = {}. Please open an issue "
                "to discuss this feature.".format(
                    config.max_window_layers,
                    config.num_hidden_layers,
                )
            )

        self.config = config
        self.quant_config = quant_config
        self.vocab_size = config.vocab_size
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
            quant_config=quant_config,
            prefix=f"{prefix}.embed_tokens",
        )
        self.loop_num = getattr(self.config, "loop_num", 2)
        self.window_size = getattr(self.config, "loop_window_size", 64)
        # Gate projections for Loop 2+ (one per layer)
        head_dim = config.hidden_size // config.num_attention_heads
        _, _, self.gate_projections = make_layers(
            config.num_hidden_layers,
            lambda prefix: LoopGateProjection(
                total_num_heads=config.num_attention_heads,
                head_dim=head_dim,
                quant_config=quant_config,
                prefix=prefix,
            ),
            prefix=f"{prefix}.gate_projections",
        )
        # BUGFIX: honor the `decoder_layer_type` argument (previously
        # LoopCoderDecoderLayer was hard-coded, silently ignoring it).
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: decoder_layer_type(
                config=config,
                cache_config=cache_config,
                quant_config=quant_config,
                prefix=prefix,
                layer_idx=extract_layer_index(prefix),
            ),
            prefix=f"{prefix}.layers",
        )
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings for `input_ids`."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run all layers `loop_num` times over the hidden states.

        Args:
            input_ids: Token ids; ignored when `inputs_embeds` is given.
            positions: Position ids passed to every layer.
            intermediate_tensors: Unused here (no mid-stack PP handoff).
            inputs_embeds: Optional precomputed embeddings.
        """
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
        else:
            hidden_states = self.embed_input_ids(input_ids)
        for loop_idx in range(self.loop_num):
            for layer_idx, layer in enumerate(
                self.layers[self.start_layer : self.end_layer]
            ):
                # Get the actual layer index (accounting for pipeline parallelism)
                actual_layer_idx = self.start_layer + layer_idx
                # Get gate_proj for this layer (only for loop_idx > 0)
                gate_proj = (
                    self.gate_projections[actual_layer_idx] if loop_idx > 0 else None
                )
                hidden_states = layer(positions, hidden_states, loop_idx, gate_proj)
        hidden_states = self.norm(hidden_states)
        return hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights into this module.

        Separate q/k/v and gate/up checkpoint tensors are fused into
        qkv_proj / gate_up_proj; `gate_projections.*` tensors are remapped
        onto their nested `gate_proj` submodule.

        Returns:
            The set of parameter names that were loaded.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(name)
            ):
                # Loading kv cache quantization scales
                param = params_dict[scale_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                loaded_weight = (
                    loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
                )
                weight_loader(param, loaded_weight)
                loaded_params.add(scale_name)
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                # gate_projections weights contain "gate_proj" in their names
                # but are not part of the fused gate_up projection.
                if "gate_projections" in name:
                    continue
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if name.endswith("scale"):
                    # Remapping the name of FP8 kv-scale.
                    name = maybe_remap_kv_scale_name(name, params_dict)
                    if name is None:
                        continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                if weight_loader == default_weight_loader:
                    weight_loader(param, loaded_weight)
                else:
                    weight_loader(param, loaded_weight, shard_id)
                # BUGFIX: record fused (stacked) params as loaded; previously
                # they were silently missing from the returned set.
                loaded_params.add(name)
                break
            else:
                if name.startswith("gate_projections."):
                    # Checkpoints store gate weights directly on the module;
                    # vLLM nests them under `.gate_proj`.
                    if name.endswith(".weight"):
                        vllm_name = name.replace(".weight", ".gate_proj.weight")
                    elif name.endswith(".bias"):
                        vllm_name = name.replace(".bias", ".gate_proj.bias")
                    else:
                        continue
                    if vllm_name in params_dict:
                        param = params_dict[vllm_name]
                        weight_loader = getattr(
                            param, "weight_loader", default_weight_loader
                        )
                        weight_loader(param, loaded_weight)
                        loaded_params.add(vllm_name)
                        continue
                    continue
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Remapping the name of FP8 kv-scale.
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
                loaded_params.add(name)
        return loaded_params
class IQuestLoopCoderForCausalLM(nn.Module):
    """Causal-LM head wrapped around the looped decoder stack."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        hf_config = vllm_config.model_config.hf_config
        self.config = hf_config
        self.quant_config = vllm_config.quant_config
        self.model = IQuestLoopCoderModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        # With tied embeddings, reuse the input embedding table as lm_head.
        if hf_config.tie_word_embeddings:
            self.lm_head = self.model.embed_tokens
        else:
            self.lm_head = ParallelLMHead(
                hf_config.vocab_size,
                hf_config.hidden_size,
                quant_config=self.quant_config,
                prefix=maybe_prefix(prefix, "lm_head"),
            )
        self.logits_processor = LogitsProcessor(hf_config.vocab_size)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token ids via the underlying model's embedding table."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the decoder stack and return its hidden states."""
        return self.model(input_ids, positions, intermediate_tensors, inputs_embeds)

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Project hidden states to vocabulary logits."""
        return self.logits_processor(self.lm_head, hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights; skip lm_head.* when embeddings are tied."""
        skip = ["lm_head."] if self.config.tie_word_embeddings else None
        loader = AutoWeightsLoader(self, skip_prefixes=skip)
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/iquest_loopcoder.py",
"license": "Apache License 2.0",
"lines": 529,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/lfm2_vl.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import itertools
import math
from collections.abc import Iterable, Mapping, Sequence
from typing import Annotated, Literal
import torch
import torch.nn as nn
from transformers import BatchFeature
from transformers.activations import ACT2FN
from transformers.models.lfm2_vl import Lfm2VlProcessor
from transformers.models.lfm2_vl.configuration_lfm2_vl import Lfm2VlConfig
from transformers.models.lfm2_vl.image_processing_lfm2_vl_fast import (
Lfm2VlImageProcessorFast,
find_closest_aspect_ratio,
round_by_factor,
)
from vllm.config import VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.forward_context import set_forward_context
from vllm.model_executor.layers.mamba.mamba_utils import (
MambaStateCopyFunc,
MambaStateCopyFuncCalculator,
MambaStateDtypeCalculator,
MambaStateShapeCalculator,
)
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import ImageProcessorItems, ImageSize, MultiModalDataItems
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
PromptUpdateDetails,
)
from vllm.renderers import TokenizeParams
from vllm.sequence import IntermediateTensors
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .interfaces import (
IsHybrid,
MultiModalEmbeddings,
SupportsLoRA,
SupportsMultiModal,
SupportsPP,
)
from .lfm2_siglip2 import Siglip2Model
from .utils import (
AutoWeightsLoader,
WeightsMapper,
init_vllm_registered_model,
maybe_prefix,
)
from .vision import is_vit_use_data_parallel
class Lfm2VLImagePixelInputs(TensorSchema):
    """
    Schema for LFM2-VL pixel inputs.

    Dimensions:
        - b: Number of images in the prompt
        - bn: Batch size * number of images
        - d: Number of dimensions
        - fd: Number of features per dimension
    """
    type: Literal["pixel_values"] = "pixel_values"
    # Padded patch tokens per tile (see image_pixels_to_features for packing).
    pixel_values: Annotated[torch.Tensor, TensorShape("bn", "d", "fd")]
    # (height, width) patch grid per tile; kept on CPU downstream.
    spatial_shapes: Annotated[torch.Tensor, TensorShape("bn", 2)]
    # Number of tiles contributed by each image.
    num_patches: Annotated[torch.Tensor, TensorShape("b")]
# Pixel values are currently the only supported image input form.
LFM2VLImageInputs = Lfm2VLImagePixelInputs
class Lfm2VLProcessingInfo(BaseProcessingInfo):
    """Processing metadata and tiling/token-count helpers for LFM2-VL.

    Wraps the HF processor and image processor, and computes the tile grid,
    per-image patch counts, and prompt placeholder strings.
    """
    def get_hf_config(self):
        """Return the typed HF config for this model."""
        return self.ctx.get_hf_config(Lfm2VlConfig)
    def get_hf_processor(self, **kwargs):
        """Return the HF composite (text + image) processor."""
        return self.ctx.get_hf_processor(Lfm2VlProcessor, **kwargs)
    def get_image_processor(self, **kwargs: object) -> Lfm2VlImageProcessorFast:
        """Return just the image processor component."""
        return self.get_hf_processor(**kwargs).image_processor
    def get_default_tok_params(self) -> TokenizeParams:
        # NOTE(review): special tokens are disabled here — presumably they
        # are inserted by the chat template instead; confirm.
        return super().get_default_tok_params().with_kwargs(add_special_tokens=False)
    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # No hard cap on the number of images per prompt.
        return {"image": None}
    def get_image_size_with_most_features(self) -> ImageSize:
        """Return the square image size that maximizes image-token count."""
        processor = self.get_image_processor()
        max_image_tokens = processor.max_image_tokens
        encoder_patch_size = processor.encoder_patch_size
        downsample_factor = processor.downsample_factor
        # One output token covers patch_size * downsample pixels per side.
        max_pixels = max_image_tokens * (encoder_patch_size**2) * (downsample_factor**2)
        side = int(math.sqrt(max_pixels))
        return ImageSize(width=side, height=side)
    def _is_image_too_large(
        self,
        height: int,
        width: int,
        max_image_tokens: int,
        encoder_patch_size: int,
        downsample_factor: int,
        max_pixels_tolerance: float,
    ) -> bool:
        """Check if the image is too large to be processed as one tile."""
        total_factor = encoder_patch_size * downsample_factor
        # Round each side to the factor grid, never below one encoder patch.
        h_bar = max(encoder_patch_size, round_by_factor(height, total_factor))
        w_bar = max(encoder_patch_size, round_by_factor(width, total_factor))
        return (
            h_bar * w_bar
            > max_image_tokens
            * encoder_patch_size**2
            * downsample_factor**2
            * max_pixels_tolerance
        )
    def smart_resize(
        self,
        height: int,
        width: int,
        downsample_factor: int,
        min_image_tokens: int,
        max_image_tokens: int,
        encoder_patch_size: int,
    ) -> tuple[int, int]:
        """Resize so the pixel count maps to [min, max] image tokens.

        Both output sides are multiples of
        ``encoder_patch_size * downsample_factor``.

        Returns:
            (width, height) — note the order is reversed vs. the inputs.
        """
        total_factor = encoder_patch_size * downsample_factor
        smart_resize_min_pixels = (
            min_image_tokens * encoder_patch_size**2 * downsample_factor**2
        )
        smart_resize_max_pixels = (
            max_image_tokens * encoder_patch_size**2 * downsample_factor**2
        )
        h_bar = max(total_factor, round_by_factor(height, total_factor))
        w_bar = max(total_factor, round_by_factor(width, total_factor))
        if h_bar * w_bar > smart_resize_max_pixels:
            # Too many pixels: scale down, flooring onto the factor grid.
            beta = math.sqrt((height * width) / smart_resize_max_pixels)
            h_bar = max(
                total_factor, math.floor(height / beta / total_factor) * total_factor
            )
            w_bar = max(
                total_factor, math.floor(width / beta / total_factor) * total_factor
            )
        elif h_bar * w_bar < smart_resize_min_pixels:
            # Too few pixels: scale up, ceiling onto the factor grid.
            beta = math.sqrt(smart_resize_min_pixels / (height * width))
            h_bar = math.ceil(height * beta / total_factor) * total_factor
            w_bar = math.ceil(width * beta / total_factor) * total_factor
        return w_bar, h_bar
    def _target_ratios(self, min_tiles: int, max_tiles: int) -> list[tuple[int, int]]:
        """Enumerate candidate (cols, rows) grids with tile count in range."""
        ratios = [
            (w, h)
            for n in range(min_tiles, max_tiles + 1)
            for w in range(1, n + 1)
            for h in range(1, n + 1)
            if min_tiles <= w * h <= max_tiles
        ]
        return sorted(set(ratios), key=lambda x: x[0] * x[1])
    def _get_grid_layout(
        self,
        height: int,
        width: int,
        min_tiles: int,
        max_tiles: int,
        tile_size: int,
    ) -> tuple[int, int, int]:
        """Pick the tile grid whose aspect ratio best matches the image.

        Returns:
            (grid_width, grid_height, total_patches)
        """
        aspect_ratio = width / height
        target_ratios = self._target_ratios(min_tiles, max_tiles)
        # find best matching grid configuration
        grid_width, grid_height = find_closest_aspect_ratio(
            aspect_ratio, target_ratios, width, height, tile_size
        )
        total_patches = grid_width * grid_height
        return grid_width, grid_height, total_patches
    def _get_image_feature_grid_size(
        self,
        image_width: int,
        image_height: int,
        processor: Lfm2VlProcessor,
        mm_kwargs: Mapping[str, object],
    ) -> tuple[int, int, int]:
        """Compute (grid_w, grid_h, total_patches) for one image.

        `mm_kwargs` entries override the image processor's defaults; when
        the image is tiled, an extra thumbnail patch is counted.
        """
        image_processor: Lfm2VlImageProcessorFast = processor.image_processor
        mm_kwargs = self.ctx.get_merged_mm_kwargs(mm_kwargs)
        downsample_factor = mm_kwargs.get(
            "downsample_factor", image_processor.downsample_factor
        )
        encoder_patch_size = mm_kwargs.get(
            "encoder_patch_size", image_processor.encoder_patch_size
        )
        max_pixels_tolerance = mm_kwargs.get(
            "max_pixels_tolerance", image_processor.max_pixels_tolerance
        )
        min_tiles = mm_kwargs.get("min_tiles", image_processor.min_tiles)
        max_tiles = mm_kwargs.get("max_tiles", image_processor.max_tiles)
        max_image_tokens = mm_kwargs.get(
            "max_image_tokens", image_processor.max_image_tokens
        )
        tile_size = mm_kwargs.get("tile_size", image_processor.tile_size)
        # Splitting is disabled when exactly one tile is allowed.
        do_image_splitting = not min_tiles == max_tiles == 1
        is_image_large = self._is_image_too_large(
            height=image_height,
            width=image_width,
            max_image_tokens=max_image_tokens,
            encoder_patch_size=encoder_patch_size,
            downsample_factor=downsample_factor,
            max_pixels_tolerance=max_pixels_tolerance,
        )
        # Big image will be cropped into patches and small images are just resized
        if is_image_large and do_image_splitting:
            grid_width, grid_height, total_patches = self._get_grid_layout(
                image_height,
                image_width,
                min_tiles=min_tiles,
                max_tiles=max_tiles,
                tile_size=tile_size,
            )
        else:
            grid_width = grid_height = total_patches = 1
        if grid_width * grid_height != 1:  # Thumbnail
            total_patches += 1
        return grid_width, grid_height, total_patches
    def get_num_patches(
        self,
        *,
        image_width: int,
        image_height: int,
        processor: Lfm2VlProcessor,
        mm_kwargs: Mapping[str, object],
    ) -> int:
        """Total tiles for one image (including thumbnail when tiled)."""
        _, _, total_patches = self._get_image_feature_grid_size(
            image_width=image_width,
            image_height=image_height,
            processor=processor,
            mm_kwargs=mm_kwargs,
        )
        return total_patches
    def get_image_repl(
        self,
        image_width: int,
        image_height: int,
        spatial_shapes: torch.Tensor,
        processor: Lfm2VlProcessor,
        mm_kwargs: Mapping[str, object],
    ) -> str:
        """Build the placeholder string that replaces one image token.

        Tiled images get a `<|img_row_i_col_j|>` marker per tile followed by
        that tile's image tokens, plus an optional thumbnail segment; small
        images get a single run of image tokens. Everything is wrapped in
        image start/end tokens.
        """
        grid_placeholder = "<|img_row_{n_h}_col_{n_w}|>"
        image_token = processor.image_token
        image_start_token = processor.image_start_token
        image_end_token = processor.image_end_token
        image_thumbnail_token = processor.image_thumbnail_token
        num_thumbnail_tokens, num_tokens_per_tile = self.get_num_image_tokens(
            spatial_shapes=spatial_shapes,
            processor=processor,
            mm_kwargs=mm_kwargs,
        )
        tile_img_placeholder = grid_placeholder + (image_token * num_tokens_per_tile)
        grid_w, grid_h, _ = self._get_image_feature_grid_size(
            image_width=image_width,
            image_height=image_height,
            processor=processor,
            mm_kwargs=mm_kwargs,
        )
        if grid_w > 1 or grid_h > 1:
            # Row-major tile markers, 1-indexed.
            tiles_placeholder: list[str] = [
                tile_img_placeholder.format(n_h=i + 1, n_w=j + 1)
                for i in range(grid_h)
                for j in range(grid_w)
            ]
            if num_thumbnail_tokens > 0:
                tiles_placeholder.append(
                    image_thumbnail_token + (image_token * num_thumbnail_tokens)
                )
        else:
            tiles_placeholder = [image_token * num_thumbnail_tokens]
        placeholder = "".join(
            itertools.chain([image_start_token], tiles_placeholder, [image_end_token])
        )
        return placeholder
    def get_num_image_tokens(
        self,
        *,
        spatial_shapes: torch.Tensor,
        processor: Lfm2VlProcessor,
        mm_kwargs: Mapping[str, object],
    ) -> tuple[int, int]:
        """Return (thumbnail token count, tokens per tile).

        The thumbnail count comes from the LAST entry of `spatial_shapes`
        (its h*w patch grid divided by downsample_factor**2); the per-tile
        count is fixed by tile_size / encoder_patch_size / downsample_factor.
        """
        image_processor: Lfm2VlImageProcessorFast = processor.image_processor
        mm_kwargs = self.ctx.get_merged_mm_kwargs(mm_kwargs)
        downsample_factor = mm_kwargs.get(
            "downsample_factor", image_processor.downsample_factor
        )
        encoder_patch_size = mm_kwargs.get(
            "encoder_patch_size", image_processor.encoder_patch_size
        )
        tile_size = mm_kwargs.get("tile_size", image_processor.tile_size)
        num_thumbnail_tokens = spatial_shapes[-1].prod() // (downsample_factor**2)
        num_patches_tile = tile_size // encoder_patch_size
        dwn_num_patches_tile = math.ceil(num_patches_tile / downsample_factor)
        num_tiles_tokens = dwn_num_patches_tile * dwn_num_patches_tile
        return num_thumbnail_tokens, num_tiles_tokens
class Lfm2VLDummyInputsBuilder(BaseDummyInputsBuilder[Lfm2VLProcessingInfo]):
    """Builds dummy prompts and image data for profiling runs."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        """One image token per requested dummy image."""
        image_count = mm_counts.get("image", 0)
        token = self.info.get_hf_processor().image_token
        return token * image_count

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        """Dummy images at the size producing the most image tokens."""
        width, height = self.info.get_image_size_with_most_features()
        dummy_images = self._get_dummy_images(
            width=width,
            height=height,
            num_images=mm_counts.get("image", 0),
            overrides=mm_options.get("image"),
        )
        return {"image": dummy_images}
class Lfm2VLMultiModalProcessor(BaseMultiModalProcessor[Lfm2VLProcessingInfo]):
    """Multimodal processor for LFM2-VL built on the HF composite processor."""
    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        """Run the HF processor and attach per-image patch counts."""
        # Text-only input not supported in composite processor
        if not (images := mm_data.get("images", [])):
            prompt_ids = self.info.get_tokenizer().encode(
                prompt, add_special_tokens=False
            )
            prompt_ids = self._apply_hf_processor_tokens_only(prompt_ids)
            return BatchFeature(dict(input_ids=[prompt_ids]), tensor_type="pt")
        processed_outputs = super()._call_hf_processor(
            prompt,
            mm_data,
            mm_kwargs,
            tok_kwargs,
        )
        # Recompute how many tiles each image produced so field configs can
        # split the flat tile axis back into per-image groups.
        mm_items = self.info.parse_mm_data({"image": images}, validate=False)
        parsed_images = mm_items.get_items("image", ImageProcessorItems)
        image_sizes = [
            parsed_images.get_image_size(i) for i in range(len(parsed_images))
        ]
        hf_processor = self.info.get_hf_processor(**mm_kwargs)
        num_patches = [
            self.info.get_num_patches(
                image_width=size.width,
                image_height=size.height,
                processor=hf_processor,
                mm_kwargs=mm_kwargs,
            )
            for size in image_sizes
        ]
        processed_outputs["num_patches"] = torch.tensor(num_patches)
        return processed_outputs
    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        """Describe how HF outputs map onto per-image multimodal fields."""
        num_patches = hf_inputs.get("num_patches", torch.empty(0))
        return dict[str, MultiModalFieldConfig](
            pixel_values=MultiModalFieldConfig.flat_from_sizes("image", num_patches),
            # spatial_shapes / num_patches stay on CPU so later .tolist()
            # calls do not force device-to-host syncs.
            spatial_shapes=MultiModalFieldConfig.flat_from_sizes(
                "image", num_patches, keep_on_cpu=True
            ),
            num_patches=MultiModalFieldConfig.batched("image", keep_on_cpu=True),
        )
    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptReplacement]:
        """Replace each image token with the image's full placeholder string."""
        hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        image_token = hf_processor.image_token
        def get_image_replacement_lfm2vl(item_idx: int):
            # Per-image callback: builds the tiled placeholder for image
            # `item_idx` from its original size and processed spatial shapes.
            images = mm_items.get_items("image", ImageProcessorItems)
            image_size = images.get_image_size(item_idx)
            out_item = out_mm_kwargs["image"][item_idx]
            spatial_shapes = out_item["spatial_shapes"].data
            assert isinstance(spatial_shapes, torch.Tensor)
            image_repl = self.info.get_image_repl(
                image_width=image_size.width,
                image_height=image_size.height,
                spatial_shapes=spatial_shapes,
                processor=hf_processor,
                mm_kwargs=hf_processor_mm_kwargs,
            )
            return PromptUpdateDetails.select_text(
                image_repl,
                embed_text=image_token,
            )
        return [
            PromptReplacement(
                modality="image",
                target=image_token,
                replacement=get_image_replacement_lfm2vl,
            )
        ]
class Lfm2VLMultiModalProjector(nn.Module):
    """Projects vision-tower features into the text embedding space.

    Performs a pixel-unshuffle (space-to-depth by `downsample_factor`) on
    packed tile tokens, then an optional LayerNorm and a 2-layer MLP.
    """
    def __init__(
        self,
        config: Lfm2VlConfig,
        prefix: str = "",
    ):
        super().__init__()
        self.use_data_parallel = is_vit_use_data_parallel()
        # Pixel-unshuffle concatenates factor**2 neighboring tokens, so the
        # MLP input width is hidden_size * factor**2.
        in_channels = config.vision_config.hidden_size * (config.downsample_factor**2)
        self.factor = config.downsample_factor
        self.projector_use_layernorm = config.projector_use_layernorm
        if self.projector_use_layernorm:
            self.layer_norm = nn.LayerNorm(in_channels)
        self.linear_1 = nn.Linear(
            in_channels,
            config.projector_hidden_size,
            bias=config.projector_bias,
        )
        self.act = ACT2FN[config.projector_hidden_act]
        self.linear_2 = nn.Linear(
            config.projector_hidden_size,
            config.text_config.hidden_size,
            bias=config.projector_bias,
        )
    def forward(
        self,
        vision_features_packed: torch.Tensor,
        spatial_shapes: torch.Tensor,
    ) -> torch.Tensor:
        """Project packed vision features without materializing padded tensors.
        Args:
            vision_features_packed: (total_tokens, hidden_size) packed in tile order.
            spatial_shapes: (num_tiles, 2) on CPU (height, width) per tile.
        Returns:
            projected_packed: (total_projected_tokens, text_hidden_size)
        """
        assert spatial_shapes.device.type == "cpu", (
            "Expected `spatial_shapes` on CPU to avoid device-to-host sync in "
            "variable-length packing."
        )
        factor = self.factor
        device = vision_features_packed.device
        hidden_size = vision_features_packed.shape[-1]
        spatial_shapes_list: list[list[int]] = spatial_shapes.tolist()
        lengths_list = [h * w for h, w in spatial_shapes_list]
        gather_idx_parts: list[torch.Tensor] = []
        offset = 0
        # dh/dw enumerate the factor x factor pixel offsets that fold into
        # one output cell (space-to-depth ordering).
        dh = torch.arange(factor, dtype=torch.int64)
        dw = torch.arange(factor, dtype=torch.int64)
        dh_grid, dw_grid = torch.meshgrid(dh, dw, indexing="ij")
        dh_flat = dh_grid.reshape(-1)
        dw_flat = dw_grid.reshape(-1)
        for (height, width), length in zip(spatial_shapes_list, lengths_list):
            if length <= 0:
                continue
            if height % factor != 0 or width % factor != 0:
                raise ValueError(
                    "spatial_shapes must be divisible by downsample_factor: "
                    f"got ({height}, {width}) with factor={factor}."
                )
            height_out = height // factor
            width_out = width // factor
            # For every output cell (rr, cc), compute the flat indices of
            # its factor**2 source tokens within this tile.
            rows_out = torch.arange(height_out, dtype=torch.int64)
            cols_out = torch.arange(width_out, dtype=torch.int64)
            rr, cc = torch.meshgrid(rows_out, cols_out, indexing="ij")
            rr = rr.reshape(-1)
            cc = cc.reshape(-1)
            token_idx = (rr[:, None] * factor + dh_flat[None, :]) * width + (
                cc[:, None] * factor + dw_flat[None, :]
            )
            # Shift by the tile's start position in the packed sequence.
            gather_idx_parts.append(token_idx.reshape(-1) + offset)
            offset += length
        if gather_idx_parts:
            # One gather reorders all tiles; reshape folds factor**2 tokens
            # into each output row.
            gather_idx = torch.cat(gather_idx_parts).to(device=device)
            gathered = vision_features_packed.index_select(0, gather_idx)
            unshuffled = gathered.reshape(-1, factor * factor * hidden_size)
        else:
            # No tokens at all: produce an empty (0, C) tensor.
            unshuffled = vision_features_packed.new_empty(
                (0, factor * factor * hidden_size)
            )
        if self.projector_use_layernorm:
            unshuffled = self.layer_norm(unshuffled)
        hidden_states = self.linear_1(unshuffled)
        hidden_states = self.act(hidden_states)
        projected_packed = self.linear_2(hidden_states)
        return projected_packed
@MULTIMODAL_REGISTRY.register_processor(
Lfm2VLMultiModalProcessor,
info=Lfm2VLProcessingInfo,
dummy_inputs=Lfm2VLDummyInputsBuilder,
)
class Lfm2VLForConditionalGeneration(
nn.Module, SupportsMultiModal, SupportsLoRA, SupportsPP, IsHybrid
):
merge_by_field_config = True
hf_to_vllm_mapper = WeightsMapper(
orig_to_new_prefix={
"lm_head.": "language_model.lm_head.",
"model.language_model.": "language_model.model.",
"model.vision_tower.": "vision_tower.",
"model.multi_modal_projector.": "multi_modal_projector.",
}
)
@classmethod
def get_placeholder_str(cls, modality: str, i: int) -> str | None:
if modality.startswith("image"):
return "<image>"
raise ValueError("Only image modality is supported")
    @classmethod
    def get_mamba_state_dtype_from_config(
        cls,
        vllm_config: "VllmConfig",
    ) -> tuple[torch.dtype, ...]:
        """Dtype(s) of the short-conv state cache, derived from the config."""
        return MambaStateDtypeCalculator.short_conv_state_dtype(
            vllm_config.model_config.dtype,
            vllm_config.cache_config.mamba_cache_dtype,
        )
    @classmethod
    def get_mamba_state_shape_from_config(
        cls,
        vllm_config: "VllmConfig",
    ) -> tuple[tuple[int, int]]:
        """Calculate shapes for LFM2's convolutional cache.

        Args:
            vllm_config: vLLM config

        Returns:
            Tuple containing:
            - conv_state_shape: Shape for convolutional state cache
        """
        parallel_config = vllm_config.parallel_config
        # The conv cache belongs to the text backbone, hence text_config.
        hf_language_config = vllm_config.model_config.hf_config.text_config
        return MambaStateShapeCalculator.short_conv_state_shape(
            tp_world_size=parallel_config.tensor_parallel_size,
            intermediate_size=hf_language_config.hidden_size,
            conv_kernel=hf_language_config.conv_L_cache,
        )
    @classmethod
    def get_mamba_state_copy_func(cls) -> tuple[MambaStateCopyFunc]:
        """Copy function(s) used to move short-conv state between blocks."""
        return MambaStateCopyFuncCalculator.short_conv_state_copy_func()
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "model"):
        """Build vision tower, projector, and text backbone from the config."""
        super().__init__()
        config: Lfm2VlConfig = vllm_config.model_config.hf_config
        multimodal_config = vllm_config.model_config.multimodal_config
        vision_config = config.vision_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.vllm_config = vllm_config
        self.multimodal_config = multimodal_config
        # True when the vision encoder runs data-parallel instead of TP.
        self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data"
        with self._mark_tower_model(vllm_config, "image"):
            # Only the Siglip2 vision tower is supported.
            if vision_config.model_type == "siglip2_vision_model":
                self.vision_tower = Siglip2Model(
                    config=vision_config,
                    quant_config=quant_config,
                    prefix=maybe_prefix(prefix, "vision_tower"),
                )
            else:
                raise ValueError(
                    f"Unsupported visual tokenizer type: {vision_config.model_type}"
                )
            self.multi_modal_projector = Lfm2VLMultiModalProjector(
                config=config,
                prefix=maybe_prefix(prefix, "multi_modal_projector"),
            )
        with self._mark_language_model(vllm_config):
            # The text backbone is instantiated from its own architecture
            # registration rather than being hard-coded here.
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.text_config,
                prefix=maybe_prefix(prefix, "language"),
                architectures=config.text_config.architectures,
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )
    def _parse_and_validate_image_input(
        self, **kwargs: object
    ) -> LFM2VLImageInputs | None:
        """Pull image tensors out of `kwargs`; None when no images present."""
        pixel_values = kwargs.pop("pixel_values", None)
        spatial_shapes = kwargs.pop("spatial_shapes", None)
        num_patches = kwargs.pop("num_patches", None)
        if pixel_values is None:
            return None
        return LFM2VLImageInputs(
            type="pixel_values",
            pixel_values=pixel_values,
            spatial_shapes=spatial_shapes,
            num_patches=num_patches,
        )
    def image_pixels_to_features(
        self,
        pixel_values: torch.FloatTensor,
        spatial_shapes: torch.Tensor,
    ) -> list[torch.Tensor]:
        """Encode padded tile pixel patches into per-tile projected features.

        Args:
            pixel_values: [num_tiles, max_patches, feat] patch tokens, padded
                with trailing zeros by the HF processor.
            spatial_shapes: [num_tiles, 2] (height, width) patch grid per
                tile; must live on CPU.

        Returns:
            One feature tensor per tile in the text hidden size (an empty
            list when there are no patch tokens). Return annotation fixed:
            both return paths produce a list, not a single tensor.
        """
        assert spatial_shapes.device.type == "cpu", (
            "Expected `spatial_shapes` on CPU to avoid device-to-host sync in "
            "variable-length packing."
        )
        pixel_values = pixel_values.to(
            dtype=self.vision_tower.vision_model.embeddings.patch_embedding.weight.dtype
        )  # fp16 compatibility
        # LFM2-VL's HF processor pads patch sequences with trailing zeros.
        # Pack patch tokens upfront so the vision tower runs entirely unpadded.
        spatial_shapes_list: list[list[int]] = spatial_shapes.tolist()
        lengths_list = [h * w for h, w in spatial_shapes_list]
        total_tokens = int(sum(lengths_list))
        lengths_cpu = (spatial_shapes[:, 0] * spatial_shapes[:, 1]).to(
            dtype=torch.int32
        )
        max_seqlen = (
            lengths_cpu.max().reshape(1)
            if lengths_cpu.numel()
            else torch.tensor([0], dtype=torch.int32)
        )
        if total_tokens == 0:
            return []
        # Copy each tile's valid prefix into one contiguous buffer.
        packed_pixel_values = pixel_values.new_empty(
            (total_tokens, pixel_values.shape[-1])
        )
        offset = 0
        for i, length in enumerate(lengths_list):
            if length <= 0:
                continue
            packed_pixel_values[offset : offset + length].copy_(
                pixel_values[i, :length]
            )
            offset += length
        packed_pixel_values = packed_pixel_values.unsqueeze(0)
        # cu_seqlens marks tile boundaries for varlen attention.
        lengths = torch.tensor(
            lengths_list, dtype=torch.int32, device=pixel_values.device
        )
        cu_seqlens = torch.zeros(
            lengths.shape[0] + 1,
            dtype=torch.int32,
            device=pixel_values.device,
        )
        cu_seqlens[1:] = torch.cumsum(lengths, dim=0)
        with set_forward_context(None, self.vllm_config):
            vision_outputs = self.vision_tower(
                pixel_values_packed=packed_pixel_values,
                spatial_shapes=spatial_shapes,
                cu_seqlens=cu_seqlens,
                max_seqlen=max_seqlen,
            )
        image_outputs_packed = getattr(
            vision_outputs, "last_hidden_state", vision_outputs
        )
        vision_features_packed = image_outputs_packed[0]
        # After pixel-unshuffle each tile shrinks by factor**2 tokens;
        # precompute the per-tile projected lengths for the final split.
        factor = self.multi_modal_projector.factor
        projected_lengths_list: list[int] = []
        for (height, width), length in zip(spatial_shapes_list, lengths_list):
            if length <= 0:
                projected_lengths_list.append(0)
                continue
            if height % factor != 0 or width % factor != 0:
                raise ValueError(
                    "spatial_shapes must be divisible by downsample_factor: "
                    f"got ({height}, {width}) with factor={factor}."
                )
            projected_lengths_list.append((height // factor) * (width // factor))
        projected_packed = self.multi_modal_projector(
            vision_features_packed=vision_features_packed,
            spatial_shapes=spatial_shapes,
        )
        image_features: list[torch.Tensor] = []
        offset = 0
        for out_len in projected_lengths_list:
            image_features.append(projected_packed[offset : offset + out_len])
            offset += out_len
        return image_features
def _process_image_input(
    self,
    image_input: LFM2VLImageInputs,
) -> torch.Tensor | list[torch.Tensor]:
    """Project raw image patches to embeddings and regroup them per image."""
    features = self.image_pixels_to_features(
        image_input["pixel_values"],
        spatial_shapes=image_input["spatial_shapes"],
    )
    # num_patches lives on CPU (keep_on_cpu=True), so .tolist() is
    # instant and does not trigger a device-to-host sync.
    counts = image_input["num_patches"].tolist()
    grouped: list[torch.Tensor] = []
    start = 0
    for count in counts:
        # Concatenate this image's patch tensors into a single tensor.
        grouped.append(torch.cat(features[start : start + count], dim=0))
        start += count
    return grouped
def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
    """Return per-image embeddings, or [] when no image inputs are present."""
    parsed = self._parse_and_validate_image_input(**kwargs)
    return [] if parsed is None else self._process_image_input(parsed)
def forward(
    self,
    input_ids: torch.Tensor | None,
    positions: torch.Tensor,
    intermediate_tensors: IntermediateTensors | None = None,
    inputs_embeds: torch.Tensor | None = None,
    **kwargs: object,
) -> torch.Tensor | IntermediateTensors:
    """Run the language model over (possibly multimodal) inputs."""
    # On non-first pipeline-parallel ranks the embeddings were already
    # consumed upstream, so drop them and rely on intermediate_tensors.
    if intermediate_tensors is not None:
        inputs_embeds = None
    return self.language_model(
        input_ids=input_ids,
        positions=positions,
        intermediate_tensors=intermediate_tensors,
        inputs_embeds=inputs_embeds,
    )
def compute_logits(
    self,
    hidden_states: torch.Tensor,
) -> torch.Tensor | None:
    """Delegate logits computation to the wrapped language model."""
    logits = self.language_model.compute_logits(hidden_states)
    return logits
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
    """Load checkpoint weights, remapping HF names via hf_to_vllm_mapper."""
    return AutoWeightsLoader(self).load_weights(
        weights, mapper=self.hf_to_vllm_mapper
    )
def get_mm_mapping(self) -> MultiModelKeys:
    """
    Get the module prefix in multimodal models
    """
    keys = MultiModelKeys.from_string_field(
        language_model="language_model",
        connector="multi_modal_projector",
        tower_model="vision_tower",
    )
    return keys
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/lfm2_vl.py",
"license": "Apache License 2.0",
"lines": 728,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/language/generation/test_grok.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from ...utils import dummy_hf_overrides
MODELS = ["xai-org/grok-2"]
def _grok2_dummy_overrides(hf_config):
    """Shrink the Grok-2 config so a dummy-weight model fits in CI memory."""
    hf_config = dummy_hf_overrides(hf_config, model_arch="Grok1ForCausalLM")
    tiny_dims = {
        "hidden_size": 256,
        "intermediate_size": 512,
        "moe_intermediate_size": 256,
        "num_attention_heads": 4,
        "num_key_value_heads": 2,
        "head_dim": 64,
    }
    hf_config.get_text_config().update(tiny_dims)
    return hf_config
@pytest.mark.parametrize("model", MODELS)
def test_dummy_generate(vllm_runner, monkeypatch, model: str) -> None:
    """Smoke-test that a dummy-initialized Grok-2 can emit one token."""
    with monkeypatch.context() as m:
        m.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")
        with vllm_runner(
            model,
            load_format="dummy",
            max_model_len=128,
            hf_overrides=_grok2_dummy_overrides,
            enforce_eager=True,
        ) as llm:
            prompt = "Hello from Grok-2"
            num_prompt_tokens = len(llm.get_llm().get_tokenizer().encode(prompt))
            (token_ids, text), = llm.generate_greedy([prompt], max_tokens=1)
            # The returned ids include the prompt, so the output must be
            # strictly longer than the prompt alone.
            assert len(token_ids) > num_prompt_tokens
            assert text is not None
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/language/generation/test_grok.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/tokenizers/grok2.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tokenizer for Grok-2 .tok.json format."""
import functools
import json
from collections.abc import Collection, Set
from pathlib import Path
from typing import Any, Literal, overload
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import (
EntryNotFoundError,
HfHubHTTPError,
RepositoryNotFoundError,
RevisionNotFoundError,
)
from transformers import BatchEncoding
from transformers.utils import chat_template_utils as hf_chat_utils
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
from vllm.logger import init_logger
from .protocol import TokenizerLike
logger = init_logger(__name__)
# Special-token literals used by the Grok-2 vocabulary.
PAD = "<|pad|>"
EOS = "<|eos|>"
SEP = "<|separator|>"
# Token texts that are always allowed during encoding (see
# _load_tiktoken_encoding, which adds them to _default_allowed_special).
RESERVED_TOKEN_TEXTS = [f"<|reserved_{i}|>" for i in range(3, 128)]
CONTROL_TOKEN_TEXTS = [f"<|control{i}|>" for i in range(1, 705)]
DEFAULT_SPECIAL_TOKENS = [PAD, SEP, EOS]
DEFAULT_CONTROL_TOKENS = {"pad": PAD, "sep": SEP, "eos": EOS}
# Minimal Human/System/Assistant chat template used when the model repo
# does not ship a tokenizer_config.json with its own template.
DEFAULT_CHAT_TEMPLATE = (
    "{% for message in messages %}"
    "{% if message['role'] == 'user' %}"
    "{{ 'Human: ' + message['content'].strip() + '<|separator|>\\n\\n' }}"
    "{% elif message['role'] == 'system' %}"
    "{{ 'System: ' + message['content'].strip() + '<|separator|>\\n\\n' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ 'Assistant: ' + message['content'] + '<|separator|>\\n\\n' }}"
    "{% endif %}"
    "{% endfor %}"
    "{% if add_generation_prompt %}"
    "{{ 'Assistant:' }}"
    "{% endif %}"
)
# Default + separate each single digit.
PAT_STR_B = (
    r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}|"""
    r""" ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
)
def _maybe_load_tokenizer_config(
model_path: Path,
*,
repo_id: str | None,
revision: str | None,
download_dir: str | None,
) -> dict[str, Any]:
config_path = model_path / "tokenizer_config.json"
if config_path.is_file():
with config_path.open("r", encoding="utf-8") as f:
return json.load(f)
if repo_id is None:
return {}
try:
config_file = hf_hub_download(
repo_id=repo_id,
filename="tokenizer_config.json",
revision=revision,
cache_dir=download_dir,
)
except (RepositoryNotFoundError, RevisionNotFoundError, EntryNotFoundError):
# If the repo, revision, or file does not exist, fall back silently.
return {}
except HfHubHTTPError as exc:
logger.warning(
"Failed to download tokenizer_config.json from %s. "
"This may be due to a network or authentication issue. "
"The default chat template will be used. Error: %s",
repo_id,
exc,
)
return {}
try:
with Path(config_file).open("r", encoding="utf-8") as f:
return json.load(f)
except json.JSONDecodeError as exc:
logger.warning(
"Failed to parse tokenizer_config.json. "
"The default chat template will be used. Error: %s",
exc,
)
return {}
except OSError as exc:
logger.warning(
"Failed to open tokenizer_config.json. "
"The default chat template will be used. Error: %s",
exc,
)
return {}
def _load_tiktoken_encoding(
    vocab_file: Path,
) -> tuple[Any, dict[str, int]]:
    """Build a patched ``tiktoken.Encoding`` from a Grok-2 ``.tok.json`` file.

    Returns:
        A ``(tokenizer, special_tokens)`` pair, where ``special_tokens`` maps
        special-token text to its id.

    Raises:
        ImportError: If ``tiktoken`` is not installed.
        ValueError: If the file's ``word_split`` scheme is not "V1".
    """
    try:
        import tiktoken
    except ImportError as exc:
        raise ImportError("Grok-2 tokenizer requires the `tiktoken` package.") from exc
    with vocab_file.open("rb") as f:
        xtok_dict = json.load(f)
    # Regular (mergeable) tokens are stored as raw byte sequences.
    mergeable_ranks = {
        bytes(item["bytes"]): item["token"]
        for item in xtok_dict.get("regular_tokens", [])
    }
    # Special tokens are keyed by their decoded text.
    special_tokens = {
        bytes(item["bytes"]).decode("utf-8", errors="replace"): item["token"]
        for item in xtok_dict.get("special_tokens", [])
    }
    if xtok_dict.get("word_split") == "V1":
        pat_str = PAT_STR_B
    else:
        raise ValueError(f"Unknown word_split: {xtok_dict.get('word_split')!r}")
    # An explicit pat_str in the file overrides the default V1 pattern.
    pat_str = xtok_dict.get("pat_str", pat_str)
    kwargs = {
        "name": str(vocab_file),
        "pat_str": pat_str,
        "mergeable_ranks": mergeable_ranks,
        "special_tokens": special_tokens,
    }
    if "vocab_size" in xtok_dict:
        kwargs["explicit_n_vocab"] = xtok_dict["vocab_size"]
    tokenizer = tiktoken.Encoding(**kwargs)
    # Attach extra state on the Encoding instance so the patched encode
    # below can read it at call time.
    default_allowed_special: set[str] | None = None
    if "default_allowed_special" in xtok_dict:
        default_allowed_special = {
            bytes(bytes_list).decode("utf-8", errors="replace")
            for bytes_list in xtok_dict["default_allowed_special"]
        }
    tokenizer._default_allowed_special = default_allowed_special or set()
    tokenizer._control_tokens = DEFAULT_CONTROL_TOKENS

    def encode_patched(
        self,
        text: str,
        *,
        allowed_special: Literal["all"] | Set[str] = set(),
        disallowed_special: Literal["all"] | Collection[str] = "all",
    ) -> list[int]:
        # Ignore the caller's disallowed_special: encoding never raises on
        # special-token text; defaults are merged into allowed_special.
        del disallowed_special
        if isinstance(allowed_special, set):
            allowed_special |= self._default_allowed_special
        return tiktoken.Encoding.encode(
            self,
            text,
            allowed_special=allowed_special,
            disallowed_special=(),
        )

    # Monkey-patch encode on this instance; the allowed-special sets are
    # extended afterwards, which is safe since encode reads them lazily.
    tokenizer.encode = functools.partial(encode_patched, tokenizer)
    tokenizer._default_allowed_special |= set(DEFAULT_CONTROL_TOKENS.values())
    tokenizer._default_allowed_special |= set(
        CONTROL_TOKEN_TEXTS + RESERVED_TOKEN_TEXTS
    )
    return tokenizer, special_tokens
class Grok2Tokenizer(TokenizerLike):
    """Tokenizer for the Grok-2 ``.tok.json`` (tiktoken-based) format.

    Wraps the patched ``tiktoken.Encoding`` built by
    ``_load_tiktoken_encoding`` behind the vLLM ``TokenizerLike`` interface.
    """

    @classmethod
    def from_pretrained(
        cls,
        path_or_repo_id: str | Path,
        *args,
        trust_remote_code: bool = False,
        revision: str | None = None,
        download_dir: str | None = None,
        **kwargs,
    ) -> "Grok2Tokenizer":
        """Load the tokenizer from a local file/directory or a Hub repo id.

        Raises:
            FileNotFoundError: If ``tokenizer.tok.json`` cannot be located.
        """
        if args:
            logger.debug_once("Ignoring extra positional args for Grok2Tokenizer.")
        path = Path(path_or_repo_id)
        if path.is_file():
            # Direct path to the vocab file itself.
            vocab_file = path
            model_path = path.parent
            repo_id = None
        elif path.is_dir():
            # Local model directory containing the vocab file.
            vocab_file = path / "tokenizer.tok.json"
            model_path = path
            repo_id = None
        else:
            # Not a local path: treat it as a Hugging Face repo id.
            vocab_file = Path(
                hf_hub_download(
                    repo_id=str(path_or_repo_id),
                    filename="tokenizer.tok.json",
                    revision=revision,
                    cache_dir=download_dir,
                )
            )
            model_path = vocab_file.parent
            repo_id = str(path_or_repo_id)
        if not vocab_file.is_file():
            raise FileNotFoundError(f"tokenizer.tok.json not found at {vocab_file}.")
        config = _maybe_load_tokenizer_config(
            model_path,
            repo_id=repo_id,
            revision=revision,
            download_dir=download_dir,
        )
        return cls(
            vocab_file=vocab_file,
            name_or_path=str(path_or_repo_id),
            truncation_side=kwargs.get("truncation_side", "left"),
            chat_template=config.get("chat_template"),
            init_kwargs=config,
        )

    def __init__(
        self,
        *,
        vocab_file: Path,
        name_or_path: str,
        truncation_side: str,
        chat_template: str | None,
        init_kwargs: dict[str, Any] | None = None,
    ) -> None:
        """Build the tokenizer from a local ``.tok.json`` vocabulary file.

        Args:
            vocab_file: Path to ``tokenizer.tok.json``.
            name_or_path: Original model path/repo id (kept for reporting).
            truncation_side: Side dropped on truncation ("left" or "right").
            chat_template: Jinja chat template; falls back to
                DEFAULT_CHAT_TEMPLATE when None.
            init_kwargs: Raw tokenizer_config.json contents, if any.
        """
        super().__init__()
        self.name_or_path = name_or_path
        self._truncation_side = truncation_side
        self.init_kwargs = init_kwargs or {}
        self._chat_template = chat_template or DEFAULT_CHAT_TEMPLATE
        self._tokenizer, self._special_tokens = _load_tiktoken_encoding(vocab_file)
        # Build str<->id lookup tables covering both regular (byte-decoded)
        # and special tokens.
        self._token_to_id: dict[str, int] = {}
        self._id_to_token: dict[int, str] = {}
        for token, token_id in self._tokenizer._mergeable_ranks.items():
            token_str = token.decode("utf-8", errors="replace")
            self._token_to_id[token_str] = token_id
            self._id_to_token[token_id] = token_str
        for token, token_id in self._special_tokens.items():
            self._token_to_id[token] = token_id
            self._id_to_token[token_id] = token
        # BOS fallback chain: SEP -> PAD -> EOS -> 0.
        bos_token_id = self._special_tokens.get(SEP)
        if bos_token_id is None:
            bos_token_id = self._special_tokens.get(PAD)
        if bos_token_id is None:
            bos_token_id = self._special_tokens.get(EOS)
        if bos_token_id is None:
            bos_token_id = 0
        self._bos_token_id = bos_token_id
        self._eos_token_id = self._special_tokens.get(EOS, self._bos_token_id)
        self._pad_token_id = self._special_tokens.get(PAD, self._eos_token_id)
        # There is no dedicated UNK token; reuse the PAD id.
        self._unk_token_id = self._pad_token_id
        self._max_chars_per_token = max(len(tok) for tok in self._token_to_id)

    def num_special_tokens_to_add(self) -> int:
        """Encoding never adds special tokens automatically."""
        return 0

    @property
    def all_special_tokens(self) -> list[str]:
        """All special-token texts defined in the vocab file."""
        return list(self._special_tokens.keys())

    @property
    def all_special_ids(self) -> list[int]:
        """Ids of all special tokens defined in the vocab file."""
        return list(self._special_tokens.values())

    @property
    def bos_token_id(self) -> int:
        return self._bos_token_id

    @property
    def eos_token_id(self) -> int:
        return self._eos_token_id

    @property
    def pad_token_id(self) -> int:
        return self._pad_token_id

    @property
    def is_fast(self) -> bool:
        # Not a HF "fast" (Rust-backed) tokenizer.
        return False

    @property
    def vocab_size(self) -> int:
        return self._tokenizer.n_vocab

    @property
    def max_token_id(self) -> int:
        return self._tokenizer.n_vocab - 1

    @property
    def max_chars_per_token(self) -> int:
        """Length of the longest token text, in characters."""
        return self._max_chars_per_token

    @property
    def truncation_side(self) -> str:
        return self._truncation_side

    def get_vocab(self) -> dict[str, int]:
        """Return a copy of the full token-text -> id mapping."""
        return dict(self._token_to_id)

    def get_added_vocab(self) -> dict[str, int]:
        """Return a copy of the special-token -> id mapping."""
        return dict(self._special_tokens)

    def _maybe_truncate(self, tokens: list[int], max_length: int | None) -> list[int]:
        """Truncate to max_length, dropping from the configured side."""
        if max_length is None or len(tokens) <= max_length:
            return tokens
        if self.truncation_side == "left":
            return tokens[-max_length:]
        return tokens[:max_length]

    def encode(
        self,
        text: str,
        truncation: bool | None = None,
        max_length: int | None = None,
        add_special_tokens: bool = True,
    ) -> list[int]:
        """Encode text to token ids; add_special_tokens is accepted but unused."""
        del add_special_tokens
        tokens = self._tokenizer.encode(text)
        if truncation:
            tokens = self._maybe_truncate(tokens, max_length)
        return tokens

    def decode(self, ids: list[int] | int, skip_special_tokens: bool = False) -> str:
        """Decode token id(s) back to text, optionally dropping special tokens."""
        if isinstance(ids, int):
            ids = [ids]
        if skip_special_tokens:
            ids = [
                token_id
                for token_id in ids
                if token_id not in self._special_tokens.values()
            ]
        return self._tokenizer.decode(ids)

    @overload
    def convert_tokens_to_ids(self, tokens: str) -> int: ...

    @overload
    def convert_tokens_to_ids(self, tokens: list[str]) -> list[int]: ...

    def convert_tokens_to_ids(self, tokens: str | list[str]) -> int | list[int]:
        """Map token text(s) to id(s); unknown tokens map to the UNK id."""
        if isinstance(tokens, str):
            return self._token_to_id.get(tokens, self._unk_token_id)
        return [self._token_to_id.get(token, self._unk_token_id) for token in tokens]

    def convert_ids_to_tokens(
        self, ids: list[int], skip_special_tokens: bool = False
    ) -> list[str]:
        """Map ids to token texts; unknown ids map to "<|unk|>"."""
        tokens = []
        for token_id in ids:
            if skip_special_tokens and token_id in self._special_tokens.values():
                continue
            tokens.append(self._id_to_token.get(token_id, "<|unk|>"))
        return tokens

    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        """Join token texts by round-tripping through ids and decoding."""
        token_ids = self.convert_tokens_to_ids(tokens)
        return self.decode(token_ids, skip_special_tokens=False)

    def __call__(
        self,
        text: str | list[str],
        text_pair: str | None = None,
        add_special_tokens: bool = True,
        truncation: bool = False,
        max_length: int | None = None,
    ) -> BatchEncoding:
        """HF-style call interface returning input_ids + attention_mask.

        Raises:
            NotImplementedError: If ``text_pair`` is provided.
        """
        if text_pair is not None:
            raise NotImplementedError("text_pair is not supported for Grok2Tokenizer.")
        if isinstance(text, list):
            input_ids_batch: list[list[int]] = [
                self.encode(
                    item,
                    truncation=truncation,
                    max_length=max_length,
                    add_special_tokens=add_special_tokens,
                )
                for item in text
            ]
            attention_mask_batch = [[1] * len(ids) for ids in input_ids_batch]
            return BatchEncoding(
                {"input_ids": input_ids_batch, "attention_mask": attention_mask_batch}
            )
        input_ids = self.encode(
            text,
            truncation=truncation,
            max_length=max_length,
            add_special_tokens=add_special_tokens,
        )
        attention_mask = [1] * len(input_ids)
        return BatchEncoding({"input_ids": input_ids, "attention_mask": attention_mask})

    def get_chat_template(
        self, chat_template: str | None, tools: list[dict[str, Any]] | None = None
    ) -> str | None:
        """Return the explicit template if given, else the instance default."""
        del tools
        return chat_template or self._chat_template

    def apply_chat_template(
        self,
        messages: list[ChatCompletionMessageParam],
        tools: list[dict[str, Any]] | None = None,
        chat_template: str | None = None,
        tokenize: bool = False,
        **kwargs,
    ) -> str | list[int]:
        """Render messages through the Jinja chat template.

        Returns the rendered prompt string, or its token ids when
        ``tokenize=True``.

        Raises:
            ValueError: If no chat template is available.
        """
        template = self.get_chat_template(chat_template, tools=tools)
        if template is None:
            raise ValueError(
                "No chat template available. Provide `chat_template` explicitly."
            )
        # Force string output from the HF helper; tokenization is done here.
        kwargs["return_dict"] = False
        prompt = hf_chat_utils.apply_chat_template(
            conversation=messages,
            chat_template=template,
            tools=tools,
            **kwargs,
        )
        if tokenize:
            return self.encode(prompt, add_special_tokens=False)
        return prompt
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/tokenizers/grok2.py",
"license": "Apache License 2.0",
"lines": 385,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/test_grpc_server.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
End-to-end tests for the vLLM gRPC server.
"""
import asyncio
import socket
import subprocess
import sys
import time
import grpc
import pytest
import pytest_asyncio
from vllm.grpc import vllm_engine_pb2, vllm_engine_pb2_grpc
# Use a small model for fast testing; server startup dominates test time.
MODEL_NAME = "hmellor/tiny-random-LlamaForCausalLM"
def find_free_port() -> int:
    """Find a free port on localhost.

    Binds an ephemeral port, records it, and releases the socket; the OS
    is unlikely to reassign it before the server binds it.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("", 0))
        sock.listen(1)
        _, free_port = sock.getsockname()
    return free_port
async def wait_for_server(port: int, timeout: float = 60.0) -> bool:
    """Poll the gRPC HealthCheck RPC until the server reports healthy.

    Returns True once healthy, False if the deadline passes first.
    """
    deadline = time.time() + timeout
    print("waiting for server to start...")
    while time.time() < deadline:
        try:
            channel = grpc.aio.insecure_channel(f"localhost:{port}")
            stub = vllm_engine_pb2_grpc.VllmEngineStub(channel)
            reply = await stub.HealthCheck(
                vllm_engine_pb2.HealthCheckRequest(), timeout=5.0
            )
            await channel.close()
            if reply.healthy:
                print("server returned healthy=True")
                return True
        except Exception:
            # Server not accepting connections yet; back off briefly.
            await asyncio.sleep(0.5)
    return False
class GrpcServerProcess:
    """Manages a gRPC server running in a subprocess."""

    def __init__(self):
        # Populated by start(); None until then.
        self.process: subprocess.Popen | None = None
        self.port: int | None = None

    async def start(self):
        """Launch the server subprocess and block until it is healthy.

        Raises:
            RuntimeError: If the server never becomes healthy in time.
        """
        self.port = find_free_port()
        cmd = [
            sys.executable,
            "-m",
            "vllm.entrypoints.grpc_server",
            "--model",
            MODEL_NAME,
            "--host",
            "localhost",
            "--port",
            str(self.port),
            "--max-num-batched-tokens",
            "512",
            "--disable-log-stats-server",
        ]
        self.process = subprocess.Popen(cmd)
        if not await wait_for_server(self.port):
            self.stop()
            raise RuntimeError("gRPC server failed to start within timeout")

    def stop(self):
        """Terminate the server process, escalating to kill if needed."""
        if not self.process:
            return
        self.process.terminate()
        try:
            self.process.wait(timeout=10)
        except subprocess.TimeoutExpired:
            self.process.kill()
            self.process.wait()
@pytest_asyncio.fixture(scope="module")
async def grpc_server():
    """Fixture providing a running gRPC server in a subprocess.

    Module-scoped so all tests share one (expensive) server instance.
    """
    server = GrpcServerProcess()
    await server.start()
    yield server
    # Teardown: stop the subprocess after the last test in the module.
    server.stop()
@pytest_asyncio.fixture
async def grpc_client(grpc_server):
    """Fixture providing a gRPC client connected to the server.

    Function-scoped: each test gets a fresh channel and stub.
    """
    channel = grpc.aio.insecure_channel(f"localhost:{grpc_server.port}")
    stub = vllm_engine_pb2_grpc.VllmEngineStub(channel)
    yield stub
    # Teardown: close the channel once the test is done.
    await channel.close()
@pytest.mark.asyncio
async def test_health_check(grpc_client):
    """Test the HealthCheck RPC."""
    reply = await grpc_client.HealthCheck(vllm_engine_pb2.HealthCheckRequest())
    assert reply.healthy is True
    assert reply.message == "Health"
@pytest.mark.asyncio
async def test_get_model_info(grpc_client):
    """Test the GetModelInfo RPC."""
    info = await grpc_client.GetModelInfo(vllm_engine_pb2.GetModelInfoRequest())
    assert info.model_path == MODEL_NAME
    assert info.is_generation is True
    assert info.max_context_length > 0
    assert info.vocab_size > 0
    assert info.supports_vision is False
@pytest.mark.asyncio
async def test_get_server_info(grpc_client):
    """Test the GetServerInfo RPC."""
    info = await grpc_client.GetServerInfo(vllm_engine_pb2.GetServerInfoRequest())
    assert info.active_requests >= 0
    assert info.is_paused is False
    assert info.uptime_seconds >= 0
    assert info.server_type == "vllm-grpc"
    assert info.last_receive_timestamp > 0
@pytest.mark.asyncio
async def test_generate_non_streaming(grpc_client):
    """Test the Generate RPC in non-streaming mode."""
    request = vllm_engine_pb2.GenerateRequest(
        request_id="test-non-streaming-1",
        tokenized=vllm_engine_pb2.TokenizedInput(
            original_text="Hello, my name is",
            input_ids=[15496, 11, 616, 1438, 318],  # GPT-2 tokens for the prompt
        ),
        sampling_params=vllm_engine_pb2.SamplingParams(
            temperature=0.0,
            max_tokens=10,
            n=1,
        ),
        stream=False,
    )
    # Without streaming, exactly one final "complete" message is expected.
    responses = [r async for r in grpc_client.Generate(request)]
    assert len(responses) == 1
    final = responses[0]
    assert final.HasField("complete")
    complete = final.complete
    assert len(complete.output_ids) > 0
    assert complete.finish_reason in ["stop", "length"]
    assert complete.prompt_tokens > 0
    assert complete.completion_tokens > 0
@pytest.mark.asyncio
async def test_generate_streaming(grpc_client):
    """Test the Generate RPC in streaming mode.

    Verifies that intermediate chunks are well-formed and that a final
    ``complete`` message arrives with a valid finish reason.
    """
    request = vllm_engine_pb2.GenerateRequest(
        request_id="test-streaming-1",
        tokenized=vllm_engine_pb2.TokenizedInput(
            original_text="The capital of France is",
            input_ids=[464, 3139, 286, 4881, 318],  # GPT-2 tokens
        ),
        sampling_params=vllm_engine_pb2.SamplingParams(
            temperature=0.0, max_tokens=10, n=1
        ),
        stream=True,
    )
    # Collect all responses
    chunks = []
    complete_response = None
    async for response in grpc_client.Generate(request):
        if response.HasField("chunk"):
            chunks.append(response.chunk)
        elif response.HasField("complete"):
            complete_response = response.complete
    # NOTE: there may legitimately be zero chunks when generation finishes
    # very quickly, so we deliberately do not assert on len(chunks).
    # (The previous `assert len(chunks) >= 0` was vacuous — always true.)
    # Should have a final complete response
    assert complete_response is not None
    assert complete_response.finish_reason in ["stop", "length"]
    assert complete_response.prompt_tokens > 0
    # Verify chunk structure
    for chunk in chunks:
        assert chunk.prompt_tokens > 0
        assert chunk.completion_tokens >= 0
@pytest.mark.asyncio
async def test_generate_with_different_sampling_params(grpc_client):
    """Test Generate with various sampling parameters."""
    cases = [
        # (request_id, sampling params) — temperature/top_p, then top_k.
        (
            "test-sampling-temp",
            vllm_engine_pb2.SamplingParams(
                temperature=0.8, top_p=0.95, max_tokens=5
            ),
        ),
        (
            "test-sampling-topk",
            vllm_engine_pb2.SamplingParams(
                temperature=1.0, top_k=50, max_tokens=5
            ),
        ),
    ]
    for request_id, params in cases:
        request = vllm_engine_pb2.GenerateRequest(
            request_id=request_id,
            tokenized=vllm_engine_pb2.TokenizedInput(
                original_text="Hello",
                input_ids=[15496],
            ),
            sampling_params=params,
            stream=False,
        )
        responses = [r async for r in grpc_client.Generate(request)]
        assert len(responses) == 1
        assert responses[0].HasField("complete")
@pytest.mark.asyncio
async def test_generate_with_stop_strings(grpc_client):
    """Test Generate with stop strings."""
    request = vllm_engine_pb2.GenerateRequest(
        request_id="test-stop-strings",
        tokenized=vllm_engine_pb2.TokenizedInput(
            original_text="Hello",
            input_ids=[15496],
        ),
        sampling_params=vllm_engine_pb2.SamplingParams(
            temperature=0.0,
            max_tokens=20,
            stop=["\n", "END"],
        ),
        stream=False,
    )
    responses = [r async for r in grpc_client.Generate(request)]
    assert len(responses) == 1
    final = responses[0]
    assert final.HasField("complete")
    # Either a stop string fired or max_tokens was reached.
    assert final.complete.finish_reason in ["stop", "length"]
@pytest.mark.asyncio
async def test_generate_multiple_requests(grpc_client):
    """Test handling multiple concurrent Generate requests."""

    async def make_request(request_id: str):
        # Issue one non-streaming request and return its single response.
        request = vllm_engine_pb2.GenerateRequest(
            request_id=request_id,
            tokenized=vllm_engine_pb2.TokenizedInput(
                original_text="Hello",
                input_ids=[15496],
            ),
            sampling_params=vllm_engine_pb2.SamplingParams(
                temperature=0.0, max_tokens=5
            ),
            stream=False,
        )
        responses = [r async for r in grpc_client.Generate(request)]
        return responses[0]

    # Send multiple requests concurrently
    tasks = [make_request(f"test-concurrent-{i}") for i in range(3)]
    responses = await asyncio.gather(*tasks)
    # Verify all requests completed successfully
    # (fixed: the index from enumerate() was never used)
    assert len(responses) == 3
    for response in responses:
        assert response.HasField("complete")
@pytest.mark.asyncio
async def test_generate_with_seed(grpc_client):
    """Test Generate with a fixed seed for reproducibility."""

    def build(request_id: str, seed: int):
        # Same prompt and params each time; only the request id differs.
        return vllm_engine_pb2.GenerateRequest(
            request_id=request_id,
            tokenized=vllm_engine_pb2.TokenizedInput(
                original_text="The future of AI is",
                input_ids=[464, 2003, 286, 9552, 318],
            ),
            sampling_params=vllm_engine_pb2.SamplingParams(
                temperature=1.0, max_tokens=10, seed=seed
            ),
            stream=False,
        )

    first = [r async for r in grpc_client.Generate(build("test-seed-1", 42))]
    second = [r async for r in grpc_client.Generate(build("test-seed-2", 42))]
    # Both should complete successfully
    assert len(first) == 1
    assert len(second) == 1
    assert first[0].HasField("complete")
    assert second[0].HasField("complete")
    # Identical seeds must yield identical sampled tokens.
    assert list(first[0].complete.output_ids) == list(second[0].complete.output_ids)
@pytest.mark.asyncio
async def test_generate_error_handling(grpc_client):
    """Test error handling in Generate RPC."""
    # Request with invalid top_p value (-33)
    bad_request = vllm_engine_pb2.GenerateRequest(
        request_id="test-error-invalid-topp",
        sampling_params=vllm_engine_pb2.SamplingParams(
            temperature=0.0, max_tokens=10, top_p=-33
        ),
        stream=False,
    )
    # The server must reject the request with INVALID_ARGUMENT.
    with pytest.raises(grpc.RpcError) as exc_info:
        async for _ in grpc_client.Generate(bad_request):
            pass
    err = exc_info.value
    assert err.code() == grpc.StatusCode.INVALID_ARGUMENT
    assert "top_p must be in (0, 1], got -33.0" in err.details()
@pytest.mark.asyncio
async def test_abort_request(grpc_client):
    """Test the out-of-band Abort RPC."""
    request_id = "test-abort-1"
    # Start a long-running streaming generate request
    generate_request = vllm_engine_pb2.GenerateRequest(
        request_id=request_id,
        tokenized=vllm_engine_pb2.TokenizedInput(
            original_text="Hello",
            input_ids=[15496],
        ),
        sampling_params=vllm_engine_pb2.SamplingParams(
            temperature=0.0,
            min_tokens=500,
            max_tokens=500,  # Request many tokens to ensure it runs long enough
        ),
        stream=True,
    )
    # Track whether we were aborted
    was_aborted = False
    received_chunks = 0

    async def run_generate():
        # Consume the stream; the final "complete" message carries the
        # finish_reason, which should be "abort" once the Abort RPC lands.
        nonlocal was_aborted, received_chunks
        async for response in grpc_client.Generate(generate_request):
            if response.HasField("chunk"):
                received_chunks += 1
            if response.HasField("complete"):
                complete = response.complete
                was_aborted = complete.finish_reason == "abort"
            else:
                # NOTE(review): chunk responses reset the flag here; this is
                # correct only because "complete" is the last message in the
                # stream — confirm if the server ordering ever changes.
                was_aborted = False

    async def abort_after_delay():
        # Small delay to ensure generate has started
        await asyncio.sleep(0.1)
        abort_request = vllm_engine_pb2.AbortRequest(request_ids=[request_id])
        await grpc_client.Abort(abort_request)

    # Run generate and abort concurrently
    await asyncio.gather(run_generate(), abort_after_delay())
    # The request should have been aborted (received final chunk with
    # "abort" finish reason) and finished early due to the abort.
    assert was_aborted and received_chunks < 500, (
        "Request should have been aborted before generating all 500 tokens"
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/test_grpc_server.py",
"license": "Apache License 2.0",
"lines": 348,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/entrypoints/grpc_server.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# mypy: ignore-errors
"""
vLLM gRPC Server
Starts a gRPC server for vLLM using the VllmEngine protocol.
Usage:
python -m vllm.entrypoints.grpc_server --model <model_path>
Example:
python -m vllm.entrypoints.grpc_server \
--model meta-llama/Llama-2-7b-hf \
--host 0.0.0.0 \
--port 50051
"""
import argparse
import asyncio
import signal
import sys
import time
from collections.abc import AsyncGenerator
import grpc
import uvloop
from grpc_reflection.v1alpha import reflection
from vllm import SamplingParams, TextPrompt, TokensPrompt
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.entrypoints.utils import log_version_and_model
from vllm.grpc import vllm_engine_pb2, vllm_engine_pb2_grpc
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import RequestOutputKind, StructuredOutputsParams
from vllm.usage.usage_lib import UsageContext
from vllm.utils.argparse_utils import FlexibleArgumentParser
from vllm.v1.engine.async_llm import AsyncLLM
from vllm.version import __version__ as VLLM_VERSION
logger = init_logger(__name__)
class VllmEngineServicer(vllm_engine_pb2_grpc.VllmEngineServicer):
"""
gRPC servicer implementing the VllmEngine service.
Handles 6 RPCs:
- Generate: Streaming text generation
- Embed: Embeddings (TODO)
- HealthCheck: Health probe
- Abort: Cancel requests out-of-band
- GetModelInfo: Model metadata
- GetServerInfo: Server state
"""
def __init__(self, async_llm: AsyncLLM, start_time: float):
    """
    Initialize the servicer.

    Args:
        async_llm: The AsyncLLM instance
        start_time: The server start time, in seconds since epoch
    """
    # Kept as plain references; all RPC handlers read these.
    self.async_llm = async_llm
    self.start_time = start_time
    logger.info("VllmEngineServicer initialized")
async def Generate(
    self,
    request: vllm_engine_pb2.GenerateRequest,
    context: grpc.aio.ServicerContext,
) -> AsyncGenerator[vllm_engine_pb2.GenerateResponse, None]:
    """
    Handle streaming generation requests.

    Args:
        request: The GenerateRequest protobuf
        context: gRPC context

    Yields:
        GenerateResponse protobuf messages (streaming)

    Aborts the RPC with INVALID_ARGUMENT on bad parameters (ValueError)
    and INTERNAL on unexpected engine errors.
    """
    request_id = request.request_id
    logger.debug("Generate request %s received.", request_id)
    try:
        # Extract tokenized input
        if request.WhichOneof("input") == "tokenized":
            # Pre-tokenized prompt: token ids, plus the original text if
            # the client supplied it.
            prompt: TokensPrompt = {
                "prompt_token_ids": list(request.tokenized.input_ids)
            }
            if request.tokenized.original_text:
                prompt["prompt"] = request.tokenized.original_text
        else:
            # Raw text prompt: the engine tokenizes it.
            prompt: TextPrompt = {"prompt": request.text}
        # Build sampling params with detokenize=False
        sampling_params = self._sampling_params_from_proto(
            request.sampling_params, stream=request.stream
        )
        tokenization_kwargs = self._tokenization_kwargs_from_proto(
            request.sampling_params
        )
        async for output in self.async_llm.generate(
            prompt=prompt,
            sampling_params=sampling_params,
            request_id=request_id,
            tokenization_kwargs=tokenization_kwargs,
        ):
            # Convert vLLM output to protobuf
            # For streaming, always send chunks
            if request.stream:
                yield self._chunk_response(output)
            # Send complete response when finished
            if output.finished:
                yield self._complete_response(output)
    except ValueError as e:
        # Invalid request error (equiv to 400).
        await context.abort(grpc.StatusCode.INVALID_ARGUMENT, str(e))
    except Exception as e:
        logger.exception("Error in Generate for request %s", request_id)
        await context.abort(grpc.StatusCode.INTERNAL, str(e))
async def Embed(
    self,
    request: vllm_engine_pb2.EmbedRequest,
    context: grpc.aio.ServicerContext,
) -> vllm_engine_pb2.EmbedResponse:
    """Handle embedding requests.

    Not implemented yet: always aborts the RPC with UNIMPLEMENTED.

    Args:
        request: The EmbedRequest protobuf
        context: gRPC context
    """
    logger.warning("Embed RPC not yet implemented")
    await context.abort(
        grpc.StatusCode.UNIMPLEMENTED, "Embed RPC not yet implemented"
    )
async def HealthCheck(
    self,
    request: vllm_engine_pb2.HealthCheckRequest,
    context: grpc.aio.ServicerContext,
) -> vllm_engine_pb2.HealthCheckResponse:
    """Report engine liveness.

    Args:
        request: The HealthCheckRequest protobuf
        context: gRPC context

    Returns:
        HealthCheckResponse with healthy=True unless the engine errored.
    """
    if self.async_llm.errored:
        healthy, message = False, "Engine is not alive"
    else:
        healthy, message = True, "Health"
    logger.debug("HealthCheck request: healthy=%s, message=%s", healthy, message)
    return vllm_engine_pb2.HealthCheckResponse(healthy=healthy, message=message)
async def Abort(
    self,
    request: vllm_engine_pb2.AbortRequest,
    context: grpc.aio.ServicerContext,
) -> vllm_engine_pb2.AbortResponse:
    """Cancel in-flight requests out-of-band by request id.

    Args:
        request: The AbortRequest protobuf
        context: gRPC context

    Returns:
        An (empty) AbortResponse protobuf.
    """
    ids = request.request_ids
    logger.debug("Abort requests: %s", ids)
    await self.async_llm.abort(ids)
    return vllm_engine_pb2.AbortResponse()
async def GetModelInfo(
self,
request: vllm_engine_pb2.GetModelInfoRequest,
context: grpc.aio.ServicerContext,
) -> vllm_engine_pb2.GetModelInfoResponse:
"""
Handle model info requests.
Args:
request: The GetModelInfoRequest protobuf
context: gRPC context
Returns:
GetModelInfoResponse protobuf
"""
model_config = self.async_llm.model_config
return vllm_engine_pb2.GetModelInfoResponse(
model_path=model_config.model,
is_generation=model_config.runner_type == "generate",
max_context_length=model_config.max_model_len,
vocab_size=model_config.get_vocab_size(),
supports_vision=model_config.is_multimodal_model,
)
    async def GetServerInfo(
        self,
        request: vllm_engine_pb2.GetServerInfoRequest,
        context: grpc.aio.ServicerContext,
    ) -> vllm_engine_pb2.GetServerInfoResponse:
        """
        Handle server info requests.
        Args:
            request: The GetServerInfoRequest protobuf
            context: gRPC context
        Returns:
            GetServerInfoResponse protobuf
        """
        # Number of requests the output processor has not yet finished.
        num_requests = self.async_llm.output_processor.get_num_unfinished_requests()
        return vllm_engine_pb2.GetServerInfoResponse(
            active_requests=num_requests,
            is_paused=False,  # TODO: no pause mechanism is wired up yet
            # NOTE(review): this reports "now", not the time of the last
            # received request — confirm intended, as the TODO suggests.
            last_receive_timestamp=time.time(),  # TODO looks wrong?
            uptime_seconds=time.time() - self.start_time,
            server_type="vllm-grpc",
        )
# ========== Helper methods ==========
    @staticmethod
    def _sampling_params_from_proto(
        params: vllm_engine_pb2.SamplingParams, stream: bool = True
    ) -> SamplingParams:
        """
        Convert protobuf SamplingParams to vLLM SamplingParams.
        Args:
            params: Protobuf SamplingParams message
            stream: Whether streaming is enabled; selects DELTA vs
                FINAL_ONLY output kind
        Returns:
            vLLM SamplingParams with detokenize enabled only when stop
            strings are present, plus any structured-output constraint
        """
        # Build stop sequences; use None (not empty containers) when unset.
        stop = list(params.stop) if params.stop else None
        stop_token_ids = list(params.stop_token_ids) if params.stop_token_ids else None
        # Handle structured outputs constraints: the proto carries at most
        # one member of the "constraint" oneof.
        structured_outputs = None
        constraint_field = params.WhichOneof("constraint")
        if constraint_field:
            if constraint_field == "json_schema":
                structured_outputs = StructuredOutputsParams(json=params.json_schema)
            elif constraint_field == "regex":
                structured_outputs = StructuredOutputsParams(regex=params.regex)
            elif constraint_field == "grammar":
                structured_outputs = StructuredOutputsParams(grammar=params.grammar)
            elif constraint_field == "structural_tag":
                structured_outputs = StructuredOutputsParams(
                    structural_tag=params.structural_tag
                )
            elif constraint_field == "json_object":
                structured_outputs = StructuredOutputsParams(
                    json_object=params.json_object
                )
            elif constraint_field == "choice":
                structured_outputs = StructuredOutputsParams(
                    choice=list(params.choice.choices)
                )
        # Create SamplingParams.
        # output_kind=DELTA: Return only new tokens in each chunk (for streaming).
        # NOTE: optional proto fields are checked via HasField(); plain proto3
        # scalars default to 0, so 0.0 is treated as "unset" for top_p and
        # repetition_penalty and mapped to the vLLM default of 1.0.
        return SamplingParams(
            temperature=params.temperature if params.HasField("temperature") else 1.0,
            top_p=params.top_p if params.top_p != 0.0 else 1.0,
            top_k=params.top_k,
            min_p=params.min_p,
            frequency_penalty=params.frequency_penalty,
            presence_penalty=params.presence_penalty,
            repetition_penalty=params.repetition_penalty
            if params.repetition_penalty != 0.0
            else 1.0,
            max_tokens=params.max_tokens if params.HasField("max_tokens") else None,
            min_tokens=params.min_tokens,
            stop=stop,
            stop_token_ids=stop_token_ids,
            skip_special_tokens=params.skip_special_tokens,
            spaces_between_special_tokens=params.spaces_between_special_tokens,
            ignore_eos=params.ignore_eos,
            n=params.n if params.n > 0 else 1,
            logprobs=params.logprobs if params.HasField("logprobs") else None,
            prompt_logprobs=params.prompt_logprobs
            if params.HasField("prompt_logprobs")
            else None,
            seed=params.seed if params.HasField("seed") else None,
            include_stop_str_in_output=params.include_stop_str_in_output,
            logit_bias=dict(params.logit_bias) if params.logit_bias else None,
            structured_outputs=structured_outputs,
            # detokenize must be True if stop strings are used
            detokenize=bool(stop),
            output_kind=RequestOutputKind.DELTA
            if stream
            else RequestOutputKind.FINAL_ONLY,
        )
@staticmethod
def _tokenization_kwargs_from_proto(
params: vllm_engine_pb2.SamplingParams,
) -> dict[str, int] | None:
if params.HasField("truncate_prompt_tokens"):
return {"truncate_prompt_tokens": params.truncate_prompt_tokens}
return None
@staticmethod
def _chunk_response(output: RequestOutput) -> vllm_engine_pb2.GenerateResponse:
"""
Build a streaming chunk response from vLLM output.
When output_kind=DELTA, vLLM returns only new tokens automatically.
Args:
output: vLLM RequestOutput (with delta tokens when output_kind=DELTA)
Returns:
GenerateResponse with chunk field set
"""
# Get the completion output (first one if n > 1)
completion = output.outputs[0] if output.outputs else None
if completion is None:
# Empty chunk
return vllm_engine_pb2.GenerateResponse(
chunk=vllm_engine_pb2.GenerateStreamChunk(
token_ids=[],
prompt_tokens=0,
completion_tokens=0,
cached_tokens=0,
),
)
# When output_kind=DELTA, completion.token_ids contains only new tokens
# vLLM handles the delta logic internally
# completion_tokens = delta count (client will accumulate)
return vllm_engine_pb2.GenerateResponse(
chunk=vllm_engine_pb2.GenerateStreamChunk(
token_ids=completion.token_ids,
prompt_tokens=len(output.prompt_token_ids)
if output.prompt_token_ids
else 0,
completion_tokens=len(completion.token_ids), # Delta count
cached_tokens=output.num_cached_tokens,
),
)
@staticmethod
def _complete_response(output: RequestOutput) -> vllm_engine_pb2.GenerateResponse:
"""
Build a final completion response from vLLM output.
Args:
output: vLLM RequestOutput (finished=True)
Returns:
GenerateResponse with complete field set
"""
# Get the completion output (first one if n > 1)
completion = output.outputs[0] if output.outputs else None
if completion is None:
# Empty completion
return vllm_engine_pb2.GenerateResponse(
complete=vllm_engine_pb2.GenerateComplete(
output_ids=[],
finish_reason="error",
prompt_tokens=0,
completion_tokens=0,
cached_tokens=0,
),
)
# Build complete response
# When streaming (DELTA mode): completion.token_ids will be empty/last delta
# When non-streaming (FINAL_ONLY mode): completion.token_ids has all tokens
# Client will accumulate token counts for streaming
return vllm_engine_pb2.GenerateResponse(
complete=vllm_engine_pb2.GenerateComplete(
output_ids=completion.token_ids,
finish_reason=completion.finish_reason or "stop",
prompt_tokens=len(output.prompt_token_ids)
if output.prompt_token_ids
else 0,
completion_tokens=len(completion.token_ids),
cached_tokens=output.num_cached_tokens,
),
)
async def serve_grpc(args: argparse.Namespace):
    """
    Main serving function: builds the engine, serves gRPC until a shutdown
    signal, then tears everything down in reverse order.
    Args:
        args: Parsed command line arguments
    """
    log_version_and_model(logger, VLLM_VERSION, args.model)
    logger.info("vLLM gRPC server args: %s", args)
    # Recorded before engine construction so uptime includes startup time.
    start_time = time.time()
    # Create engine args and build the full vLLM config from the CLI.
    engine_args = AsyncEngineArgs.from_cli_args(args)
    vllm_config = engine_args.create_engine_config(
        usage_context=UsageContext.OPENAI_API_SERVER
    )
    # Create AsyncLLM (the async engine frontend).
    async_llm = AsyncLLM.from_vllm_config(
        vllm_config=vllm_config,
        usage_context=UsageContext.OPENAI_API_SERVER,
        enable_log_requests=args.enable_log_requests,
        disable_log_stats=args.disable_log_stats_server,
    )
    # Create servicer wrapping the engine.
    servicer = VllmEngineServicer(async_llm, start_time)
    # Create gRPC server; -1 lifts the message-size limits (large prompts
    # and token streams).
    server = grpc.aio.server(
        options=[
            ("grpc.max_send_message_length", -1),
            ("grpc.max_receive_message_length", -1),
        ],
    )
    # Add servicer to server
    vllm_engine_pb2_grpc.add_VllmEngineServicer_to_server(servicer, server)
    # Enable reflection for grpcurl and other tools
    service_names = (
        vllm_engine_pb2.DESCRIPTOR.services_by_name["VllmEngine"].full_name,
        reflection.SERVICE_NAME,
    )
    reflection.enable_server_reflection(service_names, server)
    # Bind to address (must happen before start()).
    address = f"{args.host}:{args.port}"
    server.add_insecure_port(address)
    # Start server
    await server.start()
    logger.info("vLLM gRPC server started on %s", address)
    logger.info("Server is ready to accept requests")
    # Handle shutdown signals: SIGTERM/SIGINT set an event instead of
    # killing the process, so shutdown below runs cleanly.
    loop = asyncio.get_running_loop()
    stop_event = asyncio.Event()
    def signal_handler():
        logger.info("Received shutdown signal")
        stop_event.set()
    for sig in (signal.SIGTERM, signal.SIGINT):
        loop.add_signal_handler(sig, signal_handler)
    # Serve until shutdown signal
    try:
        await stop_event.wait()
    except KeyboardInterrupt:
        logger.info("Interrupted by user")
    finally:
        logger.info("Shutting down vLLM gRPC server...")
        # Stop gRPC server first (5s grace for in-flight RPCs), then the
        # engine, so no RPC observes a dead engine.
        await server.stop(grace=5.0)
        logger.info("gRPC server stopped")
        # Shutdown AsyncLLM
        async_llm.shutdown()
        logger.info("AsyncLLM engine stopped")
        logger.info("Shutdown complete")
def main():
    """Main entry point: parse CLI args and run the gRPC server to completion."""
    parser = FlexibleArgumentParser(
        description="vLLM gRPC Server",
    )
    # Server-specific args (everything else comes from AsyncEngineArgs below).
    parser.add_argument(
        "--host",
        type=str,
        default="0.0.0.0",
        help="Host to bind gRPC server to",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=50051,
        help="Port to bind gRPC server to",
    )
    parser.add_argument(
        "--disable-log-stats-server",
        action="store_true",
        help="Disable stats logging on server side",
    )
    # Add vLLM engine args
    parser = AsyncEngineArgs.add_cli_args(parser)
    args = parser.parse_args()
    # Run server on uvloop; exit non-zero on any startup/serving failure.
    try:
        uvloop.run(serve_grpc(args))
    except Exception as e:
        logger.exception("Server failed: %s", e)
        sys.exit(1)
# Allow running this module directly as a standalone gRPC server script.
if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/grpc_server.py",
"license": "Apache License 2.0",
"lines": 455,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/grpc/compile_protos.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Compile vLLM protobuf definitions into Python code.
This script uses grpcio-tools to generate *_pb2.py, *_pb2_grpc.py, and
*_pb2.pyi (type stubs) files from the vllm_engine.proto definition.
NOTE: Proto compilation happens automatically during package build (via setup.py).
This script is provided for developers who want to regenerate protos manually,
e.g., after modifying vllm_engine.proto.
Usage:
python vllm/grpc/compile_protos.py
Requirements:
pip install grpcio-tools
"""
import sys
from pathlib import Path
def compile_protos():
    """Compile the vLLM protobuf definitions with grpcio-tools.

    Generates vllm_engine_pb2.py, vllm_engine_pb2_grpc.py, and the
    vllm_engine_pb2.pyi type stubs next to vllm_engine.proto, then
    prepends SPDX / mypy headers to the generated files.

    Returns:
        0 on success; a non-zero code when the proto file is missing,
        grpcio-tools is not installed, or protoc fails.
    """
    # Get the vllm package root directory
    script_dir = Path(__file__).parent
    # vllm/vllm/grpc -> vllm/ ; used as proto_path so the generated
    # modules land in the vllm.grpc package.
    vllm_package_root = script_dir.parent.parent
    proto_file = script_dir / "vllm_engine.proto"
    if not proto_file.exists():
        print(f"Error: Proto file not found at {proto_file}")
        return 1
    print(f"Compiling protobuf: {proto_file}")
    print(f"Output directory: {script_dir}")
    # Compile the proto file
    # We use vllm/vllm as the proto_path so that the package is vllm.grpc.engine
    try:
        from grpc_tools import protoc

        result = protoc.main(
            [
                "grpc_tools.protoc",
                f"--proto_path={vllm_package_root}",
                f"--python_out={vllm_package_root}",
                f"--grpc_python_out={vllm_package_root}",
                f"--pyi_out={vllm_package_root}",  # Generate type stubs
                # Reuse proto_file instead of rebuilding the same path.
                str(proto_file),
            ]
        )
        # Guard clause: bail out early on protoc failure instead of nesting
        # the success path inside an if/else.
        if result != 0:
            print(f"Error: protoc returned {result}")
            return result
        # Add SPDX headers to generated files
        spdx_header = (
            "# SPDX-License-Identifier: Apache-2.0\n"
            "# SPDX-FileCopyrightText: Copyright contributors to the vLLM project\n"
        )
        # Single source of truth for the generated outputs (was duplicated
        # between the header loop and the success prints).
        generated_files = [
            script_dir / "vllm_engine_pb2.py",
            script_dir / "vllm_engine_pb2_grpc.py",
            script_dir / "vllm_engine_pb2.pyi",
        ]
        for generated_file in generated_files:
            if generated_file.exists():
                content = generated_file.read_text()
                if not content.startswith("# SPDX-License-Identifier"):
                    # Add mypy ignore-errors comment for all generated files
                    header = spdx_header + "# mypy: ignore-errors\n"
                    generated_file.write_text(header + content)
        print("✓ Protobuf compilation successful!")
        for generated_file in generated_files:
            suffix = " (type stubs)" if generated_file.suffix == ".pyi" else ""
            print(f"  Generated: {generated_file}{suffix}")
        return 0
    except ImportError:
        print("Error: grpcio-tools not installed")
        print("Install with: pip install grpcio-tools")
        return 1
    except Exception as e:
        print(f"Error during compilation: {e}")
        return 1
# Propagate compile_protos()'s return code as the process exit status.
if __name__ == "__main__":
    sys.exit(compile_protos())
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/grpc/compile_protos.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/oracle/nvfp4.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from enum import Enum
import torch
import vllm.envs as envs
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.config.kernel import MoEBackend
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.all2all_utils import (
maybe_make_prepare_finalize,
)
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEQuantConfig,
mxfp4_w4a16_moe_quant_config,
nvfp4_moe_quant_config,
nvfp4_w4a16_moe_quant_config,
)
from vllm.model_executor.layers.quantization.utils.flashinfer_fp4_moe import (
is_supported_config_trtllm,
prepare_nvfp4_moe_layer_for_fi_or_cutlass,
)
from vllm.model_executor.layers.quantization.utils.flashinfer_utils import (
FlashinferMoeBackend,
get_flashinfer_moe_backend,
)
from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
prepare_nvfp4_moe_layer_for_marlin,
)
from vllm.model_executor.layers.quantization.utils.quant_utils import (
QuantKey,
)
logger = init_logger(__name__)
class NvFp4MoeBackend(Enum):
    """Kernel backends capable of running NvFP4-quantized fused MoE."""

    FLASHINFER_TRTLLM = "FLASHINFER_TRTLLM"
    FLASHINFER_CUTLASS = "FLASHINFER_CUTLASS"
    FLASHINFER_CUTEDSL = "FLASHINFER_CUTEDSL"
    VLLM_CUTLASS = "VLLM_CUTLASS"
    MARLIN = "MARLIN"
# Backends provided by FlashInfer. Used both for env-var gating in
# select_nvfp4_moe_backend() and for the global-scaling-factor check below.
FLASHINFER_NVFP4_MOE_BACKENDS = [
    NvFp4MoeBackend.FLASHINFER_TRTLLM,
    NvFp4MoeBackend.FLASHINFER_CUTLASS,
    NvFp4MoeBackend.FLASHINFER_CUTEDSL,
]
# Maps FlashInfer's own backend enum to this module's NvFp4MoeBackend.
fi_2_vllm_backend_map: dict[FlashinferMoeBackend, NvFp4MoeBackend] = {
    FlashinferMoeBackend.CUTLASS: NvFp4MoeBackend.FLASHINFER_CUTLASS,
    FlashinferMoeBackend.TENSORRT_LLM: NvFp4MoeBackend.FLASHINFER_TRTLLM,
    FlashinferMoeBackend.CUTEDSL: NvFp4MoeBackend.FLASHINFER_CUTEDSL,
}
def is_global_sf_supported_for_nvfp4_backend(backend: NvFp4MoeBackend) -> bool:
    # Checks whether `backend` supports quantizing with scaling factors
    # of all experts in Expert Parallel Mode when all experts are not
    # on the same rank. Only the FlashInfer-provided backends qualify.
    return backend in FLASHINFER_NVFP4_MOE_BACKENDS
def backend_to_kernel_cls(
    backend: NvFp4MoeBackend,
) -> type[mk.FusedMoEPermuteExpertsUnpermute]:
    """Return the modular-kernel experts class for `backend`.

    Imports are done lazily inside each branch so that only the selected
    backend's dependencies are loaded.

    Raises:
        NotImplementedError: for FLASHINFER_TRTLLM, which has no modular
            kernel class.
        ValueError: for an unrecognized backend.
    """
    if backend == NvFp4MoeBackend.FLASHINFER_TRTLLM:
        raise NotImplementedError(
            "FLASHINFER_TRTLLM doesn't support Modular Kernel Interface"
        )
    elif backend == NvFp4MoeBackend.FLASHINFER_CUTLASS:
        from vllm.model_executor.layers.fused_moe.flashinfer_cutlass_moe import (
            FlashInferExperts,
        )
        return FlashInferExperts
    elif backend == NvFp4MoeBackend.FLASHINFER_CUTEDSL:
        from vllm.model_executor.layers.fused_moe.flashinfer_cutedsl_moe import (
            FlashInferCuteDSLExperts,
        )
        return FlashInferCuteDSLExperts
    elif backend == NvFp4MoeBackend.VLLM_CUTLASS:
        from vllm.model_executor.layers.fused_moe.cutlass_moe import (
            CutlassExpertsFp4,
        )
        return CutlassExpertsFp4
    elif backend == NvFp4MoeBackend.MARLIN:
        from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
            MarlinExperts,
        )
        return MarlinExperts
    else:
        raise ValueError(f"Unknown NvFP4 MoE backend: {backend.value}")
def map_nvfp4_backend(runner_backend: MoEBackend) -> NvFp4MoeBackend:
    """Translate the user-facing MoE backend name into an NvFp4MoeBackend.

    Raises:
        ValueError: if the name has no NvFP4 equivalent.
    """
    name_to_backend = {
        "cutlass": NvFp4MoeBackend.VLLM_CUTLASS,
        "flashinfer_trtllm": NvFp4MoeBackend.FLASHINFER_TRTLLM,
        "flashinfer_cutlass": NvFp4MoeBackend.FLASHINFER_CUTLASS,
        "flashinfer_cutedsl": NvFp4MoeBackend.FLASHINFER_CUTEDSL,
        "marlin": NvFp4MoeBackend.MARLIN,
    }
    resolved = name_to_backend.get(runner_backend)
    if resolved is not None:
        return resolved
    raise ValueError(
        f"moe_backend='{runner_backend}' is not supported for NvFP4 MoE. "
        f"Expected one of {list(name_to_backend.keys())}."
    )
def select_nvfp4_moe_backend(
    config: FusedMoEConfig,
    weight_key: QuantKey | None,
    activation_key: QuantKey | None,
) -> tuple[NvFp4MoeBackend, type[mk.FusedMoEPermuteExpertsUnpermute] | None]:
    """
    Select the primary NvFP4 MoE backend.

    Resolution order: explicit `moe_backend` from the user, then the
    VLLM_USE_FLASHINFER_MOE_FP4 / VLLM_FLASHINFER_MOE_BACKEND env vars,
    then VLLM_TEST_FORCE_FP8_MARLIN, then the first supported entry in
    the built-in priority list.

    Returns:
        (backend, kernel_cls) where kernel_cls is None for backends
        without a modular-kernel class (FLASHINFER_TRTLLM).
    Note: Shape-specific fallbacks may still occur at runtime.
    """
    # NOTE: the kernels are selected in the following order.
    AVAILABLE_BACKENDS = [
        NvFp4MoeBackend.FLASHINFER_TRTLLM,
        NvFp4MoeBackend.FLASHINFER_CUTEDSL,
        NvFp4MoeBackend.FLASHINFER_CUTLASS,
        NvFp4MoeBackend.VLLM_CUTLASS,
        NvFp4MoeBackend.MARLIN,
    ]
    # NOTE(rob): this is kind of a hack. We need to peek into
    # the prepare-finalize selection to determine if we are using
    # the batched or standard expert format.
    use_batched = config.moe_parallel_config.use_deepep_ll_kernels
    activation_format = (
        mk.FusedMoEActivationFormat.BatchedExperts
        if use_batched
        else mk.FusedMoEActivationFormat.Standard
    )
    def _make_log_backend(backend: NvFp4MoeBackend):
        # Log line for a successful selection.
        available_backend_strs = [b.value for b in AVAILABLE_BACKENDS]
        return (
            f"Using '{backend.value}' NvFp4 MoE backend out "
            f"of potential backends: {available_backend_strs}."
        )
    def _make_log_unsupported(backend: NvFp4MoeBackend, reason: str | None) -> str:
        # Log/error line for a rejected backend, with optional reason.
        if reason:
            return (
                f"NvFp4 MoE backend '{backend.value}' does not support the "
                f"deployment configuration since {reason}."
            )
        else:
            return (
                f"NvFp4 MoE backend '{backend.value}' does not support the "
                "deployment configuration."
            )
    def _return_or_raise(
        backend: NvFp4MoeBackend,
        config: FusedMoEConfig,
        weight_key: QuantKey | None,
        activation_key: QuantKey | None,
        activation_format: mk.FusedMoEActivationFormat,
    ) -> tuple[NvFp4MoeBackend, type[mk.FusedMoEPermuteExpertsUnpermute]]:
        # Validate an explicitly requested backend; error out if unsupported.
        k_cls = backend_to_kernel_cls(backend)
        supported, reason = k_cls.is_supported_config(
            k_cls, config, weight_key, activation_key, activation_format
        )
        if supported:
            logger.info_once(_make_log_backend(backend))
            return backend, k_cls
        raise ValueError(_make_log_unsupported(backend, reason))
    # Handle explicit moe_backend from user.
    runner_backend = config.moe_backend
    if runner_backend != "auto":
        requested_backend = map_nvfp4_backend(runner_backend)
        # TRTLLM has no kernel class, so it gets a dedicated check.
        if requested_backend == NvFp4MoeBackend.FLASHINFER_TRTLLM:
            supported, reason = is_supported_config_trtllm(
                config, weight_key, activation_key, activation_format
            )
            if supported:
                logger.info_once(_make_log_backend(requested_backend))
                return requested_backend, None
            raise ValueError(_make_log_unsupported(requested_backend, reason))
        return _return_or_raise(
            requested_backend, config, weight_key, activation_key, activation_format
        )
    if envs.is_set("VLLM_USE_FLASHINFER_MOE_FP4"):
        if not envs.VLLM_USE_FLASHINFER_MOE_FP4:
            # If the user rejects FlashInfer remove those backends.
            for b in FLASHINFER_NVFP4_MOE_BACKENDS:
                AVAILABLE_BACKENDS.remove(b)
        elif envs.is_set("VLLM_FLASHINFER_MOE_BACKEND"):
            # If user is explicit about backend, validate it.
            fi_backend = get_flashinfer_moe_backend()
            if fi_backend == FlashinferMoeBackend.TENSORRT_LLM:
                backend = NvFp4MoeBackend.FLASHINFER_TRTLLM
                supported, reason = is_supported_config_trtllm(
                    config, weight_key, activation_key, activation_format
                )
                if supported:
                    logger.info_once(_make_log_backend(backend))
                    return backend, None
                else:
                    raise ValueError(_make_log_unsupported(backend, reason))
            else:
                backend = fi_2_vllm_backend_map[fi_backend]
                return _return_or_raise(
                    backend, config, weight_key, activation_key, activation_format
                )
        else:
            # If the user is not explicit about the backend, try each.
            for backend in FLASHINFER_NVFP4_MOE_BACKENDS:
                if backend == NvFp4MoeBackend.FLASHINFER_TRTLLM:
                    k_cls = None
                    supported, reason = is_supported_config_trtllm(
                        config,
                        weight_key,
                        activation_key,
                        activation_format,
                    )
                else:
                    k_cls = backend_to_kernel_cls(backend)
                    supported, reason = k_cls.is_supported_config(
                        k_cls,
                        config,
                        weight_key,
                        activation_key,
                        activation_format,
                    )
                if supported:
                    logger.info_once(_make_log_backend(backend), scope="local")
                    # NOTE(review): returns None for the kernel class even
                    # when k_cls was resolved above — inconsistent with the
                    # generic loop at the bottom, which returns k_cls.
                    # Confirm whether `return backend, k_cls` was intended.
                    return backend, None
                else:
                    logger.debug_once(
                        _make_log_unsupported(backend, reason), scope="local"
                    )
            raise NotImplementedError(
                "Found VLLM_USE_FLASHINFER_MOE_FP4=1, but no "
                "FlashInfer NVFP4 MoE backend supports the configuration."
            )
    if envs.VLLM_TEST_FORCE_FP8_MARLIN:
        backend = NvFp4MoeBackend.MARLIN
        return _return_or_raise(
            backend, config, weight_key, activation_key, activation_format
        )
    # Select kernels in order of backend.
    for backend in AVAILABLE_BACKENDS:
        if backend == NvFp4MoeBackend.FLASHINFER_TRTLLM:
            k_cls = None  # type: ignore[assignment]
            supported, reason = is_supported_config_trtllm(
                config,
                weight_key,
                activation_key,
                activation_format,
            )
        else:
            k_cls = backend_to_kernel_cls(backend)
            supported, reason = k_cls.is_supported_config(
                k_cls,
                config,
                weight_key,
                activation_key,
                activation_format,
            )
        if supported:
            logger.info_once(_make_log_backend(backend), scope="local")
            return backend, k_cls
        else:
            logger.debug_once(_make_log_unsupported(backend, reason), scope="local")
    raise NotImplementedError(
        "No NvFp4 MoE backend supports the deployment configuration."
    )
def convert_to_nvfp4_moe_kernel_format(
    nvfp4_backend: NvFp4MoeBackend,
    layer: torch.nn.Module,
    w13: torch.Tensor,
    w13_scale: torch.Tensor,
    w13_scale_2: torch.Tensor,
    a13_scale: torch.Tensor | None,
    w2: torch.Tensor,
    w2_scale: torch.Tensor,
    w2_scale_2: torch.Tensor,
    a2_scale: torch.Tensor | None,
    is_act_and_mul: bool,
) -> tuple[
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
]:
    """Re-layout NvFP4 MoE weights/scales for the selected backend.

    Dispatches to the FlashInfer/CUTLASS preparation helper or the Marlin
    one, and returns the (possibly transformed) tensors in the same order
    they were passed in: (w13, w13_scale, w13_scale_2, a13_scale, w2,
    w2_scale, w2_scale_2, a2_scale).

    Raises:
        ValueError: for an unrecognized backend.
    """
    if (
        nvfp4_backend in FLASHINFER_NVFP4_MOE_BACKENDS
        or nvfp4_backend == NvFp4MoeBackend.VLLM_CUTLASS
    ):
        (
            w13,
            w13_scale,
            w13_scale_2,
            a13_scale,
            w2,
            w2_scale,
            w2_scale_2,
            a2_scale,
        ) = prepare_nvfp4_moe_layer_for_fi_or_cutlass(
            backend=nvfp4_backend,
            layer=layer,
            w13=w13,
            w13_scale=w13_scale,
            w13_scale_2=w13_scale_2,
            a13_scale=a13_scale,
            w2=w2,
            w2_scale=w2_scale,
            w2_scale_2=w2_scale_2,
            a2_scale=a2_scale,
            is_act_and_mul=is_act_and_mul,
        )
    elif nvfp4_backend == NvFp4MoeBackend.MARLIN:
        # Marlin runs weight-only (W4A16), so activation scales are dropped.
        a13_scale = None
        a2_scale = None
        (
            w13,
            w13_scale,
            w13_scale_2,
            w2,
            w2_scale,
            w2_scale_2,
        ) = prepare_nvfp4_moe_layer_for_marlin(
            layer=layer,
            w13=w13,
            w13_scale=w13_scale,
            w13_scale_2=w13_scale_2,
            w2=w2,
            w2_scale=w2_scale,
            w2_scale_2=w2_scale_2,
            is_act_and_mul=is_act_and_mul,
        )
    else:
        raise ValueError(f"Unknown NvFp4 backend for MoE: {nvfp4_backend}")
    return (
        w13,
        w13_scale,
        w13_scale_2,
        a13_scale,
        w2,
        w2_scale,
        w2_scale_2,
        a2_scale,
    )
def make_mxfp4_moe_quant_config(
    w13_scale: torch.Tensor,
    w2_scale: torch.Tensor,
) -> FusedMoEQuantConfig:
    """Build a W4A16 MXFP4 MoE quant config from the fused w13/w2 scales."""
    return mxfp4_w4a16_moe_quant_config(w1_scale=w13_scale, w2_scale=w2_scale)
def make_nvfp4_moe_quant_config(
    backend: NvFp4MoeBackend,
    w13_scale: torch.Tensor,
    w2_scale: torch.Tensor,
    w13_scale_2: torch.Tensor,
    w2_scale_2: torch.Tensor,
    a13_scale: torch.Tensor,
    a2_scale: torch.Tensor,
) -> FusedMoEQuantConfig | None:
    """Build the FusedMoEQuantConfig for the given NvFP4 backend.

    Returns None for backends that do not consume a modular-kernel quant
    config (FlashInfer TRTLLM).
    """
    # TRTLLM builds its own quantization arguments internally.
    if backend == NvFp4MoeBackend.FLASHINFER_TRTLLM:
        return None
    # Marlin is weight-only (W4A16): no activation global scales.
    if backend == NvFp4MoeBackend.MARLIN:
        return nvfp4_w4a16_moe_quant_config(
            g1_alphas=w13_scale_2,
            g2_alphas=w2_scale_2,
            w1_scale=w13_scale,
            w2_scale=w2_scale,
        )
    # W4A4 path: fold activation and weight global scales into the alphas.
    return nvfp4_moe_quant_config(
        g1_alphas=a13_scale * w13_scale_2,
        g2_alphas=a2_scale * w2_scale_2,
        a1_gscale=(1.0 / a13_scale),
        a2_gscale=(1.0 / a2_scale),
        w1_scale=w13_scale,
        w2_scale=w2_scale,
    )
def make_nvfp4_moe_kernel(
    moe_quant_config: FusedMoEQuantConfig,
    moe_config: FusedMoEConfig,
    experts_cls: type[mk.FusedMoEPermuteExpertsUnpermute],
    routing_tables: tuple[torch.Tensor, torch.Tensor, torch.Tensor] | None = None,
    shared_experts: torch.nn.Module | None = None,
) -> mk.FusedMoEModularKernel:
    """Assemble a modular NvFP4 MoE kernel: prepare/finalize + experts.

    Batched-format prepare/finalize implementations require extra
    constructor arguments (max tokens per rank, number of dispatchers).
    """
    # Create Prepare/Finalize.
    prepare_finalize = maybe_make_prepare_finalize(
        moe=moe_config,
        quant_config=moe_quant_config,
        routing_tables=routing_tables,
        allow_new_interface=True,
    )
    assert prepare_finalize is not None
    logger.info_once("Using %s", prepare_finalize.__class__.__name__)
    # Create Experts; the batched format needs per-rank sizing info.
    if prepare_finalize.activation_format == mk.FusedMoEActivationFormat.BatchedExperts:
        max_num_tokens = prepare_finalize.max_num_tokens_per_rank()
        assert max_num_tokens is not None
        experts = experts_cls(
            moe_config=moe_config,
            quant_config=moe_quant_config,
            max_num_tokens=max_num_tokens,
            num_dispatchers=prepare_finalize.num_dispatchers(),
        )
    else:
        experts = experts_cls(
            moe_config=moe_config,
            quant_config=moe_quant_config,
        )
    # NOTE(rob): we only want the mk to control the shared_expert
    # if using all2all (for SBO). bnell is making this explicit in
    # the new MoE runner class.
    kernel = mk.FusedMoEModularKernel(
        prepare_finalize,
        experts,
        shared_experts=(
            shared_experts
            if moe_config.moe_parallel_config.use_all2all_kernels
            else None
        ),
        moe_parallel_config=moe_config.moe_parallel_config,
        inplace=False,
    )
    # TODO(rob): update inplace logic to be part of the kernel.
    return kernel
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/oracle/nvfp4.py",
"license": "Apache License 2.0",
"lines": 421,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/fallback.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.model_executor.layers.fused_moe.config import FusedMoEParallelConfig
from vllm.model_executor.layers.quantization.utils.quant_utils import QuantKey
class FallbackExperts(mk.FusedMoEPermuteExpertsUnpermute, ABC):
    """Base class for runtime dispatching of expert implementations.

    Wraps a primary `experts` implementation and a `fallback_experts`
    implementation; `_select_experts_impl` picks one per `apply` call.
    Capability queries report support only when BOTH implementations
    agree, so either one can safely be dispatched to at runtime.
    """
    def __init__(
        self,
        experts: mk.FusedMoEPermuteExpertsUnpermute,
        fallback_experts: mk.FusedMoEPermuteExpertsUnpermute,
    ):
        # Configs come from the primary implementation; the fallback is
        # presumably built against the same configs — TODO confirm.
        super().__init__(
            moe_config=experts.moe_config, quant_config=experts.quant_config
        )
        self.fallback_experts = fallback_experts
        self.experts = experts
    @staticmethod
    def get_clses() -> tuple[
        type[mk.FusedMoEPermuteExpertsUnpermute],
        type[mk.FusedMoEPermuteExpertsUnpermute],
    ]:
        """
        Get the cls for the experts and fallback experts.
        Subclasses should implement this method, so that
        we have a consistent way to call the _supports_*
        class methods below.
        """
        raise NotImplementedError(
            "Subclasses must return the cls for the experts and fallback experts."
        )
    @classmethod
    def activation_format(
        cls: type["FallbackExperts"],
    ) -> mk.FusedMoEActivationFormat:
        """Shared activation format; both implementations must agree."""
        experts_cls, fallback_cls = cls.get_clses()
        assert experts_cls.activation_format() == fallback_cls.activation_format()
        return experts_cls.activation_format()
    @classmethod
    def _supports_current_device(cls) -> bool:
        # Supported only if both implementations run on this device.
        experts_cls, fallback_cls = cls.get_clses()
        return (
            experts_cls._supports_current_device()
            and fallback_cls._supports_current_device()
        )
    @classmethod
    def _supports_no_act_and_mul(cls) -> bool:
        # Conjunction: both implementations must handle the no-act-and-mul case.
        experts_cls, fallback_cls = cls.get_clses()
        return (
            experts_cls._supports_no_act_and_mul()
            and fallback_cls._supports_no_act_and_mul()
        )
    @classmethod
    def _supports_quant_scheme(
        cls,
        weight_key: QuantKey | None,
        activation_key: QuantKey | None,
    ) -> bool:
        # Conjunction: both implementations must support the quant scheme.
        experts_cls, fallback_cls = cls.get_clses()
        return experts_cls._supports_quant_scheme(
            weight_key, activation_key
        ) and fallback_cls._supports_quant_scheme(weight_key, activation_key)
    @classmethod
    def _supports_activation(cls, activation: MoEActivation) -> bool:
        # Conjunction: both implementations must support the activation.
        experts_cls, fallback_cls = cls.get_clses()
        return experts_cls._supports_activation(
            activation
        ) and fallback_cls._supports_activation(activation)
    @classmethod
    def _supports_parallel_config(
        cls, moe_parallel_config: FusedMoEParallelConfig
    ) -> bool:
        # Conjunction: both implementations must support the parallel config.
        experts_cls, fallback_cls = cls.get_clses()
        return experts_cls._supports_parallel_config(
            moe_parallel_config
        ) and fallback_cls._supports_parallel_config(moe_parallel_config)
    def supports_chunking(self) -> bool:
        """True if both wrapped implementations support chunking."""
        assert (
            self.experts.supports_chunking()
            == self.fallback_experts.supports_chunking()
        )
        return (
            self.experts.supports_chunking()
            and self.fallback_experts.supports_chunking()
        )
    def supports_expert_map(self) -> bool:
        """True if both wrapped implementations support expert maps."""
        assert (
            self.experts.supports_expert_map()
            == self.fallback_experts.supports_expert_map()
        )
        return (
            self.experts.supports_expert_map()
            and self.fallback_experts.supports_expert_map()
        )
    def finalize_weight_and_reduce_impl(self) -> mk.TopKWeightAndReduce:
        """Return the weight-and-reduce impl the two implementations share."""
        e_war = self.experts.finalize_weight_and_reduce_impl()
        fbe_war = self.fallback_experts.finalize_weight_and_reduce_impl()
        is_dge_war = e_war is not None
        is_fbe_war = fbe_war is not None
        # If both define an impl they must agree; otherwise fall back to
        # whichever one is defined.
        if is_dge_war and is_fbe_war:
            assert e_war == fbe_war, (
                "Both implementations should agree on WeightAndReduce impls. "
                f"Got e_war: {e_war}, and fbe_war: {fbe_war}"
            )
        if e_war is not None:
            return e_war
        assert fbe_war is not None
        return fbe_war
    @abstractmethod
    def workspace_shapes(
        self,
        M: int,
        N: int,
        K: int,
        topk: int,
        global_num_experts: int,
        local_num_experts: int,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        activation: MoEActivation,
    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
        """Workspace shapes; must cover whichever impl gets dispatched."""
        raise NotImplementedError
    @abstractmethod
    def _select_experts_impl(
        self,
        hidden_states: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
    ) -> mk.FusedMoEPermuteExpertsUnpermute:
        """Pick `self.experts` or `self.fallback_experts` for this call."""
        raise NotImplementedError
    def apply(
        self,
        output: torch.Tensor,
        hidden_states: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        activation: MoEActivation,
        global_num_experts: int,
        expert_map: torch.Tensor | None,
        a1q_scale: torch.Tensor | None,
        a2_scale: torch.Tensor | None,
        workspace13: torch.Tensor,
        workspace2: torch.Tensor,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        apply_router_weight_on_input: bool,
    ):
        """Dispatch to the implementation chosen by `_select_experts_impl`."""
        experts = self._select_experts_impl(hidden_states, w1, w2)
        experts.apply(
            output,
            hidden_states,
            w1,
            w2,
            topk_weights,
            topk_ids,
            activation,
            global_num_experts,
            expert_map,
            a1q_scale,
            a2_scale,
            workspace13,
            workspace2,
            expert_tokens_meta,
            apply_router_weight_on_input,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/fallback.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/oracle/fp8.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from enum import Enum
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm import envs
from vllm._aiter_ops import rocm_aiter_ops
from vllm.config.kernel import MoEBackend
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.all2all_utils import (
maybe_make_prepare_finalize,
)
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEQuantConfig,
fp8_w8a8_moe_quant_config,
fp8_w8a16_moe_quant_config,
)
from vllm.model_executor.layers.fused_moe.flashinfer_trtllm_moe import (
is_supported_config_trtllm_fp8,
)
from vllm.model_executor.layers.quantization.utils.flashinfer_utils import (
FlashinferMoeBackend,
get_flashinfer_moe_backend,
make_fp8_moe_alpha_scales_for_fi,
prepare_fp8_moe_layer_for_fi,
)
from vllm.model_executor.layers.quantization.utils.fp8_utils import (
prepare_fp8_moe_layer_for_deepgemm,
)
from vllm.model_executor.layers.quantization.utils.marlin_utils_fp8 import (
prepare_fp8_moe_layer_for_marlin,
)
from vllm.model_executor.layers.quantization.utils.quant_utils import (
QuantKey,
kFp8Dynamic128Sym,
kFp8Static128BlockSym,
)
from vllm.platforms import current_platform
logger = init_logger(__name__)
class Fp8MoeBackend(Enum):
    """Kernel backends capable of running FP8-quantized fused MoE."""

    NONE = "NONE"
    FLASHINFER_TRTLLM = "FLASHINFER_TRTLLM"
    FLASHINFER_CUTLASS = "FLASHINFER_CUTLASS"
    DEEPGEMM = "DEEPGEMM"
    # BATCHED_* variants presumably consume the batched-experts activation
    # format used by some all2all paths — TODO confirm.
    BATCHED_DEEPGEMM = "BATCHED_DEEPGEMM"
    MARLIN = "MARLIN"
    TRITON = "TRITON"
    BATCHED_TRITON = "BATCHED_TRITON"
    AITER = "AITER"
    VLLM_CUTLASS = "VLLM_CUTLASS"
    BATCHED_VLLM_CUTLASS = "BATCHED_VLLM_CUTLASS"
    XPU = "XPU"
def _get_priority_backends(
    moe_config: FusedMoEConfig,
    weight_key: QuantKey | None,
    activation_key: QuantKey | None,
) -> list[Fp8MoeBackend]:
    """Return the candidate FP8 MoE backends, highest priority first.

    The base ordering is static; platform/config-specific rules may
    reorder it. This function can be extended as more rules are needed.
    """
    priority = [
        Fp8MoeBackend.AITER,
        Fp8MoeBackend.FLASHINFER_TRTLLM,
        Fp8MoeBackend.FLASHINFER_CUTLASS,
        Fp8MoeBackend.DEEPGEMM,
        Fp8MoeBackend.VLLM_CUTLASS,
        Fp8MoeBackend.TRITON,
        Fp8MoeBackend.MARLIN,
        Fp8MoeBackend.BATCHED_DEEPGEMM,
        Fp8MoeBackend.BATCHED_VLLM_CUTLASS,
        Fp8MoeBackend.BATCHED_TRITON,
        Fp8MoeBackend.XPU,
    ]

    is_hopper_block_fp8 = (
        current_platform.is_cuda()
        and current_platform.is_device_capability(90)
        and activation_key == kFp8Dynamic128Sym
        and weight_key == kFp8Static128BlockSym
    )
    # On Hopper for Block Fp8, prefer Triton for TP and FI CUTLASS for EP.
    if is_hopper_block_fp8:
        preferred = (
            Fp8MoeBackend.FLASHINFER_CUTLASS
            if moe_config.moe_parallel_config.ep_size > 1
            else Fp8MoeBackend.TRITON
        )
        priority.remove(preferred)
        priority.insert(0, preferred)

    return priority
def backend_to_kernel_cls(
    backend: Fp8MoeBackend,
) -> type[mk.FusedMoEPermuteExpertsUnpermute]:
    """Map an ``Fp8MoeBackend`` to its modular-kernel experts class.

    Imports happen lazily inside each branch, so selecting one backend
    does not require the other backends' modules to be importable.

    Raises:
        NotImplementedError: for FLASHINFER_TRTLLM, which has no
            modular-kernel experts class (callers return ``None`` for it).
        ValueError: for an unrecognized backend.
    """
    if backend == Fp8MoeBackend.FLASHINFER_TRTLLM:
        # No modular-kernel class; handled specially by callers.
        raise NotImplementedError
    elif backend == Fp8MoeBackend.FLASHINFER_CUTLASS:
        from vllm.model_executor.layers.fused_moe.flashinfer_cutlass_moe import (
            FlashInferExperts,
        )

        return FlashInferExperts
    elif backend == Fp8MoeBackend.DEEPGEMM:
        from vllm.model_executor.layers.fused_moe.triton_deep_gemm_moe import (
            TritonOrDeepGemmExperts,
        )

        return TritonOrDeepGemmExperts
    elif backend == Fp8MoeBackend.BATCHED_DEEPGEMM:
        from vllm.model_executor.layers.fused_moe.batched_deep_gemm_moe import (
            BatchedDeepGemmExperts,
        )

        return BatchedDeepGemmExperts
    elif backend == Fp8MoeBackend.MARLIN:
        from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
            MarlinExperts,
        )

        return MarlinExperts
    elif backend == Fp8MoeBackend.TRITON:
        from vllm.model_executor.layers.fused_moe.fused_moe import (
            TritonExperts,
        )

        return TritonExperts
    elif backend == Fp8MoeBackend.BATCHED_TRITON:
        from vllm.model_executor.layers.fused_moe.fused_batched_moe import (
            BatchedTritonExperts,
        )

        return BatchedTritonExperts
    elif backend == Fp8MoeBackend.AITER:
        from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (
            AiterExperts,
        )

        return AiterExperts
    elif backend == Fp8MoeBackend.VLLM_CUTLASS:
        from vllm.model_executor.layers.fused_moe.triton_cutlass_moe import (
            TritonOrCutlassExperts,
        )

        return TritonOrCutlassExperts
    elif backend == Fp8MoeBackend.BATCHED_VLLM_CUTLASS:
        from vllm.model_executor.layers.fused_moe.cutlass_moe import (
            CutlassBatchedExpertsFp8,
        )

        return CutlassBatchedExpertsFp8
    elif backend == Fp8MoeBackend.XPU:
        from vllm.model_executor.layers.fused_moe.xpu_fused_moe import (
            XPUExpertsFp8,
        )

        return XPUExpertsFp8
    else:
        raise ValueError(f"Unknown FP8 MoE backend: {backend.value}")
def map_fp8_backend(runner_backend: MoEBackend) -> Fp8MoeBackend:
    """Translate the user-facing ``MoEBackend`` name into an ``Fp8MoeBackend``.

    Raises:
        ValueError: if the requested backend has no FP8 implementation.
    """
    supported: dict[str, Fp8MoeBackend] = {
        "triton": Fp8MoeBackend.TRITON,
        "deep_gemm": Fp8MoeBackend.DEEPGEMM,
        "cutlass": Fp8MoeBackend.VLLM_CUTLASS,
        "flashinfer_trtllm": Fp8MoeBackend.FLASHINFER_TRTLLM,
        "flashinfer_cutlass": Fp8MoeBackend.FLASHINFER_CUTLASS,
        "marlin": Fp8MoeBackend.MARLIN,
        "aiter": Fp8MoeBackend.AITER,
    }
    try:
        return supported[runner_backend]
    except KeyError:
        raise ValueError(
            f"moe_backend='{runner_backend}' is not supported for FP8 MoE. "
            f"Expected one of {list(supported.keys())}."
        ) from None
def select_fp8_moe_backend(
    config: FusedMoEConfig,
    weight_key: QuantKey | None,
    activation_key: QuantKey | None,
    allow_vllm_cutlass: bool = False,
) -> tuple[Fp8MoeBackend, type[mk.FusedMoEPermuteExpertsUnpermute] | None]:
    """
    Select the primary FP8 MoE backend.

    Resolution order: LoRA override -> explicit ``moe_backend`` setting ->
    explicit FlashInfer / DeepGEMM / Marlin / AITER env-var overrides ->
    first backend in priority order whose ``is_supported_config`` accepts
    the deployment configuration.

    Returns:
        ``(backend, kernel_cls)`` — ``kernel_cls`` is ``None`` for
        FLASHINFER_TRTLLM (it has no modular-kernel class) and for
        ``Fp8MoeBackend.NONE``.

    Raises:
        ValueError: if an explicitly requested backend does not support
            the configuration.
        NotImplementedError: on CUDA/ROCm when no backend supports the
            configuration.

    Note: Shape-specific fallbacks may still occur at runtime.
    """
    k_cls: type[mk.FusedMoEPermuteExpertsUnpermute] | None = None

    # LoRA deployments always use the Triton backend.
    if config.is_lora_enabled:
        return Fp8MoeBackend.TRITON, backend_to_kernel_cls(Fp8MoeBackend.TRITON)

    # NOTE: the kernels are selected in the following order.
    AVAILABLE_BACKENDS = _get_priority_backends(config, weight_key, activation_key)

    # NOTE(rob): We need to peak into the P/F selection to determine
    # if we are using the batched or standard expert format, which
    # if not ideal. Once we unify TP + DP/EP, we can select P/F first.
    activation_format = (
        mk.FusedMoEActivationFormat.BatchedExperts
        if config.moe_parallel_config.use_batched_activation_format
        else mk.FusedMoEActivationFormat.Standard
    )

    def _make_log_backend(backend: Fp8MoeBackend):
        # Log line naming the chosen backend and the full candidate set.
        available_backend_strs = [b.value for b in AVAILABLE_BACKENDS]
        return (
            f"Using {backend.value} Fp8 MoE backend out "
            f"of potential backends: {available_backend_strs}."
        )

    def _make_log_unsupported(backend: Fp8MoeBackend, reason: str | None) -> str:
        # Message for a backend that rejected the configuration.
        if reason:
            return (
                f"FP8 MoE backend {backend.value} does not support the "
                f"deployment configuration since {reason}."
            )
        else:
            return (
                f"FP8 MoE backend '{backend.value}' does not support the "
                "deployment configuration."
            )

    def _return_or_raise(
        backend: Fp8MoeBackend,
        config: FusedMoEConfig,
        weight_key: QuantKey | None,
        activation_key: QuantKey | None,
        activation_format: mk.FusedMoEActivationFormat,
    ) -> tuple[Fp8MoeBackend, type[mk.FusedMoEPermuteExpertsUnpermute]]:
        # Validate an explicitly requested backend: return it if supported,
        # otherwise raise (no silent fallback for explicit requests).
        k_cls = backend_to_kernel_cls(backend)
        # NOTE(review): is_supported_config is invoked unbound with the class
        # passed explicitly as the first argument — presumably defined without
        # a @classmethod decorator; confirm against the base class.
        supported, reason = k_cls.is_supported_config(
            k_cls, config, weight_key, activation_key, activation_format
        )
        if supported:
            logger.info_once(_make_log_backend(backend), scope="local")
            return backend, k_cls
        raise ValueError(_make_log_unsupported(backend, reason))

    # Handle explicit moe_backend from user.
    runner_backend = config.moe_backend
    if runner_backend != "auto":
        requested_backend = map_fp8_backend(runner_backend)
        # For batched activation format, use batched variants if available.
        if activation_format == mk.FusedMoEActivationFormat.BatchedExperts:
            if requested_backend == Fp8MoeBackend.DEEPGEMM:
                requested_backend = Fp8MoeBackend.BATCHED_DEEPGEMM
            elif requested_backend == Fp8MoeBackend.TRITON:
                requested_backend = Fp8MoeBackend.BATCHED_TRITON
            elif requested_backend == Fp8MoeBackend.VLLM_CUTLASS:
                requested_backend = Fp8MoeBackend.BATCHED_VLLM_CUTLASS
        if (
            requested_backend
            in [
                Fp8MoeBackend.VLLM_CUTLASS,
                Fp8MoeBackend.BATCHED_VLLM_CUTLASS,
            ]
            and not allow_vllm_cutlass
        ):
            raise ValueError(
                "vLLM CUTLASS FP8 MoE backend is disabled for this configuration."
            )
        # Handle FLASHINFER_TRTLLM specially (no kernel class).
        if requested_backend == Fp8MoeBackend.FLASHINFER_TRTLLM:
            supported, reason = is_supported_config_trtllm_fp8(
                config, weight_key, activation_key, activation_format
            )
            if supported:
                logger.info_once(_make_log_backend(requested_backend))
                return requested_backend, None
            raise ValueError(_make_log_unsupported(requested_backend, reason))
        return _return_or_raise(
            requested_backend, config, weight_key, activation_key, activation_format
        )

    # Handle explicit FlashInfer FP8 configuration.
    if envs.is_set("VLLM_USE_FLASHINFER_MOE_FP8"):
        if not envs.VLLM_USE_FLASHINFER_MOE_FP8:
            # If the user rejects FlashInfer remove those backends.
            AVAILABLE_BACKENDS.remove(Fp8MoeBackend.FLASHINFER_TRTLLM)
            AVAILABLE_BACKENDS.remove(Fp8MoeBackend.FLASHINFER_CUTLASS)
        elif envs.is_set("VLLM_FLASHINFER_MOE_BACKEND"):
            # If user is explicit about backend, validate it.
            fi_backend = get_flashinfer_moe_backend()
            if fi_backend == FlashinferMoeBackend.TENSORRT_LLM:
                backend = Fp8MoeBackend.FLASHINFER_TRTLLM
                supported, reason = is_supported_config_trtllm_fp8(
                    config, weight_key, activation_key, activation_format
                )
                if supported:
                    logger.info_once(_make_log_backend(backend))
                    return backend, None
                else:
                    raise ValueError(_make_log_unsupported(backend, reason))
            elif fi_backend == FlashinferMoeBackend.CUTLASS:
                backend = Fp8MoeBackend.FLASHINFER_CUTLASS
                return _return_or_raise(
                    backend, config, weight_key, activation_key, activation_format
                )
            else:
                assert fi_backend == FlashinferMoeBackend.CUTEDSL
                raise ValueError("FlashInfer MaskedGEMM not supported for FP8")
        else:
            # If the user is not explicit about the backend, try both.
            for backend in [
                Fp8MoeBackend.FLASHINFER_TRTLLM,
                Fp8MoeBackend.FLASHINFER_CUTLASS,
            ]:
                if backend == Fp8MoeBackend.FLASHINFER_TRTLLM:
                    # TRTLLM has its own support check and no kernel class.
                    k_cls = None
                    supported, reason = is_supported_config_trtllm_fp8(
                        config,
                        weight_key,
                        activation_key,
                        activation_format,
                    )
                else:
                    k_cls = backend_to_kernel_cls(backend)
                    supported, reason = k_cls.is_supported_config(
                        k_cls,
                        config,
                        weight_key,
                        activation_key,
                        activation_format,
                    )
                if supported:
                    logger.info_once(_make_log_backend(backend), scope="local")
                    return backend, k_cls
                else:
                    logger.debug_once(
                        _make_log_unsupported(backend, reason), scope="local"
                    )
            raise NotImplementedError(
                "Found VLLM_USE_FLASHINFER_MOE_FP8=1, but no "
                "FlashInfer FP8 MoE backend supports the configuration."
            )

    # Handle explicit DeepGEMM FP8 configuration.
    if envs.is_set("VLLM_USE_DEEP_GEMM") or envs.is_set("VLLM_MOE_USE_DEEP_GEMM"):
        # NOTE(review): when only one of these two env vars is explicitly set,
        # the other is read at its default here — confirm the defaults make
        # this condition behave as intended.
        if not envs.VLLM_USE_DEEP_GEMM or not envs.VLLM_MOE_USE_DEEP_GEMM:
            AVAILABLE_BACKENDS.remove(Fp8MoeBackend.DEEPGEMM)
            AVAILABLE_BACKENDS.remove(Fp8MoeBackend.BATCHED_DEEPGEMM)
        else:
            backend = (
                Fp8MoeBackend.DEEPGEMM
                if activation_format == mk.FusedMoEActivationFormat.Standard
                else Fp8MoeBackend.BATCHED_DEEPGEMM
            )
            return _return_or_raise(
                backend, config, weight_key, activation_key, activation_format
            )

    # Handle explicit MARLIN FP8 configuration.
    if envs.VLLM_TEST_FORCE_FP8_MARLIN:
        backend = Fp8MoeBackend.MARLIN
        return _return_or_raise(
            backend, config, weight_key, activation_key, activation_format
        )

    # Handle explicit AITER FP8 configuration.
    if envs.is_set("VLLM_ROCM_USE_AITER") or envs.is_set("VLLM_ROCM_USE_AITER_MOE"):
        if not envs.VLLM_ROCM_USE_AITER or not envs.VLLM_ROCM_USE_AITER_MOE:
            AVAILABLE_BACKENDS.remove(Fp8MoeBackend.AITER)
        else:
            backend = Fp8MoeBackend.AITER
            return _return_or_raise(
                backend, config, weight_key, activation_key, activation_format
            )

    # vLLM CUTLASS backends are opt-in via allow_vllm_cutlass.
    if not allow_vllm_cutlass:
        AVAILABLE_BACKENDS.remove(Fp8MoeBackend.VLLM_CUTLASS)
        AVAILABLE_BACKENDS.remove(Fp8MoeBackend.BATCHED_VLLM_CUTLASS)

    # Select kernels in order of backend.
    for backend in AVAILABLE_BACKENDS:
        if backend == Fp8MoeBackend.FLASHINFER_TRTLLM:
            k_cls = None
            supported, reason = is_supported_config_trtllm_fp8(
                config,
                weight_key,
                activation_key,
                activation_format,
            )
        else:
            k_cls = backend_to_kernel_cls(backend)
            supported, reason = k_cls.is_supported_config(
                k_cls,
                config,
                weight_key,
                activation_key,
                activation_format,
            )
        if supported:
            logger.info_once(_make_log_backend(backend), scope="local")
            return backend, k_cls
        else:
            logger.debug_once(_make_log_unsupported(backend, reason), scope="local")

    # TODO(rob): per discussion with TPU team, we need a way to register
    # MoE backends by OOT plugins, rather than having an explicit list
    # of AVAILABLE_BACKENDS. Enabling returning `Fp8MoeBackend.NONE` is
    # a temporary measure until these register APIs are complete.
    if current_platform.is_cuda() or current_platform.is_rocm():
        raise NotImplementedError(
            "No FP8 MoE backend supports the deployment configuration."
        )
    return Fp8MoeBackend.NONE, None
def convert_to_fp8_moe_kernel_format(
    fp8_backend: Fp8MoeBackend,
    layer: torch.nn.Module,
    w13: torch.Tensor,
    w2: torch.Tensor,
    w13_scale: torch.Tensor,
    w2_scale: torch.Tensor,
    w13_input_scale: torch.Tensor | None,
    w2_input_scale: torch.Tensor | None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Repack MoE weights/scales into the layout the selected backend expects.

    Returns the (possibly replaced) ``(w13, w2, w13_scale, w2_scale)``.
    Triton/CUTLASS/XPU backends consume the tensors unchanged.
    """
    # Block quantization is signalled by this attribute on the layer.
    block_quant = hasattr(layer, "weight_block_size")
    if fp8_backend in [Fp8MoeBackend.DEEPGEMM, Fp8MoeBackend.BATCHED_DEEPGEMM]:
        # DeepGEMM paths require block-quantized weights.
        assert block_quant
        w13, w2, w13_scale, w2_scale = prepare_fp8_moe_layer_for_deepgemm(
            w13,
            w2,
            w13_scale,
            w2_scale,
            tuple(layer.weight_block_size),
        )
    elif fp8_backend == Fp8MoeBackend.AITER:
        w13, w2 = rocm_aiter_ops.shuffle_weights(w13, w2)
    elif fp8_backend == Fp8MoeBackend.MARLIN:
        w13, w2, w13_scale, w2_scale = prepare_fp8_moe_layer_for_marlin(
            layer,
            w13,
            w2,
            w13_scale,
            w2_scale,
        )
    elif fp8_backend in [
        Fp8MoeBackend.FLASHINFER_CUTLASS,
        Fp8MoeBackend.FLASHINFER_TRTLLM,
    ]:
        # NOTE: only w13_scale is returned here; w2_scale passes through
        # unchanged.
        w13, w2, w13_scale = prepare_fp8_moe_layer_for_fi(
            layer=layer,
            w13=w13,
            w2=w2,
            w13_scale=w13_scale,
            w13_input_scale=w13_input_scale,
            w2_scale=w2_scale,
            w2_input_scale=w2_input_scale,
            is_trtllm=(fp8_backend == Fp8MoeBackend.FLASHINFER_TRTLLM),
        )
    else:
        # Remaining backends use the standard layout as-is; anything else
        # is an error.
        if fp8_backend not in [
            Fp8MoeBackend.TRITON,
            Fp8MoeBackend.BATCHED_TRITON,
            Fp8MoeBackend.VLLM_CUTLASS,
            Fp8MoeBackend.BATCHED_VLLM_CUTLASS,
            Fp8MoeBackend.XPU,
        ]:
            raise ValueError(f"Unsupported FP8 MoE backend: {fp8_backend.value}")
    return w13, w2, w13_scale, w2_scale
def make_fp8_moe_quant_config(
    fp8_backend: Fp8MoeBackend,
    w1_scale: torch.Tensor,
    w2_scale: torch.Tensor,
    a1_scale: torch.Tensor | None,
    a2_scale: torch.Tensor | None,
    block_shape: list[int] | None = None,
    per_act_token_quant: bool = False,
    per_out_ch_quant: bool = False,
) -> FusedMoEQuantConfig | None:
    """Build the FusedMoEQuantConfig holding runtime scales for a backend.

    The config is consumed by the Modular Kernel abstraction at runtime.
    Certain kernels (e.g. FlashInfer CUTLASS) need special configs to
    handle non-standard inputs to their kernel interfaces.

    In a future PR, we will have this function should be a method of the
    modular kernel itself.
    """
    # TRTLLM does not go through the Modular Kernel abstraction yet.
    if fp8_backend == Fp8MoeBackend.FLASHINFER_TRTLLM:
        return None

    # MARLIN runs a mixed-precision W8A16 config.
    if fp8_backend == Fp8MoeBackend.MARLIN:
        return fp8_w8a16_moe_quant_config(
            w1_scale=w1_scale,
            w2_scale=w2_scale,
            block_shape=block_shape,
        )

    fi_per_tensor = (
        fp8_backend == Fp8MoeBackend.FLASHINFER_CUTLASS and block_shape is None
    )
    if not fi_per_tensor:
        # Every remaining backend takes the standard W8A8 config.
        return fp8_w8a8_moe_quant_config(
            w1_scale=w1_scale,
            w2_scale=w2_scale,
            a1_scale=a1_scale,
            a2_scale=a2_scale,
            block_shape=block_shape,
            per_act_token_quant=per_act_token_quant,
            per_out_ch_quant=per_out_ch_quant,
        )

    # FlashInfer CUTLASS per-tensor uses a single dq scale
    # (alpha = w_scale * a_scale) and inverse a2 scale.
    assert a1_scale is not None and a2_scale is not None
    alpha1, alpha2 = make_fp8_moe_alpha_scales_for_fi(
        w1_scale,
        a1_scale,
        w2_scale,
        a2_scale,
    )
    return fp8_w8a8_moe_quant_config(
        w1_scale=w1_scale,
        w2_scale=w2_scale,
        a1_scale=a1_scale,
        a2_scale=a2_scale,
        a1_gscale=(1.0 / a1_scale),
        a2_gscale=(1.0 / a2_scale),
        g1_alphas=alpha1,
        g2_alphas=alpha2,
    )
def make_fp8_moe_kernel(
    moe_quant_config: FusedMoEQuantConfig,
    moe_config: FusedMoEConfig,
    experts_cls: type[mk.FusedMoEPermuteExpertsUnpermute],
    fp8_backend: Fp8MoeBackend,
    routing_tables: tuple[torch.Tensor, torch.Tensor, torch.Tensor] | None = None,
    shared_experts: torch.nn.Module | None = None,
) -> mk.FusedMoEModularKernel:
    """Assemble a ``FusedMoEModularKernel`` from a prepare/finalize object
    and an instance of ``experts_cls``.

    Asserts that a prepare/finalize object could be constructed for the
    given MoE configuration.
    """
    # Create Prepare/Finalize.
    prepare_finalize = maybe_make_prepare_finalize(
        moe=moe_config,
        quant_config=moe_quant_config,
        routing_tables=routing_tables,
        allow_new_interface=True,
    )
    assert prepare_finalize is not None
    logger.info_once("Using %s", prepare_finalize.__class__.__name__, scope="local")

    # Create Experts. Batched-format experts additionally take the per-rank
    # token budget and the number of dispatchers.
    if prepare_finalize.activation_format == mk.FusedMoEActivationFormat.BatchedExperts:
        max_num_tokens = prepare_finalize.max_num_tokens_per_rank()
        assert max_num_tokens is not None
        experts = experts_cls(
            moe_config=moe_config,
            quant_config=moe_quant_config,
            max_num_tokens=max_num_tokens,
            num_dispatchers=prepare_finalize.num_dispatchers(),
        )
    else:
        experts = experts_cls(
            moe_config=moe_config,
            quant_config=moe_quant_config,
        )

    # NOTE(rob): we only want the mk to control the shared_expert
    # if using all2all (for SBO). bnell is making this explict in
    # the new MoE runner class.
    kernel = mk.FusedMoEModularKernel(
        prepare_finalize,
        experts,
        shared_experts=(
            shared_experts
            if moe_config.moe_parallel_config.use_all2all_kernels
            else None
        ),
        moe_parallel_config=moe_config.moe_parallel_config,
        # NOTE(review): inplace is force-disabled for FLASHINFER_CUTLASS —
        # presumably its kernel cannot write its output in place; confirm.
        inplace=(
            not moe_config.disable_inplace
            and fp8_backend != Fp8MoeBackend.FLASHINFER_CUTLASS
        ),
    )
    return kernel
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/oracle/fp8.py",
"license": "Apache License 2.0",
"lines": 543,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/triton_cutlass_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.cutlass_moe import CutlassExpertsFp8
from vllm.model_executor.layers.fused_moe.fallback import FallbackExperts
from vllm.model_executor.layers.fused_moe.fused_moe import TritonExperts
from vllm.platforms import current_platform
class TritonOrCutlassExperts(FallbackExperts):
    """Cutlass with fallback to Triton for low latency shapes on SM100.

    The same "small batch on SM100" predicate must be applied both when
    sizing workspaces and when selecting the experts implementation; it is
    centralized in ``_pick_experts`` so the two call sites cannot drift.
    """

    # Token-count threshold (inclusive) at or below which the Triton
    # fallback is used on SM100.
    _SM100_SMALL_BATCH_THRESHOLD = 8

    def __init__(
        self,
        moe_config: FusedMoEConfig,
        quant_config: FusedMoEQuantConfig,
    ):
        # True on SM100-class devices, where small batches fall back to Triton.
        self.is_sm100 = current_platform.has_device_capability(100)
        super().__init__(
            experts=CutlassExpertsFp8(moe_config, quant_config),
            fallback_experts=TritonExperts(moe_config, quant_config),
        )

    @staticmethod
    def get_clses() -> tuple[
        type[mk.FusedMoEPermuteExpertsUnpermute],
        type[mk.FusedMoEPermuteExpertsUnpermute],
    ]:
        """Return the (primary, fallback) experts classes."""
        return (CutlassExpertsFp8, TritonExperts)

    def _pick_experts(self, num_tokens: int) -> mk.FusedMoEPermuteExpertsUnpermute:
        """Return the experts impl to use for a batch of ``num_tokens``."""
        # Small batch fallback for sm100.
        if self.is_sm100 and num_tokens <= self._SM100_SMALL_BATCH_THRESHOLD:
            return self.fallback_experts
        return self.experts

    def workspace_shapes(
        self,
        M: int,
        N: int,
        K: int,
        topk: int,
        global_num_experts: int,
        local_num_experts: int,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        activation: MoEActivation,
    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
        """Delegate workspace sizing to the impl that will run for M tokens."""
        return self._pick_experts(M).workspace_shapes(
            M,
            N,
            K,
            topk,
            global_num_experts,
            local_num_experts,
            expert_tokens_meta,
            activation,
        )

    def _select_experts_impl(
        self,
        hidden_states: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
    ) -> mk.FusedMoEPermuteExpertsUnpermute:
        """Pick the experts impl based on the incoming batch size."""
        return self._pick_experts(hidden_states.shape[0])
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/triton_cutlass_moe.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/exceptions.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Custom exceptions for vLLM."""
from typing import Any
class VLLMValidationError(ValueError):
"""vLLM-specific validation error for request validation failures.
Args:
message: The error message describing the validation failure.
parameter: Optional parameter name that failed validation.
value: Optional value that was rejected during validation.
"""
def __init__(
self,
message: str,
*,
parameter: str | None = None,
value: Any = None,
) -> None:
super().__init__(message)
self.parameter = parameter
self.value = value
def __str__(self):
base = super().__str__()
extras = []
if self.parameter is not None:
extras.append(f"parameter={self.parameter}")
if self.value is not None:
extras.append(f"value={self.value}")
return f"{base} ({', '.join(extras)})" if extras else base
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/exceptions.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/quantization/test_fp8_min_max_helper.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for the get_fp8_min_max() helper function.
These tests verify the FP8 min/max value logic for both standard
and fnuz (ROCm MI300) dtype handling.
"""
from unittest.mock import patch
import pytest
import torch
from vllm.model_executor.layers.quantization.utils.quant_utils import (
get_fp8_min_max,
)
class TestGetFp8MinMax:
    """Unit tests for the get_fp8_min_max() helper."""

    @patch("vllm.model_executor.layers.quantization.utils.quant_utils.current_platform")
    def test_standard_fp8_platform(self, platform_mock):
        """A standard FP8 platform must report PyTorch's finfo bounds."""
        platform_mock.is_fp8_fnuz.return_value = False
        platform_mock.fp8_dtype.return_value = torch.float8_e4m3fn

        lo, hi = get_fp8_min_max()

        info = torch.finfo(torch.float8_e4m3fn)
        # For e4m3fn the representable maximum is 448.0.
        assert hi == info.max, f"Expected finfo.max={info.max}, got {hi}"
        assert lo == info.min, f"Expected finfo.min={info.min}, got {lo}"

    @patch("vllm.model_executor.layers.quantization.utils.quant_utils.current_platform")
    def test_fnuz_platform_returns_224(self, platform_mock):
        """An fnuz platform (ROCm MI300) must clamp to +/-224.0."""
        platform_mock.is_fp8_fnuz.return_value = True

        lo, hi = get_fp8_min_max()

        # fnuz on ROCm MI300 should yield 224.0, not 240.0.
        assert hi == 224.0, f"Expected 224.0 for fnuz platform, got {hi}"
        assert lo == -224.0, f"Expected -224.0 for fnuz platform, got {lo}"

    @patch("vllm.model_executor.layers.quantization.utils.quant_utils.current_platform")
    def test_non_fnuz_platform_uses_finfo(self, platform_mock):
        """A non-fnuz platform must fall through to finfo values."""
        platform_mock.is_fp8_fnuz.return_value = False
        platform_mock.fp8_dtype.return_value = torch.float8_e4m3fn

        lo, hi = get_fp8_min_max()

        info = torch.finfo(torch.float8_e4m3fn)
        assert hi == info.max, (
            f"Non-fnuz platform should use finfo.max={info.max}, got {hi}"
        )
        assert lo == info.min, (
            f"Non-fnuz platform should use finfo.min={info.min}, got {lo}"
        )
# Allow running this test module directly without invoking pytest explicitly.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/quantization/test_fp8_min_max_helper.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:benchmarks/kernels/cpu/benchmark_cpu_attn.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import functools
import time
import numpy as np
import torch
from vllm._custom_ops import (
cpu_attention_with_kv_cache,
cpu_attn_get_scheduler_metadata,
cpu_attn_reshape_and_cache,
)
from vllm.platforms import CpuArchEnum, current_platform
from vllm.utils.argparse_utils import FlexibleArgumentParser
from vllm.utils.torch_utils import STR_DTYPE_TO_TORCH_DTYPE, set_random_seed
from vllm.v1.attention.backends.cpu_attn import CPUAttentionBackend, _get_attn_isa
def get_attn_isa(
    block_size: int | None = None,
    dtype: torch.dtype | None = None,
):
    """Choose the CPU attention ISA.

    When both ``block_size`` and ``dtype`` are provided, defer to the
    backend's own selection; otherwise pick a default from CPU features.
    """
    if block_size and dtype:
        return _get_attn_isa(dtype, block_size)
    # No full config available: decide from the CPU's capabilities.
    if current_platform.get_cpu_architecture() == CpuArchEnum.ARM:
        return "neon"
    if torch._C._cpu._is_amx_tile_supported():
        return "amx"
    return "vec"
# rand number generation takes too much time, cache rand tensors
@functools.lru_cache(maxsize=128, typed=False)
def tensor_cache(
    elem_num: int,
    dtype: torch.dtype,
) -> torch.Tensor:
    """Return a (cached) 1-D random-normal tensor of ``elem_num`` elements."""
    return torch.randn(elem_num, dtype=dtype)
@torch.inference_mode()
def main(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: int | None = None,
    dtype: torch.dtype = torch.bfloat16,
    block_size: int = 128,
    num_blocks: int = 4096,
    use_sink: bool = False,
    enable_kv_split: bool = False,
    isa: str | None = None,
    seed: int = 0,
    iters: int = 20,
) -> None:
    """Benchmark ``cpu_attention_with_kv_cache`` for the given batch shape.

    Args:
        seq_lens: per-sequence (query_len, kv_len) pairs.
        num_heads: (num_query_heads, num_kv_heads); query heads must be a
            multiple of kv heads (GQA).
        head_size: attention head dimension.
        sliding_window: window width, or None to disable sliding window.
        use_sink: enable per-head attention-sink logits (s_aux).
        enable_kv_split: pass KV-split scheduling to the metadata builder.
        isa: explicit ISA string; auto-detected via get_attn_isa when None.
        iters: number of timed benchmark iterations (after 5 warmup runs).

    Prints min/max/mean/std/median latency in milliseconds.
    """
    set_random_seed(seed)

    num_seqs = len(seq_lens)
    query_lens = [x[0] for x in seq_lens]
    kv_lens = [x[1] for x in seq_lens]
    num_query_heads = num_heads[0]
    num_kv_heads = num_heads[1]
    assert num_query_heads % num_kv_heads == 0
    max_kv_len = max(kv_lens)
    # (left, right) window extents; (-1, -1) disables the sliding window.
    window_size = (sliding_window - 1, 0) if sliding_window is not None else (-1, -1)
    scale = head_size**-0.5
    token_num = sum(query_lens)

    if isa is None:
        isa = get_attn_isa(block_size, dtype)

    # Per-query-head sink logits when enabled.
    s_aux = (
        15 * torch.rand((num_query_heads,), dtype=torch.bfloat16) if use_sink else None
    )

    query = tensor_cache(
        elem_num=token_num * num_query_heads * head_size,
        dtype=dtype,
    )
    query = query.view(
        token_num,
        num_query_heads,
        head_size,
    )
    key_value = tensor_cache(
        elem_num=2 * num_blocks * num_kv_heads * block_size * head_size,
        dtype=dtype,
    )
    key_value = key_value.view(
        2,
        num_blocks,
        block_size,
        num_kv_heads,
        head_size,
    )
    key_cache, value_cache = key_value.unbind(0)

    # KV cache for CPU attention
    packed_key_cache = torch.empty(
        num_blocks, num_kv_heads, block_size, head_size, dtype=dtype
    )
    packed_value_cache = torch.empty_like(packed_key_cache)

    # Cumulative query offsets: [0, q0, q0+q1, ...].
    cu_query_lens = torch.tensor([0] + query_lens, dtype=torch.int32).cumsum(
        dim=0, dtype=torch.int32
    )
    kv_lens_tensor = torch.tensor(kv_lens, dtype=torch.int32)
    max_num_blocks_per_seq = (max_kv_len + block_size - 1) // block_size
    # Random block assignment per sequence (contents don't affect timing).
    block_tables = torch.randint(
        0, num_blocks, (num_seqs, max_num_blocks_per_seq), dtype=torch.int32
    )

    # use reshape_and_cache to pack key_cache and value_cache
    slot_mapping = torch.arange(0, num_blocks * block_size, dtype=torch.int64)
    cpu_attn_reshape_and_cache(
        key=key_cache.view(-1, num_kv_heads, head_size),
        value=value_cache.view(-1, num_kv_heads, head_size),
        key_cache=packed_key_cache,
        value_cache=packed_value_cache,
        slot_mapping=slot_mapping,
        isa=isa,
    )

    metadata = cpu_attn_get_scheduler_metadata(
        num_reqs=num_seqs,
        num_heads=num_query_heads,
        num_kv_heads=num_kv_heads,
        head_dim=head_size,
        seq_lens=kv_lens_tensor,
        dtype=dtype,
        query_start_loc=cu_query_lens,
        causal=True,
        sliding_window_size=sliding_window if sliding_window is not None else -1,
        isa=isa,
        enable_kv_split=enable_kv_split,
    )

    out_with_split = torch.empty_like(query)

    def run_benchmark(iters: int) -> list[float]:
        # Time `iters` kernel invocations; returns per-iteration latency (ms).
        times = []
        for _ in range(iters):
            start_time = time.perf_counter_ns()
            cpu_attention_with_kv_cache(
                query=query,
                key_cache=packed_key_cache,
                value_cache=packed_value_cache,
                output=out_with_split,
                query_start_loc=cu_query_lens,
                seq_lens=kv_lens_tensor,
                scale=scale,
                causal=True,
                alibi_slopes=None,
                sliding_window=window_size,
                block_table=block_tables,
                softcap=0,
                scheduler_metadata=metadata,
                s_aux=s_aux,
            )
            end_time = time.perf_counter_ns()
            times.append((end_time - start_time) / 1e6)
        return times

    # warmup
    run_benchmark(5)

    # benchmark
    times = run_benchmark(iters)

    time_min = min(times)
    time_max = max(times)
    time_mean = np.mean(times)
    time_std = np.std(times)

    print("\tmin (ms) = ", time_min)
    print("\tmax (ms) = ", time_max)
    print("\tmean (ms) = ", time_mean)
    print("\tstd = ", time_std)
    print("\tmedian (ms) = ", np.median(times))
def generate_seq_lens(
    batch_size: int,
    q_len_min: int,
    q_len_max: int,
    kv_len_min: int,
    kv_len_max: int,
    seed: int = 0,
) -> list[tuple[int, int]]:
    """Sample ``batch_size`` (query_len, kv_len) pairs with q <= kv.

    Results are deterministic for a given ``seed``.
    """
    assert 1 <= q_len_min <= q_len_max
    assert 1 <= kv_len_min <= kv_len_max
    assert kv_len_max >= q_len_min

    rng = torch.Generator(device="cpu").manual_seed(seed)

    def _draw(lo: int, hi: int) -> int:
        # randint's upper bound is exclusive, hence hi + 1.
        return torch.randint(lo, hi + 1, (1,), generator=rng).item()

    pairs: list[tuple[int, int]] = []
    for _ in range(batch_size):
        # Draw kv first, then bound q by it so q <= kv always holds.
        kv_len = _draw(max(kv_len_min, q_len_min), kv_len_max)
        q_len = _draw(q_len_min, min(q_len_max, kv_len))
        pairs.append((q_len, kv_len))
    return pairs
# CLI entry point: parse benchmark parameters, sample a batch shape, and run
# the benchmark.
if __name__ == "__main__":
    parser = FlexibleArgumentParser(description="Benchmark the paged attention kernel.")
    parser.add_argument("--batch-size", type=int, default=64)
    # min == max pins the length; otherwise lengths are sampled per sequence.
    parser.add_argument("--q-len-min", type=int, default=512)
    parser.add_argument("--q-len-max", type=int, default=512)
    parser.add_argument("--kv-len-min", type=int, default=512)
    parser.add_argument("--kv-len-max", type=int, default=512)
    parser.add_argument("--num-blocks", type=int, default=4096)
    parser.add_argument("--sliding-window", type=int, default=None)
    parser.add_argument("--num-query-heads", type=int, default=32)
    parser.add_argument("--num-kv-heads", type=int, default=8)
    parser.add_argument(
        "--head-size",
        type=int,
        choices=CPUAttentionBackend.get_supported_head_sizes(),
        default=128,
    )
    parser.add_argument("--enable-kv-split", action="store_true")
    parser.add_argument("--block-size", type=int, choices=[32, 64, 128], default=128)
    parser.add_argument(
        "--dtype", type=str, choices=["half", "bfloat16", "float"], default="bfloat16"
    )
    parser.add_argument("--use-sink", action="store_true")
    # Default None lets get_attn_isa() pick based on CPU capabilities.
    parser.add_argument(
        "--isa", type=str, choices=["vec", "neon", "amx", "vec16"], default=None
    )
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--iters", type=int, default=20)
    args = parser.parse_args()
    print(args)

    seq_lens = generate_seq_lens(
        args.batch_size,
        args.q_len_min,
        args.q_len_max,
        args.kv_len_min,
        args.kv_len_max,
        args.seed,
    )
    print("batch (query len, kv len) = ", seq_lens)
    main(
        seq_lens=seq_lens,
        num_heads=(args.num_query_heads, args.num_kv_heads),
        head_size=args.head_size,
        sliding_window=args.sliding_window,
        dtype=STR_DTYPE_TO_TORCH_DTYPE[args.dtype],
        block_size=args.block_size,
        num_blocks=args.num_blocks,
        use_sink=args.use_sink,
        enable_kv_split=args.enable_kv_split,
        isa=args.isa
        if args.isa is not None
        else get_attn_isa(args.block_size, STR_DTYPE_TO_TORCH_DTYPE[args.dtype]),
        seed=args.seed,
        iters=args.iters,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "benchmarks/kernels/cpu/benchmark_cpu_attn.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/rocm/aiter/test_mla_fp8_support_check.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for AITER MLA FP8 support detection.
These tests verify that the _check_aiter_mla_fp8_support() function
correctly handles various error conditions without crashing.
"""
from unittest.mock import patch
import pytest
class TestAiterMlaFp8SupportCheck:
    """Test cases for _check_aiter_mla_fp8_support() function.

    The five error-handling tests share one body (``_assert_detection_fails``)
    so they all assert the same contract: the check returns False instead of
    raising, and the negative result is cached.
    """

    def setup_method(self):
        """Reset the global cache before each test."""
        import vllm._aiter_ops as aiter_ops

        aiter_ops._AITER_MLA_SUPPORTS_FP8 = None

    def _assert_detection_fails(self, error: BaseException) -> None:
        """Force inspect.signature to raise *error* and assert that
        _check_aiter_mla_fp8_support() returns False (without raising)
        and caches the negative result."""
        import vllm._aiter_ops as aiter_ops
        from vllm._aiter_ops import _check_aiter_mla_fp8_support

        aiter_ops._AITER_MLA_SUPPORTS_FP8 = None
        with patch(
            "vllm._aiter_ops.inspect.signature",
            side_effect=error,
        ):
            # Should return False without raising.
            assert _check_aiter_mla_fp8_support() is False
        # Cache should be set to False.
        assert aiter_ops._AITER_MLA_SUPPORTS_FP8 is False

    @patch("vllm._aiter_ops.is_aiter_found_and_supported", return_value=True)
    def test_import_error_handling(self, mock_supported):
        """Test that ImportError is handled gracefully."""
        self._assert_detection_fails(ImportError("No module"))

    @patch("vllm._aiter_ops.is_aiter_found_and_supported", return_value=True)
    def test_module_not_found_error_handling(self, mock_supported):
        """Test that ModuleNotFoundError is handled gracefully."""
        self._assert_detection_fails(ModuleNotFoundError("Module not found"))

    @patch("vllm._aiter_ops.is_aiter_found_and_supported", return_value=True)
    def test_attribute_error_handling(self, mock_supported):
        """Test that AttributeError is handled gracefully."""
        self._assert_detection_fails(AttributeError("No attribute"))

    @patch("vllm._aiter_ops.is_aiter_found_and_supported", return_value=True)
    def test_value_error_handling(self, mock_supported):
        """Test that ValueError is handled gracefully (no signature)."""
        self._assert_detection_fails(ValueError("No signature"))

    @patch("vllm._aiter_ops.is_aiter_found_and_supported", return_value=True)
    def test_type_error_handling(self, mock_supported):
        """Test that TypeError is handled gracefully (not callable)."""
        self._assert_detection_fails(TypeError("Not a callable"))

    @patch("vllm._aiter_ops.is_aiter_found_and_supported", return_value=True)
    def test_result_caching(self, mock_supported):
        """Test that the result is cached after first check."""
        import vllm._aiter_ops as aiter_ops

        # Set cache to True
        aiter_ops._AITER_MLA_SUPPORTS_FP8 = True

        from vllm._aiter_ops import _check_aiter_mla_fp8_support

        # Should return cached value without re-checking
        result = _check_aiter_mla_fp8_support()
        assert result is True
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/rocm/aiter/test_mla_fp8_support_check.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/models/multimodal/generation/test_nemotron_parse.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Sequence
import pytest
from transformers import AutoModel
from tests.models.utils import check_logprobs_close
from vllm.assets.image import ImageAsset
from ....conftest import HfRunner, PromptImageInput, VllmRunner
from ....utils import create_new_process_for_each_test
# Single document-page asset rendered as an RGB image; shared by all prompts.
IMAGE = ImageAsset("paper-11").pil_image_ext(ext="png").convert("RGB")
# Task prompt: the special tokens request bbox/class prediction and markdown
# output from the Nemotron Parse decoder.
PROMPT = "</s><s><predict_bbox><predict_classes><output_markdown>"
def run_test(
    hf_runner: type[HfRunner],
    vllm_runner: type[VllmRunner],
    inputs: Sequence[tuple[list[str], PromptImageInput]],
    model: str,
    *,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
) -> None:
    """Verify that the inference result is the same between hf and vllm.

    Args:
        hf_runner: Factory for the HuggingFace reference runner.
        vllm_runner: Factory for the vLLM runner under test.
        inputs: Test cases, each a (prompts, images) pair.
        model: HuggingFace model identifier.
        dtype: Model dtype string (e.g. "bfloat16").
        max_tokens: Maximum number of tokens generated per prompt.
        num_logprobs: Number of top logprobs compared per position.
    """
    # NOTE(review): the vLLM model is created and torn down before the HF
    # model is loaded — presumably so both models are never resident in
    # device memory at the same time. Keep this ordering.
    with vllm_runner(
        model,
        dtype=dtype,
        max_num_seqs=64,
        limit_mm_per_prompt={"image": 1},
        trust_remote_code=True,
    ) as vllm_model:
        vllm_outputs_per_case = [
            vllm_model.generate_greedy_logprobs(
                prompts,
                max_tokens,
                num_logprobs=num_logprobs,
                images=images,
            )
            for prompts, images in inputs
        ]
    with hf_runner(model, dtype=dtype, auto_cls=AutoModel) as hf_model:
        hf_outputs_per_case = [
            hf_model.generate_greedy_logprobs_limit(
                prompts,
                max_tokens,
                num_logprobs=num_logprobs,
                images=images,
                use_cache=False,  # HF Nemotron Parse crashes here without this
            )
            for prompts, images in inputs
        ]
    # Compare each case's outputs; logprob-based closeness tolerates small
    # numerical differences between the two implementations.
    for hf_outputs, vllm_outputs in zip(hf_outputs_per_case, vllm_outputs_per_case):
        check_logprobs_close(
            outputs_0_lst=hf_outputs,
            outputs_1_lst=vllm_outputs,
            name_0="hf",
            name_1="vllm",
        )
@pytest.mark.core_model
@pytest.mark.parametrize("model", ["nvidia/NVIDIA-Nemotron-Parse-v1.1"])
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("num_logprobs", [5])
@create_new_process_for_each_test("spawn")
def test_models(
    hf_runner, vllm_runner, model: str, dtype: str, num_logprobs: int
) -> None:
    """Compare vLLM vs. HF greedy logprobs on a batch of identical
    image+prompt inputs (10 copies of the same case)."""
    run_test(
        hf_runner,
        vllm_runner,
        inputs=[
            (
                [PROMPT] * 10,
                [IMAGE] * 10,
            ),
        ],
        model=model,
        dtype=dtype,
        max_tokens=100,
        num_logprobs=num_logprobs,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/multimodal/generation/test_nemotron_parse.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/nemotron_parse.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
#
# Adapted from https://github.com/amalad/vllm/blob/nemotron_parse/vllm/model_executor/models/nemotron_parse.py
# that's based on https://huggingface.co/nvidia/NVIDIA-Nemotron-Parse-v1.1/blob/main/hf_nemotron_parse_modeling.py
#
# Bart classes based on old vLLM codebase:
# https://github.com/vllm-project/vllm/blob/v0.10.2/vllm/model_executor/models/bart.py
import math
from collections.abc import Iterable, Mapping, Sequence
from typing import Annotated, Literal
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from PIL import Image
from timm.data.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from torchvision import transforms as T
from transformers import (
BartConfig,
BatchFeature,
PretrainedConfig,
TensorType,
)
from vllm.config import CacheConfig, VllmConfig
from vllm.config.lora import LoRAConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.linear import ColumnParallelLinear, RowParallelLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization.base_config import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.interfaces import (
MultiModalEmbeddings,
SupportsMultiModal,
)
from vllm.model_executor.models.radio import RadioModel
from vllm.model_executor.models.whisper import WhisperAttention, WhisperCrossAttention
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import MultiModalDataItems
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseProcessingInfo,
EncDecMultiModalProcessor,
PromptReplacement,
PromptUpdate,
)
from vllm.renderers import TokenizeParams
from vllm.tokenizers import TokenizerLike
from vllm.transformers_utils.configs.radio import RadioConfig
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from vllm.v1.attention.backend import AttentionType
logger = init_logger(__name__)
DEFAULT_FINAL_IMAGE_SIZE = (2048, 1648)
class BartScaledWordEmbedding(VocabParallelEmbedding):
    """
    This module overrides VocabParallelEmbedding's
    forward by multiplying with embeddings scale.
    """

    def __init__(
        self, num_embeddings: int, embedding_dim: int, embed_scale: float = 1.0
    ):
        super().__init__(num_embeddings, embedding_dim)
        # Constant multiplier applied to every lookup; set to sqrt(d_model)
        # when config.scale_embedding is enabled (see MBartDecoderNoPos).
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
        # Scaled embedding lookup; BartParallelLMHead applies the inverse.
        return super().forward(input_ids) * self.embed_scale
class BartParallelLMHead(ParallelLMHead):
    """
    This module overrides ParallelLMHead's
    forward by dividing by embeddings scale,
    yielding effectively the inverse of
    BartScaledWordEmbedding
    """

    def __init__(
        self, num_embeddings: int, embedding_dim: int, embed_scale: float = 1.0
    ):
        super().__init__(num_embeddings, embedding_dim)
        # Same scale as the tied input embedding; divided out on the way up.
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
        return super().forward(input_ids) / self.embed_scale
class BartDecoderLayer(nn.Module):
    """Post-norm BART decoder layer: causal self-attention, cross-attention
    over the encoder output, and a two-layer FFN, each followed by a
    residual add and LayerNorm."""

    def __init__(
        self,
        config: BartConfig,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.embed_dim = config.d_model
        # Decoder (causal, KV-cached) self-attention, reusing Whisper's
        # attention implementation.
        self.self_attn = WhisperAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            attn_type=AttentionType.DECODER,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        self.activation_fn = get_act_fn(config.activation_function)
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        """
        afeldman-nm: personally I would call this "cross-attention",
        however I left the name as "encoder_attn" to maintain consistency
        with the name of the pretrained weights.
        """
        self.encoder_attn = WhisperCrossAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.encoder_attn",
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        ffn_hidden_size = self.embed_dim
        # NOTE(review): this uses encoder_ffn_dim for the *decoder* FFN
        # (carried over from the old vLLM BART port). For stock BART configs
        # the two dims are equal, but confirm against this model's config.
        ffn_intermediate_size = config.encoder_ffn_dim
        ffn_has_bias = True
        self.fc1 = ColumnParallelLinear(
            ffn_hidden_size,
            ffn_intermediate_size,
            bias=ffn_has_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.fc1",
        )
        self.fc2 = RowParallelLinear(
            ffn_intermediate_size,
            ffn_hidden_size,
            bias=ffn_has_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.fc2",
        )
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        decoder_hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
    ) -> torch.Tensor:
        r"""
        Args:
            decoder_hidden_states: torch.Tensor of *decoder* input embeddings.
            encoder_hidden_states: torch.Tensor of *encoder* input embeddings.
        Returns:
            Decoder layer output torch.Tensor
        """
        residual = decoder_hidden_states
        # Self Attention (post-norm: LayerNorm after the residual add)
        hidden_states = self.self_attn(hidden_states=decoder_hidden_states)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Cross-Attention Block
        residual = hidden_states
        hidden_states = self.encoder_attn(
            hidden_states=hidden_states,
            encoder_hidden_states=encoder_hidden_states,
        )
        hidden_states = residual + hidden_states
        hidden_states = self.encoder_attn_layer_norm(hidden_states)
        # Fully Connected
        residual = hidden_states
        fc1_out, _ = self.fc1(hidden_states)
        hidden_states = self.activation_fn(fc1_out)
        hidden_states, _ = self.fc2(hidden_states)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        return hidden_states
class MBartDecoderLayer(BartDecoderLayer):
    """Pre-norm (mBART-style) variant of BartDecoderLayer.

    Reuses the parent's submodules but applies each LayerNorm *before* its
    sub-block instead of after the residual add.
    """

    def forward(
        self,
        decoder_hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
    ) -> torch.Tensor:
        residual = decoder_hidden_states
        hidden_states = self.self_attn_layer_norm(decoder_hidden_states)
        # Self Attention
        hidden_states = self.self_attn(hidden_states=hidden_states)
        hidden_states = residual + hidden_states
        # Cross-Attention Block
        residual = hidden_states
        hidden_states = self.encoder_attn_layer_norm(hidden_states)
        hidden_states = self.encoder_attn(
            hidden_states=hidden_states,
            encoder_hidden_states=encoder_hidden_states,
        )
        hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        fc1_out, _ = self.fc1(hidden_states)
        hidden_states = self.activation_fn(fc1_out)
        hidden_states, _ = self.fc2(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
class MBartDecoderNoPos(nn.Module):
    """
    Transformer decoder consisting of *config.decoder_layers* layers.
    Each layer is a [`BartDecoderLayer`]

    No positional embeddings are applied (weights named ``embed_positions``
    are skipped in load_weights).

    Args:
        config: BartConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(
        self,
        config: BartConfig,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        lora_config: LoRAConfig | None = None,
        embed_tokens: nn.Embedding | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.cache_config = cache_config
        self.quant_config = quant_config
        self.lora_config = lora_config
        # BART scales token embeddings by sqrt(d_model) when configured.
        embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.embed_tokens = BartScaledWordEmbedding(
            config.vocab_size, config.d_model, embed_scale=embed_scale
        )
        # Optionally tie to an externally provided embedding table.
        if embed_tokens is not None:
            self.embed_tokens.weight = embed_tokens.weight
        self.layers = nn.ModuleList(
            [
                MBartDecoderLayer(
                    config,
                    cache_config,
                    quant_config,
                    prefix=f"{prefix}.layers.{layer_idx}",
                )
                for layer_idx in range(config.decoder_layers)
            ]
        )
        # Pre-norm stack: norm on the embeddings going in, and a final norm
        # on the way out.
        self.layernorm_embedding = nn.LayerNorm(config.d_model)
        self.layer_norm = nn.LayerNorm(config.d_model)

    def forward(
        self,
        decoder_input_ids: torch.Tensor | None,
        *,
        encoder_hidden_states: torch.Tensor | None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor:
        r"""
        Args:
            decoder_input_ids: Indices of *decoder* input sequence tokens in the
                vocabulary. Padding will be ignored by default should you provide it.
            encoder_hidden_states: Tensor of encoder output embeddings
            inputs_embeds: Pre-computed token embeddings; when given,
                ``decoder_input_ids`` is not consulted.
        Returns:
            Decoder output torch.Tensor
        """
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(decoder_input_ids)
        hidden_states = self.layernorm_embedding(inputs_embeds)
        # decoder layers
        for decoder_layer in self.layers:
            hidden_states = decoder_layer(
                decoder_hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
            )
        hidden_states = self.layer_norm(hidden_states)
        return hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        # Map per-projection checkpoint weights onto the fused QKV / KV
        # parameters used by the parallel attention layers.
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            (".self_attn.qkv_proj", ".self_attn.q_proj", "q"),
            (".self_attn.qkv_proj", ".self_attn.k_proj", "k"),
            (".self_attn.qkv_proj", ".self_attn.v_proj", "v"),
            (".encoder_attn.kv_proj", ".encoder_attn.k_proj", "k"),
            (".encoder_attn.kv_proj", ".encoder_attn.v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            # This decoder has no positional embeddings; drop them.
            if name.startswith("embed_positions"):
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Not a stacked parameter: load directly.
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class NemotronParsePixelInputs(TensorSchema):
    """
    Schema for the normalized pixel tensor produced by
    NemotronParseImageProcessor.

    Dimensions:
        - b: Batch size
        - c: Number of channels (3)
        - h: Height
        - w: Width
    """

    # Discriminator for the input variant (only pixel values are supported).
    type: Literal["pixel_values"]
    data: Annotated[torch.Tensor, TensorShape("b", 3, "h", "w")]
class NemotronParseImageProcessor:
    """
    NemotronParse Image Processor

    Resizes each image to fit within ``final_size`` (height, width) while
    preserving aspect ratio, pads bottom/right with white to exactly
    ``final_size``, converts to a tensor, and normalizes with the OpenAI
    CLIP mean/std.
    """

    def __init__(
        self,
        final_size: tuple = DEFAULT_FINAL_IMAGE_SIZE,
        **kwargs,
    ):
        # Ensure final_size is properly formatted as an int (height, width)
        # pair; scalars are broadcast to a square.
        if isinstance(final_size, (list, tuple)) and len(final_size) >= 2:
            self.final_size = (int(final_size[0]), int(final_size[1]))
        elif isinstance(final_size, (int, float)):
            self.final_size = (int(final_size), int(final_size))
        else:
            self.final_size = DEFAULT_FINAL_IMAGE_SIZE  # Default fallback
        # CLIP normalization constants, shaped for (b, c, h, w) broadcasting.
        self.norm_mean = torch.Tensor(OPENAI_CLIP_MEAN).reshape(1, 3, 1, 1)
        self.norm_std = torch.Tensor(OPENAI_CLIP_STD).reshape(1, 3, 1, 1)
        # Create transforms
        self._create_transforms()

    def _create_transforms(self):
        """Create transform objects (albumentations pad + torchvision ToTensor)."""
        try:
            import albumentations as A
        except ImportError as err:
            raise ImportError(
                "The package `albumentations` is required to use "
                "NemotronParse model. Please install it with `pip install "
                "albumentations`."
            ) from err
        # Ensure final_size is a tuple of integers
        if isinstance(self.final_size, (list, tuple)):
            self.target_height, self.target_width = (
                int(self.final_size[0]),
                int(self.final_size[1]),
            )
        else:
            self.target_height = self.target_width = int(self.final_size)
        import cv2

        # Pad (never crop) to the target size with a constant white border.
        self.transform = A.Compose(
            [
                A.PadIfNeeded(
                    min_height=self.target_height,
                    min_width=self.target_width,
                    border_mode=cv2.BORDER_CONSTANT,
                    fill=[255, 255, 255],
                    p=1.0,
                ),
            ]
        )
        # HWC uint8 -> CHW float tensor in [0, 1].
        self.torch_transform = T.Compose(
            [
                T.ToTensor(),
            ]
        )

    def _resize_with_aspect_ratio(self, image: np.ndarray) -> np.ndarray:
        """Resize image maintaining aspect ratio (exact replica of original
        LongestMaxSizeHW)."""
        height, width = image.shape[:2]
        max_size_height = self.target_height
        max_size_width = self.target_width
        # Original LongestMaxSizeHW algorithm from custom_augmentations.py
        aspect_ratio = width / height
        new_height = height
        new_width = width
        # If height too big then scale image down
        if height > max_size_height:
            new_height = max_size_height
            new_width = int(new_height * aspect_ratio)
        # If width too big, scale image down further
        if new_width > max_size_width:
            new_width = max_size_width
            new_height = int(new_width / aspect_ratio)
        # Use cv2.INTER_LINEAR like the original
        import cv2

        return cv2.resize(
            image, (new_width, new_height), interpolation=cv2.INTER_LINEAR
        )

    def _pad_to_size(self, image: np.ndarray) -> np.ndarray:
        """Pad image to target size with white padding (matches A.PadIfNeeded
        behavior)."""
        h, w = image.shape[:2]
        min_height, min_width = self.target_height, self.target_width
        # Only pad if image is smaller than target (matches A.PadIfNeeded logic)
        pad_h = max(0, min_height - h)
        pad_w = max(0, min_width - w)
        if pad_h == 0 and pad_w == 0:
            return image
        # A.PadIfNeeded pads to bottom-right with constant value
        if len(image.shape) == 3:
            # Color image - pad bottom and right with white (255, 255, 255)
            padded = np.pad(
                image,
                ((0, pad_h), (0, pad_w), (0, 0)),
                mode="constant",
                constant_values=255,
            )
        else:
            # Grayscale image - pad with white (255)
            padded = np.pad(
                image, ((0, pad_h), (0, pad_w)), mode="constant", constant_values=255
            )
        return padded

    def preprocess(
        self,
        images: Image.Image | list[Image.Image],
        **kwargs,
    ) -> dict[str, torch.Tensor]:
        """
        Preprocess an image or batch of images for the NemotronParse model.

        Args:
            images: Input image(s)

        Returns:
            Dict with "pixel_values": normalized (b, 3, H, W) float tensor.
        """
        # Ensure images is a list
        if not isinstance(images, list):
            images = [images]
        # Convert PIL images to numpy arrays if needed
        processed_images = []
        for image in images:
            if isinstance(image, Image.Image):
                image = np.asarray(image)
            processed_images.append(image)
        # Apply NemotronParse-specific transforms
        pixel_values = []
        for image in processed_images:
            # Manual resize with aspect ratio preservation
            # (replaces LongestMaxSizeHW)
            processed_image = self._resize_with_aspect_ratio(image)
            # Apply remaining albumentations transforms if available
            if self.transform is not None:
                transformed = self.transform(image=processed_image)
                processed_image = transformed["image"]
            else:
                # Fallback: just pad to target size
                processed_image = self._pad_to_size(processed_image)
            # Convert to tensor
            pixel_values_tensor = self.torch_transform(processed_image)
            # Handle grayscale images
            if pixel_values_tensor.shape[0] == 1:
                pixel_values_tensor = pixel_values_tensor.expand(3, -1, -1)
            pixel_values.append(pixel_values_tensor)
        # Stack into batch
        pixel_values = torch.stack(pixel_values)
        # Normalize pixel values
        normalized_values = (pixel_values - self.norm_mean) / self.norm_std
        return {"pixel_values": normalized_values}

    def __call__(
        self, images: Image.Image | list[Image.Image], **kwargs
    ) -> dict[str, torch.Tensor]:
        # Convenience alias so the processor can be called like an HF one.
        return self.preprocess(images, **kwargs)
class NemotronParseProcessor:
    """
    NemotronParse Processor

    Minimal HF-processor-like wrapper that combines the NemotronParse image
    processor with the model tokenizer into a single callable producing a
    BatchFeature.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        tokenizer: TokenizerLike,
        **kwargs,
    ) -> None:
        super().__init__()
        self.config = config
        self.tokenizer = tokenizer
        # Image preprocessing targets the model's fixed (height, width).
        self.image_processor = NemotronParseImageProcessor(final_size=config.image_size)

    def _make_batch_input(self, input_item=None):
        # Normalize None / a single item into a (possibly empty) list.
        if input_item is None:
            input_item = []
        if not isinstance(input_item, list):
            input_item = [input_item]
        return input_item

    def __call__(
        self,
        text: str | None = None,
        images: Image.Image | list[Image.Image] | None = None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ) -> BatchFeature:
        text, images = [self._make_batch_input(x) for x in (text, images)]
        # Only invoke the image processor when images were actually given.
        image_inputs = {} if len(images) == 0 else self.image_processor(images)
        text_inputs = self.tokenizer(text, add_special_tokens=False, **kwargs)
        combined_outputs = BatchFeature(
            data={**text_inputs, **image_inputs},
            tensor_type=return_tensors,
        )
        return combined_outputs
class NemotronParseProcessingInfo(BaseProcessingInfo):
    """Multimodal processing metadata for Nemotron Parse."""

    def get_hf_config(self):
        return self.ctx.get_hf_config()

    def get_hf_processor(self, **kwargs) -> NemotronParseProcessor:
        return self.ctx.init_processor(
            NemotronParseProcessor,
            config=self.get_hf_config(),
            tokenizer=self.get_tokenizer(),
            **kwargs,
        )

    def get_default_tok_params(self) -> TokenizeParams:
        # The processor already tokenizes with add_special_tokens=False;
        # keep the renderer consistent with that.
        return super().get_default_tok_params().with_kwargs(add_special_tokens=False)

    @property
    def skip_prompt_length_check(self) -> bool:
        return True  # Because the encoder prompt is padded

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # Exactly one image per prompt is supported.
        return {"image": 1}

    def get_num_image_tokens(self) -> int:
        """Number of encoder output embeddings per image.

        One token per patch, with the neck's (1, 4)-stride Conv2d merging
        every 4 tokens along the width axis, plus 1 summary token appended
        by RadioWithNeck.forward.
        """
        config = self.get_hf_config()
        final_size = config.image_size
        patch_size = config.encoder.patch_size
        return (final_size[0] // patch_size) * ((final_size[1] // patch_size) // 4) + 1

    def get_mm_max_tokens_per_item(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> Mapping[str, int] | None:
        # Fixed input resolution ⇒ fixed token count per image.
        image_tokens = self.get_num_image_tokens()
        return {"image": image_tokens}
class NemotronParseDummyInputsBuilder(
    BaseDummyInputsBuilder[NemotronParseProcessingInfo]
):
    """Builds profiling-time dummy inputs (empty text + max-size images)."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        # The decoder prompt carries no image placeholders; text is empty.
        return ""

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        """Return ``num_images`` dummy images at the model's full input size.

        Fix: ``config.image_size`` is ordered (height, width) everywhere else
        in this file (``h, w = self.config.image_size`` in the model, and
        ``final_size[0] -> target_height`` in NemotronParseImageProcessor),
        so unpack it as (height, width) here too instead of the previous
        swapped (width, height) order.
        """
        num_images = mm_counts.get("image", 0)
        target_height, target_width = self.info.get_hf_config().image_size
        return {
            "image": self._get_dummy_images(
                width=target_width, height=target_height, num_images=num_images
            )
        }
class NemotronParseMultiModalProcessor(
    EncDecMultiModalProcessor[NemotronParseProcessingInfo]
):
    """Encoder/decoder multimodal processor for Nemotron Parse."""

    def create_encoder_prompt(
        self,
        prompt: str | list[int],
        mm_items: MultiModalDataItems,
    ) -> str | list[int]:
        # Single placeholder token (id 0); _get_prompt_updates expands it to
        # the full per-image token count.
        return [0]

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        if mm_data:
            # Image + text path: defer to the combined NemotronParseProcessor.
            processed_outputs = super()._call_hf_processor(
                prompt, mm_data, mm_kwargs, tok_kwargs
            )
        else:
            # Text-only path: tokenize directly, bypassing the image processor.
            hf_processor = self.info.get_hf_processor()
            tokenizer = hf_processor.tokenizer
            processed_outputs = tokenizer(
                prompt, add_special_tokens=False, return_tensors="pt"
            )
        return processed_outputs

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return dict(pixel_values=MultiModalFieldConfig.batched("image"))

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        # Expand the single [0] encoder placeholder into one slot per
        # encoder output embedding.
        num_image_tokens = self.info.get_num_image_tokens()
        return [
            PromptReplacement(
                modality="image",
                target=[0],
                replacement=[0] * num_image_tokens,
            )
        ]
class RadioWithNeck(nn.Module):
    """Vision encoder using RADIO model with custom neck.

    The neck projects per-patch features (1280 -> 1024 channels), compresses
    the width axis 4x with a strided Conv2d, projects the pooled summary
    vector (3840 -> 1024), and appends it as one extra token.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.config = config.encoder
        self.model_encoder = self.get_vit_model_from_radio_config(
            config, quant_config=quant_config
        )
        # Neck components
        last_hidden_state = 1024
        # Per-token channel projection: 1280-dim RADIO features -> 1024.
        self.conv1 = nn.Conv1d(1280, last_hidden_state, 1)
        self.layer_norm1 = nn.LayerNorm(
            last_hidden_state, eps=1e-06, elementwise_affine=True
        )
        # 4x compression along the width axis only (kernel == stride == (1, 4)).
        self.conv2 = nn.Conv2d(
            last_hidden_state,
            last_hidden_state,
            kernel_size=(1, 4),
            stride=(1, 4),
            padding=0,
            bias=False,
        )
        self.layer_norm2 = nn.LayerNorm(
            last_hidden_state, eps=1e-06, elementwise_affine=True
        )
        # Projects the 3840-dim RADIO summary vector into the token space.
        self.sum_proj = ColumnParallelLinear(
            3840,
            last_hidden_state,
            quant_config=quant_config,
            prefix=f"{prefix}.sum_proj",
        )
        self.layer_norm3 = nn.LayerNorm(
            last_hidden_state, eps=1e-06, elementwise_affine=True
        )

    def get_vit_model_from_radio_config(
        self,
        hf_config: PretrainedConfig,
        quant_config: QuantizationConfig | None = None,
    ) -> RadioModel:
        """Instantiate the RADIO backbone described by ``hf_config.encoder``."""
        hf_config_vision = hf_config.encoder
        model_name = hf_config_vision.args.get("model")
        if model_name is None:
            raise ValueError(f"Unsupported vit model type: {model_name}")
        radio_config = RadioConfig(
            model_name=model_name,
            image_size=hf_config.image_size,
            **hf_config_vision.args,
        )
        return RadioModel(config=radio_config, quant_config=quant_config)

    def forward(self, pixel_values: torch.Tensor, **kwargs) -> torch.Tensor:
        # RADIO returns a pooled summary vector and per-patch features.
        summary, feature = self.model_encoder(pixel_values)
        # Conv1d expects (b, c, n); project channels then restore (b, n, d).
        output = self.conv1(feature.permute(0, 2, 1)).permute(0, 2, 1)
        output = self.layer_norm1(output)
        patch_size = self.config.patch_size
        # Unflatten the token axis back to the (h, w) patch grid.
        output = rearrange(
            output,
            "b (h w) d -> b d h w",
            h=pixel_values.shape[-2] // patch_size,
            w=pixel_values.shape[-1] // patch_size,
        )
        output = self.conv2(output)
        output = rearrange(output, "b d h w -> b (h w) d")
        output = self.layer_norm2(output)
        # Project the summary and append it as a single trailing token.
        summary = self.layer_norm3(self.sum_proj(summary)[0])
        output = torch.cat((output, summary.unsqueeze(1)), dim=1)
        return output

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        # Split weights between the RADIO backbone ("model_encoder.*") and
        # the neck parameters, which load directly by name.
        model_encoder_weights = []
        adaptor_dict = {
            name: param
            for name, param in dict(self.named_parameters()).items()
            if not name.startswith("model_encoder")
        }
        for name, w in weights:
            if name.startswith("model_encoder"):
                # Strip the "model_encoder." prefix for the backbone loader.
                model_encoder_weights.append((".".join(name.split(".")[1:]), w))
            else:
                param = adaptor_dict[name]
                with torch.no_grad():
                    default_weight_loader(param, w)
        self.model_encoder.load_weights(model_encoder_weights)
@MULTIMODAL_REGISTRY.register_processor(
    NemotronParseMultiModalProcessor,
    info=NemotronParseProcessingInfo,
    dummy_inputs=NemotronParseDummyInputsBuilder,
)
class NemotronParseForConditionalGeneration(nn.Module, SupportsMultiModal):
    """Encoder-decoder document parsing model: RADIO vision encoder with a
    custom neck feeding an mBART-style decoder via cross-attention."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.config = config
        self.vision_config = config.encoder
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        with self._mark_tower_model(vllm_config, "image"):
            self.encoder = RadioWithNeck(
                config=config, quant_config=quant_config, prefix=f"{prefix}.encoder"
            )
        with self._mark_language_model(vllm_config):
            self.decoder = MBartDecoderNoPos(
                config.decoder,
                cache_config=cache_config,
                quant_config=quant_config,
                prefix=f"{prefix}.decoder",
            )
        self.vocab_size = config.decoder.vocab_size
        self.lm_head = ParallelLMHead(
            config.decoder.vocab_size, config.decoder.d_model, quant_config=quant_config
        )
        self.logits_processor = LogitsProcessor(
            self.vocab_size, config.decoder.vocab_size
        )

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        # No textual placeholder: the image is consumed by the encoder side,
        # not spliced into the decoder prompt.
        if modality.startswith("image"):
            return None
        raise ValueError("Only image modality is supported")

    def _parse_and_validate_image_input(
        self, **kwargs: object
    ) -> NemotronParsePixelInputs | None:
        """Extract and schema-check image inputs from forward kwargs.

        Returns None when no image data was passed; raises on conflicting or
        unsupported (image_embeds) inputs.
        """
        pixel_values = kwargs.pop("pixel_values", None)
        image_embeds = kwargs.pop("image_embeds", None)
        if pixel_values is None and image_embeds is None:
            return None
        if pixel_values is not None and image_embeds is not None:
            raise ValueError("Both pixel values and image embeds are provided.")
        if pixel_values is not None:
            # config.image_size is (height, width); bind the schema dims.
            h, w = self.config.image_size
            return NemotronParsePixelInputs(
                type="pixel_values",
                data=pixel_values,
                resolve_bindings={
                    "h": h,
                    "w": w,
                },
            )
        if image_embeds is not None:
            raise NotImplementedError
        raise AssertionError("This line should be unreachable.")

    def _process_image_input(
        self, image_input: NemotronParsePixelInputs
    ) -> torch.Tensor:
        """Run the vision encoder on validated pixel inputs."""
        assert image_input["type"] == "pixel_values"
        pixel_values = image_input["data"]
        # Match the encoder's parameter dtype (e.g. bfloat16).
        dtype = next(self.encoder.parameters()).dtype
        pixel_values = pixel_values.to(dtype)
        return self.encoder(pixel_values)

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return None
        vision_embeddings = self._process_image_input(image_input)
        return vision_embeddings

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        encoder_outputs: list[torch.Tensor] | None = None,
        **kwargs,
    ) -> torch.Tensor:
        r"""
        Args:
            input_ids: torch.Tensor of *decoder* input token ids.
            positions: torch.Tensor of *decoder* position indices.
            encoder_outputs: List of encoder output tensors (vision embeddings).
                During profiling, this may be None or empty.
        Returns:
            Output torch.Tensor
        """
        inputs_embeds = None
        if encoder_outputs:
            # Concatenate per-image embeddings into one cross-attention
            # context for the decoder.
            inputs_embeds = torch.cat(encoder_outputs, dim=0)
        hidden_states = self.decoder(
            decoder_input_ids=input_ids, encoder_hidden_states=inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        return self.logits_processor(self.lm_head, hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        """Dispatch checkpoint weights to encoder, decoder, and lm_head by
        their top-level name prefix."""
        lm_head_dict = dict(self.lm_head.named_parameters())

        def is_encoder(name: str) -> bool:
            return name.startswith("encoder")

        def is_decoder(name: str) -> bool:
            return name.startswith("decoder")

        def is_lm_head(name: str):
            return name.startswith("lm_head")

        # Separate weights by component
        encoder_weights = []
        decoder_weights = []
        for name, w in weights:
            if is_encoder(name):
                encoder_weights.append((".".join(name.split(".")[1:]), w))
            elif is_decoder(name):
                decoder_weights.append((".".join(name.split(".")[1:]), w))
            elif is_lm_head(name):
                trimmed_name = ".".join(name.split(".")[1:])
                param = lm_head_dict[trimmed_name]
                with torch.no_grad():
                    default_weight_loader(param, w)
            else:
                # Unknown prefixes are reported but not fatal.
                logger.info("Found unexpected weight: %s", name)
        # Load encoder weights
        self.encoder.load_weights(encoder_weights)
        # Load decoder weights
        self.decoder.load_weights(decoder_weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/nemotron_parse.py",
"license": "Apache License 2.0",
"lines": 804,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/pooling/score/qwen3_reranker_offline.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""
What is the difference between the official original version and one
that has been converted into a sequence classification model?
Qwen3-Reranker is a language model that doing reranker by using the
logits of "no" and "yes" tokens.
This requires computing logits for all 151,669 tokens in the vocabulary,
making it inefficient and incompatible with vLLM's score() API.
A conversion method has been proposed to transform the original model into a
sequence classification model. This converted model:
1. Is significantly more efficient
2. Fully supports vLLM's score() API
3. Simplifies initialization parameters
Reference: https://huggingface.co/Qwen/Qwen3-Reranker-0.6B/discussions/3
Reference: https://github.com/vllm-project/vllm/blob/main/examples/pooling/score/convert_model_to_seq_cls.py
For the converted model, initialization would simply be:
llm = LLM(model="tomaarsen/Qwen3-Reranker-0.6B-seq-cls", runner="pooling")
This example demonstrates loading the ORIGINAL model with special overrides
to make it compatible with vLLM's score API.
"""
from pathlib import Path
from vllm import LLM
model_name = "Qwen/Qwen3-Reranker-0.6B"
def get_llm() -> LLM:
    """Build a vLLM instance serving the ORIGINAL Qwen3-Reranker checkpoint.

    The stock checkpoint is a causal LM that ranks by comparing the logits
    of the "no" and "yes" tokens. The overrides below reroute it to the
    sequence-classification architecture so vLLM's score() API works.

    Returns:
        LLM: Configured vLLM instance for reranking tasks.
    """
    # - "architectures": route to Qwen3ForSequenceClassification instead of
    #   the default Qwen3ForCausalLM.
    # - "classifier_from_token": the two token logits ("no"/"yes") whose
    #   vectors form the classifier head.
    # - "is_original_qwen3_reranker": enables the conversion logic that
    #   collapses those two token vectors into one classification vector.
    overrides = {
        "architectures": ["Qwen3ForSequenceClassification"],
        "classifier_from_token": ["no", "yes"],
        "is_original_qwen3_reranker": True,
    }
    return LLM(
        model=model_name,
        runner="pooling",  # pooling runner is required for the score task
        hf_overrides=overrides,
    )
def main() -> None:
    """Run a small offline reranking demo with the original Qwen3-Reranker."""
    # Read the Jinja chat template that formats query/document pairs.
    template_dir = Path(__file__).parent / "template"
    chat_template = (template_dir / "qwen3_reranker.jinja").read_text()

    # Demo inputs: each query is scored against the document at the same
    # index by the score() call below.
    queries = [
        "What is the capital of China?",
        "Explain gravity",
    ]
    documents = [
        "The capital of China is Beijing.",
        "Gravity is a force that attracts two bodies towards each other. It gives weight to physical objects and is responsible for the movement of planets around the sun.",
    ]

    llm = get_llm()

    # score() yields one relevance score per query/document pair;
    # higher means more relevant.
    outputs = llm.score(queries, documents, chat_template=chat_template)

    print("-" * 30)
    print("Relevance scores:", [output.outputs.score for output in outputs])
    print("-" * 30)


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/score/qwen3_reranker_offline.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/pooling/score/qwen3_reranker_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""
What is the difference between the official original version and one
that has been converted into a sequence classification model?
Qwen3-Reranker is a language model that performs reranking by using the
logits of "no" and "yes" tokens.
This requires computing logits for all 151,669 tokens in the vocabulary,
making it inefficient and incompatible with vLLM's score() API.
A conversion method has been proposed to transform the original model into a
sequence classification model. This converted model:
1. Is significantly more efficient
2. Fully supports vLLM's score() API
3. Simplifies initialization parameters
Reference: https://huggingface.co/Qwen/Qwen3-Reranker-0.6B/discussions/3
Reference: https://github.com/vllm-project/vllm/blob/main/examples/pooling/score/convert_model_to_seq_cls.py
For the converted model, initialization would simply be:
vllm serve tomaarsen/Qwen3-Reranker-0.6B-seq-cls --runner pooling --chat-template examples/pooling/score/template/qwen3_reranker.jinja
This example demonstrates loading the ORIGINAL model with special overrides
to make it compatible with vLLM's score API.
vllm serve Qwen/Qwen3-Reranker-0.6B --runner pooling --hf_overrides '{"architectures": ["Qwen3ForSequenceClassification"],"classifier_from_token": ["no", "yes"],"is_original_qwen3_reranker": true}' --chat-template examples/pooling/score/template/qwen3_reranker.jinja
"""
import json
import requests
# URL of the vLLM server's score endpoint
# Default vLLM server runs on localhost port 8000
url = "http://127.0.0.1:8000/score"
# HTTP headers for the request
headers = {"accept": "application/json", "Content-Type": "application/json"}
# Example queries & documents.
# NOTE(review): queries and documents are parallel lists — presumably the
# score API pairs queries[i] with documents[i]; confirm against the API docs.
queries = [
    "What is the capital of China?",
    "Explain gravity",
]
documents = [
    "The capital of China is Beijing.",
    "Gravity is a force that attracts two bodies towards each other. It gives weight to physical objects and is responsible for the movement of planets around the sun.",
]
# Request payload for the score API
data = {
    "model": "Qwen/Qwen3-Reranker-0.6B",
    "queries": queries,
    "documents": documents,
}
def main():
    """Send one score request to a running vLLM server and print the result.

    Posts the module-level query/document payload to the /score endpoint
    and pretty-prints the JSON response, or reports the failure status.
    """
    resp = requests.post(url, headers=headers, json=data)
    if resp.status_code != 200:
        # Surface the HTTP failure together with the server's error body.
        print(f"Request failed with status code: {resp.status_code}")
        print(resp.text)
        return
    print("Request successful!")
    # The response carries the relevance scores for the submitted pairs.
    print(json.dumps(resp.json(), indent=2))


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/score/qwen3_reranker_online.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/pooling/score/using_template_offline.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
from argparse import Namespace
from pathlib import Path
from typing import Any
from vllm import LLM, EngineArgs
from vllm.utils.argparse_utils import FlexibleArgumentParser
def parse_args():
    """Build and parse the CLI arguments for the reranking example.

    Registers every EngineArgs option on a FlexibleArgumentParser, then
    overrides a few defaults so the script works out-of-the-box for
    reranking models.
    """
    cli = EngineArgs.add_cli_args(FlexibleArgumentParser())
    cli.set_defaults(
        model="nvidia/llama-nemotron-rerank-1b-v2",  # default reranking model
        runner="pooling",  # cross-encoder/reranking models need pooling
        trust_remote_code=True,  # some rerankers ship custom code
    )
    return cli.parse_args()
def get_chat_template(model: str) -> str:
    """Return the Jinja chat template text for a supported reranker.

    Each reranking model expects its own prompt format for query/document
    pairs; this maps the known model names to the template files shipped
    next to this script.

    Raises:
        ValueError: If the model is not one of the supported rerankers.
    """
    templates_dir = Path(__file__).parent / "template"
    # Known model -> template-file mapping; the Qwen3 variants all share
    # one template, as do the mxbai variants.
    known_templates = {
        "BAAI/bge-reranker-v2-gemma": "bge-reranker-v2-gemma.jinja",
        "Qwen/Qwen3-Reranker-0.6B": "qwen3_reranker.jinja",
        "Qwen/Qwen3-Reranker-4B": "qwen3_reranker.jinja",
        "Qwen/Qwen3-Reranker-8B": "qwen3_reranker.jinja",
        "tomaarsen/Qwen3-Reranker-0.6B-seq-cls": "qwen3_reranker.jinja",
        "tomaarsen/Qwen3-Reranker-4B-seq-cls": "qwen3_reranker.jinja",
        "tomaarsen/Qwen3-Reranker-8B-seq-cls": "qwen3_reranker.jinja",
        "mixedbread-ai/mxbai-rerank-base-v2": "mxbai_rerank_v2.jinja",
        "mixedbread-ai/mxbai-rerank-large-v2": "mxbai_rerank_v2.jinja",
        "nvidia/llama-nemotron-rerank-1b-v2": "nemotron-rerank.jinja",
    }
    if model not in known_templates:
        raise ValueError(f"This demo does not support model name: {model}.")
    return (templates_dir / known_templates[model]).read_text()
def get_hf_overrides(model: str) -> dict[str, Any]:
    """Return the HF config overrides that adapt *model* for the score API.

    Several rerankers ship as causal LMs and must be rerouted to a
    sequence-classification architecture before vLLM's score API can use
    them; checkpoints that were already converted need no overrides.

    Reference: https://github.com/vllm-project/vllm/blob/main/examples/pooling/score/qwen3_reranker_offline.py
    Reference: https://github.com/vllm-project/vllm/blob/main/examples/pooling/score/convert_model_to_seq_cls.py

    Raises:
        ValueError: If the model is not one of the supported rerankers.
    """
    overrides_by_model: dict[str, dict[str, Any]] = {
        "BAAI/bge-reranker-v2-gemma": {
            "architectures": ["GemmaForSequenceClassification"],
            "classifier_from_token": ["Yes"],
            "method": "no_post_processing",
        },
        "Qwen/Qwen3-Reranker-0.6B": {
            "architectures": ["Qwen3ForSequenceClassification"],
            "classifier_from_token": ["no", "yes"],
            "is_original_qwen3_reranker": True,
        },
        "Qwen/Qwen3-Reranker-4B": {
            "architectures": ["Qwen3ForSequenceClassification"],
            "classifier_from_token": ["no", "yes"],
            "is_original_qwen3_reranker": True,
        },
        "Qwen/Qwen3-Reranker-8B": {
            "architectures": ["Qwen3ForSequenceClassification"],
            "classifier_from_token": ["no", "yes"],
            "is_original_qwen3_reranker": True,
        },
        # Already-converted checkpoints: no overrides required.
        "tomaarsen/Qwen3-Reranker-0.6B-seq-cls": {},
        "tomaarsen/Qwen3-Reranker-4B-seq-cls": {},
        "tomaarsen/Qwen3-Reranker-8B-seq-cls": {},
        "mixedbread-ai/mxbai-rerank-base-v2": {
            "architectures": ["Qwen2ForSequenceClassification"],
            "classifier_from_token": ["0", "1"],
            "method": "from_2_way_softmax",
        },
        "mixedbread-ai/mxbai-rerank-large-v2": {
            "architectures": ["Qwen2ForSequenceClassification"],
            "classifier_from_token": ["0", "1"],
            "method": "from_2_way_softmax",
        },
        "nvidia/llama-nemotron-rerank-1b-v2": {},
    }
    if model not in overrides_by_model:
        raise ValueError(f"This demo does not support model name: {model}.")
    return overrides_by_model[model]
def main(args: Namespace):
    """Run the offline reranking demo with the parsed CLI arguments."""
    # Inject the model-specific HF config overrides before engine start.
    args.hf_overrides = get_hf_overrides(args.model)
    llm = LLM(**vars(args))

    # Demo query and candidate documents to rank against it.
    query = "how much protein should a female eat?"
    documents = [
        "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
        "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments.",
        "Calorie intake should not fall below 1,200 a day in women or 1,500 a day in men, except under the supervision of a health professional.",
    ]

    # The template formats each query/document pair for the selected model.
    chat_template = get_chat_template(args.model)

    # One relevance score per document; higher means more relevant.
    outputs = llm.score(query, documents, chat_template=chat_template)

    print("-" * 30)
    print([output.outputs.score for output in outputs])
    print("-" * 30)


if __name__ == "__main__":
    args = parse_args()
    main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/score/using_template_offline.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/pooling/score/using_template_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""
Example of using the rerank API with template.
This script demonstrates how to interact with a vLLM server running
a reranking model via the REST API.
Before running this script, start the vLLM server with one of the
supported reranking models using the commands below.
note:
Some reranking models require special configuration overrides to work correctly
with vLLM's score API.
Reference: https://github.com/vllm-project/vllm/blob/main/examples/pooling/score/qwen3_reranker_online.py
Reference: https://github.com/vllm-project/vllm/blob/main/examples/pooling/score/convert_model_to_seq_cls.py
run:
vllm serve BAAI/bge-reranker-v2-gemma --hf_overrides '{"architectures": ["GemmaForSequenceClassification"],"classifier_from_token": ["Yes"],"method": "no_post_processing"}' --chat-template examples/pooling/score/template/bge-reranker-v2-gemma.jinja
vllm serve tomaarsen/Qwen3-Reranker-0.6B-seq-cls --chat-template examples/pooling/score/template/qwen3_reranker.jinja
vllm serve mixedbread-ai/mxbai-rerank-base-v2 --hf_overrides '{"architectures": ["Qwen2ForSequenceClassification"],"classifier_from_token": ["0", "1"], "method": "from_2_way_softmax"}' --chat-template examples/pooling/score/template/mxbai_rerank_v2.jinja
vllm serve nvidia/llama-nemotron-rerank-1b-v2 --runner pooling --trust-remote-code --chat-template examples/pooling/score/template/nemotron-rerank.jinja
vllm serve Qwen/Qwen3-Reranker-0.6B --runner pooling --hf_overrides '{"architectures": ["Qwen3ForSequenceClassification"],"classifier_from_token": ["no", "yes"],"is_original_qwen3_reranker": true}' --chat-template examples/pooling/score/template/qwen3_reranker.jinja
"""
import json
import requests
# URL of the vLLM server's rerank endpoint
# Default vLLM server runs on localhost port 8000
url = "http://127.0.0.1:8000/rerank"
# HTTP headers for the request
headers = {"accept": "application/json", "Content-Type": "application/json"}
# Example query & documents: every document is scored for relevance
# against the single query.
query = "how much protein should a female eat?"
documents = [
    "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
    "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments.",
    "Calorie intake should not fall below 1,200 a day in women or 1,500 a day in men, except under the supervision of a health professional.",
]
# Request payload for the rerank API
data = {
    "model": "nvidia/llama-nemotron-rerank-1b-v2",  # Model to use for reranking
    "query": query,  # The query to score documents against
    "documents": documents,  # List of documents to be scored
}
def main():
    """Send one rerank request to a running vLLM server and print the result.

    Posts the module-level query/document payload to the /rerank endpoint
    and pretty-prints the JSON response, or reports the failure status.
    """
    resp = requests.post(url, headers=headers, json=data)
    if resp.status_code != 200:
        # Surface the HTTP failure together with the server's error body.
        print(f"Request failed with status code: {resp.status_code}")
        print(resp.text)
        return
    print("Request successful!")
    # The response carries one relevance score per document.
    print(json.dumps(resp.json(), indent=2))


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/score/using_template_online.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/multimodal/processing/test_qwen3_omni.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for Qwen3 Omni audio processing and sample rate handling."""
from typing import Any
import numpy as np
import pytest
from vllm.multimodal import MULTIMODAL_REGISTRY
from ...utils import build_model_context
@pytest.mark.parametrize("model_id", ["Qwen/Qwen3-Omni-30B-A3B-Instruct"])
@pytest.mark.parametrize(
    ("audio_sample_rate", "audio_duration_sec"),
    [
        (16000, 1.0),  # Native Whisper sample rate, 1 second
        (16000, 2.0),  # Native Whisper sample rate, 2 seconds
    ],
)
def test_processor_with_audio_sample_rate(
    model_id: str,
    audio_sample_rate: int,
    audio_duration_sec: float,
) -> None:
    """Check that audio_sample_rate passed via hf_processor_mm_kwargs is
    handled and that the processor emits audio placeholder tokens."""
    ctx = build_model_context(
        model_id,
        limit_mm_per_prompt={"audio": 1, "image": 0, "video": 0},
    )
    mm_processor = MULTIMODAL_REGISTRY.create_processor(ctx.model_config)
    tok = mm_processor.info.get_tokenizer()

    # Deterministic synthetic audio at the requested sample rate.
    n_samples = int(audio_sample_rate * audio_duration_sec)
    waveform = np.random.RandomState(42).rand(n_samples).astype(np.float32)

    prompt = "<|audio_start|><|audio_pad|><|audio_end|>"
    mm_data = {"audio": [(waveform, audio_sample_rate)]}
    mm_kwargs: dict[str, Any] = {"audio_sample_rate": audio_sample_rate}

    processed = mm_processor(
        prompt,
        mm_items=mm_processor.info.parse_mm_data(mm_data),
        hf_processor_mm_kwargs=mm_kwargs,
    )

    # At least one audio placeholder token must appear in the prompt ids.
    hf_proc = mm_processor.info.get_hf_processor(**mm_kwargs)
    pad_id = tok.convert_tokens_to_ids(hf_proc.audio_token)
    n_audio_tokens = processed["prompt_token_ids"].count(pad_id)
    assert n_audio_tokens >= 1, (
        f"Expected at least 1 audio token but got {n_audio_tokens}. "
        f"sample_rate: {audio_sample_rate}Hz, duration: {audio_duration_sec}s"
    )
@pytest.mark.parametrize("model_id", ["Qwen/Qwen3-Omni-30B-A3B-Instruct"])
def test_longer_audio_generates_more_tokens(model_id: str) -> None:
    """Longer audio must yield more audio placeholder tokens.

    Indirectly verifies that audio_sample_rate is honored: doubling the
    duration at a fixed sample rate should increase the token count.
    """
    ctx = build_model_context(
        model_id,
        limit_mm_per_prompt={"audio": 1, "image": 0, "video": 0},
    )
    mm_processor = MULTIMODAL_REGISTRY.create_processor(ctx.model_config)
    tok = mm_processor.info.get_tokenizer()
    sample_rate = 16000
    rng = np.random.RandomState(42)

    def count_audio_tokens(duration: float) -> int:
        # Build synthetic audio of the requested duration and count the
        # audio placeholder tokens the processor emits for it.
        waveform = rng.rand(int(sample_rate * duration)).astype(np.float32)
        mm_kwargs: dict[str, Any] = {"audio_sample_rate": sample_rate}
        processed = mm_processor(
            "<|audio_start|><|audio_pad|><|audio_end|>",
            mm_items=mm_processor.info.parse_mm_data(
                {"audio": [(waveform, sample_rate)]}
            ),
            hf_processor_mm_kwargs=mm_kwargs,
        )
        hf_proc = mm_processor.info.get_hf_processor(**mm_kwargs)
        pad_id = tok.convert_tokens_to_ids(hf_proc.audio_token)
        return processed["prompt_token_ids"].count(pad_id)

    short_tokens = count_audio_tokens(1.0)
    long_tokens = count_audio_tokens(2.0)
    assert long_tokens > short_tokens, (
        f"Expected longer audio (2s) to have more tokens than shorter (1s). "
        f"Got short={short_tokens}, long={long_tokens}"
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/multimodal/processing/test_qwen3_omni.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/config/test_model_arch_config.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for ModelArchitectureConfig and its integration with ModelConfig."""
import json
from pathlib import Path
import pytest
from vllm.config import ModelConfig, ParallelConfig, SpeculativeConfig
from vllm.transformers_utils.model_arch_config_convertor import (
ModelArchConfigConvertorBase,
)
# Models whose HF repos require trust_remote_code=True to load.
BASE_TRUST_REMOTE_CODE_MODELS = {
    "nvidia/Llama-3_3-Nemotron-Super-49B-v1",
    "nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16",
    "XiaomiMiMo/MiMo-7B-RL",
    # Excluded: Not available online right now
    # "FreedomIntelligence/openPangu-Ultra-MoE-718B-V1.1",
    "meituan-longcat/LongCat-Flash-Chat",
}
# Base (non-speculative) models checked against the groundtruth JSON in
# test_base_model_arch_config below.
BASE_MODELS_TO_TEST = [
    "state-spaces/mamba-130m-hf",
    "mistralai/Mamba-Codestral-7B-v0.1",
    # Excluded: terratorch/torchgeo version mismatch in CPU CI environment
    # (NonGeoDataset import error). Tested in model initialization tests.
    # "ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11",
    "Zyphra/Zamba2-7B-instruct",
    # FIXME: mosaicml/mpt-7b has been deleted
    # "mosaicml/mpt-7b",
    # FIXME: databricks/dbrx-instruct has been deleted
    # "databricks/dbrx-instruct",
    "tiiuae/falcon-7b",
    "tiiuae/falcon-40b",
    "luccafong/deepseek_mtp_main_random",
    "Qwen/Qwen3-Next-80B-A3B-Instruct",
    "tiny-random/qwen3-next-moe",
    "zai-org/GLM-4.5",
    "baidu/ERNIE-4.5-21B-A3B-PT",
    # Models using base convertor
    "lmsys/gpt-oss-20b-bf16",
    "deepseek-ai/DeepSeek-V3.2-Exp",
    "meta-llama/Llama-4-Scout-17B-16E-Instruct",
] + list(BASE_TRUST_REMOTE_CODE_MODELS)
# (target_model, draft_model, trust_remote_code)
SPECULATIVE_MODELS = [
    ("JackFram/llama-68m", "abhigoyal/vllm-medusa-llama-68m-random", False),
    ("luccafong/deepseek_mtp_main_random", "luccafong/deepseek_mtp_draft_random", True),
    ("eagle618/deepseek-v3-random", "eagle618/eagle-deepseek-v3-random", True),
    ("meta-llama/Meta-Llama-3-8B-Instruct", "yuhuili/EAGLE-LLaMA3-Instruct-8B", True),
    ("meta-llama/Llama-3.1-8B-Instruct", "yuhuili/EAGLE3-LLaMA3.1-Instruct-8B", True),
]
def _load_groundtruth(filename: str) -> dict:
"""Load groundtruth JSON from the test directory."""
groundtruth_path = Path(__file__).parent / filename
with open(groundtruth_path) as f:
return json.load(f)
def _assert_model_arch_config(
    model_config, expected: dict, check_head_size: bool = True
):
    """Assert model_config.model_arch_config matches the groundtruth dict.

    Args:
        model_config: The ModelConfig under test.
        expected: Groundtruth values keyed by attribute name.
        check_head_size: When False, skip the head_size check (some draft
            models cannot compute it).
    """
    arch_cfg = model_config.model_arch_config
    # Attributes whose groundtruth key equals the attribute name.
    for attr in (
        "architectures",
        "model_type",
        "text_model_type",
        "hidden_size",
        "total_num_hidden_layers",
        "total_num_attention_heads",
        "vocab_size",
        "total_num_kv_heads",
        "num_experts",
        "is_deepseek_mla",
    ):
        assert getattr(arch_cfg, attr) == expected[attr], attr
    # The dtype is resolved through the convertor rather than stored on
    # the arch config, so it is checked separately.
    torch_dtype = ModelArchConfigConvertorBase.get_torch_dtype(
        model_config.hf_config,
        model_config.model,
        revision=model_config.revision,
        config_format="hf",
    )
    assert str(torch_dtype) == expected["dtype"]
    if check_head_size:
        assert arch_cfg.head_size == expected["head_size"]
def _assert_model_config_methods(
model_config, expected: dict, check_head_size: bool = True
):
"""Assert model_config methods return expected values."""
assert model_config.architectures == expected["architectures"]
assert model_config.get_vocab_size() == expected["vocab_size"]
assert model_config.get_hidden_size() == expected["hidden_size"]
assert model_config.get_total_num_kv_heads() == expected["total_num_kv_heads"]
assert model_config.get_num_experts() == expected["num_experts"]
assert (
model_config.get_total_num_hidden_layers()
== expected["total_num_hidden_layers"]
)
if check_head_size:
assert model_config.get_head_size() == expected["head_size"]
@pytest.mark.parametrize("model", BASE_MODELS_TO_TEST)
def test_base_model_arch_config(model: str):
    """Test model architecture config for base models."""
    expected = _load_groundtruth("base_model_arch_groundtruth.json")[model]
    # trust_remote_code is only needed for the models listed as such.
    cfg = ModelConfig(
        model, trust_remote_code=model in BASE_TRUST_REMOTE_CODE_MODELS
    )
    _assert_model_arch_config(cfg, expected)
    _assert_model_config_methods(cfg, expected)
@pytest.mark.parametrize(
    "target_model,draft_model,trust_remote_code", SPECULATIVE_MODELS
)
def test_draft_model_arch_config(
    target_model: str, draft_model: str, trust_remote_code: bool
):
    """Test model architecture config for draft/speculative models."""
    expected = _load_groundtruth("draft_model_arch_groundtruth.json")[draft_model]
    spec_config = SpeculativeConfig(
        model=draft_model,
        num_speculative_tokens=1,
        target_model_config=ModelConfig(
            target_model, trust_remote_code=trust_remote_code
        ),
        target_parallel_config=ParallelConfig(),
    )
    draft_cfg = spec_config.draft_model_config
    # For medusa models, head_size may cause division by zero before
    # model_arch_config was introduced, so only check it when the
    # groundtruth provides an int.
    verify_head = isinstance(expected["head_size"], int)
    _assert_model_arch_config(draft_cfg, expected, check_head_size=verify_head)
    _assert_model_config_methods(
        draft_cfg, expected, check_head_size=verify_head
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/config/test_model_arch_config.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/config/model_arch.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
from pydantic import ConfigDict
from pydantic.dataclasses import dataclass
from vllm.logger import init_logger
logger = init_logger(__name__)
@dataclass(config=ConfigDict(arbitrary_types_allowed=True))
class ModelArchitectureConfig:
    """
    Configuration for model architecture that is required by the vLLM runtime.
    """
    architectures: list[str] | None
    """List of model architecture class names (e.g., ['LlamaForCausalLM']).
    It can be None upon calling `vllm_config.with_hf_config(config.text_config)`"""
    model_type: str
    """Model type identifier (e.g., 'llama', 'gpt_oss')."""
    text_model_type: str | None
    """Text model type identifier (e.g., 'llama4_text')."""
    hidden_size: int
    """Hidden size of the model."""
    total_num_hidden_layers: int
    """Number of hidden layers in the model."""
    total_num_attention_heads: int
    """Number of attention heads in the model."""
    head_size: int
    """Head dimension of the model."""
    vocab_size: int
    """Vocabulary size of the model."""
    total_num_kv_heads: int
    """Number of key value heads in the model."""
    num_experts: int
    """Number of experts in the model."""
    quantization_config: dict[str, Any] | None
    """Quantization configuration dictionary containing quantization parameters."""
    is_deepseek_mla: bool
    """Whether the model is a DeepSeek MLA model."""
    derived_max_model_len_and_key: tuple[float, str | None]
    """Derived maximum model length and key from the hf config."""
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/config/model_arch.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/model_arch_config_convertor.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterator
from contextlib import contextmanager
from typing import final
import torch
from huggingface_hub import constants
from safetensors.torch import _TYPES as _SAFETENSORS_TO_TORCH_DTYPE
from transformers import PretrainedConfig
from vllm import envs
from vllm.config.model_arch import (
ModelArchitectureConfig,
)
from vllm.config.utils import getattr_iter
from vllm.logger import init_logger
from vllm.transformers_utils.config import (
ConfigFormat,
try_get_safetensors_metadata,
)
from vllm.utils.torch_utils import common_broadcastable_dtype
logger = init_logger(__name__)
@contextmanager
def _maybe_patch_hf_hub_constants(config_format: ConfigFormat) -> Iterator[None]:
    """Temporarily point HF-hub safetensors filenames at Mistral's layout.

    For the "mistral" config format, the weights live in
    ``consolidated.safetensors`` rather than the default HF filenames, so
    the hub constants are swapped for the duration of the context and
    always restored afterwards. Other formats are a no-op.
    """
    if config_format != "mistral":
        yield
        return
    saved_single = constants.SAFETENSORS_SINGLE_FILE
    saved_index = constants.SAFETENSORS_INDEX_FILE
    constants.SAFETENSORS_SINGLE_FILE = "consolidated.safetensors"
    constants.SAFETENSORS_INDEX_FILE = "consolidated.safetensors.index.json"
    try:
        yield
    finally:
        constants.SAFETENSORS_SINGLE_FILE = saved_single
        constants.SAFETENSORS_INDEX_FILE = saved_index
class ModelArchConfigConvertorBase:
def __init__(self, hf_config: PretrainedConfig, hf_text_config: PretrainedConfig):
self.hf_config = hf_config
self.hf_text_config = hf_text_config
def get_architectures(self) -> list[str]:
return getattr(self.hf_config, "architectures", [])
def get_num_hidden_layers(self) -> int:
return getattr(self.hf_text_config, "num_hidden_layers", 0)
def get_total_num_attention_heads(self) -> int:
return getattr(self.hf_text_config, "num_attention_heads", 0)
def get_vocab_size(self) -> int:
return getattr(self.hf_text_config, "vocab_size", 0)
def get_hidden_size(self) -> int:
return getattr(self.hf_text_config, "hidden_size", 0)
def get_head_size(self) -> int:
if self.is_deepseek_mla():
qk_rope_head_dim = getattr(self.hf_text_config, "qk_rope_head_dim", 0)
if not envs.VLLM_MLA_DISABLE:
return self.hf_text_config.kv_lora_rank + qk_rope_head_dim
else:
qk_nope_head_dim = getattr(self.hf_text_config, "qk_nope_head_dim", 0)
if qk_rope_head_dim and qk_nope_head_dim:
return qk_rope_head_dim + qk_nope_head_dim
# NOTE: Some configs may set head_dim=None in the config
if getattr(self.hf_text_config, "head_dim", None) is not None:
return self.hf_text_config.head_dim
# NOTE: Some models (such as PLaMo2.1) use `hidden_size_per_head`
if getattr(self.hf_text_config, "hidden_size_per_head", None) is not None:
return self.hf_text_config.hidden_size_per_head
# FIXME(woosuk): This may not be true for all models.
return (
self.hf_text_config.hidden_size // self.hf_text_config.num_attention_heads
)
def get_total_num_kv_heads(self) -> int:
attributes = [
# For Falcon:
"n_head_kv",
"num_kv_heads",
# For LLaMA-2:
"num_key_value_heads",
# For ChatGLM:
"multi_query_group_num",
]
# For non-grouped-query attention models, the number of KV heads is
# equal to the number of attention heads.
default_factory = lambda: self.hf_text_config.num_attention_heads
return getattr_iter(
self.hf_text_config, attributes, default_factory=default_factory
)
def get_num_experts_from_block_configs(self) -> int:
"""Check block_configs for heterogeneous models (e.g., NemotronH).
For heterogeneous models with varying expert counts per layer,
returns the MAX to ensure all expert weights can be loaded.
"""
max_experts = 0
block_configs = getattr(self.hf_text_config, "block_configs", None)
if block_configs:
for block in block_configs:
if isinstance(block, dict):
if block.get("block_type", "") == "moe":
max_experts = max(max_experts, block.get("n_routed_experts", 0))
else:
if getattr(block, "block_type", "") == "moe":
max_experts = max(
max_experts, getattr(block, "n_routed_experts", 0)
)
return max_experts
def get_num_experts(self) -> int:
"""Returns the number of experts in the model."""
num_expert_names = [
"num_experts", # Jamba
"moe_num_experts", # Dbrx
"n_routed_experts", # DeepSeek
"num_local_experts", # Mixtral
]
num_experts = getattr_iter(self.hf_text_config, num_expert_names, 0)
if isinstance(num_experts, list):
# Ernie VL's remote code uses list[int]...
# The values are always the same so we just take the first one.
return num_experts[0]
if not num_experts:
num_experts = self.get_num_experts_from_block_configs()
return num_experts
    @final
    @classmethod
    def get_torch_dtype(
        cls,
        hf_config: PretrainedConfig,
        model_id: str,
        revision: str | None,
        config_format: ConfigFormat,
    ):
        """Resolve the checkpoint's dtype.

        Tries, in order: the root config's ``dtype``, the text / vision /
        encoder sub-configs, then the safetensors metadata on the Hub;
        falls back to ``torch.float32`` when nothing declares a dtype.

        Args:
            hf_config: Root HF config of the model.
            model_id: Model repo id, used for the safetensors metadata lookup.
            revision: Optional repo revision for the metadata lookup.
            config_format: "mistral" switches the Hub filenames probed for
                safetensors metadata.

        Returns:
            The resolved dtype — the config's declared value when present,
            otherwise a ``torch.dtype`` derived from weights metadata or
            the float32 fallback.
        """
        # NOTE: getattr(config, "dtype", torch.float32) is not correct
        # because config.dtype can be None.
        config_dtype = getattr(hf_config, "dtype", None)
        # Fallbacks for multi-modal models if the root config
        # does not define dtype
        if config_dtype is None:
            config_dtype = getattr(hf_config.get_text_config(), "dtype", None)
        if config_dtype is None and hasattr(hf_config, "vision_config"):
            config_dtype = getattr(hf_config.vision_config, "dtype", None)
        if config_dtype is None and hasattr(hf_config, "encoder_config"):
            config_dtype = getattr(hf_config.encoder_config, "dtype", None)
        # Try to read the dtype of the weights if they are in safetensors format
        if config_dtype is None:
            with _maybe_patch_hf_hub_constants(config_format):
                repo_mt = try_get_safetensors_metadata(model_id, revision=revision)
                if repo_mt and (files_mt := repo_mt.files_metadata):
                    # Collect every known dtype appearing in the weight files.
                    param_dtypes: set[torch.dtype] = {
                        _SAFETENSORS_TO_TORCH_DTYPE[dtype_str]
                        for file_mt in files_mt.values()
                        for dtype_str in file_mt.parameter_count
                        if dtype_str in _SAFETENSORS_TO_TORCH_DTYPE
                    }
                    if param_dtypes:
                        return common_broadcastable_dtype(param_dtypes)
        # Last resort when neither config nor weights declare a dtype.
        if config_dtype is None:
            config_dtype = torch.float32
        return config_dtype
def _normalize_quantization_config(self, config: PretrainedConfig):
    """Fetch the quantization config from *config* and normalize its
    ``quant_method`` field; returns None when no such config exists."""
    cfg = getattr(config, "quantization_config", None)
    if cfg is None:
        # compressed-tensors uses a "compression_config" key
        cfg = getattr(config, "compression_config", None)
        if cfg is None:
            return None
    else:
        # Set quant_method for ModelOpt models.
        if cfg.get("producer", {}).get("name") == "modelopt":
            algo = cfg.get("quantization", {}).get("quant_algo")
            if algo is not None:
                normalized_algo = str(algo).upper()
                fp8_algos = {"FP8", "FP8_PER_CHANNEL_PER_TOKEN", "FP8_PB_WO"}
                if normalized_algo in fp8_algos:
                    cfg["quant_method"] = "modelopt"
                elif normalized_algo == "NVFP4":
                    cfg["quant_method"] = "modelopt_fp4"
                else:
                    raise ValueError(f"Unknown ModelOpt quant algo: {algo}")
    # Use the community standard 'quant_method' and normalize library names.
    method = cfg.get("quant_method", "").lower()
    cfg["quant_method"] = method.replace("compressed_tensors", "compressed-tensors")
    return cfg
def get_quantization_config(self):
    """Return the normalized quantization config, or None if absent."""
    # Look at the root config first; multi-modal models may declare
    # quantization only on their nested text config.
    root_cfg = self._normalize_quantization_config(self.hf_config)
    if root_cfg is not None:
        return root_cfg
    text_config = getattr(self.hf_config, "text_config", None)
    if not text_config:
        return None
    return self._normalize_quantization_config(text_config)
def is_deepseek_mla(self) -> bool:
    """Whether this config describes a DeepSeek-style MLA model
    (i.e., a recognized model type with ``kv_lora_rank`` set)."""
    mla_model_types = (
        "AXK1",
        "deepseek_v2",
        "deepseek_v3",
        "deepseek_v32",
        "deepseek_mtp",
        "glm_moe_dsa",
        "glm4_moe_lite",
        "glm4_moe_lite_mtp",
        "kimi_k2",
        "kimi_linear",
        "longcat_flash",
        "pangu_ultra_moe",
        "pangu_ultra_moe_mtp",
        "bailing_hybrid",
    )
    model_type = getattr(self.hf_text_config, "model_type", None)
    if model_type is None:
        return False
    if model_type in mla_model_types:
        return self.hf_text_config.kv_lora_rank is not None
    if model_type == "eagle":
        # if the model is an EAGLE module, check for the
        # underlying architecture
        draft_type = self.hf_text_config.model.model_type
        if draft_type not in (
            "AXK1",
            "deepseek_v2",
            "deepseek_v3",
            "deepseek_v32",
            "deepseek_mtp",
        ):
            return False
        return self.hf_text_config.kv_lora_rank is not None
    return False
def derive_max_model_len_and_key(self) -> tuple[float, str | None]:
derived_max_model_len = float("inf")
possible_keys = [
# OPT
"max_position_embeddings",
# GPT-2
"n_positions",
# MPT
"max_seq_len",
# ChatGLM2
"seq_length",
# Command-R
"model_max_length",
# Whisper
"max_target_positions",
# Others
"max_sequence_length",
"max_seq_length",
"seq_len",
]
# Choose the smallest "max_length" from the possible keys
max_len_key = None
for key in possible_keys:
max_len = getattr(self.hf_text_config, key, None)
if max_len is not None:
if max_len < derived_max_model_len:
max_len_key = key
derived_max_model_len = min(derived_max_model_len, max_len)
# For Command-R / Cohere, Cohere2 / Aya Vision models
if tmp_max_len := getattr(self.hf_text_config, "model_max_length", None):
max_len_key = "model_max_length"
derived_max_model_len = tmp_max_len
return derived_max_model_len, max_len_key
def convert(self) -> ModelArchitectureConfig:
    """Assemble a ModelArchitectureConfig from the HF config via the
    per-attribute getter methods."""
    return ModelArchitectureConfig(
        architectures=self.get_architectures(),
        model_type=self.hf_config.model_type,
        text_model_type=getattr(self.hf_text_config, "model_type", None),
        hidden_size=self.get_hidden_size(),
        total_num_hidden_layers=self.get_num_hidden_layers(),
        total_num_attention_heads=self.get_total_num_attention_heads(),
        head_size=self.get_head_size(),
        vocab_size=self.get_vocab_size(),
        total_num_kv_heads=self.get_total_num_kv_heads(),
        num_experts=self.get_num_experts(),
        quantization_config=self.get_quantization_config(),
        is_deepseek_mla=self.is_deepseek_mla(),
        derived_max_model_len_and_key=self.derive_max_model_len_and_key(),
    )
class MambaModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Mamba-style configs (``mamba``, ``falcon_mamba``)."""

    def get_head_size(self) -> int:
        # No attention-head dimensions apply; report 0.
        return 0

    def get_total_num_kv_heads(self) -> int:
        # No KV heads apply; report 0.
        return 0
class TerratorchModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Terratorch / timm-wrapped configs (``timm_wrapper``)."""

    def get_head_size(self) -> int:
        # No attention-head dimensions apply; report 0.
        return 0

    def get_total_num_kv_heads(self) -> int:
        # No KV heads apply; report 0.
        return 0
class MedusaModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Medusa speculative-decoding head configs."""

    def get_head_size(self) -> int:
        # No attention-head dimensions apply; report 0.
        return 0

    def get_total_num_kv_heads(self) -> int:
        # No KV heads apply; report 0.
        return 0
class Zamba2ModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Zamba2 configs."""

    def get_head_size(self) -> int:
        # Zamba2 exposes the head size directly as `attention_head_dim`;
        # fall back to 0 when it is missing.
        return getattr(self.hf_text_config, "attention_head_dim", 0)
class FalconModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Falcon configs (including the RefinedWeb aliases)."""

    def get_total_num_kv_heads(self) -> int:
        # NOTE: for falcon, when new_decoder_architecture is True, the
        # multi_query flag is ignored and we use n_head_kv for the number of
        # KV heads.
        cfg = self.hf_text_config
        uses_new_decoder_arch = getattr(cfg, "new_decoder_architecture", False)
        uses_multi_query = getattr(cfg, "multi_query", False)
        if uses_multi_query and not uses_new_decoder_arch:
            # Multi-query attention, only one KV head.
            return 1
        # Use the base implementation which checks n_head_kv, num_kv_heads, etc.
        return super().get_total_num_kv_heads()
class MPTModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for MPT configs."""

    def get_total_num_kv_heads(self) -> int:
        # MPT stores attention settings in an `attn_config` dict; the
        # `kv_n_heads` entry is only present for grouped-query variants.
        attn_cfg = self.hf_text_config.attn_config
        if "kv_n_heads" not in attn_cfg:
            return self.hf_text_config.num_attention_heads
        return attn_cfg["kv_n_heads"]
class DbrxModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for DBRX configs."""

    def get_total_num_kv_heads(self) -> int:
        # DBRX exposes `attn_config` as an attribute-style object (unlike
        # MPT's dict); default to the full attention head count when
        # `kv_n_heads` is absent.
        return getattr(
            self.hf_text_config.attn_config,
            "kv_n_heads",
            self.hf_text_config.num_attention_heads,
        )
class NemotronNasModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Nemotron-NAS configs with heterogeneous block configs."""

    def get_total_num_kv_heads(self) -> int:
        # Derive the KV head count from the first block that actually
        # performs attention (skipping no-op attention blocks).
        for block_cfg in self.hf_text_config.block_configs:
            attention = block_cfg.attention
            if attention.no_op:
                continue
            return (
                self.hf_text_config.num_attention_heads
                // attention.n_heads_in_group
            )
        raise RuntimeError(
            "Could not determine the number of key-value attention heads "
            "from model configuration. "
            f"Architecture: {self.get_architectures()}. "
            "This usually indicates an unsupported model architecture or "
            "missing configuration. "
            "Please check if your model is supported at: "
            "https://docs.vllm.ai/en/latest/models/supported_models.html"
        )
class DeepSeekMTPModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for DeepSeek MTP (multi-token prediction) draft configs."""

    def get_num_hidden_layers(self) -> int:
        # MTP modules express their depth via `num_nextn_predict_layers`;
        # default to 0 when absent.
        return getattr(self.hf_text_config, "num_nextn_predict_layers", 0)
class MimoMTPModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for MiMo MTP draft configs."""

    def get_num_hidden_layers(self) -> int:
        # Depth comes from `num_nextn_predict_layers`; default 0 when absent.
        return getattr(self.hf_text_config, "num_nextn_predict_layers", 0)
class GLM4MoeMTPModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for GLM-4 MoE MTP draft configs (also used for glm_ocr_mtp)."""

    def get_num_hidden_layers(self) -> int:
        # Depth comes from `num_nextn_predict_layers`; default 0 when absent.
        return getattr(self.hf_text_config, "num_nextn_predict_layers", 0)
class ErnieMTPModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Ernie MTP draft configs."""

    def get_num_hidden_layers(self) -> int:
        # Depth comes from `num_nextn_predict_layers`; default 0 when absent.
        return getattr(self.hf_text_config, "num_nextn_predict_layers", 0)
class Qwen3NextMTPModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Qwen3-Next MTP draft configs."""

    def get_num_hidden_layers(self) -> int:
        # Depth comes from `num_nextn_predict_layers`; default 0 when absent.
        return getattr(self.hf_text_config, "num_nextn_predict_layers", 0)
class Qwen3_5MTPModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Qwen3.5 MTP draft configs."""

    def get_num_hidden_layers(self) -> int:
        # Unlike the other MTP convertors, Qwen3.5 uses the
        # `mtp_num_hidden_layers` key; default 0 when absent.
        return getattr(self.hf_text_config, "mtp_num_hidden_layers", 0)
class PanguUltraMoeMTPModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for Pangu Ultra MoE MTP draft configs."""

    def get_num_hidden_layers(self) -> int:
        # Depth comes from `num_nextn_predict_layers`; default 0 when absent.
        return getattr(self.hf_text_config, "num_nextn_predict_layers", 0)
class LongCatFlashMTPModelArchConfigConvertor(ModelArchConfigConvertorBase):
    """Convertor for LongCat-Flash MTP draft configs."""

    def get_num_hidden_layers(self) -> int:
        # NOTE: defaults to 1 (the other MTP convertors default to 0) —
        # presumably the LongCat-Flash draft always has at least one layer.
        return getattr(self.hf_text_config, "num_nextn_predict_layers", 1)
# hf_config.model_type -> convertor class
MODEL_ARCH_CONFIG_CONVERTORS = {
    # Attention-free SSM variants (0 head size / KV heads)
    "mamba": MambaModelArchConfigConvertor,
    "falcon_mamba": MambaModelArchConfigConvertor,
    "timm_wrapper": TerratorchModelArchConfigConvertor,
    "medusa": MedusaModelArchConfigConvertor,
    "zamba2": Zamba2ModelArchConfigConvertor,
    "mpt": MPTModelArchConfigConvertor,
    "dbrx": DbrxModelArchConfigConvertor,
    # Falcon, including its earlier "RefinedWeb" model-type names
    "falcon": FalconModelArchConfigConvertor,
    "RefinedWeb": FalconModelArchConfigConvertor,
    "RefinedWebModel": FalconModelArchConfigConvertor,
    "nemotron-nas": NemotronNasModelArchConfigConvertor,
    # MTP draft modules (layer count taken from the MTP-specific config keys)
    "deepseek_mtp": DeepSeekMTPModelArchConfigConvertor,
    "qwen3_next_mtp": Qwen3NextMTPModelArchConfigConvertor,
    "qwen3_5_mtp": Qwen3_5MTPModelArchConfigConvertor,
    "mimo_mtp": MimoMTPModelArchConfigConvertor,
    "glm4_moe_mtp": GLM4MoeMTPModelArchConfigConvertor,
    "glm_ocr_mtp": GLM4MoeMTPModelArchConfigConvertor,
    "ernie_mtp": ErnieMTPModelArchConfigConvertor,
    "pangu_ultra_moe_mtp": PanguUltraMoeMTPModelArchConfigConvertor,
    "longcat_flash_mtp": LongCatFlashMTPModelArchConfigConvertor,
}
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/model_arch_config_convertor.py",
"license": "Apache License 2.0",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/benchmarks/mm_processor.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
r"""Benchmark multimodal processor latency.
This benchmark measures the latency of the mm processor module
using multimodal prompts from datasets.
MM processor stats are automatically enabled.
Run:
vllm bench mm-processor \
--model <your_model> \
--dataset-name random-mm \
--num-prompts 10 \
"""
import argparse
import dataclasses
import json
import time
from collections import defaultdict
from datetime import datetime
from typing import TYPE_CHECKING, Any, Literal
import numpy as np
from vllm.benchmarks.datasets import (
MultiModalConversationDataset,
VisionArenaDataset,
)
from vllm.benchmarks.throughput import get_requests
from vllm.engine.arg_utils import EngineArgs
from vllm.utils.gc_utils import freeze_gc_heap
from vllm.utils.import_utils import PlaceholderModule
try:
import pandas as pd
except ImportError:
pd = PlaceholderModule("pandas")
if TYPE_CHECKING: # Avoid having to mock during docs build
from vllm.v1.engine.llm_engine import LLMEngine
else:
LLMEngine = object
def get_timing_stats_from_engine(llm_engine: LLMEngine) -> dict[str, dict[str, float]]:
    """
    Get all multimodal timing stats from the LLM engine.

    Collects both preprocessing stats (HF processor, hashing, cache lookup,
    prompt update) and encoder forward pass timing, merged by request_id.

    Args:
        llm_engine: The LLM engine (has input_processor and workers).

    Returns:
        Dictionary mapping request_id to merged stats dict containing
        both preprocessing and encoder timing metrics.

    Example:
        {
            'request-123': {
                'get_mm_hashes_secs': 0.02,
                'get_cache_missing_items_secs': 0.01,
                'apply_hf_processor_secs': 0.45,
                'merge_mm_kwargs_secs': 0.01,
                'apply_prompt_updates_secs': 0.03,
                'preprocessor_total_secs': 0.51,
                'encoder_forward_secs': 0.23,
                'num_encoder_calls': 1
            }
        }
    """
    # Stats are only recorded when the observability flag enables them.
    observability_config = llm_engine.vllm_config.observability_config
    if not observability_config or not observability_config.enable_mm_processor_stats:
        return {}

    renderer = llm_engine.renderer
    mm_processor_stats = renderer._mm_timing_registry.stat()

    # Gather encoder timing from every worker via collective RPC.
    encoder_stats = dict[str, dict[str, float]]()
    for worker_stats in llm_engine.collective_rpc("get_encoder_timing_stats"):
        if not worker_stats:
            continue
        for request_id, stats_dict in worker_stats.items():
            if request_id not in encoder_stats:
                encoder_stats[request_id] = dict(stats_dict)
            else:
                # Aggregate timing metrics across workers: take the max so
                # the slowest worker's value is reported.
                current_time = encoder_stats[request_id].get(
                    "encoder_forward_secs", 0.0
                )
                new_time = stats_dict.get("encoder_forward_secs", 0.0)
                encoder_stats[request_id]["encoder_forward_secs"] = max(
                    current_time, new_time
                )
                current_calls = encoder_stats[request_id].get("num_encoder_calls", 0)
                new_calls = stats_dict.get("num_encoder_calls", 0)
                encoder_stats[request_id]["num_encoder_calls"] = max(
                    current_calls, new_calls
                )

    # Merge encoder timing into the preprocessing stats per request.
    merged_stats = dict[str, dict[str, float]]()
    for request_id, prep_dict in mm_processor_stats.items():
        merged_stats[request_id] = dict(prep_dict)
    for request_id, enc_dict in encoder_stats.items():
        if request_id in merged_stats:
            merged_stats[request_id].update(enc_dict)
            continue
        # In V1 engine, the request_id in encoder_stats has a suffix
        # appended to the original request_id (which is used in
        # preprocessing_stats).
        # We try to strip the suffix to find the matching request.
        possible_original_id = request_id.rpartition("-")[0]
        if possible_original_id and possible_original_id in merged_stats:
            merged_stats[possible_original_id].update(enc_dict)
        else:
            merged_stats[request_id] = dict(enc_dict)
    return merged_stats
def collect_mm_processor_stats(llm_engine: LLMEngine) -> dict[str, list[float]]:
    """
    Collect multimodal processor timing stats.

    Returns a dictionary mapping stage names to lists of timing values (in seconds).
    """
    per_request_stats = get_timing_stats_from_engine(llm_engine)
    # Re-group the per-request stats by stage name.
    grouped = defaultdict[str, list[float]](list)
    for request_stats in per_request_stats.values():
        for stage_name, value in request_stats.items():
            grouped[stage_name].append(value)
    return grouped
def calculate_mm_processor_metrics(
    stats_by_stage: dict[str, list[float]],
    selected_percentiles: list[float],
    *,
    unit: Literal["us", "ms", "s"] = "ms",
) -> dict[str, dict[str, float]]:
    """
    Calculate aggregate metrics from stats by stage.

    Each ``*_secs`` stage is converted to *unit* and renamed accordingly;
    the ``num_encoder_calls`` stage is a count and is left unscaled.
    Empty stages yield all-zero metrics.
    """
    scale = {"us": 1000000, "ms": 1000, "s": 1}[unit]
    summary: dict[str, dict[str, float]] = {}
    for stage, samples in stats_by_stage.items():
        label = stage.replace("_secs", "_" + unit)
        if not samples:
            # No samples recorded for this stage: report zeros.
            summary[label] = {
                "mean": 0.0,
                "median": 0.0,
                "std": 0.0,
                **{f"p{p}": 0.0 for p in selected_percentiles},
            }
            continue
        if stage == "num_encoder_calls":
            # Counts are reported as-is.
            values = samples
        else:
            values = [sample * scale for sample in samples]
        summary[label] = {
            "mean": float(np.mean(values)),
            "median": float(np.median(values)),
            "std": float(np.std(values)),
            **{f"p{p}": float(np.percentile(values, p)) for p in selected_percentiles},
        }
    return summary
def validate_args(args):
    """
    Validate command-line arguments for mm_processor benchmark.

    Fills in defaults for optional attributes and rejects unsupported
    HuggingFace dataset selections.
    """
    if not getattr(args, "tokenizer", None):
        args.tokenizer = args.model
    # Default optional attributes that downstream code expects to exist.
    for optional_attr in ("dataset_path", "lora_path", "max_loras"):
        if not hasattr(args, optional_attr):
            setattr(args, optional_attr, None)
    if args.dataset_name != "hf":
        return
    if not args.dataset_path:
        raise ValueError(
            "--dataset-path is required when using --dataset-name hf. "
            "For multimodal benchmarking, specify a dataset like "
            "'lmarena-ai/VisionArena-Chat'."
        )
    supported_mm_datasets = (
        VisionArenaDataset.SUPPORTED_DATASET_PATHS.keys()
        | MultiModalConversationDataset.SUPPORTED_DATASET_PATHS
    )
    if args.dataset_path not in supported_mm_datasets:
        raise ValueError(
            f"{args.dataset_path} is not a supported multimodal dataset. "
            f"Supported multimodal datasets are: {sorted(supported_mm_datasets)}"
        )
def benchmark_multimodal_processor(
    args: argparse.Namespace,
) -> dict[str, Any]:
    """
    Run the multimodal processor benchmark.

    Builds an LLM from *args*, generates the requested prompts (plus
    optional warmup prompts), runs them through ``llm.chat``, and returns
    a dict with end-to-end latency and MM-processor timing metrics.
    """
    from vllm import LLM, SamplingParams

    validate_args(args)
    if args.seed is None:
        args.seed = 0
    engine_args = EngineArgs.from_cli_args(args)
    llm = LLM(**dataclasses.asdict(engine_args))
    tokenizer = llm.get_tokenizer()
    requests = get_requests(args, tokenizer)
    # Fail fast if any request could exceed the model's context window.
    assert all(
        llm.llm_engine.model_config.max_model_len
        >= (request.prompt_len + request.expected_output_len)
        for request in requests
    ), (
        "Please ensure that max_model_len is greater than the sum of "
        "prompt_len and expected_output_len for all requests."
    )
    prompts = [request.prompt for request in requests]
    expected_output_lens = [request.expected_output_len for request in requests]
    # Greedy sampling (temperature=0, n=1) keeps the measured runs deterministic.
    sampling_params = [
        SamplingParams(
            n=1,
            temperature=0.0,
            max_tokens=output_len,
            detokenize=True,
        )
        for output_len in expected_output_lens
    ]
    selected_percentiles = [
        float(p) for p in getattr(args, "metric_percentiles", "99").split(",")
    ]
    freeze_gc_heap()
    num_warmups = getattr(args, "num_warmups", 0)
    if num_warmups > 0:
        print(f"Processing {num_warmups} warmup requests...")
        # Create a temporary args object for warmup requests
        warmup_args = argparse.Namespace(**vars(args))
        warmup_args.num_prompts = num_warmups
        # Shift the seed so warmup prompts differ from measured prompts.
        warmup_args.seed += 1
        warmup_requests = get_requests(warmup_args, tokenizer)
        warmup_prompts = [req.prompt for req in warmup_requests]
        warmup_output_lens = [req.expected_output_len for req in warmup_requests]
        warmup_sampling_params = [
            SamplingParams(max_tokens=output_len) for output_len in warmup_output_lens
        ]
        llm.chat(
            warmup_prompts,
            warmup_sampling_params,
            use_tqdm=not getattr(args, "disable_tqdm", False),
        )
        # Clear stats from warmup requests
        collect_mm_processor_stats(llm.llm_engine)
    print(f"Processing {len(prompts)} requests...")
    start_time = time.perf_counter()
    outputs = llm.chat(
        prompts, sampling_params, use_tqdm=not getattr(args, "disable_tqdm", False)
    )
    end_time = time.perf_counter()
    total_time = end_time - start_time
    mm_stats_by_stage = collect_mm_processor_stats(llm.llm_engine)
    if not any(mm_stats_by_stage.values()):
        print(
            "\n⚠️ Warning: No MM processor stats found in registry.\n"
            " This may indicate that:\n"
            " - No multimodal requests were processed\n"
            " - Stats were already retrieved (registry is cleared after retrieval)\n"
        )
    mm_processor_metrics = calculate_mm_processor_metrics(
        mm_stats_by_stage, selected_percentiles
    )
    completed = len([o for o in outputs if o.finished])
    failed = len(outputs) - completed
    # Per-request end-to-end latencies in milliseconds.
    e2el_times = []
    for output in outputs:
        if not output.finished or output.metrics is None:
            continue
        metrics = output.metrics
        # Calculate E2E latency as: TTFT + (last_token_ts - first_token_ts)
        if (
            getattr(metrics, "first_token_latency", None) is not None
            and getattr(metrics, "last_token_ts", None) is not None
            and getattr(metrics, "first_token_ts", None) is not None
        ):
            ttft = metrics.first_token_latency
            # Decode time is the duration between the first and last token generation
            decode_time = max(0.0, metrics.last_token_ts - metrics.first_token_ts)
            e2el_times.append((ttft + decode_time) * 1000)
    if not e2el_times and completed > 0:
        print(
            "\n⚠️ Warning: Detailed end-to-end latency metrics not available.\n"
            " Falling back to average request latency "
            "(total_time / num_completed_requests).\n"
        )
        avg_time_per_request = total_time / completed
        e2el_times = [avg_time_per_request * 1000] * completed
    if e2el_times:
        mean_e2el_ms = float(np.mean(e2el_times))
        median_e2el_ms = float(np.median(e2el_times))
        std_e2el_ms = float(np.std(e2el_times))
        percentiles_e2el_ms = [
            (p, float(np.percentile(e2el_times, p))) for p in selected_percentiles
        ]
    else:
        # No latency data at all (e.g., every request failed).
        mean_e2el_ms = 0.0
        median_e2el_ms = 0.0
        std_e2el_ms = 0.0
        percentiles_e2el_ms = [(p, 0.0) for p in selected_percentiles]
    encoder_summary = {}
    if (
        "num_encoder_calls" in mm_stats_by_stage
        and mm_stats_by_stage["num_encoder_calls"]
    ):
        encoder_calls = mm_stats_by_stage["num_encoder_calls"]
        encoder_summary = {
            "total_encoder_calls": int(sum(encoder_calls)),
            "num_requests_with_encoder_calls": len(encoder_calls),
        }
    benchmark_result = {
        "completed": completed,
        "failed": failed,
        "mean_e2el_ms": mean_e2el_ms,
        "median_e2el_ms": median_e2el_ms,
        "std_e2el_ms": std_e2el_ms,
        "percentiles_e2el_ms": percentiles_e2el_ms,
        "mm_processor_stats": mm_processor_metrics,
        "encoder_summary": encoder_summary,
    }
    return benchmark_result
def add_cli_args(parser: argparse.ArgumentParser) -> None:
    """Add CLI arguments for the multimodal processor benchmark."""
    from vllm.engine.arg_utils import EngineArgs

    EngineArgs.add_cli_args(parser)
    # This benchmark depends on MM processor stats, so enable them by default.
    parser.set_defaults(enable_mm_processor_stats=True)
    parser.add_argument(
        "--dataset-name",
        type=str,
        default="random-mm",
        choices=["random-mm", "hf"],
        help="Name of the dataset to benchmark on. Defaults to 'random-mm'.",
    )
    parser.add_argument(
        "--num-prompts",
        type=int,
        default=10,
        help="Number of prompts to process.",
    )
    parser.add_argument(
        "--num-warmups",
        type=int,
        default=1,
        help="Number of warmup prompts to process.",
    )
    # Random multimodal dataset arguments.
    from vllm.benchmarks.datasets import (
        add_random_dataset_base_args,
        add_random_multimodal_dataset_args,
    )

    add_random_dataset_base_args(parser)
    add_random_multimodal_dataset_args(parser)
    # HuggingFace dataset arguments
    parser.add_argument(
        "--dataset-path",
        type=str,
        default=None,
        help="Path to the dataset file or HuggingFace dataset name "
        "(e.g., 'yale-nlp/MMVU', 'lmarena-ai/VisionArena-Chat').",
    )
    parser.add_argument(
        "--hf-subset",
        type=str,
        default=None,
        help="Subset of the HuggingFace dataset (optional).",
    )
    parser.add_argument(
        "--hf-split",
        type=str,
        default=None,
        help="Split of the HuggingFace dataset (e.g., 'train', 'test', 'validation').",
    )
    parser.add_argument(
        "--output-len",
        type=int,
        default=None,
        help="Output length for each request. "
        "Overrides the default output lengths from the dataset.",
    )
    parser.add_argument(
        "--output-json",
        type=str,
        default=None,
        help="Path to save the benchmark results in JSON format.",
    )
    parser.add_argument(
        "--metric-percentiles",
        type=str,
        default="99",
        help="Comma-separated list of percentiles to calculate (e.g., '50,90,99').",
    )
    parser.add_argument(
        "--disable-tqdm",
        action="store_true",
        help="Disable tqdm progress bar.",
    )
def main(args: argparse.Namespace) -> None:
    """Main entry point for the multimodal processor benchmark.

    Runs the benchmark, pretty-prints the metrics as tables, and
    optionally saves the raw results to JSON.
    """
    print("Starting multimodal processor benchmark...")
    result = benchmark_multimodal_processor(args)
    print("\n" + "=" * 80)
    print("Multimodal Processor Benchmark Results")
    print("=" * 80)
    if "mm_processor_stats" in result:
        print("\nMM Processor Metrics:")
        selected_percentiles = [
            float(p) for p in getattr(args, "metric_percentiles", "99").split(",")
        ]
        # Render the per-stage metrics as a table.
        mm_data = []
        for stage, metrics in result["mm_processor_stats"].items():
            row = {
                "Stage": stage,
                "Mean": f"{metrics['mean']:.2f}",
                "Median": f"{metrics['median']:.2f}",
                "Std": f"{metrics['std']:.2f}",
            }
            for p in selected_percentiles:
                row[f"P{p}"] = f"{metrics.get(f'p{p}', 0.0):.2f}"
            mm_data.append(row)
        mm_df = pd.DataFrame(mm_data)
        print(mm_df.to_string(index=False))
    if "encoder_summary" in result and result["encoder_summary"]:
        total_calls = result["encoder_summary"]["total_encoder_calls"]
        num_requests = result["encoder_summary"]["num_requests_with_encoder_calls"]
        print(
            f"\nSummary: {total_calls} total encoder calls "
            f"across {num_requests} requests."
        )
    if "mean_e2el_ms" in result:
        print("\nEnd-to-End Latency (ms):")
        selected_percentiles = [
            float(p) for p in getattr(args, "metric_percentiles", "99").split(",")
        ]
        e2el_data = [
            {"Metric": "Mean", "Value (ms)": f"{result['mean_e2el_ms']:.2f}"},
            {"Metric": "Median", "Value (ms)": f"{result['median_e2el_ms']:.2f}"},
            {"Metric": "Std", "Value (ms)": f"{result['std_e2el_ms']:.2f}"},
        ]
        for p in selected_percentiles:
            # percentiles_e2el_ms is a list of (percentile, value) pairs.
            percentile_value = next(
                (val for pct, val in result["percentiles_e2el_ms"] if pct == p),
                0.0,
            )
            e2el_data.append(
                {
                    "Metric": f"P{p}",
                    "Value (ms)": f"{percentile_value:.2f}",
                }
            )
        e2el_df = pd.DataFrame(e2el_data)
        print(e2el_df.to_string(index=False))
    if args.output_json:
        # Persist the results (with run config and timestamp) as JSON.
        result["config"] = {
            "model": args.model,
            "num_prompts": args.num_prompts,
            "input_len": getattr(args, "random_input_len", None),
            "output_len": getattr(args, "random_output_len", None),
        }
        result["timestamp"] = datetime.now().isoformat()
        with open(args.output_json, "w") as f:
            json.dump(result, f, indent=2)
        print(f"\nResults saved to {args.output_json}")
if __name__ == "__main__":
    # Standalone entry point mirroring the `vllm bench mm-processor` CLI.
    parser = argparse.ArgumentParser(description="Benchmark mm processor latency")
    add_cli_args(parser)
    args = parser.parse_args()
    main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/benchmarks/mm_processor.py",
"license": "Apache License 2.0",
"lines": 451,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/cli/benchmark/mm_processor.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
from vllm.benchmarks.mm_processor import add_cli_args, main
from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase
class BenchmarkMMProcessorSubcommand(BenchmarkSubcommandBase):
    """The `mm-processor` subcommand for `vllm bench`."""

    # Subcommand name and help text surfaced by the CLI.
    name = "mm-processor"
    help = "Benchmark multimodal processor latency across different configurations."

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
        # Delegate argument registration to the benchmark module.
        add_cli_args(parser)

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        # Delegate execution to the benchmark module's entry point.
        main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/cli/benchmark/mm_processor.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/glmasr.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable, Mapping, Sequence
from typing import Annotated, Any, Literal, TypeAlias
import numpy as np
import torch
import torch.nn as nn
from transformers import BatchFeature
from transformers.models.glmasr import GlmAsrConfig, GlmAsrProcessor
from transformers.models.whisper import WhisperFeatureExtractor
from vllm.config import ModelConfig, SpeechToTextConfig, VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.distributed.parallel_state import get_tensor_model_parallel_world_size
from vllm.inputs.data import PromptType, TokensPrompt
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.attention import MMEncoderAttention
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding.common import ApplyRotaryEmb
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import (
DictEmbeddingItems,
ModalityData,
ModalityDataItems,
MultiModalDataItems,
MultiModalDataParser,
)
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
PromptUpdate,
PromptUpdateDetails,
)
from vllm.sequence import IntermediateTensors
from vllm.tokenizers import cached_tokenizer_from_config
from vllm.transformers_utils.processor import cached_processor_from_config
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .glmasr_utils import (
DEFAULT_CONV_PARAMS,
DEFAULT_MAX_AUDIO_LEN_S,
DEFAULT_MERGE_FACTOR,
_flatten_audio_features_by_length,
_get_audio_output_lengths_for_tower,
_group_audio_embeddings,
_normalize_chunk_counts,
)
from .interfaces import (
MultiModalEmbeddings,
SupportsLoRA,
SupportsMultiModal,
SupportsPP,
SupportsTranscription,
)
from .utils import AutoWeightsLoader, init_vllm_registered_model, maybe_prefix
from .whisper import ISO639_1_SUPPORTED_LANGS
class GlmAsrEncoderRotaryEmbedding(nn.Module):
    """
    Rotary Position Embedding for the GLM-ASR encoder.

    Keeps only ``inv_freq`` as a (non-persistent) buffer and computes the
    frequency table lazily in ``forward``, so no work is wasted at init
    time and the result always lands on the right device.
    """

    def __init__(self, config) -> None:
        super().__init__()
        # Derive the per-head dimension, mirroring the transformers
        # implementation.
        fallback_head_dim = config.hidden_size // config.num_attention_heads
        head_dim = getattr(config, "head_dim", fallback_head_dim)

        # Handle rope_parameters if present (compatibility with newer
        # transformers configs that bundle RoPE settings in a dict).
        rope_params = getattr(config, "rope_parameters", None)
        if rope_params:
            base = rope_params.get("rope_theta", 10000.0)
            rotary_dim = int(head_dim * rope_params.get("partial_rotary_factor", 1.0))
            self.attention_scaling = rope_params.get("attention_scaling", 1.0)
        else:
            base = getattr(config, "rope_theta", 10000.0)
            rotary_dim = head_dim
            self.attention_scaling = 1.0

        self.dim = rotary_dim
        self.head_dim = head_dim
        # Cache only the inverse frequencies; cos/sin are derived on demand.
        exponents = torch.arange(0, rotary_dim, 2, dtype=torch.float) / rotary_dim
        self.register_buffer("inv_freq", 1.0 / (base**exponents), persistent=False)

    def forward(self, seq_len: int) -> torch.Tensor:
        """
        Compute rotary position frequencies for the given sequence length.

        Args:
            seq_len: The sequence length to compute embeddings for.

        Returns:
            Frequency tensor of shape ``[seq_len, dim/2]``; apply ``.cos()``
            and ``.sin()`` to obtain the rotary embedding components.
        """
        # Computed on inv_freq's device, so it is correct after `.to()`.
        positions = torch.arange(
            seq_len, device=self.inv_freq.device, dtype=self.inv_freq.dtype
        )
        return torch.outer(positions, self.inv_freq) * self.attention_scaling
class GlmAsrEncoderAttention(nn.Module):
"""
Optimized Multi-headed Grouped Query Attention for GLM-ASR encoder.
Uses vLLM's QKVParallelLinear for fused projections, ApplyRotaryEmb for
rotary position embeddings, and MMEncoderAttention for hardware-optimized
attention computation with automatic backend selection.
"""
def __init__(
self,
config,
quant_config: QuantizationConfig | None = None,
prefix: str = "",
):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.num_kv_heads = getattr(
config, "num_key_value_heads", config.num_attention_heads
)
self.head_dim = self.hidden_size // self.num_heads
self.tp_size = get_tensor_model_parallel_world_size()
self.num_heads_per_rank = self.num_heads // self.tp_size
self.num_kv_heads_per_rank = max(1, self.num_kv_heads // self.tp_size)
# Use QKVParallelLinear for fused QKV projection
# Note: GLM-ASR uses bias on Q and V, but not K
# For simplicity with QKVParallelLinear, we use bias=True for all
self.qkv_proj = QKVParallelLinear(
self.hidden_size,
self.head_dim,
self.num_heads,
self.num_kv_heads,
bias=True,
quant_config=quant_config,
prefix=f"{prefix}.qkv_proj",
)
self.o_proj = RowParallelLinear(
self.hidden_size,
self.hidden_size,
bias=True,
quant_config=quant_config,
prefix=f"{prefix}.o_proj",
)
# Use vLLM's ApplyRotaryEmb CustomOp
# enforce_enable=True ensures the op is always enabled (important for ViT)
rope_params = getattr(config, "rope_parameters", None)
if rope_params:
partial_rotary_factor = rope_params.get("partial_rotary_factor", 0.5)
else:
partial_rotary_factor = getattr(config, "partial_rotary_factor", 0.5)
self.rotary_dim = int(self.head_dim * partial_rotary_factor)
self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True)
# Use vLLM's MMEncoderAttention for hardware-optimized attention
# Automatically selects Flash Attention, SDPA, or Pallas based on device
self.attn = MMEncoderAttention(
num_heads=self.num_heads_per_rank,
head_size=self.head_dim,
scale=self.head_dim**-0.5,
num_kv_heads=self.num_kv_heads_per_rank,
prefix=f"{prefix}.attn",
)
def forward(
    self,
    hidden_states: torch.Tensor,
    rotary_pos_emb_cos: torch.Tensor,
    rotary_pos_emb_sin: torch.Tensor,
) -> torch.Tensor:
    """Run fused-QKV grouped-query attention over a padded batch.

    Args:
        hidden_states: [batch_size, seq_len, hidden_size]
        rotary_pos_emb_cos: [seq_len, rotary_dim/2] - cosine of rotary embeddings
        rotary_pos_emb_sin: [seq_len, rotary_dim/2] - sine of rotary embeddings
    Returns:
        [batch_size, seq_len, hidden_size]
    """
    batch_size, seq_len, _ = hidden_states.shape
    # QKV projection - fused for efficiency
    qkv, _ = self.qkv_proj(hidden_states)
    # Split into q, k, v (views into the fused projection output)
    q_size = self.num_heads_per_rank * self.head_dim
    kv_size = self.num_kv_heads_per_rank * self.head_dim
    q, k, v = qkv.split([q_size, kv_size, kv_size], dim=-1)
    # Reshape to [batch, seq, num_heads, head_dim] for ApplyRotaryEmb.
    # view() is legal here even though the split chunks are not globally
    # contiguous: only the last dim is being factored, and its elements
    # stay contiguous within each row.
    q = q.view(batch_size, seq_len, self.num_heads_per_rank, self.head_dim)
    k = k.view(batch_size, seq_len, self.num_kv_heads_per_rank, self.head_dim)
    v = v.view(batch_size, seq_len, self.num_kv_heads_per_rank, self.head_dim)
    # Apply rotary position embeddings using vLLM's ApplyRotaryEmb
    # ApplyRotaryEmb expects x: [batch, seq, heads, head_dim]
    # cos/sin: [seq_len, rotary_dim/2]
    # Partial rotary: only the first `rotary_dim` channels are rotated; the
    # in-place slice assignment writes through the views into the qkv buffer.
    q[..., : self.rotary_dim] = self.apply_rotary_emb(
        q[..., : self.rotary_dim], rotary_pos_emb_cos, rotary_pos_emb_sin
    )
    k[..., : self.rotary_dim] = self.apply_rotary_emb(
        k[..., : self.rotary_dim], rotary_pos_emb_cos, rotary_pos_emb_sin
    )
    # MMEncoderAttention expects [batch, seq, num_heads, head_dim]
    # It handles GQA internally via repeat_interleave
    attn_output = self.attn(q, k, v)
    # Reshape back to [batch, seq, hidden_size]
    attn_output = attn_output.view(batch_size, seq_len, -1)
    # Output projection
    output, _ = self.o_proj(attn_output)
    return output
class GlmAsrEncoderMLP(nn.Module):
    """Two-layer feed-forward block of the GLM-ASR encoder.

    Built from vLLM's column-/row-parallel linear layers so the wide
    intermediate projection is sharded across tensor-parallel ranks.
    """

    def __init__(
        self,
        config,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        # Up-projection: sharded along the output (intermediate) dimension.
        self.fc1 = ColumnParallelLinear(
            self.hidden_size,
            self.intermediate_size,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.fc1",
        )
        self.act_fn = get_act_fn(config.hidden_act)
        # Down-projection: sharded along the input dimension, all-reduced out.
        self.fc2 = RowParallelLinear(
            self.intermediate_size,
            self.hidden_size,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.fc2",
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Parallel linear layers return (output, bias-or-None); keep output.
        projected_up, _ = self.fc1(hidden_states)
        activated = self.act_fn(projected_up)
        projected_down, _ = self.fc2(activated)
        return projected_down
class GlmAsrEncoderLayer(nn.Module):
    """Single pre-norm transformer block (self-attention + MLP) for GLM-ASR.

    Each sub-block is wrapped as ``x + f(layernorm(x))``.
    """

    def __init__(
        self,
        config,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = GlmAsrEncoderAttention(
            config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        self.mlp = GlmAsrEncoderMLP(
            config,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )
        eps = getattr(config, "layer_norm_eps", 1e-5)
        self.input_layernorm = nn.LayerNorm(self.hidden_size, eps=eps)
        self.post_attention_layernorm = nn.LayerNorm(self.hidden_size, eps=eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        rotary_pos_emb_cos: torch.Tensor,
        rotary_pos_emb_sin: torch.Tensor,
    ) -> torch.Tensor:
        """Apply pre-norm attention and MLP, each with a residual add.

        Args:
            hidden_states: [batch_size, seq_len, hidden_size]
            rotary_pos_emb_cos: [seq_len, rotary_dim/2] rotary cosines
            rotary_pos_emb_sin: [seq_len, rotary_dim/2] rotary sines
        Returns:
            [batch_size, seq_len, hidden_size]
        """
        attn_out = self.self_attn(
            hidden_states=self.input_layernorm(hidden_states),
            rotary_pos_emb_cos=rotary_pos_emb_cos,
            rotary_pos_emb_sin=rotary_pos_emb_sin,
        )
        hidden_states = hidden_states + attn_out
        mlp_out = self.mlp(self.post_attention_layernorm(hidden_states))
        return hidden_states + mlp_out
class _GlmAsrEncoderOutput:
"""
Simple output container compatible with transformers' BaseModelOutput.
This lightweight container holds the encoder output and is compatible
with the transformers library's output format while being more efficient
than a full dataclass.
Attributes:
last_hidden_state: Final layer hidden states from the encoder.
Shape: [batch_size, seq_len, hidden_size]
"""
__slots__ = ("last_hidden_state",)
def __init__(self, last_hidden_state: torch.Tensor):
self.last_hidden_state = last_hidden_state
class GlmAsrEncoder(nn.Module):
    """
    Optimized GLM-ASR Audio Encoder with vLLM native implementation.
    This encoder processes audio features through convolutional layers
    followed by transformer layers with rotary position embeddings.
    Optimized for performance with:
    - QKVParallelLinear for fused attention projections
    - Tensor parallelism support via ColumnParallelLinear/RowParallelLinear
    - Quantization support
    - Flash Attention (SDPA)
    """

    # Mapping for weight loading: transformers uses separate q/k/v, we use fused qkv
    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
    }

    def __init__(
        self,
        config,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.config = config
        # Convolutional feature extraction layers.
        # conv1 preserves the frame count (stride 1); conv2 halves it (stride 2).
        self.conv1 = nn.Conv1d(
            config.num_mel_bins,
            config.hidden_size,
            kernel_size=3,
            padding=1,
        )
        self.conv2 = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=3,
            stride=2,
            padding=1,
        )
        # Transformer encoder layers
        self.layers = nn.ModuleList(
            [
                GlmAsrEncoderLayer(
                    config,
                    quant_config=quant_config,
                    prefix=f"{prefix}.layers.{layer_idx}",
                )
                for layer_idx in range(config.num_hidden_layers)
            ]
        )
        # Final layer norm
        layer_norm_eps = getattr(config, "layer_norm_eps", 1e-5)
        self.norm = nn.LayerNorm(config.hidden_size, eps=layer_norm_eps)
        # Rotary position embeddings (angle table; cos/sin taken per forward)
        self.rotary_emb = GlmAsrEncoderRotaryEmbedding(config)

    def _get_feat_extract_output_lengths(
        self, input_lengths: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Compute the output length after convolutions.
        Args:
            input_lengths: Input sequence lengths [batch_size]
        Returns:
            Tuple of (output after conv1, output after conv2)
        """
        # Standard Conv1d length formula: (L + 2p - k) // s + 1
        # Conv1: kernel=3, stride=1, padding=1
        output_lengths_conv1 = (input_lengths + 2 * 1 - 3) // 1 + 1
        # Conv2: kernel=3, stride=2, padding=1
        output_lengths_conv2 = (output_lengths_conv1 + 2 * 1 - 3) // 2 + 1
        return output_lengths_conv1, output_lengths_conv2

    def forward(self, input_features: torch.Tensor) -> _GlmAsrEncoderOutput:
        """
        Forward pass through the encoder.
        Args:
            input_features: [batch_size, num_mel_bins, seq_len]
        Returns:
            _GlmAsrEncoderOutput: Object with .last_hidden_state attribute \
            containing [batch_size, seq_len', hidden_size] where seq_len' \
            is the sequence length after convolutions
        """
        # Apply convolutional layers with GELU activation
        hidden_states = torch.nn.functional.gelu(self.conv1(input_features))
        hidden_states = torch.nn.functional.gelu(self.conv2(hidden_states))
        # Transpose to [batch_size, seq_len, hidden_size]
        hidden_states = hidden_states.transpose(1, 2)
        output_seq_len = hidden_states.shape[1]
        # Compute rotary position embeddings on-demand; cast cos/sin to the
        # activation dtype so ApplyRotaryEmb sees matching dtypes.
        rotary_pos_emb = self.rotary_emb(output_seq_len)
        rotary_pos_emb_cos = rotary_pos_emb.cos().to(dtype=hidden_states.dtype)
        rotary_pos_emb_sin = rotary_pos_emb.sin().to(dtype=hidden_states.dtype)
        # Apply transformer layers
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(
                hidden_states, rotary_pos_emb_cos, rotary_pos_emb_sin
            )
        # Final layer norm
        hidden_states = self.norm(hidden_states)
        # Return in a format compatible with transformers' BaseModelOutput
        return _GlmAsrEncoderOutput(last_hidden_state=hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Custom weight loading to handle q_proj/k_proj/v_proj -> qkv_proj mapping."""
        from vllm.model_executor.model_loader.weight_utils import default_weight_loader

        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            # Try the fused-qkv mapping first; the for/else falls through to
            # plain loading when no stacked mapping matches this weight name.
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                # Fused params carry a shard-aware loader; tell it which
                # q/k/v slice this checkpoint tensor fills.
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Default weight loading for non-stacked params
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class GlmAsrFeatureInputs(TensorSchema):
    """
    Raw mel-spectrogram inputs for the audio tower.

    Dimensions:
        - num_chunks: Number of audio chunks (flattened across all audios)
        - nmb: Number of mel bins
        - chunk_length: Frames per chunk (variable between chunks)
        - num_audios: Number of original audio files
    """

    type: Literal["audio_features"]
    # Mel features per chunk; chunk_length may differ between chunks.
    input_features: Annotated[
        torch.Tensor | list[torch.Tensor],
        TensorShape("num_chunks", "nmb", "chunk_length", dynamic_dims={"chunk_length"}),
    ]
    # Per-frame validity mask for each chunk (used to recover true lengths).
    feature_attention_mask: Annotated[
        torch.Tensor | list[torch.Tensor],
        TensorShape("num_chunks", "chunk_length", dynamic_dims={"chunk_length"}),
    ]
    # How many chunks each original audio contributed.
    chunk_counts: Annotated[
        torch.Tensor | list[torch.Tensor],
        TensorShape("num_audios"),
    ]
class GlmAsrEmbeddingInputs(TensorSchema):
    """
    Pre-computed audio embeddings, bypassing the audio tower.

    Dimensions:
        - bn: Batch size
        - naf: Number of audio features
        - hs: Hidden size (must match the hidden size of language model
          backbone)
    """

    type: Literal["audio_embeds"] = "audio_embeds"
    # One embedding tensor per audio; naf varies with audio duration.
    audio_embeds: Annotated[
        list[torch.Tensor],
        TensorShape("bn", "naf", "hs", dynamic_dims={"naf"}),
    ]
# Union of the two accepted audio input schemas: raw mel features
# (possibly chunked) or pre-computed audio embeddings.
GlmAsrInputs: TypeAlias = GlmAsrFeatureInputs | GlmAsrEmbeddingInputs
class GlmAsrMultiModalProjector(nn.Module):
    """Projects audio encoder outputs to language model hidden space.

    A two-layer MLP mapping the merged encoder features (audio
    ``intermediate_size``) into the text model's hidden size, using vLLM's
    parallel linear layers for tensor-parallelism support.

    Architecture:
        - Linear layer: intermediate_size -> hidden_size * 2
        - Activation function (e.g., GELU)
        - Linear layer: hidden_size * 2 -> hidden_size
    """

    def __init__(
        self,
        config: GlmAsrConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.linear_1 = ColumnParallelLinear(
            input_size=config.audio_config.intermediate_size,
            output_size=config.text_config.hidden_size * 2,
            quant_config=quant_config,
            prefix=f"{prefix}.linear_1",
        )
        self.act = get_act_fn(config.projector_hidden_act)
        self.linear_2 = RowParallelLinear(
            input_size=config.text_config.hidden_size * 2,
            output_size=config.text_config.hidden_size,
            quant_config=quant_config,
            prefix=f"{prefix}.linear_2",
        )

    def forward(self, audio_features: torch.Tensor) -> torch.Tensor:
        # Parallel layers return (output, bias-or-None); keep the output.
        expanded, _ = self.linear_1(audio_features)
        projected, _ = self.linear_2(self.act(expanded))
        return projected
def _glmasr_field_config(
    hf_inputs: Mapping[str, torch.Tensor],
) -> dict[str, MultiModalFieldConfig]:
    """
    Configure multimodal field batching strategy for GLM-ASR.

    When the processor emitted ``chunk_counts``, features and masks are
    flattened across chunks along dim 0 and must be sliced back per audio;
    otherwise every field is batched one-per-audio.

    Args:
        hf_inputs: Dictionary of preprocessed inputs from HuggingFace processor.
    Returns:
        Dictionary mapping field names to MultiModalFieldConfig objects \
        that specify batching behavior.
    """
    chunk_counts = hf_inputs.get("chunk_counts")
    if chunk_counts is None:
        # No chunking metadata: plain one-item-per-audio batching.
        return dict(
            audio_embeds=MultiModalFieldConfig.batched("audio"),
            input_features=MultiModalFieldConfig.batched("audio"),
            feature_attention_mask=MultiModalFieldConfig.batched("audio"),
            chunk_counts=MultiModalFieldConfig.batched("audio"),
        )
    # Chunked audio: slice the flattened chunk axis back using the counts.
    return dict(
        audio_embeds=MultiModalFieldConfig.batched("audio"),
        input_features=MultiModalFieldConfig.flat_from_sizes(
            "audio", chunk_counts, dim=0
        ),
        feature_attention_mask=MultiModalFieldConfig.flat_from_sizes(
            "audio", chunk_counts, dim=0
        ),
        chunk_counts=MultiModalFieldConfig.batched("audio"),
    )
class GlmAsrMultiModalDataParser(MultiModalDataParser):
    """Custom parser for GLM-ASR multimodal data.

    Dict-shaped audio data is treated as pre-computed embeddings
    (``audio_embeds``); anything else is raw audio handled by the base
    parser.
    """

    def _parse_audio_data(
        self,
        data: dict[str, torch.Tensor] | ModalityData[Any],
    ) -> ModalityDataItems[Any, Any] | None:
        # Raw audio (arrays, lists, ...) goes through the default path.
        if not isinstance(data, dict):
            return super()._parse_audio_data(data)
        # Dict input carries pre-computed embeddings.
        return DictEmbeddingItems(
            data,
            modality="audio",
            required_fields={"audio_embeds"},
            fields_factory=_glmasr_field_config,
        )
class GlmAsrProcessingInfo(BaseProcessingInfo):
    """
    Processing information provider for GLM-ASR model.
    Provides access to model configuration, processor, and feature extractor
    needed for audio preprocessing and multimodal integration.
    """

    def get_hf_config(self) -> GlmAsrConfig:
        return self.ctx.get_hf_config(GlmAsrConfig)

    def get_hf_processor(self, **kwargs: object) -> GlmAsrProcessor:
        return self.ctx.get_hf_processor(GlmAsrProcessor, **kwargs)

    def get_feature_extractor(self, **kwargs: object) -> WhisperFeatureExtractor:
        # GLM-ASR reuses the Whisper mel feature extractor bundled in its processor.
        return self.get_hf_processor(**kwargs).feature_extractor

    def get_data_parser(self):
        feature_extractor = self.get_feature_extractor()
        # Resample incoming audio to the extractor's sampling rate.
        return GlmAsrMultiModalDataParser(
            target_sr=feature_extractor.sampling_rate,
            expected_hidden_size=self._get_expected_hidden_size(),
        )

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # None = no fixed upper bound on audio items per prompt.
        return {"audio": None}
class GlmAsrDummyInputsBuilder(BaseDummyInputsBuilder[GlmAsrProcessingInfo]):
    """Builder for dummy inputs used in profiling and testing.

    Produces worst-case placeholder text and maximum-length dummy audio so
    memory profiling covers the largest supported multimodal request.
    """

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        # One placeholder token per requested audio item.
        processor = self.info.get_hf_processor()
        return processor.audio_token * mm_counts.get("audio", 0)

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        feature_extractor = self.info.get_feature_extractor()
        # Size each dummy clip at the processor's maximum audio duration.
        max_audio_len = getattr(
            self.info.get_hf_processor(), "max_audio_len", DEFAULT_MAX_AUDIO_LEN_S
        )
        num_samples = int(max_audio_len * feature_extractor.sampling_rate)
        return {
            "audio": self._get_dummy_audios(
                length=num_samples,
                num_audios=mm_counts.get("audio", 0),
                overrides=mm_options.get("audio"),
            )
        }
class GlmAsrMultiModalProcessor(BaseMultiModalProcessor["GlmAsrProcessingInfo"]):
    """
    GLM-ASR processor that inherits directly from BaseMultiModalProcessor
    for better performance and cleaner implementation.
    """

    def _calculate_chunk_counts(
        self,
        audio_list: list[Any],
        feature_extractor: WhisperFeatureExtractor,
        processor: GlmAsrProcessor,
    ) -> list[int]:
        """Number of fixed-size windows each audio is split into.

        Each audio is cut into windows of ``chunk_length`` seconds; the
        count is capped by the processor's maximum audio length.
        """
        sampling_rate = feature_extractor.sampling_rate
        chunk_length = feature_extractor.chunk_length
        max_audio_len = getattr(processor, "max_audio_len", DEFAULT_MAX_AUDIO_LEN_S)
        window_size = int(sampling_rate * chunk_length)
        max_windows = int(max_audio_len // chunk_length)
        chunk_counts = []
        for audio in audio_list:
            n_samples = len(audio) if isinstance(audio, list) else audio.shape[0]
            # Ceil division, but always at least one chunk.
            n_chunks = max(1, (n_samples + window_size - 1) // window_size)
            chunk_counts.append(min(n_chunks, max_windows))
        return chunk_counts

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: dict[str, object],
        mm_kwargs: Mapping[str, Any],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        """Run the HF processor, normalizing audio input and output keys.

        Returns a BatchFeature containing ``input_ids``, and — when audio is
        present — ``input_features``, ``feature_attention_mask`` and
        ``chunk_counts``.
        """
        # Normalize input: handle deprecated key and list conversion.
        if "audios" in mm_data:
            mm_data["audio"] = mm_data.pop("audios")
        audio = mm_data.get("audio", [])
        # BUGFIX: do not evaluate `audio` in a boolean context — a single
        # numpy waveform would raise "truth value of an array is ambiguous".
        if isinstance(audio, list):
            audio_list = audio
        elif audio is None or (hasattr(audio, "__len__") and len(audio) == 0):
            audio_list = []
        else:
            audio_list = [audio]
        # Early return for text-only.
        if not audio_list:
            prompt_ids = self.info.get_tokenizer().encode(prompt)
            prompt_ids = self._apply_hf_processor_tokens_only(prompt_ids)
            return BatchFeature(dict(input_ids=[prompt_ids]), tensor_type="pt")
        # Handle sampling_rate
        feature_extractor = self.info.get_feature_extractor(**mm_kwargs)
        mm_kwargs = dict(
            **mm_kwargs,
            sampling_rate=feature_extractor.sampling_rate,
        )
        # Call parent method
        outputs = super()._call_hf_processor(
            prompt=prompt,
            mm_data=mm_data,
            mm_kwargs=mm_kwargs,
            tok_kwargs=tok_kwargs,
        )
        # Postprocess: rename mask and add chunk counts.
        # Handle different key names from different transformers versions.
        if "input_features_mask" in outputs:
            outputs["feature_attention_mask"] = outputs.pop("input_features_mask")
        elif "input_features" in outputs:
            # If no mask is provided, create an all-ones mask matching the
            # feature sequence length (no padding assumed).
            input_features = outputs["input_features"]
            if isinstance(input_features, torch.Tensor):
                mask = torch.ones(
                    input_features.shape[0],
                    input_features.shape[-1],
                    dtype=torch.long,
                )
                outputs["feature_attention_mask"] = mask
        # Get processor for chunk counts calculation
        processor = self.info.get_hf_processor(**mm_kwargs)
        # Override chunk counts calculation with GLM-ASR specific logic
        chunk_counts = self._calculate_chunk_counts(
            audio_list, processor.feature_extractor, processor
        )
        outputs["chunk_counts"] = torch.tensor(chunk_counts, dtype=torch.long)
        return outputs

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return _glmasr_field_config(hf_inputs)

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Expand each audio placeholder into one token per audio frame."""
        processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        tokenizer = self.info.get_tokenizer()
        vocab = tokenizer.get_vocab()
        config = self.info.get_hf_config()
        audio_token = getattr(processor, "audio_token", "<|pad|>")
        audio_token_id = vocab.get(audio_token)
        if audio_token_id is None:
            audio_token_id = processor.audio_token_id
        merge_factor = getattr(config, "merge_factor", DEFAULT_MERGE_FACTOR)
        conv_params = getattr(config, "conv_params", DEFAULT_CONV_PARAMS)
        out_mm_data = out_mm_kwargs.get_data()
        feature_attention_mask = out_mm_data.get("feature_attention_mask")
        chunk_counts = out_mm_data.get("chunk_counts")
        # Pre-compute audio output lengths if feature_attention_mask is available
        audio_output_lengths: list[int] = []
        if feature_attention_mask is not None:
            # Local import: keeps the helpers out of this module's import cycle.
            from .glmasr_utils import (
                _as_list_chunk_counts,
                _get_audio_output_lengths_from_mask,
            )

            if chunk_counts is not None:
                # Chunked audio: sum per-chunk lengths so each audio gets
                # a single total placeholder count.
                start_idx = 0
                for count in _as_list_chunk_counts(chunk_counts):
                    end_idx = start_idx + count
                    mask = feature_attention_mask[start_idx:end_idx]
                    if isinstance(mask, list):
                        mask = torch.stack(mask)
                    lengths = _get_audio_output_lengths_from_mask(
                        mask, merge_factor, conv_params
                    )
                    audio_output_lengths.append(int(lengths.sum().item()))
                    start_idx = end_idx
            else:
                # Single chunk per audio
                for idx in range(len(feature_attention_mask)):
                    mask = feature_attention_mask[idx : idx + 1]
                    if isinstance(mask, list):
                        mask = torch.tensor(mask).unsqueeze(0)
                    lengths = _get_audio_output_lengths_from_mask(
                        mask, merge_factor, conv_params
                    )
                    audio_output_lengths.append(int(lengths.sum().item()))

        def get_replacement_glmasr(item_idx: int):
            # Use pre-computed lengths if available, otherwise fall back to
            # the shape of the supplied embeddings.
            if audio_output_lengths:
                num_features = audio_output_lengths[item_idx]
            else:
                audio_embeds = out_mm_data.get("audio_embeds")
                if audio_embeds is not None:
                    embed = audio_embeds[item_idx]
                    num_features = embed.shape[0]
                else:
                    raise ValueError(
                        "Either feature_attention_mask or audio_embeds must be provided"
                    )
            if num_features == 0:
                raise ValueError("Audio is too short")
            audio_tokens = [audio_token_id] * int(num_features)
            return PromptUpdateDetails.select_token_id(
                audio_tokens,
                embed_token_id=audio_token_id,
            )

        return [
            PromptReplacement(
                modality="audio",
                target=audio_token,
                replacement=get_replacement_glmasr,
            )
        ]
@MULTIMODAL_REGISTRY.register_processor(
    GlmAsrMultiModalProcessor,
    info=GlmAsrProcessingInfo,
    dummy_inputs=GlmAsrDummyInputsBuilder,
)
class GlmAsrForConditionalGeneration(
    nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA, SupportsTranscription
):
    """GLM-ASR model: audio tower -> projector -> Llama-style language model.

    Audio features are encoded by ``GlmAsrEncoder``, merged and projected
    into the text hidden space by ``GlmAsrMultiModalProjector``, and fed to
    the language model as embeddings at the ``<|pad|>`` placeholder
    positions.
    """

    # Languages accepted by the transcription/translation API.
    supported_languages = ISO639_1_SUPPORTED_LANGS

    # Fused-parameter mapping used by the default loader for the LM backbone.
    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
        "gate_up_proj": ["gate_proj", "up_proj"],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config
        self.quant_config = quant_config
        # NOTE(review): _mark_tower_model/_mark_language_model presumably tag
        # the submodules built inside them for per-component configuration —
        # confirm semantics against the Supports* mixins.
        with self._mark_tower_model(vllm_config, "audio"):
            self.audio_tower = GlmAsrEncoder(
                config.audio_config,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "audio_tower"),
            )
        self.multi_modal_projector = GlmAsrMultiModalProjector(
            config,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "multi_modal_projector"),
        )
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.text_config,
                prefix=maybe_prefix(prefix, "language_model"),
                architectures=["LlamaForCausalLM"],
            )
        # Delegate pipeline-parallel intermediate-tensor creation to the LM.
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Placeholder inserted into prompts for each audio item."""
        if modality.startswith("audio"):
            return "<|begin_of_audio|><|pad|><|end_of_audio|>"
        raise ValueError("Only audio modality is supported")

    def get_mm_mapping(self) -> MultiModelKeys:
        """Map component roles (LM / connector / tower) to module prefixes."""
        return MultiModelKeys.from_string_field(
            language_model="language_model.",
            connector="multi_modal_projector.",
            tower_model="audio_tower.",
        )

    def _parse_and_validate_audio_input(self, **kwargs: object) -> GlmAsrInputs | None:
        """Build a validated input schema from raw multimodal kwargs.

        Prefers pre-computed embeddings; returns None when no audio input
        is present at all.
        """
        audio_embeds = kwargs.pop("audio_embeds", None)
        if audio_embeds is not None:
            return GlmAsrEmbeddingInputs(type="audio_embeds", audio_embeds=audio_embeds)
        input_features = kwargs.pop("input_features", None)
        if input_features is None:
            return None
        return GlmAsrFeatureInputs(
            type="audio_features",
            input_features=input_features,
            feature_attention_mask=kwargs.pop("feature_attention_mask", None),
            chunk_counts=kwargs.pop("chunk_counts", None),
        )

    def _process_audio_input(
        self, audio_input: GlmAsrInputs
    ) -> torch.Tensor | tuple[torch.Tensor, ...]:
        """Encode audio chunks and return one embedding tensor per audio."""
        # Pre-computed embeddings skip the tower entirely.
        if audio_input["type"] == "audio_embeds":
            return tuple(audio_input["audio_embeds"])
        input_features = audio_input["input_features"]
        feature_attention_mask = audio_input["feature_attention_mask"]
        if isinstance(input_features, list):
            input_features = torch.cat(input_features, dim=0)
            feature_attention_mask = torch.cat(feature_attention_mask, dim=0)
        num_chunks = input_features.shape[0]
        chunk_counts = _normalize_chunk_counts(
            audio_input.get("chunk_counts"), num_chunks=num_chunks
        )
        # Convert input_features to model dtype (e.g., bfloat16) to match model weights
        input_features = input_features.to(dtype=self.audio_tower.conv1.weight.dtype)
        # audio_tower returns [batch_size, seq_len, hidden_size] where hidden_size=1280
        audio_hidden_states = self.audio_tower(input_features).last_hidden_state
        # GLM-ASR merges consecutive frames: 4 frames with hidden_size=1280
        # -> 1 frame with intermediate_size=5120
        hidden_size = self.config.audio_config.hidden_size
        intermediate_size = self.config.audio_config.intermediate_size
        merge_ratio = intermediate_size // hidden_size
        # Truncate sequence length to be divisible by merge_ratio
        seq_len = audio_hidden_states.shape[1]
        seq_len_truncated = (seq_len // merge_ratio) * merge_ratio
        if seq_len_truncated < seq_len:
            audio_hidden_states = audio_hidden_states[:, :seq_len_truncated, :]
        # Reshape to merge consecutive frames
        audio_hidden_states = audio_hidden_states.reshape(
            num_chunks,
            -1,
            intermediate_size,
        )
        audio_features = self.multi_modal_projector(audio_hidden_states)
        merge_factor = getattr(self.config, "merge_factor", DEFAULT_MERGE_FACTOR)
        conv_params = getattr(self.config, "conv_params", DEFAULT_CONV_PARAMS)
        # Valid (non-padded) token count per chunk after conv + merging.
        audio_output_lengths = _get_audio_output_lengths_for_tower(
            self.audio_tower,
            feature_attention_mask.sum(-1),
            merge_factor,
            conv_params,
        )
        # Drop padded positions, then split back into per-chunk tensors and
        # regroup the chunks belonging to the same original audio.
        masked_audio_features = _flatten_audio_features_by_length(
            audio_features, audio_output_lengths
        )
        chunk_embeddings = torch.split(
            masked_audio_features, audio_output_lengths.flatten().tolist()
        )
        return _group_audio_embeddings(chunk_embeddings, chunk_counts)

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
        """Return per-audio embedding tensors ([] when no audio present)."""
        audio_input = self._parse_and_validate_audio_input(**kwargs)
        if audio_input is None:
            return []
        masked_audio_features = self._process_audio_input(audio_input)
        return masked_audio_features

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        # Non-first pipeline stages receive intermediate tensors instead of
        # embeddings.
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model.model(
            input_ids,
            positions,
            intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        # NOTE(review): embed_positions is presumably an unused position
        # table present in checkpoints — confirm before relying on the skip.
        skip_prefixes = ["audio_tower.embed_positions"]
        loader = AutoWeightsLoader(self, skip_prefixes=skip_prefixes)
        return loader.load_weights(weights)

    @classmethod
    def _get_audio_token(cls, model_config: ModelConfig) -> str:
        """Get the audio token from processor.
        Similar to get_placeholder_str but returns single token.
        """
        processor = cached_processor_from_config(model_config)
        return getattr(processor, "audio_token", "<|pad|>")

    @classmethod
    def get_speech_to_text_config(
        cls, model_config: ModelConfig, task_type: str
    ) -> SpeechToTextConfig:
        """Expose max clip length and sample rate to the transcription API."""
        processor = cached_processor_from_config(model_config)
        feature_extractor = processor.feature_extractor
        max_audio_clip_s = getattr(processor, "max_audio_len", DEFAULT_MAX_AUDIO_LEN_S)
        return SpeechToTextConfig(
            max_audio_clip_s=max_audio_clip_s,
            sample_rate=feature_extractor.sampling_rate,
        )

    @classmethod
    def get_generation_prompt(
        cls,
        audio: np.ndarray,
        model_config: ModelConfig,
        stt_config: SpeechToTextConfig,
        language: str | None,
        task_type: Literal["transcribe", "translate"],
        request_prompt: str,
        to_language: str | None,
    ) -> PromptType:
        """Get the generation prompt to be used for transcription requests."""
        tokenizer = cached_tokenizer_from_config(model_config)
        audio_token = cls._get_audio_token(model_config)
        if task_type == "translate":
            # Fall back to the raw language code when it has no known name.
            full_lang_name_to = cls.supported_languages.get(to_language, to_language)
            user_content = f"{audio_token}translate the speech to {full_lang_name_to}"
        elif task_type == "transcribe":
            user_content = (
                f"{audio_token}can you transcribe the speech into a written format?"
            )
        else:
            raise ValueError(f"Unsupported task type {task_type}")
        messages = [{"role": "user", "content": user_content}]
        prompt = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        prompt_token_ids = tokenizer.encode(prompt)
        return TokensPrompt(
            prompt_token_ids=prompt_token_ids,
            multi_modal_data={"audio": audio},
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/glmasr.py",
"license": "Apache License 2.0",
"lines": 981,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/glmasr_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Sequence
from typing import cast
import torch
import torch.nn as nn
# Fallback maximum audio duration in seconds when the processor does not
# define `max_audio_len`.
DEFAULT_MAX_AUDIO_LEN_S = 655
# Number of consecutive encoder frames merged into one projector input.
DEFAULT_MERGE_FACTOR = 4
# Default convolution parameters: (padding, kernel_size, stride)
# These correspond to the two conv layers in GlmAsrEncoder
DEFAULT_CONV_PARAMS = [(1, 3, 1), (1, 3, 2)]
def _calculate_conv_output_length(
input_length: torch.Tensor, padding: int, kernel_size: int, stride: int
) -> torch.Tensor:
"""Calculate Conv1d output length using standard formula."""
# in sync with `hf_processor._get_audio_token_length`
return (input_length + 2 * padding - (kernel_size - 1) - 1) // stride + 1
def _as_list_chunk_counts(
chunk_counts: torch.Tensor | list[int] | list[torch.Tensor],
) -> list[int]:
if isinstance(chunk_counts, torch.Tensor):
return chunk_counts.tolist()
if chunk_counts and isinstance(chunk_counts[0], torch.Tensor):
tensor_counts = cast(list[torch.Tensor], chunk_counts)
return [int(c.item()) for c in tensor_counts]
return [int(c) for c in chunk_counts]
def _normalize_chunk_counts(
chunk_counts: torch.Tensor | list[int] | list[torch.Tensor] | None,
num_chunks: int,
) -> list[int]:
if chunk_counts is None:
return [1] * num_chunks
return _as_list_chunk_counts(chunk_counts)
def _get_audio_output_lengths_from_lengths(
audio_lengths: torch.Tensor,
merge_factor: int,
conv_params: list[tuple[int, int, int]],
) -> torch.Tensor:
for padding, kernel_size, stride in conv_params:
audio_lengths = _calculate_conv_output_length(
audio_lengths, padding, kernel_size, stride
)
return (audio_lengths - merge_factor) // merge_factor + 1
def _get_audio_output_lengths_from_mask(
    mask: torch.Tensor,
    merge_factor: int,
    conv_params: list[tuple[int, int, int]],
) -> torch.Tensor:
    """Like ``_get_audio_output_lengths_from_lengths`` but starting from a
    per-frame validity mask: valid length = number of nonzero mask entries."""
    valid_lengths = mask.sum(dim=-1)
    return _get_audio_output_lengths_from_lengths(
        valid_lengths, merge_factor, conv_params
    )
def _get_audio_output_lengths_for_tower(
audio_tower: nn.Module,
audio_lengths: torch.Tensor,
merge_factor: int,
conv_params: list[tuple[int, int, int]],
) -> torch.Tensor:
"""
Calculate the output lengths after audio processing.
The output length accounts for:
1. Convolution layers (downsampling)
2. Merge factor (further downsampling during projection)
Args:
audio_tower: The audio encoder module
audio_lengths: Input feature lengths [batch_size]
merge_factor: Factor for merging adjacent features
conv_params: List of (padding, kernel_size, stride) for each conv layer
Returns:
Output lengths after all processing [batch_size]
"""
# First, calculate the output length after convolutions
if hasattr(audio_tower, "_get_feat_extract_output_lengths"):
_, conv_output_lengths = audio_tower._get_feat_extract_output_lengths(
audio_lengths
)
else:
conv_output_lengths = audio_lengths
for padding, kernel_size, stride in conv_params:
conv_output_lengths = _calculate_conv_output_length(
conv_output_lengths, padding, kernel_size, stride
)
# Then, apply merge_factor to get final output length
# Formula: (conv_output_lengths - merge_factor) // merge_factor + 1
return (conv_output_lengths - merge_factor) // merge_factor + 1
def _flatten_audio_features_by_length(
audio_features: torch.Tensor,
audio_output_lengths: torch.Tensor,
) -> torch.Tensor:
num_chunks, max_audio_tokens, embed_dim = audio_features.shape
audio_output_lengths = audio_output_lengths.unsqueeze(1)
audio_features_mask = (
torch.arange(max_audio_tokens)
.expand(num_chunks, max_audio_tokens)
.to(audio_output_lengths.device)
< audio_output_lengths
)
return audio_features[audio_features_mask].view(-1, embed_dim)
def _group_audio_embeddings(
chunk_embeddings: Sequence[torch.Tensor],
chunk_counts: Sequence[int],
) -> tuple[torch.Tensor, ...]:
grouped_embeddings = []
current_idx = 0
for count in chunk_counts:
audio_chunks = chunk_embeddings[current_idx : current_idx + count]
grouped_embeddings.append(torch.cat(audio_chunks, dim=0))
current_idx += count
return tuple(grouped_embeddings)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/glmasr_utils.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/attention/backends/flash_attn_diffkv.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Attention layer with FlashAttention."""
import torch
from vllm.v1.attention.backend import AttentionType
from vllm.v1.attention.backends.fa_utils import is_flash_attn_varlen_func_available
from vllm.v1.attention.ops.triton_reshape_and_cache_flash import (
triton_reshape_and_cache_flash_diffkv,
)
if is_flash_attn_varlen_func_available():
from vllm.v1.attention.backends.fa_utils import flash_attn_varlen_func
from vllm.logger import init_logger
from vllm.v1.attention.backends.utils import get_kv_cache_layout
from .flash_attn import (
FlashAttentionBackend,
FlashAttentionImpl,
FlashAttentionMetadata,
cascade_attention,
)
logger = init_logger(__name__)
class FlashAttentionDiffKVBackend(FlashAttentionBackend):
    """FlashAttention backend where K and V heads have different sizes.

    The KV cache packs K and V together along the last dimension
    (``head_size + head_size_v``), so the cache shape differs from the
    parent backend while the `get_kv_cache_shape` interface is unchanged.
    """

    # Default V head size for this backend; overridable via set_head_size_v.
    head_size_v: int = 128

    @classmethod
    def set_head_size_v(cls, head_size_v: int) -> None:
        cls.head_size_v = head_size_v

    @staticmethod
    def get_name() -> str:
        return "FLASH_ATTN_DIFFKV"

    @staticmethod
    def get_impl_cls() -> type["FlashAttentionImpl"]:
        return FlashAttentionDiffKVImpl

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
        cache_dtype_str: str = "auto",
    ) -> tuple[int, ...]:
        """Return the cache shape; last dim packs K and V together."""
        if block_size % 16 != 0:
            raise ValueError("Block size must be a multiple of 16.")
        packed_head_size = head_size + FlashAttentionDiffKVBackend.head_size_v
        return (num_blocks, block_size, num_kv_heads, packed_head_size)

    @staticmethod
    def get_kv_cache_stride_order(
        include_num_layers_dimension: bool = False,
    ) -> tuple[int, ...]:
        """Permutation mapping `get_kv_cache_shape` to the desired memory layout."""
        layout = get_kv_cache_layout()
        if layout == "NHD":
            # With layers: (num_blocks, num_layers, block_size,
            # num_kv_heads, head_size + head_size_v)
            return (1, 0, 2, 3, 4) if include_num_layers_dimension else (0, 1, 2, 3)
        if layout == "HND":
            # With layers: (num_blocks, num_kv_heads, num_layers,
            # block_size, head_size + head_size_v)
            return (1, 3, 0, 2, 4) if include_num_layers_dimension else (0, 2, 1, 3)
        raise ValueError(f"Unknown cache layout format {layout}.")
class FlashAttentionDiffKVImpl(FlashAttentionImpl):
    """FlashAttention implementation for layers whose K and V head sizes differ.

    The KV cache packs K and V along the last dimension: the first
    `head_size` channels hold K and the remainder hold V (matching
    `FlashAttentionDiffKVBackend.get_kv_cache_shape`).
    """

    def forward(
        self,
        layer: torch.nn.Module,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: FlashAttentionMetadata,
        output: torch.Tensor | None = None,
        output_scale: torch.Tensor | None = None,
        output_block_scale: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Forward pass with FlashAttention.
        Args:
            query: shape = [num_tokens, num_heads, head_size]
            key: shape = [num_tokens, num_kv_heads, head_size]
            value: shape = [num_tokens, num_kv_heads, head_size_v]
            kv_cache: shape =
                [num_blocks, block_size, num_kv_heads, head_size + head_size_v]
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size_v]
        NOTE: FP8 quantization, flash-attn expect the size of
        {q,k,v}_descale to be (num_sequences, num_kv_heads).
        We use torch's .expand() to avoid duplicating values
        """
        assert output is not None, "Output tensor must be provided."
        assert self.vllm_flash_attn_version is not None, (
            "FlashAttention version not detected."
        )
        if output_scale is not None or output_block_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported for FlashAttentionImpl"
            )
        if attn_metadata is None:
            # Profiling run.
            return output.fill_(0)
        attn_type = self.attn_type
        # IMPORTANT!
        # NOTE(woosuk): With piece-wise CUDA graphs, this method is executed in
        # eager-mode PyTorch. Thus, we need to be careful about any CPU overhead
        # in this method. For example, `view` and `slice` (or `[:n]`) operations
        # are surprisingly slow even in the case they do not invoke any GPU ops.
        # Minimize the PyTorch ops in this method as much as possible.
        # Whenever making a change in this method, please benchmark the
        # performance to make sure it does not introduce any overhead.
        num_actual_tokens = attn_metadata.num_actual_tokens
        # Handle encoder attention differently - no KV cache needed
        if attn_type in (AttentionType.ENCODER_ONLY, AttentionType.ENCODER):
            # For encoder attention,
            # we use direct Q, K, V tensors without caching
            return self._forward_encoder_attention(
                query[:num_actual_tokens],
                key[:num_actual_tokens],
                value[:num_actual_tokens],
                output[:num_actual_tokens],
                attn_metadata,
                layer,
            )
        # For decoder and cross-attention, use KV cache as before
        # Different head_size for K and V
        # DiffKV: the packed last dim holds K in the first `head_size`
        # channels and V in the remaining `head_size_v` channels.
        key_cache = kv_cache[..., : self.head_size]
        value_cache = kv_cache[..., self.head_size :]
        # key and value may be None in the case of cross attention. They are
        # calculated once based on the output from the encoder and then cached
        # in KV cache.
        if (
            self.kv_sharing_target_layer_name is None
            and key is not None
            and value is not None
        ):
            # Reshape the input keys and values and store them in the cache.
            # Skip this if sharing KV cache with an earlier attention layer.
            # NOTE(woosuk): Here, key and value are padded while slot_mapping is
            # not padded. However, we don't need to do key[:num_actual_tokens]
            # and value[:num_actual_tokens] because the reshape_and_cache_flash
            # op uses the slot_mapping's shape to determine the number of
            # actual tokens.
            # kv_cache update for different head_size K and V
            triton_reshape_and_cache_flash_diffkv(
                key,
                value,
                kv_cache,
                attn_metadata.slot_mapping,
                self.kv_cache_dtype,
                layer._k_scale,
                layer._v_scale,
            )
        if self.kv_cache_dtype.startswith("fp8"):
            # queries are quantized in the attention layer
            # Reinterpret (zero-copy view) the cache halves as the fp8 dtype
            # that flash-attn expects.
            dtype = FlashAttentionBackend.get_fp8_dtype_for_flashattn(
                self.kv_cache_dtype
            )
            key_cache = key_cache.view(dtype)
            value_cache = value_cache.view(dtype)
        if not attn_metadata.use_cascade:
            cu_seqlens_q = attn_metadata.query_start_loc
            seqused_k = attn_metadata.seq_lens
            max_seqlen_q = attn_metadata.max_query_len
            max_seqlen_k = attn_metadata.max_seq_len
            block_table = attn_metadata.block_table
            scheduler_metadata = attn_metadata.scheduler_metadata
            descale_shape = (cu_seqlens_q.shape[0] - 1, self.num_kv_heads)
            if self.dcp_world_size > 1:
                # dcp_world_size > 1: delegate to the DCP-specific path.
                self._forward_with_dcp(
                    query[:num_actual_tokens],
                    key[:num_actual_tokens],
                    value[:num_actual_tokens],
                    key_cache,
                    value_cache,
                    output[:num_actual_tokens],
                    attn_metadata,
                    q_descale=layer._q_scale.expand(descale_shape),
                    k_descale=layer._k_scale.expand(descale_shape),
                    v_descale=layer._v_scale.expand(descale_shape),
                )
                return output
            else:
                sliding_window_size = (
                    list(self.sliding_window)
                    if self.sliding_window is not None
                    else None
                )
                flash_attn_varlen_func(
                    q=query[:num_actual_tokens],
                    k=key_cache,
                    v=value_cache,
                    out=output[:num_actual_tokens],
                    cu_seqlens_q=cu_seqlens_q,
                    max_seqlen_q=max_seqlen_q,
                    seqused_k=seqused_k,
                    max_seqlen_k=max_seqlen_k,
                    softmax_scale=self.scale,
                    causal=attn_metadata.causal,
                    alibi_slopes=self.alibi_slopes,
                    window_size=sliding_window_size,
                    block_table=block_table,
                    softcap=self.logits_soft_cap,
                    scheduler_metadata=scheduler_metadata,
                    fa_version=self.vllm_flash_attn_version,
                    q_descale=layer._q_scale.expand(descale_shape),
                    k_descale=layer._k_scale.expand(descale_shape),
                    v_descale=layer._v_scale.expand(descale_shape),
                    num_splits=attn_metadata.max_num_splits,
                    s_aux=self.sinks,
                )
                return output
        # Cascade attention (rare case).
        cascade_attention(
            output[:num_actual_tokens],
            query[:num_actual_tokens],
            key_cache,
            value_cache,
            cu_query_lens=attn_metadata.query_start_loc,
            max_query_len=attn_metadata.max_query_len,
            cu_prefix_query_lens=attn_metadata.cu_prefix_query_lens,
            prefix_kv_lens=attn_metadata.prefix_kv_lens,
            suffix_kv_lens=attn_metadata.suffix_kv_lens,
            max_kv_len=attn_metadata.max_seq_len,
            softmax_scale=self.scale,
            alibi_slopes=self.alibi_slopes,
            sliding_window=self.sliding_window,
            logits_soft_cap=self.logits_soft_cap,
            block_table=attn_metadata.block_table,
            common_prefix_len=attn_metadata.common_prefix_len,
            max_num_splits=attn_metadata.max_num_splits,
            fa_version=self.vllm_flash_attn_version,
            prefix_scheduler_metadata=attn_metadata.prefix_scheduler_metadata,
            suffix_scheduler_metadata=attn_metadata.scheduler_metadata,
            q_descale=layer._q_scale,
            k_descale=layer._k_scale,
            v_descale=layer._v_scale,
            s_aux=self.sinks,
        )
        return output
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/attention/backends/flash_attn_diffkv.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/serve/instrumentator/offline_docs.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Offline FastAPI documentation support for air-gapped environments."""
import pathlib
from fastapi import FastAPI
from fastapi.openapi.docs import (
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
from fastapi.staticfiles import StaticFiles
from vllm.logger import init_logger
logger = init_logger(__name__)
def attach_router(app: FastAPI) -> None:
    """Serve Swagger UI from vendored static assets when offline docs are enabled."""
    args = getattr(app.state, "args", None)
    offline_docs_enabled = args is not None and getattr(
        args, "enable_offline_docs", False
    )
    if not offline_docs_enabled:
        return

    assets_dir = pathlib.Path(__file__).parent / "static"
    if not assets_dir.exists():
        logger.warning(
            "Static directory not found at %s. Offline docs will not be available.",
            assets_dir,
        )
        return

    app.mount("/static", StaticFiles(directory=str(assets_dir)), name="static")

    @app.get("/docs", include_in_schema=False)
    async def custom_swagger_ui_html():
        # Swagger UI wired to the locally mounted JS/CSS bundles.
        return get_swagger_ui_html(
            openapi_url=app.openapi_url,
            title=app.title + " - Swagger UI",
            oauth2_redirect_url=app.swagger_ui_oauth2_redirect_url,
            swagger_js_url="/static/swagger-ui-bundle.js",
            swagger_css_url="/static/swagger-ui.css",
        )

    @app.get(app.swagger_ui_oauth2_redirect_url, include_in_schema=False)
    async def swagger_ui_redirect():
        return get_swagger_ui_oauth2_redirect_html()

    logger.info("Offline documentation enabled with vendored static assets")
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/serve/instrumentator/offline_docs.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/isaac.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from __future__ import annotations
import math
from collections.abc import Iterable, Iterator, Mapping, Sequence
from typing import Annotated, Any
import numpy as np
import PIL.Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from transformers.image_processing_utils import BatchFeature
from transformers.utils import TensorType
from typing_extensions import TypedDict, Unpack
from vllm.config import VllmConfig
from vllm.config.model import ModelConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.distributed import parallel_state
from vllm.distributed import utils as dist_utils
from vllm.model_executor.layers.attention import MMEncoderAttention
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
QKVParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
)
from vllm.model_executor.models.interfaces import (
MultiModalEmbeddings,
SupportsLoRA,
SupportsMRoPE,
SupportsMultiModal,
SupportsPP,
)
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.model_executor.models.siglip import SiglipMLP
from vllm.model_executor.models.utils import (
AutoWeightsLoader,
WeightsMapper,
init_vllm_registered_model,
maybe_prefix,
)
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFeatureSpec,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import ImageSize, MultiModalDataItems
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
PromptUpdate,
PromptUpdateDetails,
)
from vllm.sequence import IntermediateTensors
from vllm.tokenizers import get_tokenizer
from vllm.tokenizers.hf import get_cached_tokenizer
from vllm.transformers_utils.config import patch_rope_parameters
from vllm.transformers_utils.configs import (
IsaacConfig,
PixelShuffleSiglip2VisionConfig,
)
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .vision import is_vit_use_data_parallel
def create_cumulative_seq_lengths(
    seq_sizes: torch.Tensor, device: torch.device
) -> tuple[torch.Tensor, torch.Tensor]:
    """Build (cu_seqlens, max_seqlen) for variable-length attention."""
    num_seqs = len(seq_sizes)
    cu_seqlens = torch.zeros(num_seqs + 1, dtype=torch.int32, device=device)
    if num_seqs > 0:
        cu_seqlens[1:] = seq_sizes.cumsum(0)
        max_seqlen = seq_sizes.max()
    else:
        # Empty batch: cu_seqlens stays [0] and the max length is zero.
        max_seqlen = torch.tensor(0, dtype=torch.int32, device=device)
    return cu_seqlens, max_seqlen
class Siglip2VariableSequenceEmbeddings(nn.Module):
    """Patch + interpolated positional embeddings for variable-resolution input.

    Operates on packed sequences: patches from all images are concatenated
    along a single sequence dimension, with per-image grid shapes carried
    separately so positional embeddings can be resized per image.
    """

    def __init__(self, config: PixelShuffleSiglip2VisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.patch_size = config.patch_size
        # Linear projection over flattened (channels * patch * patch) pixels.
        self.patch_embedding = ReplicatedLinear(
            input_size=config.num_channels * self.patch_size * self.patch_size,
            output_size=self.embed_dim,
            return_bias=False,
        )
        self.num_patches = config.num_patches
        # The learned position table is treated as a square grid of side
        # sqrt(num_patches); assumes num_patches is a perfect square.
        self.position_embedding_size = int(self.num_patches**0.5)
        self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)

    def positional_embeddings(
        self, packed_seq_patches: tuple[torch.Tensor, torch.Tensor, torch.Tensor]
    ) -> torch.Tensor:
        """Interpolate the square position grid to each image's (H, W) and pack.

        Returns a (total_patches, embed_dim) tensor aligned with the packed
        patch sequence.
        """
        # Prepare positional embeddings grid: (1, embed_dim, h, w)
        positional_embeddings = (
            self.position_embedding.weight.reshape(
                self.position_embedding_size, self.position_embedding_size, -1
            )
            .permute(2, 0, 1)
            .unsqueeze(0)
        )
        _seq_patches, _seq_sizes, spatial_shapes = packed_seq_patches
        pos_embeds_list = []
        mode = "bilinear"
        align_corners = False
        antialias = True
        for spatial_shape in spatial_shapes:
            height, width = int(spatial_shape[0]), int(spatial_shape[1])
            # Guard to ensure height and width are positive for torch.compile
            if height > 0 and width > 0:
                resized_pos_embed = F.interpolate(
                    positional_embeddings,
                    size=(height, width),
                    mode=mode,
                    align_corners=align_corners,
                    antialias=antialias,
                )
                # Reshape from (1, embed_dim, height, width) to
                # (height*width, embed_dim)
                resized_pos_embed = resized_pos_embed.reshape(
                    self.embed_dim, height * width
                ).transpose(0, 1)
            else:
                # Fallback - should never happen in practice
                resized_pos_embed = positional_embeddings.reshape(
                    self.embed_dim,
                    self.position_embedding_size * self.position_embedding_size,
                ).transpose(0, 1)[: height * width]
            pos_embeds_list.append(resized_pos_embed)
        # Concatenate all positional embeddings along the sequence dimension
        pos_embeds = torch.cat(pos_embeds_list, dim=0)
        return pos_embeds

    def forward(
        self, packed_seq_patches: tuple[torch.Tensor, torch.Tensor, torch.Tensor]
    ):
        """Embed packed patches and add per-image interpolated position embeddings."""
        seq_patches, _seq_sizes, _spatial_shapes = packed_seq_patches
        target_weight = self.patch_embedding.weight
        # Match the projection weight's device/dtype before the matmul.
        seq_patches = seq_patches.to(
            device=target_weight.device, dtype=target_weight.dtype
        )
        patch_embeds = self.patch_embedding(seq_patches)
        pos_embeds = self.positional_embeddings(packed_seq_patches)
        # Flatten patch embeddings to match positional embeddings format
        if patch_embeds.dim() == 3:
            patch_embeds = patch_embeds.view(-1, patch_embeds.size(-1))
        # Add positional embeddings to patch embeddings
        embeddings = patch_embeds + pos_embeds
        return embeddings
def create_pixel_shuffle_index_map(
seq_sizes: torch.Tensor,
token_grids: torch.Tensor,
scale_factor: int = 1,
device: torch.device | None = None,
) -> torch.Tensor:
"""
Build a gather-index map that tells us, for every *output* token after
pixel-shuffle, which `scale_factor**2` *input* tokens are being merged.
Args
----
seq_sizes : (num_images,) - #patches in each image (row-major order)
token_grids : (num_images,2) - (height, width) for every image
scale_factor : spatial down-scale factor (≥2)
device : (optional) overrides `seq_sizes.device`
Returns
-------
gather_idx : (new_total_seq_len, scale_factor**2) int64 tensor.
gather_idx[i, j] is the *flat* index into the *original*
packed sequence for the j-th sub-patch that forms the
i-th output token.
"""
if device is None:
device = seq_sizes.device
r = int(scale_factor)
if r < 2:
raise ValueError("`scale_factor` must be ≥ 2")
# Safety: all spatial dims must be divisible by r
# Cannot run under torch compile fullgraph mode hence
if not torch.compiler.is_compiling() and not (
(token_grids[:, 0] % r == 0).all() and (token_grids[:, 1] % r == 0).all()
):
raise AssertionError(
"Every (H,W) in `token_grids` must be divisible by "
f"scale_factor={r}, got {token_grids.tolist()}"
)
gather_chunks: list[torch.Tensor] = []
tok_offset = 0
for seq_len, (h, w) in zip(seq_sizes.tolist(), token_grids.tolist(), strict=False):
# Build the (H, W) grid of flat indices for this image
grid = torch.arange(seq_len, device=device, dtype=torch.int64) + tok_offset
grid = grid.view(h, w) # (H, W)
# -------- identical ordering to your fixed-res routine --------
# Step 1: split width into blocks of r
grid = grid.view(h, w // r, r) # (H, W/r, r)
# Step 2: now split height into blocks of r
grid = grid.view(h // r, r, w // r, r) # (H/r, r, W/r, r)
# Step 3: final permutation to (H/r, W/r, r, r)
grid = grid.permute(0, 2, 1, 3).contiguous() # (H/r, W/r, r, r)
# Step 4: each (r, r) block forms one output token
gather_chunks.append(grid.reshape(-1, r * r)) # (H*W / r², r²)
tok_offset += seq_len
# Concatenate over all images in the packed batch
gather_idx = torch.cat(gather_chunks, dim=0) # (Σ_i HᵢWᵢ/r², r²)
return gather_idx
def pixel_shuffle_varlen(
    x: torch.Tensor,
    token_grids: torch.Tensor,
    scale_factor: int = 1,
) -> torch.Tensor:
    r"""Pixel-shuffle a packed vision sequence without unpacking per image.

    Args:
        x: Concatenated vision embeddings, `(seq_len, hidden)` or
            `(1, seq_len, hidden)`.
        token_grids: `(num_images, 2)` integer tensor of per-image
            `(height, width)` patch grids covering segments of `x`.
        scale_factor: Pixel-shuffle down-scale factor; values > 1 merge
            `scale_factor**2` neighboring patches into one channel group.

    Returns:
        Shuffled embeddings, `(new_seq, hidden * scale_factor**2)` — with the
        singleton batch dimension restored if the input carried one.

    Raises:
        AssertionError: If a batch dimension with size != 1 is provided.
    """
    had_batch_dim = x.dim() == 3
    if had_batch_dim and x.size(0) != 1:
        raise AssertionError("Packed sequence is expected to have batch_size == 1")
    flat = x.squeeze(0) if had_batch_dim else x  # (seq, embed)
    embed_dim = flat.size(-1)
    r = int(scale_factor)
    # Per-image patch counts follow directly from the grids.
    seq_sizes = torch.prod(token_grids, dim=-1)
    gather_idx = create_pixel_shuffle_index_map(
        seq_sizes=seq_sizes,
        token_grids=token_grids,
        scale_factor=r,
        device=flat.device,
    )  # (new_seq, r²)
    # Fancy indexing keeps gradients; fold the r² group into channels.
    merged = flat[gather_idx].reshape(-1, embed_dim * r * r)
    return merged.unsqueeze(0) if had_batch_dim else merged
# ============================================================================
# Configuration
# ============================================================================
# Hard cap on input resolution; extract_image_pil raises above this.
MAX_PIXELS = 60_000_000  # 60-megapixel ceiling ≈ 8200 × 7300 px
# Vision preprocessing constants: per-channel whitening stats and the factor
# that rescales 8-bit pixel values into [0, 1] (used by prepare_image_tensor).
VISION_MEAN = (0.5, 0.5, 0.5)
VISION_STD = (0.5, 0.5, 0.5)
VISION_SCALE = 1 / 255
def _make_writeable(arr: np.ndarray) -> np.ndarray:
"""Return *arr* itself if it is already writeable, otherwise try to flip the
write flag in-place and finally fall back to `arr.copy()`.
This guarantees the buffer handed to `torch.from_numpy()` is always
writeable, silencing the PyTorch warning about undefined behaviour.
"""
if arr.flags.writeable:
return arr
# First, try the cheap path — in-place flag toggle (works for mmap'd arrays
# and some shared memory buffers):
try:
arr.setflags(write=True)
return arr # success: no data copy
except ValueError:
# Buffer is inherently read-only (e.g. backed by PyAV / PIL): make copy
return arr.copy()
def extract_image_pil(image: PIL.Image.Image) -> torch.Tensor | None:
    """Convert a PIL image to an RGB uint8 tensor, enforcing the pixel budget."""
    if image.width * image.height > MAX_PIXELS:
        raise ValueError(
            f"Image (w={image.width}, h={image.height}) > MAX=`{MAX_PIXELS}`"
        )
    rgb = image if image.mode == "RGB" else image.convert("RGB")
    # Ensure the numpy view is writeable before handing it to torch.
    return torch.from_numpy(_make_writeable(np.asarray(rgb)))
def get_image_size_for_max_num_patches(
image_height: int,
image_width: int,
patch_size: int,
max_num_patches: int,
min_num_patches: int | None = None,
eps: float = 1e-5,
pixel_shuffle_scale: int = 1,
) -> tuple[int, int]:
r"""Compute a target resolution whose patch grid satisfies patching parametrization.
Args:
image_height (`int`):
Height in pixels of the source image prior to any resizing.
image_width (`int`):
Width in pixels of the source image prior to any resizing.
patch_size (`int`):
Size of the square patch used by the vision encoder.
max_num_patches (`int`):
Upper bound on `(height / patch_size) * (width / patch_size)` after
resizing.
min_num_patches (`int`, *optional*):
Lower bound on the number of patches. When provided the image will
be scaled up if necessary.
eps (`float`, *optional*, defaults to 1e-5):
Convergence tolerance for the internal binary search to determine
the target dimensions.
pixel_shuffle_scale (`int`, *optional*, defaults to 1):
Additional stride multiplier applied when pixel shuffle later
reduces spatial resolution.
Returns:
`tuple[int, int]`: Height and width (in pixels) that are multiples of
`patch_size * pixel_shuffle_scale` and respect both the maximum and
optional minimum patch-count constraints.
"""
def get_scaled_image_size(scale, original_size, patch_size, pixel_shuffle_scale):
scaled_size = scale * original_size
divisor = patch_size * pixel_shuffle_scale
scaled_size = math.ceil(scaled_size / divisor) * divisor
scaled_size = max(divisor, scaled_size)
return int(scaled_size)
# Ensure divisibility
divisor = patch_size * pixel_shuffle_scale
adjusted_height = math.ceil(image_height / divisor) * divisor
adjusted_height = max(divisor, adjusted_height)
adjusted_width = math.ceil(image_width / divisor) * divisor
adjusted_width = max(divisor, adjusted_width)
num_patches = (adjusted_height / patch_size) * (adjusted_width / patch_size)
if min_num_patches is not None and num_patches < min_num_patches:
# Scale up
scale_min, scale_max = 1.0, 100.0
while (scale_max - scale_min) >= eps:
scale = (scale_min + scale_max) / 2
target_height = get_scaled_image_size(
scale, image_height, patch_size, pixel_shuffle_scale
)
target_width = get_scaled_image_size(
scale, image_width, patch_size, pixel_shuffle_scale
)
num_patches = (target_height / patch_size) * (target_width / patch_size)
if num_patches >= min_num_patches:
scale_max = scale
else:
scale_min = scale
scale = scale_max
target_height = get_scaled_image_size(
scale, image_height, patch_size, pixel_shuffle_scale
)
target_width = get_scaled_image_size(
scale, image_width, patch_size, pixel_shuffle_scale
)
return target_height, target_width
elif num_patches <= max_num_patches:
return adjusted_height, adjusted_width
else:
# Scale down
scale_min, scale_max = eps / 10, 1.0
while (scale_max - scale_min) >= eps:
scale = (scale_min + scale_max) / 2
target_height = get_scaled_image_size(
scale, image_height, patch_size, pixel_shuffle_scale
)
target_width = get_scaled_image_size(
scale, image_width, patch_size, pixel_shuffle_scale
)
num_patches = (target_height / patch_size) * (target_width / patch_size)
if num_patches <= max_num_patches:
scale_min = scale
else:
scale_max = scale
scale = scale_min
target_height = get_scaled_image_size(
scale, image_height, patch_size, pixel_shuffle_scale
)
target_width = get_scaled_image_size(
scale, image_width, patch_size, pixel_shuffle_scale
)
return target_height, target_width
# Broadcastable (1, 1, 1, C) views of the normalization stats, built once at
# import so per-call normalization avoids reconstructing them.
_MEAN_TENSOR = torch.tensor(VISION_MEAN, dtype=torch.float32).view(1, 1, 1, -1)
_STD_TENSOR = torch.tensor(VISION_STD, dtype=torch.float32).view(1, 1, 1, -1)
def _resolve_vision_token_id(model_config: ModelConfig, vision_token: str) -> int:
    """Look up the token id of *vision_token* with the model's tokenizer."""
    tokenizer = get_cached_tokenizer(
        get_tokenizer(
            model_config.tokenizer or model_config.model,
            tokenizer_mode=model_config.tokenizer_mode,
            trust_remote_code=model_config.trust_remote_code,
            revision=model_config.tokenizer_revision or model_config.revision,
        )
    )
    # The vision token is expected to encode to a single id; take the first.
    token_ids = tokenizer.encode(vision_token, add_special_tokens=False)
    return token_ids[0]
def prepare_image_tensor(
    image: torch.Tensor,
    scale: float = VISION_SCALE,
) -> torch.Tensor:
    r"""Rescale and whiten RGB images prior to patch extraction.

    Args:
        image: Tensor of shape `(..., height, width, 3)` holding RGB values;
            converted to float32 if not already floating point.
        scale: Scalar multiplier applied before normalization.

    Returns:
        Normalized float32 tensor with the same shape as the input.
    """
    if not torch.is_floating_point(image):
        image = image.float()
    # Move the precomputed broadcastable stats to the image's device.
    mean = _MEAN_TENSOR.to(image.device)
    std = _STD_TENSOR.to(image.device)
    return (image * scale - mean) / std
def patchify_vision(image: torch.Tensor, patch_size: int) -> torch.Tensor:
    r"""Convert normalized images into flattened ViT-style patches.

    Args:
        image: Tensor of shape `(num_images, height, width, channels)`.
        patch_size: Edge length of the square patches.

    Returns:
        `(num_images, height // patch_size, width // patch_size,
        channels * patch_size**2)` tensor where each position stores the
        flattened pixels of that patch.

    Raises:
        ValueError: If `height` or `width` is not divisible by `patch_size`.
    """
    num_images, height, width, channels = image.shape
    if height % patch_size != 0 or width % patch_size != 0:
        raise ValueError(
            "Dimensions of images "
            f"{image.shape} are not divisible by patch_size={patch_size}."
        )
    grid_h = height // patch_size
    grid_w = width // patch_size
    blocked = image.reshape(
        num_images, grid_h, patch_size, grid_w, patch_size, channels
    )
    # Bring the two intra-patch axes next to channels, then flatten them.
    blocked = blocked.permute(0, 1, 3, 2, 4, 5)
    return blocked.reshape(
        num_images, grid_h, grid_w, channels * patch_size * patch_size
    )
def process_vision_for_patches(
    images: torch.Tensor,
    patch_size: int,
    max_num_patches: int,
    min_num_patches: int | None = None,
    pixel_shuffle_scale: int = 1,
) -> tuple[torch.Tensor, list[int]]:
    r"""Resize, normalize, and patchify RGB images for the vision encoder.

    Args:
        images: `(height, width, channels)` single image or
            `(num_images, height, width, channels)` batch, RGB.
        patch_size: Edge length of square patches; controls the resize grid.
        max_num_patches: Maximum number of patches allowed after resizing.
        min_num_patches: Optional minimum; images are upscaled to satisfy it.
        pixel_shuffle_scale: Pixel-shuffle factor that constrains the
            target grid.

    Returns:
        `(patches, dims_virtual)` where `patches` has shape
        `(num_images, target_h / patch_size, target_w / patch_size,
        channels * patch_size**2)` and `dims_virtual` encodes effective
        `(images, height, width)` after optional pixel shuffling.
    """
    # Promote a single image to a batch of one.
    batched = images.unsqueeze(0) if images.dim() == 3 else images
    # (N, H, W, C) -> (N, C, H, W) for F.interpolate.
    batched = batched.permute(0, 3, 1, 2)
    _, _, src_h, src_w = batched.shape
    dst_h, dst_w = get_image_size_for_max_num_patches(
        src_h,
        src_w,
        patch_size,
        max_num_patches,
        min_num_patches=min_num_patches,
        pixel_shuffle_scale=pixel_shuffle_scale,
    )
    resized = F.interpolate(
        batched,
        size=(dst_h, dst_w),
        mode="bilinear",
        align_corners=False,
    )
    # Channel-last again for normalization and patch extraction.
    normalized = prepare_image_tensor(resized.permute(0, 2, 3, 1))
    patches = patchify_vision(normalized, patch_size=patch_size)
    _, h_patches, w_patches, _ = patches.shape
    # Virtual dims shrink by the pixel-shuffle factor when it is active.
    if pixel_shuffle_scale == 1:
        dims_virtual = [1, h_patches, w_patches]
    else:
        dims_virtual = [
            1,
            h_patches // pixel_shuffle_scale,
            w_patches // pixel_shuffle_scale,
        ]
    return patches, dims_virtual
class IsaacImageProcessorKwargs(TypedDict, total=False):
    """Optional per-call overrides accepted by `IsaacImageProcessor.preprocess`."""

    # All keys are optional (total=False); absent keys fall back to the
    # processor's configured values.
    patch_size: int
    max_num_patches: int
    min_num_patches: int
    pixel_shuffle_scale: int
class IsaacImageProcessor:
    """Image preprocessor producing packed patch sequences for Isaac.

    Emits the `pixel_values` / `image_grid_thw` inputs consumed by the
    vision tower.
    """

    # Class-level defaults; instances may override them via the kwargs dict.
    patch_size = 16
    max_num_patches = 6144
    min_num_patches = 256
    pixel_shuffle_scale = 2
    valid_kwargs = IsaacImageProcessorKwargs
    model_input_names = ["pixel_values", "image_grid_thw"]

    def __init__(self, kwargs):
        self.patch_size = kwargs.pop("patch_size", self.patch_size)
        self.vision_max_num_patches = kwargs.pop(
            "vision_max_num_patches", self.max_num_patches
        )
        self.vision_min_num_patches = kwargs.pop(
            "vision_min_num_patches", self.min_num_patches
        )
        # Fall back to the class attribute like the fields above (previously
        # a hard-coded literal 2, which would silently diverge if the class
        # default ever changed).
        self.pixel_shuffle_scale = kwargs.pop(
            "pixel_shuffle_scale", self.pixel_shuffle_scale
        )

    def preprocess(
        self,
        images: list[torch.Tensor],
        return_tensors: str | TensorType | None,
        **kwargs: Unpack[IsaacImageProcessorKwargs],
    ) -> BatchFeature:
        """Preprocess images into a format compatible with vLLM input processing.

        Returns a `BatchFeature` with flattened `pixel_values`
        (`[total_patches, patch_dim]`) and `image_grid_thw`
        (`[num_images, 3]`, real `(t, h, w)` patch grids).
        """
        all_pixel_values: list[torch.Tensor] = []
        all_image_grids: list[torch.Tensor] = []
        for image in images:
            image_tensor = extract_image_pil(image)
            patches, _dims_virtual = process_vision_for_patches(
                image_tensor,
                patch_size=self.patch_size,
                max_num_patches=self.vision_max_num_patches,
                min_num_patches=self.vision_min_num_patches,
                pixel_shuffle_scale=self.pixel_shuffle_scale,
            )
            # Isaac packs a dummy temporal dim for images
            patches = patches.unsqueeze(1)  # [N, T=1, Hp, Wp, D]
            hp, wp, dim = patches.shape[-3], patches.shape[-2], patches.shape[-1]
            current_num_patches = hp * wp
            pixel_values = patches.reshape(current_num_patches, dim)  # [N_tokens, D]
            # Use real patch dimensions for image_grid_thw, not virtual dimensions
            # This ensures the vision model receives correct grid info for pixel shuffle
            dims_real = [1, hp, wp]  # Real patch dimensions
            image_grid_thw = torch.tensor(dims_real).unsqueeze(0)
            all_pixel_values.append(pixel_values)
            all_image_grids.append(image_grid_thw)
        if all_pixel_values:
            final_pixel_values = torch.cat(all_pixel_values, dim=0)
            final_image_grids = torch.cat(all_image_grids, dim=0)
        else:
            # No images: keep the expected rank with empty tensors.
            final_pixel_values = torch.empty(0, 0)
            final_image_grids = torch.empty(0, 3)
        return BatchFeature(
            data={
                "pixel_values": final_pixel_values,
                "image_grid_thw": final_image_grids,
            },
            tensor_type=return_tensors,
        )
class IsaacProcessor:
    """Processor wrapper (tokenizer + IsaacImageProcessor).

    Expands each ``<image>`` occurrence in the prompt into the number of
    ``<|image_pad|>`` tokens produced for the corresponding image.
    """
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        self.image_token = kwargs.pop("image_token", "<image>")
        self.image_processor = image_processor or IsaacImageProcessor(kwargs)
        self.tokenizer = tokenizer
    def __call__(self, text=None, images=None, **kwargs) -> BatchFeature:
        outputs = {}
        if images is not None:
            image_features = self.image_processor.preprocess(images, **kwargs)
            image_grid_thw = image_features["image_grid_thw"]
            outputs.update(image_features)
        if text is not None:
            prompts = [text] if not isinstance(text, list) else list(text)
            merge_length = self.image_processor.pixel_shuffle_scale**2
            image_idx = 0
            expanded = []
            for prompt in prompts:
                # Each <image> consumes the next image's grid, in order.
                while self.image_token in prompt:
                    pad_count = image_grid_thw[image_idx].prod() // merge_length
                    prompt = prompt.replace(
                        self.image_token, "<|placeholder|>" * pad_count, 1
                    )
                    image_idx += 1
                expanded.append(prompt.replace("<|placeholder|>", "<|image_pad|>"))
            outputs.update(self.tokenizer(expanded, **kwargs))
        return BatchFeature(outputs)
    def apply_chat_template(
        self,
        messages: list[dict[str, Any]],
        tokenize: bool = False,
        add_generation_prompt: bool = False,
        **kwargs,
    ) -> Any:
        """Flatten structured (text + image) message content to plain text,
        then delegate to the tokenizer's chat template."""
        flattened = []
        for message in messages:
            content = message.get("content")
            if not isinstance(content, list):
                # Regular text message: pass through unchanged.
                flattened.append(message)
                continue
            pieces = []
            for part in content:
                part_type = part.get("type")
                if part_type == "text":
                    pieces.append(part.get("text", ""))
                elif part_type == "image":
                    # Images become a single vision token in the flat prompt.
                    pieces.append(self.image_token)
            flattened.append(
                {"role": message.get("role", "user"), "content": "".join(pieces)}
            )
        kwargs["return_dict"] = False
        return self.tokenizer.apply_chat_template(
            flattened,
            tokenize=tokenize,
            add_generation_prompt=add_generation_prompt,
            **kwargs,
        )
class IsaacProcessingInfo(BaseProcessingInfo):
    """Exposes Isaac HF config/processor details to vLLM's multimodal stack."""
    def get_hf_config(self) -> IsaacConfig:
        """Translate the checkpoint's HF config into an IsaacConfig."""
        if not hasattr(self.ctx, "get_hf_config"):
            return IsaacConfig()
        hf_cfg = self.ctx.get_hf_config()
        # Map HF field names onto IsaacConfig constructor parameters, keeping
        # the same fallbacks the constructor itself would apply.
        return IsaacConfig(
            vision_config=getattr(hf_cfg, "vision_config", None),
            vision_patch_size=getattr(hf_cfg, "video_patch_size", 16),
            vision_max_num_patches=getattr(hf_cfg, "vision_max_num_patches", 256),
            vision_min_num_patches=getattr(hf_cfg, "vision_min_num_patches", None),
            pixel_shuffle_scale=getattr(hf_cfg, "pixel_shuffle_scale", 1),
            max_sequence_length=getattr(hf_cfg, "max_sequence_length", 16384),
            vision_token=getattr(hf_cfg, "vision_token", "<image>"),
            vision_attn_implementation=getattr(
                hf_cfg, "vision_attn_implementation", None
            ),
        )
    def get_hf_processor(self, **kwargs) -> IsaacProcessor:
        """Build an IsaacProcessor seeded with the config's vision token."""
        merged_kwargs = {"image_token": self.get_hf_config().vision_token, **kwargs}
        return self.ctx.get_hf_processor(IsaacProcessor, **merged_kwargs)
    def get_tokenizer(self):
        return self.ctx.tokenizer
    def get_image_size_with_most_features(self) -> ImageSize:
        """Largest (width, height) the patchifier can produce under the
        configured patch budget."""
        cfg = self.get_hf_config()
        # Huge requested dims force the helper to clamp to the patch budget.
        target_height, target_width = get_image_size_for_max_num_patches(
            9999999,
            9999999,
            cfg.video_patch_size,
            cfg.vision_max_num_patches,
            min_num_patches=cfg.vision_min_num_patches,
            pixel_shuffle_scale=cfg.pixel_shuffle_scale,
        )
        return ImageSize(width=target_width, height=target_height)
    def get_image_processor(self, **kwargs) -> IsaacImageProcessor:
        return self.get_hf_processor(**kwargs).image_processor
    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # No hard cap on the number of images per prompt.
        return {"image": None}
    def get_mm_max_tokens_per_item(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> Mapping[str, int]:
        """Upper bound on LM tokens per image after pixel shuffle."""
        cfg = self.get_hf_config()
        return {"image": cfg.vision_max_num_patches // cfg.pixel_shuffle_scale**2}
class IsaacDummyInputsBuilder(BaseDummyInputsBuilder[IsaacProcessingInfo]):
    """Produces placeholder text and images for profiling runs."""
    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        """One image token per requested dummy image."""
        image_count = mm_counts.get("image", 0)
        token: str = self.info.get_hf_processor().image_token
        return token * image_count
    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        """Dummy images at the maximum-feature resolution."""
        width, height = self.info.get_image_size_with_most_features()
        return {
            "image": self._get_dummy_images(
                width=width,
                height=height,
                num_images=mm_counts.get("image", 0),
                overrides=mm_options.get("image"),
            ),
        }
class IsaacImagePixelInputs(TensorSchema):
    """
    Schema for validating Isaac image inputs.
    Dimensions:
        - np: Number of patches
        - d: Patch dimension
        - ni: Number of images
    The schema enforces:
        - pixel_values must be 2D: (num_patches, patch_dim)
        - image_grid_thw must be 2D: (num_images, 3)
          where 3 represents [T, H, W]
    """
    # Packed patches for all images, concatenated along dim 0.
    pixel_values: Annotated[
        torch.Tensor,
        TensorShape("np", "d"),
    ]
    # Per-image [T, H, W] patch grid; T is always 1 for images
    # (enforced downstream by iter_mm_grid_hw's assertion).
    image_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("ni", 3),
    ]
class IsaacMultiModalProcessor(BaseMultiModalProcessor):
    """Maps Isaac processor outputs to multimodal fields and prompt updates."""
    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        """Describe how the flat pixel tensor splits into per-image items."""
        grids = hf_inputs.get("image_grid_thw", torch.empty((0, 3)))
        # Patches per image = T * H * W of each grid row.
        patches_per_image = grids.prod(-1)
        return {
            "pixel_values": MultiModalFieldConfig.flat_from_sizes(
                "image", patches_per_image
            ),
            "image_grid_thw": MultiModalFieldConfig.batched("image"),
        }
    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, Any],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Replace each ``<image>`` with the right number of pad tokens."""
        processor = self.info.get_image_processor(**hf_processor_mm_kwargs)
        merge_length = getattr(processor, "pixel_shuffle_scale", 2) ** 2
        def replacement(item_idx: int):
            grid = out_mm_kwargs["image"][item_idx]["image_grid_thw"].data
            assert isinstance(grid, torch.Tensor)
            # Pixel shuffle reduces the token count by merge_length.
            n_tokens = int(grid.prod()) // merge_length
            return PromptUpdateDetails.select_text(
                "<|image_pad|>" * n_tokens, "<|image_pad|>"
            )
        return [
            PromptReplacement(
                modality="image",
                target="<image>",
                replacement=replacement,
            )
        ]
class Siglip2VisionAttention(nn.Module):
    """Multi-head self-attention over packed variable-length sequences.

    QKV and output projections are tensor-parallel unless the ViT runs in
    data-parallel mode, in which case TP is disabled on both projections.
    """
    def __init__(
        self,
        config: PixelShuffleSiglip2VisionConfig,
        quant_config: QuantizationConfig | None = None,
        *,
        prefix: str = "",
    ) -> None:
        super().__init__()
        use_data_parallel = is_vit_use_data_parallel()
        # In DP mode each rank holds the full set of heads (tp_size == 1).
        self.tp_size = (
            1
            if use_data_parallel
            else parallel_state.get_tensor_model_parallel_world_size()
        )
        self.tp_rank = parallel_state.get_tensor_model_parallel_rank()
        self.hidden_size_per_attention_head = dist_utils.divide(
            config.hidden_size, config.num_attention_heads
        )
        # Heads owned by this rank after tensor-parallel partitioning.
        self.num_attention_heads_per_partition = dist_utils.divide(
            config.num_attention_heads, self.tp_size
        )
        self.qkv_proj = QKVParallelLinear(
            hidden_size=config.hidden_size,
            head_size=self.hidden_size_per_attention_head,
            total_num_heads=config.num_attention_heads,
            total_num_kv_heads=config.num_attention_heads,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
            disable_tp=use_data_parallel,
        )
        self.out_proj = RowParallelLinear(
            input_size=config.hidden_size,
            output_size=config.hidden_size,
            quant_config=quant_config,
            prefix=f"{prefix}.out_proj",
            disable_tp=use_data_parallel,
        )
        self.attn = MMEncoderAttention(
            num_heads=self.num_attention_heads_per_partition,
            head_size=self.hidden_size_per_attention_head,
            scale=self.hidden_size_per_attention_head**-0.5,
            prefix=f"{prefix}.attn",
        )
    def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]:
        """Split a fused [s, b, 3*h*d] projection into per-head q, k, v."""
        seq_len, bs, _ = qkv.shape
        q, k, v = qkv.chunk(3, dim=2)
        new_shape = (
            seq_len,
            bs,
            self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head,
        )
        q, k, v = (x.view(*new_shape) for x in (q, k, v))
        return q, k, v
    def forward(
        self,
        hidden_states: torch.Tensor,
        *,
        cu_seqlens: torch.Tensor,
        max_seqlen: torch.Tensor | None,
    ) -> torch.Tensor:
        """Attend within each packed sub-sequence delimited by ``cu_seqlens``."""
        batch_size, _, _ = hidden_states.shape
        # All images are packed into a single pseudo-batch row.
        if batch_size != 1:
            raise ValueError("packed variable-length attention expects batch_size=1")
        x = rearrange(hidden_states, "b s d -> s b d")
        x, _ = self.qkv_proj(x)
        q, k, v = self.split_qkv(x)
        q, k, v = (rearrange(t, "s b h d -> b s h d") for t in (q, k, v))
        context_layer = self.attn(
            query=q,
            key=k,
            value=v,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
        )
        # Merge heads back into the hidden dimension before out_proj.
        context_layer = rearrange(context_layer, "b s h d -> s b (h d)").contiguous()
        output, _ = self.out_proj(context_layer)
        output = rearrange(output, "s b d -> b s d")
        return output
class Siglip2EncoderLayer(nn.Module):
    """Pre-norm transformer block: self-attention then MLP, each with a
    residual connection."""
    def __init__(
        self,
        config: PixelShuffleSiglip2VisionConfig,
        quant_config: QuantizationConfig | None = None,
        *,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.embed_dim = config.hidden_size
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.self_attn = Siglip2VisionAttention(
            config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(
            config,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )
    def forward(
        self,
        hidden_states: torch.Tensor,
        *,
        cu_seqlens: torch.Tensor,
        max_seqlen: torch.Tensor | None,
    ) -> torch.Tensor:
        """Apply norm -> attention -> residual, then norm -> MLP -> residual."""
        residual = hidden_states
        hidden_states = self.layer_norm1(hidden_states)
        hidden_states = self.self_attn(
            hidden_states=hidden_states,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
        )
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
class Siglip2Encoder(nn.Module):
    """Stack of Siglip2 encoder layers applied sequentially."""
    def __init__(
        self,
        config: PixelShuffleSiglip2VisionConfig,
        quant_config: QuantizationConfig | None = None,
        *,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.config = config
        blocks = [
            Siglip2EncoderLayer(
                config,
                quant_config=quant_config,
                prefix=f"{prefix}.layers.{idx}",
            )
            for idx in range(config.num_hidden_layers)
        ]
        self.layers = nn.ModuleList(blocks)
    def forward(
        self,
        inputs_embeds: torch.Tensor,
        *,
        cu_seqlens: torch.Tensor | None = None,
        max_seqlen: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Run every layer over the packed sequence in order."""
        x = inputs_embeds
        for layer in self.layers:
            x = layer(x, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen)
        return x
class Siglip2VisionTransformer(nn.Module):
    """Siglip2 vision backbone for packed variable-length image patches,
    with optional pixel-shuffle downscaling of the output sequence."""
    def __init__(
        self,
        config: PixelShuffleSiglip2VisionConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        embed_dim = config.hidden_size
        self.embeddings = Siglip2VariableSequenceEmbeddings(config)
        self.pixel_shuffle_scale_factor = config.pixel_shuffle_scale_factor
        self.encoder = Siglip2Encoder(
            config,
            quant_config=quant_config,
            prefix=f"{prefix}.encoder",
        )
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
    def forward(
        self,
        packed_seq_patches: tuple[torch.Tensor, torch.Tensor],
    ) -> torch.Tensor:
        r"""
        spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
            Tensor containing the spatial dimensions (height, width)
            of the input images.
        """
        seq_patches, token_grids = packed_seq_patches
        # Per-image token counts: product of each grid's dimensions.
        seq_sizes = torch.prod(token_grids, dim=-1)
        # Get embeddings from packed sequence
        hidden_states = self.embeddings((seq_patches, seq_sizes, token_grids))
        # Add a pseudo batch dimension for the encoder
        hidden_states = hidden_states.unsqueeze(0)
        cu_seqlens, max_seqlen = create_cumulative_seq_lengths(
            seq_sizes, hidden_states.device
        )
        hidden_states = self.encoder(
            inputs_embeds=hidden_states,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
        )
        hidden_states = self.post_layernorm(hidden_states)
        # Optionally shrink the token sequence via pixel shuffle.
        if self.pixel_shuffle_scale_factor > 1:
            hidden_states = pixel_shuffle_varlen(
                x=hidden_states,
                token_grids=token_grids,
                scale_factor=self.pixel_shuffle_scale_factor,
            )
        # Remove the pseudo batch dimension we added earlier
        hidden_states = hidden_states.squeeze(0)
        # return last_hidden_state
        return hidden_states
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, fusing separate q/k/v shards into the
        stacked qkv_proj parameter; returns the set of loaded param names."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                # Checkpoint q/k/v shards map onto slices of qkv_proj.
                name = name.replace(weight_name, param_name)
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Non-stacked parameter: plain (or param-provided) loader.
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class IsaacVisionEmbedding(nn.Module):
    """Vision tower + 2-layer SiLU MLP projecting vision features into the
    language model's hidden size.

    The numeric prefixes ("0", "1", "3") mirror the checkpoint's sequential
    module layout (0=transformer, 1=fc1, 2=activation, 3=fc2); see
    hf_to_vllm_mapper on IsaacForConditionalGeneration.
    """
    def __init__(
        self,
        vision_cfg: PixelShuffleSiglip2VisionConfig,
        hidden_dim: int,
        output_dim: int,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.transformer = Siglip2VisionTransformer(
            vision_cfg,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "0"),
        )
        self.linear_fc1 = ColumnParallelLinear(
            hidden_dim,
            4 * hidden_dim,
            bias=False,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "1"),
            return_bias=False,
        )
        self.act = nn.SiLU()
        self.linear_fc2 = RowParallelLinear(
            4 * hidden_dim,
            output_dim,
            bias=False,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "3"),
            return_bias=False,
        )
    def forward(
        self, packed_seq_patches: tuple[torch.Tensor, torch.Tensor]
    ) -> torch.Tensor:
        """Encode packed patches, then project to the LM hidden size."""
        hidden_states = self.transformer(packed_seq_patches)
        hidden_states = self.linear_fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_fc2(hidden_states)
        return hidden_states
@MULTIMODAL_REGISTRY.register_processor(
    IsaacMultiModalProcessor,
    info=IsaacProcessingInfo,
    dummy_inputs=IsaacDummyInputsBuilder,
)
class IsaacForConditionalGeneration(
    nn.Module, SupportsMultiModal, SupportsLoRA, SupportsPP, SupportsMRoPE
):
    """Isaac multimodal model: a Siglip2-based vision embedder feeding a
    Qwen3 language model, with M-RoPE positions for image tokens."""
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }
    supports_encoder_tp_data = True
    # To ensure correct weight loading and mapping.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            "lm_head.": "language_model.lm_head.",
            "model.text_model.lm_head.": "language_model.lm_head.",
            "model.text_model.": "language_model.model.",
            # Checkpoint stores the vision tower as a numbered sequential.
            "model.vision_embedding.0": "vision_embedding.transformer",
            "model.vision_embedding.1": "vision_embedding.linear_fc1",
            "model.vision_embedding.2": "vision_embedding.act",
            "model.vision_embedding.3": "vision_embedding.linear_fc2",
            "model.vision_embedding.": "vision_embedding.",
            "model.lm_head.": "language_model.lm_head.",
            "model.": "language_model.model.",
        }
    )
    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Placeholder token inserted into prompts for the given modality."""
        if modality.startswith("image"):
            return "<image>"
        raise ValueError("Only image modality is supported")
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "model"):
        super().__init__()
        config: IsaacConfig = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        head_dim = config.head_dim
        # M-RoPE split of the rotary dims across (t, h, w).
        calculated_mrope_section = [
            head_dim // 4,  # 2x more for temporal dim
            head_dim // 8,
            head_dim // 8,
        ]
        # Resolve the configured vision token string to its token id and
        # record it on the config.
        self.vision_token_id = _resolve_vision_token_id(
            vllm_config.model_config, config.vision_token
        )
        config.image_token_id = self.vision_token_id
        # RoPE parameters may live on a nested text_config or on the top
        # level config itself; prefer the nested one when it is an object.
        text_cfg = getattr(config, "text_config", None)
        target_cfg = (
            text_cfg
            if text_cfg is not None and not isinstance(text_cfg, dict)
            else config
        )
        rope_scaling = getattr(target_cfg, "rope_scaling", None)
        if rope_scaling is None and target_cfg is config:
            # Fallback to the private attribute — TODO confirm which HF
            # versions expose rope scaling only as "_rope_scaling".
            rope_scaling = getattr(config, "_rope_scaling", None)
        patch_rope_parameters(target_cfg)
        rope_parameters = target_cfg.rope_parameters
        rope_parameters["mrope_section"] = calculated_mrope_section
        if rope_scaling is not None and "mrope_interleaved" in rope_scaling:
            rope_parameters.setdefault(
                "mrope_interleaved", rope_scaling["mrope_interleaved"]
            )
        target_cfg.rope_parameters = rope_parameters
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                architectures=["Qwen3ForCausalLM"],
                prefix=maybe_prefix(prefix, "language_model"),
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )
        vision_cfg = config.vision_config
        if vision_cfg is None:
            raise ValueError("IsaacConfig should always have vision_config")
        attn_impl = (
            config.vision_attn_implementation
            if config.vision_attn_implementation is not None
            else getattr(config, "_attn_implementation", None)
        )
        if attn_impl is not None:
            vision_cfg._attn_implementation = attn_impl
        # Pixel shuffle folds scale^2 patches into one token, growing the
        # per-token feature dim accordingly.
        hidden_dim = vision_cfg.hidden_size * (vision_cfg.pixel_shuffle_scale_factor**2)
        with self._mark_tower_model(vllm_config, "image"):
            self.vision_embedding = IsaacVisionEmbedding(
                vision_cfg=vision_cfg,
                hidden_dim=hidden_dim,
                output_dim=config.hidden_size,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "vision_embedding"),
            )
    def iter_mm_grid_hw(
        self, input_tokens: list[int], mm_features: list[MultiModalFeatureSpec]
    ) -> Iterator[tuple[int, int, int]]:
        """Yield (offset, grid_h, grid_w) per image in prompt order, with the
        grid already divided by the pixel-shuffle merge size."""
        spatial_merge_size = self.config.vision_config.pixel_shuffle_scale_factor
        for mm_feature in sorted(mm_features, key=lambda f: f.mm_position.offset):
            offset = mm_feature.mm_position.offset
            if mm_feature.modality == "image":
                t, h, w = mm_feature.data["image_grid_thw"].data.tolist()
                assert t == 1, f"Image must have 1 frame, got {t}"
                yield offset, h // spatial_merge_size, w // spatial_merge_size
            else:
                raise ValueError(f"Unsupported modality: {mm_feature.modality}")
    def get_mrope_input_positions(
        self,
        input_tokens: list[int],
        mm_features: list[MultiModalFeatureSpec],
    ) -> tuple[torch.Tensor, int]:
        """Build 3-row (t, h, w) M-RoPE positions for the whole sequence.

        Text spans advance all three rows together; image spans enumerate
        their (1, h, w) grid. Returns the positions and the delta between
        the largest position and the sequence length.
        """
        llm_pos_ids_list = []
        st = 0
        for offset, llm_grid_h, llm_grid_w in self.iter_mm_grid_hw(
            input_tokens, mm_features
        ):
            # Text segment preceding this image.
            text_len = offset - st
            st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
            llm_pos_ids_list.append(
                np.broadcast_to(np.arange(text_len), (3, text_len)) + st_idx
            )
            # Image segment: row 0 (t) is constant, rows 1/2 walk the grid.
            grid_indices = np.indices((1, llm_grid_h, llm_grid_w)).reshape(3, -1)
            grid_indices[0, :] = grid_indices[0, :] + text_len + st_idx
            llm_pos_ids_list.append(grid_indices)
            st = offset + llm_grid_h * llm_grid_w
        if st < len(input_tokens):
            # Trailing text after the last image.
            st_idx = llm_pos_ids_list[-1][0, -1] + 1 if len(llm_pos_ids_list) > 0 else 0
            text_len = len(input_tokens) - st
            llm_pos_ids_list.append(
                np.broadcast_to(np.arange(text_len), (3, text_len)) + st_idx
            )
        llm_positions = np.concatenate(llm_pos_ids_list, axis=1).reshape(3, -1)
        mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
        return torch.from_numpy(llm_positions), mrope_position_delta
    def _parse_and_validate_image_input(
        self, **kwargs: object
    ) -> IsaacImagePixelInputs | None:
        """Pull image tensors out of kwargs; None when no image is present."""
        pixel_values = kwargs.get("pixel_values")
        image_grid_thw = kwargs.get("image_grid_thw")
        if pixel_values is None or image_grid_thw is None:
            return None
        # TensorSchema will automatically validate shapes on initialization
        return IsaacImagePixelInputs(
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
        )
    def _process_image_input(
        self,
        image_input: IsaacImagePixelInputs,
    ) -> tuple[torch.Tensor, ...]:
        """Run the vision tower and split embeddings back per image."""
        pixel_values = image_input["pixel_values"]
        image_grid_thw = image_input["image_grid_thw"]
        if pixel_values.numel() == 0:
            return ()
        device = next(self.language_model.parameters()).device
        # Match the tower's weight dtype to avoid mixed-precision matmuls.
        dtype = self.vision_embedding.linear_fc1.weight.dtype
        pixel_values = pixel_values.to(device=device, dtype=dtype)
        # The tower consumes only the spatial (H, W) part of each grid.
        spatial_grids = image_grid_thw[:, 1:3].to(device, dtype=torch.int32)
        vision_embeddings = self.vision_embedding((pixel_values, spatial_grids))
        merge_size = self.config.vision_config.pixel_shuffle_scale_factor
        # Tokens per image after pixel shuffle.
        sizes = spatial_grids.prod(-1) // (merge_size * merge_size)
        return tuple(vision_embeddings.split(sizes.tolist()))
    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
        """Produce per-image embeddings; empty tuple when no images."""
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return ()
        return self._process_image_input(image_input)
    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        # Multimodal embeddings are merged into inputs_embeds upstream;
        # this simply delegates to the language model.
        return self.language_model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
            **kwargs,
        )
    def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor | None:
        return self.language_model.compute_logits(hidden_states)
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights after remapping checkpoint names via hf_to_vllm_mapper."""
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="language_model",
            connector="vision_embedding.linear_fc2",  # The final linear layer
            tower_model="vision_embedding",
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/isaac.py",
"license": "Apache License 2.0",
"lines": 1276,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/isaac.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from __future__ import annotations
from transformers import Qwen3Config
from transformers.models.siglip2.configuration_siglip2 import Siglip2VisionConfig
class PixelShuffleSiglip2VisionConfig(Siglip2VisionConfig):
    """Vision configuration for Isaac with Pixel Shuffle support.

    Extends Siglip2VisionConfig with additional fields for pixel shuffle.
    """
    model_type = "pixel_shuffle_siglip2"
    base_config_key = "vision_config"
    def __init__(
        self,
        pixel_shuffle_scale_factor: int = 1,
        num_patches: int = 256,
        **kwargs,
    ):
        """Store pixel-shuffle fields on top of the Siglip2 vision config.

        Args:
            pixel_shuffle_scale_factor: Spatial downscale factor applied
                after the vision encoder (1 disables pixel shuffle).
            num_patches: Maximum number of patches (default 256).
            **kwargs: Forwarded unchanged to Siglip2VisionConfig.
        """
        super().__init__(**kwargs)
        # Add our custom fields
        self.pixel_shuffle_scale_factor = pixel_shuffle_scale_factor
        self.num_patches = num_patches
class IsaacConfig(Qwen3Config):
    """Configuration class for Isaac multimodal model."""
    model_type = "isaac"
    sub_configs = {
        "vision_config": PixelShuffleSiglip2VisionConfig,
        "text_config": Qwen3Config,
    }
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        vision_patch_size: int = 16,
        vision_max_num_patches: int = 256,
        vision_min_num_patches: int | None = None,
        pixel_shuffle_scale: int = 1,
        max_sequence_length: int = 16384,
        vision_token: str = "<image>",
        vision_attn_implementation: str | None = None,
        **kwargs,
    ):
        """Build the combined text+vision config.

        ``text_config``/``vision_config`` may each be a dict (from an HF
        checkpoint), an already-built config object, or None (defaults).
        Remaining kwargs initialize the Qwen3 base config.
        """
        super().__init__(**kwargs)
        if isinstance(text_config, dict):
            # from HF config
            self.text_config = self.sub_configs["text_config"](**text_config)
        elif text_config is None:
            # For BC use all kwargs to init text config.
            self.text_config = self.sub_configs["text_config"](**kwargs)
        else:
            # from Qwen3Config
            self.text_config = text_config
        # EventStreamProcessor parameters (for backward compatibility)
        self.video_patch_size = vision_patch_size
        self.vision_max_num_patches = vision_max_num_patches
        self.vision_min_num_patches = vision_min_num_patches
        self.pixel_shuffle_scale = pixel_shuffle_scale
        # Processing parameters
        self.max_sequence_length = max_sequence_length
        self.vision_token = vision_token
        # Handle vision config - PixelShuffleSiglip2VisionConfig instance
        if isinstance(vision_config, dict):
            self.vision_config = PixelShuffleSiglip2VisionConfig(**vision_config)
        elif vision_config is None:
            self.vision_config = PixelShuffleSiglip2VisionConfig()
        else:
            self.vision_config = vision_config
        # Ensure compatibility with pretrained checkpoints:
        # only fill in fields the provided vision config does not already set.
        self.vision_config.pixel_shuffle_scale_factor = getattr(
            self.vision_config,
            "pixel_shuffle_scale_factor",
            pixel_shuffle_scale,
        )
        self.vision_config.num_patches = getattr(
            self.vision_config,
            "num_patches",
            vision_max_num_patches,
        )
        self.vision_attn_implementation = vision_attn_implementation
# Public API of this config module.
__all__ = [
    "IsaacConfig",
    "PixelShuffleSiglip2VisionConfig",
]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/isaac.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/tool_parsers/test_functiongemma_tool_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest.mock import MagicMock
import pytest
from vllm.entrypoints.openai.chat_completion.protocol import ChatCompletionRequest
from vllm.tool_parsers.functiongemma_tool_parser import FunctionGemmaToolParser
@pytest.fixture
def mock_tokenizer():
    """Tokenizer stand-in: fixed token ids from encode(), empty vocab."""
    tokenizer = MagicMock()
    tokenizer.encode.return_value = [1, 2, 3]
    tokenizer.get_vocab.return_value = {}
    return tokenizer
@pytest.fixture
def parser(mock_tokenizer):
    """FunctionGemmaToolParser wired to the mocked tokenizer."""
    return FunctionGemmaToolParser(mock_tokenizer)
@pytest.fixture
def mock_request():
    """Chat request stub with no tools and tool_choice set to "auto"."""
    request = MagicMock(spec=ChatCompletionRequest)
    request.tools = []
    request.tool_choice = "auto"
    return request
class TestExtractToolCalls:
    """Non-streaming extraction of FunctionGemma tool calls."""
    def test_no_tool_calls(self, parser, mock_request):
        """Plain text passes through untouched."""
        output = "Hello, how can I help you today?"
        info = parser.extract_tool_calls(output, mock_request)
        assert info.tools_called is False
        assert info.tool_calls == []
        assert info.content == output
    def test_single_tool_call(self, parser, mock_request):
        """A lone complete call yields one parsed tool call."""
        output = (
            "<start_function_call>call:get_weather{location:<escape>London<escape>}"
            "<end_function_call>"
        )
        info = parser.extract_tool_calls(output, mock_request)
        assert info.tools_called is True
        assert len(info.tool_calls) == 1
        assert info.tool_calls[0].function.name == "get_weather"
        assert '"location": "London"' in info.tool_calls[0].function.arguments
    def test_multiple_arguments(self, parser, mock_request):
        """Several escaped arguments all land in the JSON arguments blob."""
        output = (
            "<start_function_call>call:get_weather{"
            "location:<escape>San Francisco<escape>,"
            "unit:<escape>celsius<escape>}"
            "<end_function_call>"
        )
        info = parser.extract_tool_calls(output, mock_request)
        assert info.tools_called is True
        assert len(info.tool_calls) == 1
        assert info.tool_calls[0].function.name == "get_weather"
        arguments = info.tool_calls[0].function.arguments
        assert "San Francisco" in arguments
        assert "celsius" in arguments
    def test_text_before_tool_call(self, parser, mock_request):
        """Leading prose is preserved (stripped) as content."""
        output = (
            "Let me check the weather for you. "
            "<start_function_call>call:get_weather{location:<escape>Paris<escape>}"
            "<end_function_call>"
        )
        info = parser.extract_tool_calls(output, mock_request)
        assert info.tools_called is True
        assert info.content == "Let me check the weather for you."
    def test_multiple_tool_calls(self, parser, mock_request):
        """Back-to-back calls are extracted in order."""
        output = (
            "<start_function_call>call:get_weather{location:<escape>London<escape>}"
            "<end_function_call>"
            "<start_function_call>call:get_time{timezone:<escape>UTC<escape>}"
            "<end_function_call>"
        )
        info = parser.extract_tool_calls(output, mock_request)
        assert info.tools_called is True
        assert len(info.tool_calls) == 2
        assert info.tool_calls[0].function.name == "get_weather"
        assert info.tool_calls[1].function.name == "get_time"
class TestParseArguments:
    """Unit tests for FunctionGemma argument-string parsing."""
    def test_empty_arguments(self, parser):
        assert parser._parse_arguments("") == {}
    def test_single_string_argument(self, parser):
        parsed = parser._parse_arguments("city:<escape>Tokyo<escape>")
        assert parsed == {"city": "Tokyo"}
    def test_multiple_arguments(self, parser):
        raw = "city:<escape>Tokyo<escape>,country:<escape>Japan<escape>"
        assert parser._parse_arguments(raw) == {"city": "Tokyo", "country": "Japan"}
    def test_numeric_argument(self, parser):
        # Values that parse as JSON keep their JSON type.
        assert parser._parse_arguments("count:<escape>42<escape>") == {"count": 42}
    def test_boolean_argument(self, parser):
        parsed = parser._parse_arguments("enabled:<escape>true<escape>")
        assert parsed == {"enabled": True}
    def test_argument_with_spaces(self, parser):
        parsed = parser._parse_arguments("message:<escape>Hello World<escape>")
        assert parsed == {"message": "Hello World"}
class TestAdjustRequest:
    """adjust_request should disable special-token skipping only when tool
    calls are actually possible."""
    def test_skip_special_tokens_disabled(self, parser, mock_request):
        """Tools present + tool_choice=auto -> skip_special_tokens off."""
        mock_request.tools = [{"type": "function", "function": {"name": "test"}}]
        mock_request.tool_choice = "auto"
        mock_request.skip_special_tokens = True
        adjusted = parser.adjust_request(mock_request)
        assert adjusted.skip_special_tokens is False
    def test_skip_special_tokens_when_tool_choice_none(self, parser, mock_request):
        """tool_choice=none leaves skip_special_tokens untouched."""
        mock_request.tools = [{"type": "function", "function": {"name": "test"}}]
        mock_request.tool_choice = "none"
        mock_request.skip_special_tokens = True
        adjusted = parser.adjust_request(mock_request)
        assert adjusted.skip_special_tokens is True
class TestBufferDeltaText:
    """Streaming buffer behavior for partial control tags."""
    def test_regular_text_not_buffered(self, parser):
        """Ordinary text is passed through and nothing is retained."""
        flushed = parser._buffer_delta_text("hello")
        assert flushed == "hello"
        assert parser.buffered_delta_text == ""
    def test_complete_tag_flushed(self, parser):
        """A delta completing a buffered tag flushes the whole tag."""
        parser.buffered_delta_text = "<start_function_"
        flushed = parser._buffer_delta_text("call>")
        assert "<start_function_call>" in flushed
# Allow running this file directly without invoking the pytest CLI.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/tool_parsers/test_functiongemma_tool_parser.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/tool_parsers/functiongemma_tool_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
from collections.abc import Sequence
import regex as re
from vllm.entrypoints.chat_utils import make_tool_call_id
from vllm.entrypoints.openai.chat_completion.protocol import (
ChatCompletionRequest,
)
from vllm.entrypoints.openai.engine.protocol import (
DeltaFunctionCall,
DeltaMessage,
DeltaToolCall,
ExtractedToolCallInformation,
FunctionCall,
ToolCall,
)
from vllm.logger import init_logger
from vllm.tokenizers import TokenizerLike
from vllm.tool_parsers.abstract_tool_parser import ToolParser
logger = init_logger(__name__)
class FunctionGemmaToolParser(ToolParser):
"""
Tool parser for Google's FunctionGemma model (google/functiongemma-270m-it).
Handles the FunctionGemma function call format:
<start_function_call>call:func_name{param:<escape>value<escape>}<end_function_call>
"""
    def __init__(self, tokenizer: TokenizerLike):
        """Set up streaming state, control tokens, and parsing regexes."""
        super().__init__(tokenizer)
        # Streaming state
        self.current_tool_name_sent: bool = False
        self.prev_tool_call_arr: list[dict] = []
        self.current_tool_id: int = -1
        self.streamed_args_for_tool: list[str] = []
        # FunctionGemma tokens
        self.tool_call_start_token: str = "<start_function_call>"
        self.tool_call_end_token: str = "<end_function_call>"
        # Regex patterns. First alternative matches a complete call; the
        # second matches a truncated call with no closing tag yet.
        self.tool_call_regex = re.compile(
            r"<start_function_call>call:(\w+)\{(.*?)\}<end_function_call>"
            r"|<start_function_call>call:(\w+)\{(.*)",
            re.DOTALL,
        )
        # key:<escape>value<escape> pairs inside the argument braces.
        self.arg_regex = re.compile(
            r"(\w+):<escape>(.*?)<escape>",
            re.DOTALL,
        )
        if self.model_tokenizer:
            self.tool_call_start_token_ids = self.model_tokenizer.encode(
                self.tool_call_start_token, add_special_tokens=False
            )
            self.tool_call_end_token_ids = self.model_tokenizer.encode(
                self.tool_call_end_token, add_special_tokens=False
            )
        else:
            # No tokenizer available: token-id matching is disabled.
            self.tool_call_start_token_ids = []
            self.tool_call_end_token_ids = []
        # Text held back between streaming deltas until a potential control
        # tag can be disambiguated.
        self.buffered_delta_text = ""
def _parse_arguments(self, args_str: str) -> dict:
"""Parse FunctionGemma argument string into a dictionary."""
arguments = {}
if not args_str:
return arguments
matches = self.arg_regex.findall(args_str)
for key, value in matches:
try:
parsed_value = json.loads(value)
arguments[key] = parsed_value
except json.JSONDecodeError:
arguments[key] = value
return arguments
def adjust_request(self, request: ChatCompletionRequest) -> ChatCompletionRequest:
request = super().adjust_request(request)
if request.tools and request.tool_choice != "none":
request.skip_special_tokens = False
return request
def extract_tool_calls(
self,
model_output: str,
request: ChatCompletionRequest,
) -> ExtractedToolCallInformation:
if self.tool_call_start_token not in model_output:
return ExtractedToolCallInformation(
tools_called=False, tool_calls=[], content=model_output
)
try:
matches = self.tool_call_regex.findall(model_output)
if not matches:
return ExtractedToolCallInformation(
tools_called=False, tool_calls=[], content=model_output
)
tool_calls: list[ToolCall] = []
for match in matches:
func_name = match[0] if match[0] else match[2]
args_str = match[1] if match[1] else match[3]
if not func_name:
continue
arguments = self._parse_arguments(args_str)
tool_calls.append(
ToolCall(
type="function",
function=FunctionCall(
name=func_name,
arguments=json.dumps(arguments, ensure_ascii=False),
),
)
)
if tool_calls:
content_end = model_output.find(self.tool_call_start_token)
content = (
model_output[:content_end].strip() if content_end > 0 else None
)
return ExtractedToolCallInformation(
tools_called=True,
tool_calls=tool_calls,
content=content if content else None,
)
return ExtractedToolCallInformation(
tools_called=False, tool_calls=[], content=model_output
)
except Exception:
logger.exception("Error extracting tool calls from FunctionGemma response")
return ExtractedToolCallInformation(
tools_called=False, tool_calls=[], content=model_output
)
def _buffer_delta_text(self, delta_text: str) -> str:
"""Buffer incoming delta text to handle multi-token special sequences."""
potential_start = "<start_function_call>"
potential_end = "<end_function_call>"
combined = self.buffered_delta_text + delta_text
if combined.endswith(potential_start) or combined.endswith(potential_end):
self.buffered_delta_text = ""
return combined
for tag in [potential_start, potential_end]:
for i in range(1, len(tag)):
if combined.endswith(tag[:i]):
self.buffered_delta_text = combined[-(i):]
return combined[:-i]
self.buffered_delta_text = ""
return combined
def extract_tool_calls_streaming(
self,
previous_text: str,
current_text: str,
delta_text: str,
previous_token_ids: Sequence[int],
current_token_ids: Sequence[int],
delta_token_ids: Sequence[int],
request: ChatCompletionRequest,
) -> DeltaMessage | None:
delta_text = self._buffer_delta_text(delta_text)
current_text = previous_text + delta_text
if self.tool_call_start_token not in current_text:
if delta_text:
return DeltaMessage(content=delta_text)
return None
try:
start_count = current_text.count(self.tool_call_start_token)
end_count = current_text.count(self.tool_call_end_token)
prev_start_count = previous_text.count(self.tool_call_start_token)
prev_end_count = previous_text.count(self.tool_call_end_token)
if self.tool_call_start_token not in current_text:
return DeltaMessage(content=delta_text)
# Starting a new function call
if start_count > prev_start_count and start_count > end_count:
self.current_tool_id += 1
self.current_tool_name_sent = False
self.streamed_args_for_tool.append("")
self.prev_tool_call_arr.append({})
logger.debug("Starting new tool call %d", self.current_tool_id)
return None
# In the middle of a function call
if start_count > end_count:
last_start = current_text.rfind(self.tool_call_start_token)
partial_call = current_text[
last_start + len(self.tool_call_start_token) :
]
if partial_call.startswith("call:"):
func_part = partial_call[5:]
if "{" in func_part:
func_name = func_part.split("{")[0]
args_part = (
func_part.split("{", 1)[1] if "{" in func_part else ""
)
if not self.current_tool_name_sent and func_name:
self.current_tool_name_sent = True
self.prev_tool_call_arr[self.current_tool_id] = {
"name": func_name,
"arguments": {},
}
return DeltaMessage(
tool_calls=[
DeltaToolCall(
index=self.current_tool_id,
type="function",
id=make_tool_call_id(),
function=DeltaFunctionCall(
name=func_name
).model_dump(exclude_none=True),
)
]
)
if self.current_tool_name_sent and args_part:
current_args = self._parse_arguments(args_part)
if current_args:
current_args_json = json.dumps(
current_args, ensure_ascii=False
)
prev_streamed = self.streamed_args_for_tool[
self.current_tool_id
]
if len(current_args_json) > len(prev_streamed):
diff = current_args_json[len(prev_streamed) :]
self.streamed_args_for_tool[
self.current_tool_id
] = current_args_json
self.prev_tool_call_arr[self.current_tool_id][
"arguments"
] = current_args
return DeltaMessage(
tool_calls=[
DeltaToolCall(
index=self.current_tool_id,
function=DeltaFunctionCall(
arguments=diff
).model_dump(exclude_none=True),
)
]
)
return None
# Function call just ended
if end_count > prev_end_count:
if self.current_tool_id >= 0 and self.current_tool_id < len(
self.prev_tool_call_arr
):
all_calls = self.tool_call_regex.findall(current_text)
args = {}
if self.current_tool_id < len(all_calls):
match = all_calls[self.current_tool_id]
if match[0]:
args_str = match[1]
args = self._parse_arguments(args_str)
self.prev_tool_call_arr[self.current_tool_id][
"arguments"
] = args
if args:
args_json = json.dumps(args, ensure_ascii=False)
prev_streamed = self.streamed_args_for_tool[
self.current_tool_id
]
if len(args_json) > len(prev_streamed):
diff = args_json[len(prev_streamed) :]
self.streamed_args_for_tool[self.current_tool_id] = (
args_json
)
return DeltaMessage(
tool_calls=[
DeltaToolCall(
index=self.current_tool_id,
function=DeltaFunctionCall(
arguments=diff
).model_dump(exclude_none=True),
)
]
)
return None
if delta_text:
return DeltaMessage(content=delta_text)
return None
except Exception:
logger.exception("Error in streaming tool call extraction")
return None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/tool_parsers/functiongemma_tool_parser.py",
"license": "Apache License 2.0",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/language/pooling_mteb_test/mteb_embed_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import mteb
import numpy as np
import torch
from mteb.models import ModelMeta
from mteb.types import Array
from torch.utils.data import DataLoader
import tests.ci_envs as ci_envs
from tests.models.utils import (
EmbedModelInfo,
check_embeddings_close,
get_vllm_extra_kwargs,
)
# Most embedding models on the STS12 task (See #17175):
# - Model implementation and minor changes in tensor dtype
#   results in differences less than 1e-4
# - Different model results in differences more than 1e-3
# 5e-4 is a good tolerance threshold
MTEB_EMBED_TASKS = ["STS12"]
MTEB_EMBED_TOL = 5e-4
# Minimal ModelMeta satisfying the mteb encoder interface; all descriptive
# fields are deliberately left unset since they do not affect evaluation.
_empty_model_meta = ModelMeta(
    loader=None,
    name="vllm/model",
    revision="1",
    release_date=None,
    languages=None,
    framework=[],
    similarity_fn_name=None,
    n_parameters=None,
    memory_usage_mb=None,
    max_tokens=None,
    embed_dim=None,
    license=None,
    open_weights=None,
    public_training_code=None,
    public_training_data=None,
    use_instructions=None,
    training_datasets=None,
    modalities=["text"],  # 'image' can be added to evaluate multimodal models
)
class MtebEmbedMixin(mteb.EncoderProtocol):
    """Shared MTEB-encoder plumbing: metadata stub plus cosine similarity."""

    mteb_model_meta = _empty_model_meta

    def similarity(
        self,
        embeddings1: np.ndarray,
        embeddings2: np.ndarray,
    ) -> np.ndarray:
        """Return the full cosine-similarity matrix between two batches."""
        numerator = np.dot(embeddings1, embeddings2.T)
        row_norms = np.linalg.norm(embeddings1, axis=1, keepdims=True)
        col_norms = np.linalg.norm(embeddings2, axis=1, keepdims=True)
        return numerator / (row_norms * col_norms.T)

    def similarity_pairwise(
        self,
        embeddings1: Array,
        embeddings2: Array,
    ) -> Array:
        """Return element-wise cosine similarity of aligned embedding pairs."""
        dots = np.sum(embeddings1 * embeddings2, axis=1)
        norms1 = np.linalg.norm(embeddings1, axis=1, keepdims=True)
        norms2 = np.linalg.norm(embeddings2, axis=1, keepdims=True)
        return dots / (norms1.flatten() * norms2.flatten())
class VllmMtebEncoder(MtebEmbedMixin):
    """Adapter exposing a vLLM model through the MTEB encoder protocol."""

    def __init__(self, vllm_model):
        self.llm = vllm_model
        self.rng = np.random.default_rng(seed=42)

    def encode(
        self,
        inputs: DataLoader[mteb.types.BatchedInput],
        *args,
        **kwargs,
    ) -> np.ndarray:
        # Shuffle the inputs before embedding so scheduling-order bugs in
        # the engine have a chance to surface, then undo the shuffle so
        # callers see embeddings in the original order.
        texts = [t for batch in inputs for t in batch["text"]]
        order = self.rng.permutation(len(texts))
        shuffled = [texts[i] for i in order]
        raw = self.llm.embed(shuffled, use_tqdm=False)
        return np.array(raw)[np.argsort(order)]
class OpenAIClientMtebEncoder(MtebEmbedMixin):
    """Adapter exposing an OpenAI-compatible embeddings endpoint to MTEB."""

    def __init__(self, model_name: str, client):
        self.model_name = model_name
        self.client = client
        self.rng = np.random.default_rng(seed=42)

    def encode(
        self,
        inputs: DataLoader[mteb.types.BatchedInput],
        *args,
        **kwargs,
    ) -> np.ndarray:
        # Shuffle before the API call to surface scheduling-order issues,
        # then restore the original order on the returned embeddings.
        texts = [t for batch in inputs for t in batch["text"]]
        order = self.rng.permutation(len(texts))
        shuffled = [texts[i] for i in order]
        response = self.client.embeddings.create(
            model=self.model_name, input=shuffled
        )
        vectors = [item.embedding for item in response.data]
        return np.array(vectors)[np.argsort(order)]
def run_mteb_embed_task(encoder: mteb.EncoderProtocol, tasks):
    """Evaluate `encoder` on the given MTEB tasks; return the main score."""
    task_objs = mteb.get_tasks(tasks=tasks)
    run = mteb.evaluate(
        encoder,
        task_objs,
        cache=None,
        show_progress_bar=False,
    )
    # Only the first task's "test" split main score is used by callers.
    return run[0].scores["test"][0]["main_score"]
def mteb_test_embed_models(
    hf_runner,
    vllm_runner,
    model_info: EmbedModelInfo,
    vllm_extra_kwargs=None,
    hf_model_callback=None,
    atol=MTEB_EMBED_TOL,
):
    """Run the MTEB embedding comparison for one model.

    Computes the vLLM MTEB main score, compares it one-sidedly against the
    SentenceTransformers reference (either recomputed via hf_runner or the
    constant recorded in model_info.mteb_score), and sanity-checks model
    config fields, embedding size, and NaN-freeness of the outputs.
    """
    vllm_extra_kwargs = get_vllm_extra_kwargs(model_info, vllm_extra_kwargs)
    # Long prompt used below to exercise truncation, embedding size,
    # and NaN checks.
    example_prompts = ["The chef prepared a delicious meal." * 1000]
    with vllm_runner(
        model_info.name,
        runner="pooling",
        max_model_len=model_info.max_model_len,
        **vllm_extra_kwargs,
    ) as vllm_model:
        model_config = vllm_model.llm.llm_engine.model_config
        # Confirm whether vllm is using the correct architecture
        if model_info.architecture:
            assert model_info.architecture in model_config.architectures
        # Confirm whether the important configs in model_config are correct.
        pooler_config = model_config.pooler_config
        if model_info.seq_pooling_type is not None:
            assert pooler_config.seq_pooling_type == model_info.seq_pooling_type
        if model_info.tok_pooling_type is not None:
            assert pooler_config.tok_pooling_type == model_info.tok_pooling_type
        if model_info.attn_type is not None:
            assert model_config.attn_type == model_info.attn_type
        if model_info.is_prefix_caching_supported is not None:
            assert (
                model_config.is_prefix_caching_supported
                == model_info.is_prefix_caching_supported
            )
        if model_info.is_chunked_prefill_supported is not None:
            assert (
                model_config.is_chunked_prefill_supported
                == model_info.is_chunked_prefill_supported
            )
        vllm_main_score = run_mteb_embed_task(
            VllmMtebEncoder(vllm_model), MTEB_EMBED_TASKS
        )
        vllm_dtype = vllm_model.llm.llm_engine.model_config.dtype
        head_dtype = model_config.head_dtype
        # Test embedding_size, isnan and whether to use normalize
        vllm_outputs = vllm_model.embed(
            example_prompts,
            tokenization_kwargs=dict(truncate_prompt_tokens=-1),
        )
        outputs_tensor = torch.tensor(vllm_outputs)
        assert not torch.any(torch.isnan(outputs_tensor))
        embedding_size = model_config.embedding_size
        assert torch.tensor(vllm_outputs).shape[-1] == embedding_size
    # Accelerate mteb test by setting
    # SentenceTransformers mteb score to a constant
    if model_info.mteb_score is None:
        with hf_runner(
            model_info.name,
            is_sentence_transformer=True,
            dtype=ci_envs.VLLM_CI_HF_DTYPE or model_info.hf_dtype,
        ) as hf_model:
            # e.g. setting default parameters for the encode method of hf_runner
            if hf_model_callback is not None:
                hf_model_callback(hf_model)
            st_main_score = run_mteb_embed_task(hf_model, MTEB_EMBED_TASKS)
            st_dtype = next(hf_model.model.parameters()).dtype
            # Check embeddings close to hf outputs
            hf_outputs = hf_model.encode(example_prompts)
            check_embeddings_close(
                embeddings_0_lst=hf_outputs,
                embeddings_1_lst=vllm_outputs,
                name_0="hf",
                name_1="vllm",
                tol=1e-2,
            )
    else:
        st_main_score = model_info.mteb_score
        st_dtype = "Constant"
    print("Model:", model_info.name)
    print("VLLM:", f"dtype:{vllm_dtype}", f"head_dtype:{head_dtype}", vllm_main_score)
    print("SentenceTransformers:", st_dtype, st_main_score)
    print("Difference:", st_main_score - vllm_main_score)
    # We are not concerned that the vllm mteb results are better
    # than SentenceTransformers, so we only perform one-sided testing.
    assert st_main_score - vllm_main_score < atol
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/language/pooling_mteb_test/mteb_embed_utils.py",
"license": "Apache License 2.0",
"lines": 202,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/whisper_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# From https://platform.openai.com/docs/guides/speech-to-text/supported-languages
# Mapping from ISO 639-1 two-letter language code to English language name.
ISO639_1_SUPPORTED_LANGS = {
    "af": "Afrikaans",
    "ar": "Arabic",
    "hy": "Armenian",
    "az": "Azerbaijani",
    "be": "Belarusian",
    "bs": "Bosnian",
    "bg": "Bulgarian",
    "ca": "Catalan",
    "zh": "Chinese",
    "hr": "Croatian",
    "cs": "Czech",
    "da": "Danish",
    "nl": "Dutch",
    "en": "English",
    "et": "Estonian",
    "fi": "Finnish",
    "fr": "French",
    "gl": "Galician",
    "de": "German",
    "el": "Greek",
    "he": "Hebrew",
    "hi": "Hindi",
    "hu": "Hungarian",
    "is": "Icelandic",
    "id": "Indonesian",
    "it": "Italian",
    "ja": "Japanese",
    "kn": "Kannada",
    "kk": "Kazakh",
    "ko": "Korean",
    "lv": "Latvian",
    "lt": "Lithuanian",
    "mk": "Macedonian",
    "ms": "Malay",
    "mr": "Marathi",
    "mi": "Maori",
    "ne": "Nepali",
    "no": "Norwegian",
    "fa": "Persian",
    "pl": "Polish",
    "pt": "Portuguese",
    "ro": "Romanian",
    "ru": "Russian",
    "sr": "Serbian",
    "sk": "Slovak",
    "sl": "Slovenian",
    "es": "Spanish",
    "sw": "Swahili",
    "sv": "Swedish",
    "tl": "Tagalog",
    "ta": "Tamil",
    "th": "Thai",
    "tr": "Turkish",
    "uk": "Ukrainian",
    "ur": "Urdu",
    "vi": "Vietnamese",
    "cy": "Welsh",
}
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/whisper_utils.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/pooling/score/test_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest.mock import patch
import pytest
import torch
from vllm.config import ModelConfig
from vllm.entrypoints.chat_utils import ChatTemplateResolutionError
from vllm.entrypoints.pooling.score.utils import (
compute_maxsim_score,
compute_maxsim_scores,
get_score_prompt,
)
from vllm.inputs import TokensPrompt
from vllm.tokenizers import get_tokenizer
# A small cross-encoder reranker model used as the test subject throughout
# this module.
CROSS_ENCODER_MODEL_ID = "cross-encoder/ms-marco-MiniLM-L-6-v2"
def assert_prompt_tokenization_consistent(
    tokenizer, full_prompt, engine_prompt, add_special_tokens=True
):
    """Verify that engine_prompt token_ids match tokenizing full_prompt."""
    encoded = tokenizer(full_prompt, add_special_tokens=add_special_tokens)
    expected = encoded["input_ids"]
    actual = engine_prompt["prompt_token_ids"]
    assert actual == expected, (
        f"Token IDs don't match.\nExpected: {expected}\nActual: {actual}"
    )
@pytest.fixture(scope="module")
def cross_encoder_model_config():
    """Module-scoped ModelConfig for the cross-encoder test model."""
    config = ModelConfig(
        CROSS_ENCODER_MODEL_ID,
        runner="pooling",
    )
    return config
@pytest.fixture(scope="module")
def cross_encoder_tokenizer(cross_encoder_model_config):
    """Module-scoped tokenizer matching the cross-encoder test model."""
    trust = cross_encoder_model_config.trust_remote_code
    return get_tokenizer(CROSS_ENCODER_MODEL_ID, trust_remote_code=trust)
@pytest.fixture(scope="module")
def llm_reranker_model_config():
    """Model config for LLM-as-reranker style (no pad token).

    Same base model as cross_encoder_model_config, but with use_sep_token
    forced off so tests exercise the no-separator code paths.
    """
    config = ModelConfig(
        CROSS_ENCODER_MODEL_ID,
        runner="pooling",
    )
    # use_sep_token is a property that reads from hf_config,
    # so we set it there to override the default (True)
    config.hf_config.use_sep_token = False
    return config
@pytest.fixture
def tokenization_kwargs():
    """Common tokenization kwargs used across tests."""
    return dict(add_special_tokens=True, return_tensors=None)
@pytest.fixture
def mock_model_with_score_template():
    """Mock model class that supports score template and tracks post_process calls."""

    class MockModelWithScoreTemplate:
        # Class-level list records every prompt passed to post_process_tokens.
        supports_score_template = True
        post_process_called: list[TokensPrompt] = []

        @staticmethod
        def get_score_template(p1: str, p2: str) -> str:
            return "[QUERY]" + p1 + "[SEP][DOC]" + p2

        @staticmethod
        def post_process_tokens(prompt: TokensPrompt) -> None:
            MockModelWithScoreTemplate.post_process_called.append(prompt)

    return MockModelWithScoreTemplate
@pytest.fixture
def mock_model_no_score_template():
    """Mock model class that does not support score template."""

    class MockModelNoScoreTemplate:
        supports_score_template = False

    return MockModelNoScoreTemplate
class TestGetScorePrompt:
    """Tests for the get_score_prompt function."""
    def test_tokenization_kwargs_passed_through(
        self,
        llm_reranker_model_config,
        cross_encoder_tokenizer,
    ):
        """Test that tokenization kwargs are properly passed through."""
        data_1 = "Query text"
        data_2 = "Document text"
        # Test with truncation - custom kwargs for this test
        custom_tokenization_kwargs = {
            "add_special_tokens": True,
            "return_tensors": None,
            "truncation": True,
            "max_length": 20,
        }
        full_prompt, engine_prompt = get_score_prompt(
            llm_reranker_model_config,
            cross_encoder_tokenizer,
            custom_tokenization_kwargs,
            data_1,
            data_2,
        )
        assert isinstance(full_prompt, str)
        assert "prompt_token_ids" in engine_prompt
        # With max_length=20 and truncation, should not exceed this
        assert len(engine_prompt["prompt_token_ids"]) <= 20
        # Since truncation was applied, token_ids should be a prefix of full encoding
        full_ids = cross_encoder_tokenizer(full_prompt, add_special_tokens=True)[
            "input_ids"
        ]
        actual_ids = engine_prompt["prompt_token_ids"]
        assert full_ids[: len(actual_ids)] == actual_ids, (
            f"Token IDs are not a prefix of full encoding.\n"
            f"Full IDs: {full_ids}\n"
            f"Actual IDs: {actual_ids}"
        )
    def test_model_supports_score_template(
        self,
        cross_encoder_model_config,
        cross_encoder_tokenizer,
        tokenization_kwargs,
        mock_model_with_score_template,
    ):
        """Test when model supports score template (no score_template arg)."""
        with patch(
            "vllm.model_executor.model_loader.get_model_cls",
            return_value=mock_model_with_score_template,
        ):
            full_prompt, engine_prompt = get_score_prompt(
                cross_encoder_model_config,
                cross_encoder_tokenizer,
                tokenization_kwargs,
                "query text",
                "document text",
            )
        # The mock model's own template should have been applied.
        assert full_prompt == "[QUERY]query text[SEP][DOC]document text"
        assert "prompt_token_ids" in engine_prompt
        assert len(engine_prompt["prompt_token_ids"]) > 0
        assert_prompt_tokenization_consistent(
            cross_encoder_tokenizer, full_prompt, engine_prompt
        )
    def test_model_supports_score_template_but_custom_template_provided(
        self,
        cross_encoder_model_config,
        cross_encoder_tokenizer,
        tokenization_kwargs,
        mock_model_with_score_template,
    ):
        """Test when model supports score template but custom template is provided."""
        template = (
            'TEMPLATE_USED {{ messages[0]["content"] }} {{ messages[1]["content"] }}'
        )
        with (
            patch(
                "vllm.model_executor.model_loader.get_model_cls",
                return_value=mock_model_with_score_template,
            ),
        ):
            full_prompt, engine_prompt = get_score_prompt(
                cross_encoder_model_config,
                cross_encoder_tokenizer,
                tokenization_kwargs,
                "query",
                "doc",
                score_template=template,  # Providing a template
            )
        assert "prompt_token_ids" in engine_prompt
        # The explicit template must win over the model's built-in one.
        assert full_prompt == "TEMPLATE_USED query doc"
        assert_prompt_tokenization_consistent(
            cross_encoder_tokenizer, full_prompt, engine_prompt
        )
    def test_not_using_default_template(
        self,
        llm_reranker_model_config,
        cross_encoder_tokenizer,
        tokenization_kwargs,
        mock_model_no_score_template,
    ):
        """Test that the tokenizer's default chat template is not used."""
        # FIXME: For now, we only apply a template when one is explicitly provided.
        # We cannot rely on the tokenizer's chat template because many models
        # inherit junk templates from their base LLM, which breaks both the models
        # and the tests that use them.
        with (
            patch(
                "vllm.model_executor.model_loader.get_model_cls",
                return_value=mock_model_no_score_template,
            ),
            patch(
                "vllm.entrypoints.pooling.score.utils.safe_apply_chat_template",
                return_value="test querytest doc",
            ),
        ):
            full_prompt, engine_prompt = get_score_prompt(
                llm_reranker_model_config,
                cross_encoder_tokenizer,
                tokenization_kwargs,
                "test query",
                "test doc",
            )
        assert full_prompt == "test querytest doc"
        assert "prompt_token_ids" in engine_prompt
        assert_prompt_tokenization_consistent(
            cross_encoder_tokenizer, full_prompt, engine_prompt
        )
    def test_fallback_with_sep_token(
        self,
        cross_encoder_model_config,
        cross_encoder_tokenizer,
        tokenization_kwargs,
        mock_model_no_score_template,
    ):
        """Test fallback path when ChatTemplateResolutionError
        and use_sep_token=True."""
        with (
            patch(
                "vllm.model_executor.model_loader.get_model_cls",
                return_value=mock_model_no_score_template,
            ),
            patch(
                "vllm.entrypoints.pooling.score.utils.safe_apply_chat_template",
                side_effect=ChatTemplateResolutionError("No template"),
            ),
        ):
            full_prompt, engine_prompt = get_score_prompt(
                cross_encoder_model_config,  # use_sep_token=True
                cross_encoder_tokenizer,
                tokenization_kwargs,
                "query",
                "document",
            )
        assert "prompt_token_ids" in engine_prompt
        # Should have token_type_ids from text_pair encoding
        assert "token_type_ids" in engine_prompt
        assert "query" in full_prompt
        assert "document" in full_prompt
        assert full_prompt != "querydocument"
        assert (
            engine_prompt["prompt_token_ids"]
            == cross_encoder_tokenizer(
                "query", text_pair="document", add_special_tokens=True
            )["input_ids"]
        )
        # FIXME(?): add_special_tokens=False is needed because in this case
        # full_prompt is obtained by decoding the tokenized prompt, which includes
        # special tokens and we would get duplicated special tokens otherwise.
        # This is inconsistent with other cases.
        assert_prompt_tokenization_consistent(
            cross_encoder_tokenizer,
            full_prompt,
            engine_prompt,
            add_special_tokens=False,
        )
    def test_fallback_without_sep_token(
        self,
        llm_reranker_model_config,
        cross_encoder_tokenizer,
        tokenization_kwargs,
        mock_model_no_score_template,
    ):
        """Test fallback path when ChatTemplateResolutionError
        and use_sep_token=False."""
        with (
            patch(
                "vllm.model_executor.model_loader.get_model_cls",
                return_value=mock_model_no_score_template,
            ),
            patch(
                "vllm.entrypoints.pooling.score.utils.safe_apply_chat_template",
                side_effect=ChatTemplateResolutionError("No template"),
            ),
        ):
            full_prompt, engine_prompt = get_score_prompt(
                llm_reranker_model_config,  # use_sep_token=False
                cross_encoder_tokenizer,
                tokenization_kwargs,
                "query",
                "document",
            )
        # Without a separator the two inputs are simply concatenated.
        assert full_prompt == "querydocument"
        assert "prompt_token_ids" in engine_prompt
        assert_prompt_tokenization_consistent(
            cross_encoder_tokenizer, full_prompt, engine_prompt
        )
    def test_post_process_tokens_called(
        self,
        cross_encoder_model_config,
        cross_encoder_tokenizer,
        tokenization_kwargs,
        mock_model_with_score_template,
    ):
        """Test that post_process_tokens is called on the engine prompt."""
        # Reset the call tracker
        mock_model_with_score_template.post_process_called.clear()
        with (
            patch(
                "vllm.model_executor.model_loader.get_model_cls",
                return_value=mock_model_with_score_template,
            ),
            patch(
                "vllm.entrypoints.pooling.score.utils.safe_apply_chat_template",
                side_effect=ChatTemplateResolutionError("No template"),
            ),
        ):
            full_prompt, engine_prompt = get_score_prompt(
                cross_encoder_model_config,
                cross_encoder_tokenizer,
                tokenization_kwargs,
                "query",
                "doc",
            )
        # post_process_tokens should have been called once
        assert len(mock_model_with_score_template.post_process_called) == 1
        assert mock_model_with_score_template.post_process_called[0] is engine_prompt
        assert_prompt_tokenization_consistent(
            cross_encoder_tokenizer, full_prompt, engine_prompt
        )
def test_compute_maxsim_scores_matches_reference_per_pair() -> None:
    """Batched MaxSim scoring must match per-pair reference scoring."""
    gen = torch.Generator()
    gen.manual_seed(7)
    common_query = torch.randn(5, 8, generator=gen)
    # Two pairs share one query tensor (1:N style); two have their own.
    queries = [
        common_query,
        common_query,
        torch.randn(2, 8, generator=gen),
        torch.randn(4, 8, generator=gen),
    ]
    docs = [
        torch.randn(6, 8, generator=gen),
        torch.randn(3, 8, generator=gen),
        torch.randn(5, 8, generator=gen),
        torch.randn(7, 8, generator=gen),
    ]
    # A small element budget forces the batch-shrinking code path.
    got = compute_maxsim_scores(
        queries,
        docs,
        max_batch_size=4,
        max_score_matrix_elements=40,
    )
    want = [compute_maxsim_score(q, d).to("cpu") for q, d in zip(queries, docs)]
    assert len(got) == len(want)
    for actual, expected in zip(got, want):
        torch.testing.assert_close(actual, expected, rtol=1e-4, atol=1e-4)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/pooling/score/test_utils.py",
"license": "Apache License 2.0",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/models/language/pooling_mteb_test/test_nemotron.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from tests.models.language.pooling_mteb_test.mteb_embed_utils import (
mteb_test_embed_models,
)
from tests.models.language.pooling_mteb_test.mteb_score_utils import (
mteb_test_rerank_models,
)
from tests.models.utils import (
EmbedModelInfo,
RerankModelInfo,
)
# Nemotron embedding models under test; mteb_score is the recorded
# SentenceTransformers reference score used to skip the HF rerun.
EMBEDDING_MODELS = [
    EmbedModelInfo(
        "nvidia/llama-nemotron-embed-1b-v2",
        architecture="LlamaBidirectionalModel",
        mteb_score=0.689164662128673,
        seq_pooling_type="MEAN",
        attn_type="encoder_only",
        is_prefix_caching_supported=False,
        is_chunked_prefill_supported=False,
    )
]
# Nemotron reranker models under test, with their reference MTEB scores.
RERANK_MODELS = [
    RerankModelInfo(
        "nvidia/llama-nemotron-rerank-1b-v2",
        architecture="LlamaBidirectionalForSequenceClassification",
        chat_template_name="nemotron-rerank.jinja",
        mteb_score=0.33994,
        seq_pooling_type="MEAN",
        attn_type="encoder_only",
        is_prefix_caching_supported=False,
        is_chunked_prefill_supported=False,
    ),
]
@pytest.mark.parametrize("model_info", EMBEDDING_MODELS)
def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo) -> None:
    # Compare the vLLM MTEB embedding score against the recorded reference.
    mteb_test_embed_models(hf_runner, vllm_runner, model_info)
@pytest.mark.parametrize("model_info", RERANK_MODELS)
def test_rerank_models_mteb(vllm_runner, model_info: RerankModelInfo) -> None:
    # Compare the vLLM MTEB rerank score against the recorded reference.
    mteb_test_rerank_models(vllm_runner, model_info)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/language/pooling_mteb_test/test_nemotron.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/zero_expert_fused_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from contextlib import contextmanager
import torch
from torch import nn
from vllm.model_executor.layers.fused_moe.fused_moe import zero_experts_compute_triton
from vllm.model_executor.layers.fused_moe.layer import FusedMoE
class ZeroExpertFusedMoE(FusedMoE):
"""
A FusedMoE operation that also computes the results of zero experts.
Zero experts perform identity operations (scaled pass-through) instead
of full MLP computations.
This class uses memoization to avoid redundant routing computation:
routing is computed once and reused for both zero expert computation
and the main FusedMoE forward pass.
"""
def __init__(
self,
zero_expert_num: int,
zero_expert_type: str,
router: nn.Module,
**kwargs,
):
# ZeroExpertFusedMoE manages its own custom_routing_function for memoization
assert (
"custom_routing_function" not in kwargs
or kwargs.get("custom_routing_function") is None
), (
"ZeroExpertFusedMoE does not support external custom_routing_function. "
"It manages its own for routing memoization."
)
# Automatically slice router's e_score_correction_bias to only include
# real experts (not zero_experts) for the base FusedMoE.
# The full bias will be used temporarily in forward() for routing.
if hasattr(router, "e_score_correction_bias") and "num_experts" in kwargs:
num_real_experts = kwargs["num_experts"]
router_bias = router.e_score_correction_bias
user_bias = kwargs.get("e_score_correction_bias")
# Use router's bias if:
# 1. User didn't provide bias, or
# 2. User provided full bias (same size as router)
if user_bias is None or user_bias.shape[0] == router_bias.shape[0]:
kwargs["e_score_correction_bias"] = router_bias[:num_real_experts]
# FusedMoE no longer accepts zero_expert_num/zero_expert_type.
# We handle zero experts ourselves in forward().
super().__init__(**kwargs)
# Store the actual zero_expert_num and zero_expert_type for our own use
self._actual_zero_expert_num = zero_expert_num
self._actual_zero_expert_type = zero_expert_type
self._router = router # Full router (includes zero experts)
# Expose zero_expert_num and zero_expert_type as attributes for
# compatibility with quantization methods that check these attributes
self.zero_expert_num = 0
self.zero_expert_type = None
# Memoization state for routing results
self._memoized_topk_weights: torch.Tensor | None = None
self._memoized_topk_ids: torch.Tensor | None = None
# Create custom_routing_function to reuse memoized routing results
def custom_routing_function(hidden_states, gating_output, topk, renormalize):
"""Return memoized `topk_weights` and `topk_ids`."""
if self._memoized_topk_weights is None or self._memoized_topk_ids is None:
raise RuntimeError(
"ZeroExpertFusedMoE: routing results not memoized. "
"Call select_experts first to compute routing."
)
return self._memoized_topk_weights, self._memoized_topk_ids
self.custom_routing_function = custom_routing_function
@contextmanager
def _temporarily_set_attrs(self, **attrs):
"""
Temporarily set attributes using object.__setattr__ and restore them.
This bypasses nn.Module.__setattr__ to avoid Dynamo tracing issues.
When PyTorch Dynamo traces the forward pass, it cannot handle
nn.Module.__setattr__ calls (which include parameter registration logic),
resulting in "Unsupported" errors. Using object.__setattr__ directly
sets the attribute without triggering nn.Module's custom __setattr__,
allowing Dynamo to trace the code successfully.
"""
originals = {key: getattr(self, key) for key in attrs}
try:
for key, value in attrs.items():
object.__setattr__(self, key, value)
yield
finally:
for key, value in originals.items():
object.__setattr__(self, key, value)
def _compute_zero_expert_result(
self,
hidden_states: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
) -> torch.Tensor | None:
"""Compute zero expert results using pre-computed routing."""
if (
self._actual_zero_expert_num is None
or self._actual_zero_expert_num <= 0
or self._actual_zero_expert_type is None
):
return None
return zero_experts_compute_triton(
expert_indices=topk_ids.clone(),
expert_scales=topk_weights.clone(),
num_experts=self.logical_num_experts,
zero_expert_type=self._actual_zero_expert_type,
hidden_states=hidden_states,
)
    def forward(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,  # Full logits including zero experts
    ) -> torch.Tensor:
        """
        Forward pass with zero expert support and routing memoization.

        Args:
            hidden_states: Input hidden states
            router_logits: Full router logits (including zero experts)
        Returns:
            Combined output from real experts and zero experts
        """
        # Prepare temporary attribute overrides for routing computation
        temp_attrs = {
            "custom_routing_function": None,  # Disable for first routing
        }
        if self._router is not None:
            temp_attrs["e_score_correction_bias"] = self._router.e_score_correction_bias
        # Compute routing with temporary attributes
        # Pass full router_logits (including zero experts) so that zero experts
        # can be properly identified in topk_ids
        with self._temporarily_set_attrs(**temp_attrs):
            topk_weights, topk_ids = self.select_experts(
                hidden_states=hidden_states,
                router_logits=router_logits,  # Full logits (includes zero experts)
            )
        # Compute zero expert result if needed
        zero_expert_result = self._compute_zero_expert_result(
            hidden_states=hidden_states,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
        )
        # Memoize routing results for reuse in super().forward(); consumed by
        # the custom_routing_function installed in __init__.
        self._memoized_topk_weights = topk_weights
        self._memoized_topk_ids = topk_ids
        # Slice router_logits for real experts only
        router_logits_sliced = router_logits[..., : self.logical_num_experts]
        # Compute real expert results (will reuse memoized routing via
        # custom_routing_function)
        # zero_expert_num is already 0, so FusedMoE won't handle zero experts
        fused_out = super().forward(
            hidden_states=hidden_states,
            router_logits=router_logits_sliced,
        )
        # Combine results
        # Both zero_expert_result and fused_out are computed from the same
        # hidden_states, so they should be on the same device.
        if zero_expert_result is not None:
            fused_out = fused_out + zero_expert_result
        # Clear memoization after use
        # NOTE(review): not exception-safe — if super().forward() raises, the
        # memoized tensors are never cleared; consider try/finally.
        self._memoized_topk_weights = None
        self._memoized_topk_ids = None
        return fused_out
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/zero_expert_fused_moe.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/test_attention_backend_registry.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.v1.attention.backend import (
AttentionBackend,
AttentionImpl,
)
from vllm.v1.attention.backends.registry import (
AttentionBackendEnum,
MambaAttentionBackendEnum,
register_backend,
)
class CustomAttentionImpl(AttentionImpl):
    """Mock custom attention implementation for testing."""

    def __init__(self, *args, **kwargs):
        # Swallow every constructor argument; tests only need the type itself.
        super().__init__()

    def forward(self, *args, **kwargs):
        """Mock forward pass that intentionally does nothing."""
        return None
class CustomAttentionBackend(AttentionBackend):
    """Mock custom attention backend used by the registry tests."""

    @staticmethod
    def get_name():
        # Name under which the backend is registered.
        return "CUSTOM"

    @staticmethod
    def get_impl_cls():
        # Pair this backend with the mock implementation class.
        return CustomAttentionImpl

    @staticmethod
    def get_required_kv_cache_layout():
        """No KV-cache layout requirement for the mock."""
        return None

    @staticmethod
    def get_builder_cls():
        """No builder class for the mock."""
        return None
class CustomMambaAttentionImpl(AttentionImpl):
    """Mock custom mamba attention implementation for testing."""

    def __init__(self, *args, **kwargs):
        # Swallow every constructor argument; tests only need the type itself.
        super().__init__()

    def forward(self, *args, **kwargs):
        """Mock forward pass that intentionally does nothing."""
        return None
class CustomMambaAttentionBackend(AttentionBackend):
    """Mock custom mamba attention backend used by the registry tests."""

    @staticmethod
    def get_name():
        # Name under which the backend is registered.
        return "CUSTOM_MAMBA"

    @staticmethod
    def get_impl_cls():
        # Pair this backend with the mock mamba implementation class.
        return CustomMambaAttentionImpl

    @staticmethod
    def get_required_kv_cache_layout():
        """No KV-cache layout requirement for the mock."""
        return None

    @staticmethod
    def get_builder_cls():
        """No builder class for the mock."""
        return None
def test_custom_is_not_alias_of_any_backend():
    """CUSTOM must be a distinct enum member, not an alias of another one."""
    all_backends = list(AttentionBackendEnum)
    # An alias is a differently-named member that is the *same* enum object.
    aliases = [
        b.name
        for b in all_backends
        if b.name != "CUSTOM" and b is AttentionBackendEnum.CUSTOM
    ]
    assert len(aliases) == 0, (
        f"BUG! CUSTOM is an alias of: {', '.join(aliases)}!\n"
        f"CUSTOM.value = {repr(AttentionBackendEnum.CUSTOM.value)}\n"
        f"This happens when CUSTOM has the same value as another backend.\n"
        f"When you register to CUSTOM, you're actually registering to {aliases[0]}!\n"
        f"All backend values:\n"
        + "\n".join(f" {b.name}: {repr(b.value)}" for b in all_backends)
    )
    # CUSTOM must also resolve to its own canonical name.
    assert AttentionBackendEnum.CUSTOM.name == "CUSTOM", (
        f"CUSTOM.name should be 'CUSTOM', but got '{AttentionBackendEnum.CUSTOM.name}'"
    )
def test_register_custom_backend_with_class_path():
    """Registering CUSTOM by class path overrides the enum entry."""
    path = "tests.test_attention_backend_registry.CustomAttentionBackend"
    register_backend(
        backend=AttentionBackendEnum.CUSTOM,
        class_path=path,
        is_mamba=False,
    )
    # Registration must mark the entry as overridden ...
    assert AttentionBackendEnum.CUSTOM.is_overridden(), (
        "CUSTOM should be overridden after registration"
    )
    # ... record the exact class path ...
    assert AttentionBackendEnum.CUSTOM.get_path() == path
    # ... and resolve back to the mock backend class.
    registered_cls = AttentionBackendEnum.CUSTOM.get_class()
    assert registered_cls.get_name() == "CUSTOM"
    assert registered_cls.get_impl_cls() == CustomAttentionImpl
def test_mamba_custom_is_not_alias_of_any_backend():
    """Mamba CUSTOM must be a distinct enum member, not an alias."""
    all_backends = list(MambaAttentionBackendEnum)
    # An alias is a differently-named member that is the *same* enum object.
    aliases = [
        b.name
        for b in all_backends
        if b.name != "CUSTOM" and b is MambaAttentionBackendEnum.CUSTOM
    ]
    assert len(aliases) == 0, (
        f"BUG! MambaAttentionBackendEnum.CUSTOM is an alias of: {', '.join(aliases)}!\n"
        f"CUSTOM.value = {repr(MambaAttentionBackendEnum.CUSTOM.value)}\n"
        f"All mamba backend values:\n"
        + "\n".join(f" {b.name}: {repr(b.value)}" for b in all_backends)
    )
def test_register_custom_mamba_backend_with_class_path():
    """Registering mamba CUSTOM by class path overrides the enum entry."""
    path = "tests.test_attention_backend_registry.CustomMambaAttentionBackend"
    register_backend(
        backend=MambaAttentionBackendEnum.CUSTOM,
        class_path=path,
        is_mamba=True,
    )
    # Registration must mark the entry as overridden ...
    assert MambaAttentionBackendEnum.CUSTOM.is_overridden()
    # ... record the exact class path ...
    assert MambaAttentionBackendEnum.CUSTOM.get_path() == path
    # ... and resolve back to the mock mamba backend class.
    registered_cls = MambaAttentionBackendEnum.CUSTOM.get_class()
    assert registered_cls.get_name() == "CUSTOM_MAMBA"
    assert registered_cls.get_impl_cls() == CustomMambaAttentionImpl
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/test_attention_backend_registry.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/tool_parsers/glm47_moe_tool_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import regex as re
from vllm.logger import init_logger
from vllm.tokenizers import TokenizerLike
from vllm.tool_parsers.glm4_moe_tool_parser import Glm4MoeModelToolParser
logger = init_logger(__name__)
class Glm47MoeModelToolParser(Glm4MoeModelToolParser):
    """Tool parser for GLM-4.7 MoE models.

    Reuses the GLM-4 MoE parsing logic and only overrides the two regexes
    used to extract tool-call details and argument key/value pairs.
    """

    def __init__(self, tokenizer: TokenizerLike):
        super().__init__(tokenizer)
        # Group 1: function name text before the first <arg_key>;
        # group 2 (optional): the whole <arg_key>... argument section.
        self.func_detail_regex = re.compile(
            r"<tool_call>(.*?)(<arg_key>.*?)?</tool_call>", re.DOTALL
        )
        # NOTE(review): `\\n` matches a literal backslash-n two-character
        # sequence, not a newline (real newlines are already covered by
        # `\s`) — presumably to tolerate escaped newlines in model output;
        # confirm this is intended.
        self.func_arg_regex = re.compile(
            r"<arg_key>(.*?)</arg_key>(?:\\n|\s)*<arg_value>(.*?)</arg_value>",
            re.DOTALL,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/tool_parsers/glm47_moe_tool_parser.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/mimo_v2_flash.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable
from itertools import islice
import torch
from torch import nn
from vllm.config import (
CacheConfig,
VllmConfig,
get_current_vllm_config,
str_dtype_to_torch_dtype,
)
from vllm.distributed import (
get_ep_group,
get_pp_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
tensor_model_parallel_all_gather,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.model_executor.models.utils import sequence_parallel_chunk
from vllm.sequence import IntermediateTensors
from vllm.v1.attention.backend import AttentionType
from .interfaces import MixtureOfExperts, SupportsPP
from .utils import (
AutoWeightsLoader,
PPMissingLayer,
extract_layer_index,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
logger = init_logger(__name__)
class MiMoV2MLP(nn.Module):
    """Dense SwiGLU feed-forward block: fused gate/up projection, SiLU-and-mul
    activation, then down projection back to the hidden size."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
        reduce_results: bool = True,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Gate and up projections are merged into a single column-parallel GEMM.
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size,
            [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            reduce_results=reduce_results,
            prefix=f"{prefix}.down_proj",
        )
        # Only the SiLU (SwiGLU) activation path is implemented.
        if hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {hidden_act}. Only silu is supported for now."
            )
        self.act_fn = SiluAndMul()

    def forward(self, x):
        projected, _ = self.gate_up_proj(x)
        activated = self.act_fn(projected)
        out, _ = self.down_proj(activated)
        return out
class MiMoV2MoE(nn.Module):
    """Sparse mixture-of-experts block for MiMo-V2.

    Routes tokens through a bias-free linear gate (run in a dedicated router
    dtype, float32 by default) with grouped sigmoid top-k selection, then
    dispatches to FusedMoE experts. Supports expert parallelism, EPLB
    redundant experts, and sequence-parallel MoE.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
        is_nextn: bool = False,
    ):
        super().__init__()
        config = vllm_config.model_config.hf_text_config
        parallel_config = vllm_config.parallel_config
        quant_config = vllm_config.quant_config
        self.tp_size = get_tensor_model_parallel_world_size()
        self.ep_group = get_ep_group().device_group
        self.ep_rank = get_ep_group().rank_in_group
        self.ep_size = self.ep_group.size()
        self.n_routed_experts = config.n_routed_experts
        self.is_sequence_parallel = parallel_config.use_sequence_parallel_moe
        if self.tp_size > config.n_routed_experts:
            raise ValueError(
                f"Tensor parallel size {self.tp_size} is greater than "
                f"the number of experts {config.n_routed_experts}."
            )
        if config.hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {config.hidden_act}. "
                "Only silu is supported for now."
            )
        vllm_config = get_current_vllm_config()
        eplb_config = vllm_config.parallel_config.eplb_config
        self.enable_eplb = parallel_config.enable_eplb
        # Physical experts = logical experts + EPLB redundant replicas,
        # partitioned evenly across expert-parallel ranks.
        self.n_logical_experts = self.n_routed_experts
        self.n_redundant_experts = eplb_config.num_redundant_experts
        self.n_physical_experts = self.n_logical_experts + self.n_redundant_experts
        self.n_local_physical_experts = self.n_physical_experts // self.ep_size
        self.physical_expert_start = self.ep_rank * self.n_local_physical_experts
        self.physical_expert_end = (
            self.physical_expert_start + self.n_local_physical_experts
        )
        # Router runs in its own dtype (float32 by default) for numerical
        # stability of the expert-selection scores.
        dtype = getattr(config, "moe_router_dtype", "float32")
        self.gate_dtype = str_dtype_to_torch_dtype(dtype)
        self.gate = nn.Linear(
            config.hidden_size,
            config.n_routed_experts,
            bias=False,
            dtype=self.gate_dtype,
        )
        # Per-expert score correction bias, loaded from the checkpoint.
        self.gate.e_score_correction_bias = nn.Parameter(
            torch.empty(config.n_routed_experts, dtype=self.gate_dtype)
        )
        self.experts = FusedMoE(
            num_experts=self.n_routed_experts,
            top_k=config.num_experts_per_tok,
            hidden_size=config.hidden_size,
            intermediate_size=config.moe_intermediate_size,
            reduce_results=True,
            renormalize=config.norm_topk_prob,
            quant_config=quant_config,
            prefix=f"{prefix}.experts",
            e_score_correction_bias=self.gate.e_score_correction_bias,
            enable_eplb=self.enable_eplb,
            num_redundant_experts=self.n_redundant_experts,
            is_sequence_parallel=self.is_sequence_parallel,
            use_grouped_topk=True,
            num_expert_group=config.n_group,
            topk_group=config.topk_group,
            scoring_func="sigmoid",
            router_logits_dtype=self.gate_dtype,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Route `hidden_states` (1D `(hidden,)` or 2D `(tokens, hidden)`)
        through the experts; output shape matches the input shape."""
        assert hidden_states.dim() <= 2, "MiMoV2MoE only supports 1D or 2D inputs"
        is_input_1d = hidden_states.dim() == 1
        # BUGFIX: flatten to 2D *before* unpacking the shape. A 1D input has a
        # one-element shape, so the previous `num_tokens, hidden_dim = shape`
        # raised ValueError on exactly the path the 1D handling exists for.
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        num_tokens, hidden_dim = hidden_states.shape
        if self.is_sequence_parallel:
            hidden_states = sequence_parallel_chunk(hidden_states)
        # Cast to the router dtype for the gate computation.
        if self.gate_dtype is not None:
            gate_input = hidden_states.to(self.gate_dtype)
        else:
            gate_input = hidden_states
        router_logits = self.gate(gate_input)
        final_hidden_states = self.experts(
            hidden_states=hidden_states, router_logits=router_logits
        )
        if self.is_sequence_parallel:
            # Re-assemble the full sequence and drop any padding added by
            # the sequence-parallel chunking.
            final_hidden_states = tensor_model_parallel_all_gather(
                final_hidden_states, 0
            )
            final_hidden_states = final_hidden_states[:num_tokens]
        # Restore the 1D shape for 1D inputs.
        return final_hidden_states.squeeze(0) if is_input_1d else final_hidden_states
class MiMoV2Attention(nn.Module):
    """Multi-head attention for MiMo-V2.

    Supports an optional sliding window, an optional learned attention-sink
    bias, a value head dim that may differ from the Q/K head dim (V is
    zero-padded up to `head_dim` for the kernel and sliced back afterwards),
    and an optional pre-attention value scale.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        head_dim: int,
        v_head_dim: int | None = None,
        v_scale: float | None = None,
        sliding_window_size: int = -1,
        attention_bias: bool = False,
        add_swa_attention_sink_bias: bool = False,
        layer_id: int = 0,
        rope_theta: float = 1000000,
        max_position_embeddings: int = 32768,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        partial_rotary_factor: float = 1.0,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        self.layer_id = layer_id
        tp_size = get_tensor_model_parallel_world_size()
        # Shard attention heads across tensor-parallel ranks.
        self.total_num_heads = num_heads
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        # Each rank keeps at least one KV head even when tp_size > kv heads.
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = head_dim
        # V may use a different head dim than Q/K; default to head_dim.
        self.v_head_dim = v_head_dim if v_head_dim is not None else head_dim
        # Per-rank slice sizes used to split the fused QKV projection output.
        self.q_size = self.num_heads * self.head_dim
        self.k_size = self.num_kv_heads * self.head_dim
        self.v_size = self.num_kv_heads * self.v_head_dim
        self.v_scale = v_scale
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings
        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=attention_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
            v_head_size=self.v_head_dim,
        )
        # Output projection maps from the (unpadded) value head dim.
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.v_head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            reduce_results=True,
            prefix=f"{prefix}.o_proj",
        )
        self.rotary_emb = get_rope(
            head_size=self.head_dim,
            max_position=max_position_embeddings,
            rope_parameters={
                "rope_type": "default",
                "rope_theta": rope_theta,
                "partial_rotary_factor": partial_rotary_factor,
            },
        )
        # Learned per-head sink bias for sliding-window layers, if enabled;
        # sharded over TP ranks during weight loading.
        self.attention_sink_bias = (
            torch.nn.Parameter(torch.empty(self.num_heads), requires_grad=False)
            if add_swa_attention_sink_bias
            else None
        )
        # -1 means "no sliding window" (full attention).
        sliding_window = sliding_window_size if sliding_window_size > -1 else None
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            per_layer_sliding_window=sliding_window,
            attn_type=AttentionType.DECODER,
            prefix=f"{prefix}.attn",
            sinks=self.attention_sink_bias,
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        # Apply v_scale before attention
        if self.v_scale is not None:
            v = v * self.v_scale
        # Zero-pad V from v_head_dim up to head_dim so the attention kernel
        # sees uniform head sizes; the padding is sliced off below.
        v = v.view(-1, self.num_kv_heads, self.v_head_dim)
        v = torch.nn.functional.pad(v, [0, self.head_dim - self.v_head_dim], value=0)
        v = v.view(-1, self.num_kv_heads * self.head_dim)
        attn_output = self.attn(q, k, v)
        # Drop the padded tail of each head before the output projection.
        attn_output = attn_output.view(-1, self.num_heads, self.head_dim)[
            ..., : self.v_head_dim
        ].reshape(-1, self.num_heads * self.v_head_dim)
        output, _ = self.o_proj(attn_output)
        return output
class MiMoV2FlashDecoderLayer(nn.Module):
    """One MiMo-V2 decoder layer.

    Chooses between full attention and sliding-window ("compressed softmax")
    attention per layer via `config.hybrid_layer_pattern`, and between a
    dense MLP and a sparse MoE block via `config.moe_layer_freq`.
    """

    def __init__(self, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        config = vllm_config.model_config.hf_text_config
        quant_config = vllm_config.quant_config
        # Layer index is recovered from the module prefix string.
        layer_id = extract_layer_index(prefix)
        self.hidden_size = config.hidden_size
        self.config = config
        self.layer_id = layer_id
        rope_theta = getattr(config, "rope_theta", 1000000)
        max_position_embeddings = getattr(config, "max_position_embeddings", 32768)
        v_scale = getattr(config, "attention_value_scale", None)
        if self.is_compressed_softmax_layer():
            # Sliding-window attention layer: uses the swa_* head geometry
            # and optionally a learned attention-sink bias.
            self.self_attn = MiMoV2Attention(
                hidden_size=self.hidden_size,
                num_heads=config.swa_num_attention_heads,
                num_kv_heads=config.swa_num_key_value_heads,
                head_dim=config.swa_head_dim,
                v_head_dim=getattr(config, "swa_v_head_dim", None),
                v_scale=v_scale,
                sliding_window_size=config.sliding_window_size,
                attention_bias=config.attention_bias,
                add_swa_attention_sink_bias=getattr(
                    config, "add_swa_attention_sink_bias", False
                ),
                layer_id=layer_id,
                rope_theta=getattr(config, "swa_rope_theta", rope_theta),
                max_position_embeddings=max_position_embeddings,
                quant_config=quant_config,
                partial_rotary_factor=getattr(config, "partial_rotary_factor", 1.0),
                prefix=f"{prefix}.self_attn",
            )
        else:
            self.self_attn = MiMoV2Attention(
                hidden_size=self.hidden_size,
                num_heads=config.num_attention_heads,
                num_kv_heads=config.num_key_value_heads,
                head_dim=config.head_dim,
                v_head_dim=getattr(config, "v_head_dim", None),
                v_scale=v_scale,
                sliding_window_size=-1,  # normal attention
                attention_bias=config.attention_bias,
                layer_id=layer_id,
                rope_theta=rope_theta,
                max_position_embeddings=max_position_embeddings,
                quant_config=quant_config,
                partial_rotary_factor=getattr(config, "partial_rotary_factor", 1.0),
                prefix=f"{prefix}.self_attn",
            )
        self.is_layer_sparse = self.is_moe_layer(layer_id)
        if self.is_layer_sparse:
            self.mlp = MiMoV2MoE(
                vllm_config=vllm_config,
                prefix=f"{prefix}.mlp",
            )
        else:
            self.mlp = MiMoV2MLP(
                hidden_size=self.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
            )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.layernorm_epsilon
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # Pre-norm residual structure; residual is None only for the first
        # layer of this pipeline stage.
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual

    def is_moe_layer(self, layer_idx: int) -> bool:
        # moe_layer_freq is expected to be a per-layer truthy sequence;
        # an int value means "no per-layer MoE pattern".
        return (
            hasattr(self.config, "moe_layer_freq")
            and layer_idx >= 0
            and not isinstance(self.config.moe_layer_freq, int)
            and self.config.moe_layer_freq[layer_idx]
        )

    def is_compressed_softmax_layer(self) -> bool:
        # hybrid_layer_pattern marks sliding-window layers with 1.
        return self.config.hybrid_layer_pattern[self.layer_id] == 1
class MiMoV2Model(nn.Module):
    """MiMo-V2 transformer stack: embeddings, decoder layers, final norm.

    Pipeline-parallel aware: only the first rank owns the embedding (unless
    tied with the last-rank LM head) and only the last rank owns the final
    norm; intermediate ranks pass hidden_states/residual tensors along.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config.get_text_config()
        quant_config = vllm_config.quant_config
        eplb_config = vllm_config.parallel_config.eplb_config
        self.config = config
        self.quant_config = quant_config
        self.vocab_size = config.vocab_size
        self.num_redundant_experts = eplb_config.num_redundant_experts
        # Last rank also needs the embedding when weights are tied to lm_head.
        if get_pp_group().is_first_rank or (
            config.tie_word_embeddings and get_pp_group().is_last_rank
        ):
            self.embed_tokens = VocabParallelEmbedding(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
                prefix=f"{prefix}.embed_tokens",
            )
        else:
            self.embed_tokens = PPMissingLayer()
        # make_layers assigns this PP rank its [start_layer, end_layer) slice.
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: MiMoV2FlashDecoderLayer(
                vllm_config=vllm_config,
                prefix=prefix,
            ),
            prefix=f"{prefix}.layers",
        )
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
        else:
            self.norm = PPMissingLayer()

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings for the given input ids."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        if get_pp_group().is_first_rank:
            # First PP rank: start from embeddings (precomputed or looked up).
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            # Later PP ranks: resume from tensors sent by the previous rank.
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        for idx, layer in enumerate(
            islice(self.layers, self.start_layer, self.end_layer)
        ):
            hidden_states, residual = layer(positions, hidden_states, residual)
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        return FusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.n_routed_experts,
            num_redundant_experts=self.num_redundant_experts,
        )

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, remapping fused/stacked parameter names.

        Matching precedence per weight: KV-cache scales, then per-expert
        params, then stacked (qkv / gate_up) params, then plain params.
        Returns the set of parameter names actually loaded.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        tp_rank = get_tensor_model_parallel_rank()
        tp_size = get_tensor_model_parallel_world_size()
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()
        expert_params_mapping = self.get_expert_mapping()
        for name, loaded_weight in weights:
            # Skip RoPE caches (recomputed) and MTP weights (separate model).
            if "rotary_emb.inv_freq" in name:
                continue
            if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name:
                continue
            if "mtp" in name:
                continue
            if self.quant_config is not None:
                # Remap KV-cache quantization scales to their target params.
                cache_scale_name = self.quant_config.get_cache_scale(name)
                if cache_scale_name is not None and cache_scale_name in params_dict:
                    param = params_dict[cache_scale_name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    kv_scale = loaded_weight
                    # Collapse multi-element scale tensors to a single scalar.
                    if kv_scale.dim() > 0 and kv_scale.numel() > 1:
                        kv_scale = kv_scale.view(-1)[0]
                    weight_loader(param, kv_scale)
                    loaded_params.add(cache_scale_name)
                    continue
            # Per-expert weights (gate/up/down proj of each routed expert).
            expert_matched = False
            for param_name, weight_name, expert_id, shard_id in expert_params_mapping:
                if weight_name not in name:
                    continue
                name_rewritten = name.replace(weight_name, param_name)
                # Skip params owned by other pipeline stages.
                if is_pp_missing_parameter(name_rewritten, self):
                    continue
                if (
                    name_rewritten.endswith(".bias") or name_rewritten.endswith("_bias")
                ) and name_rewritten not in params_dict:
                    continue
                if name_rewritten not in params_dict:
                    continue
                param = params_dict[name_rewritten]
                weight_loader = param.weight_loader
                weight_loader(
                    param,
                    loaded_weight,
                    name_rewritten,
                    shard_id=shard_id,
                    expert_id=expert_id,
                )
                loaded_params.add(name_rewritten)
                expert_matched = True
                break
            if expert_matched:
                continue
            # Stacked params: q/k/v -> qkv_proj, gate/up -> gate_up_proj.
            stacked_matched = False
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name_rewritten = name.replace(weight_name, param_name)
                if (
                    name_rewritten.endswith(".bias")
                    and name_rewritten not in params_dict
                ):
                    continue
                if is_pp_missing_parameter(name_rewritten, self):
                    continue
                if name_rewritten not in params_dict:
                    continue
                param = params_dict[name_rewritten]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight, shard_id)
                loaded_params.add(name_rewritten)
                stacked_matched = True
                break
            if stacked_matched:
                continue
            # Plain params, with optional KV-scale name remapping.
            if name.endswith(".bias") and name not in params_dict:
                continue
            orig_name = name
            mapped_name = maybe_remap_kv_scale_name(name, params_dict)
            name = mapped_name if mapped_name is not None else orig_name
            if name not in params_dict:
                continue
            param = params_dict[name]
            if "attention_sink_bias" in name:
                # Sink bias is stored per-head; shard the head dim across TP.
                total_heads = loaded_weight.shape[0]
                heads_per_rank = total_heads // tp_size
                head_start = tp_rank * heads_per_rank
                narrow_weight = loaded_weight.narrow(0, head_start, heads_per_rank)
                param.data.copy_(narrow_weight)
                loaded_params.add(name)
            else:
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
                loaded_params.add(name)
        return loaded_params
class MiMoV2FlashForCausalLM(nn.Module, SupportsPP, MixtureOfExperts):
    """Causal-LM wrapper around MiMoV2Model: adds the LM head and logits
    processor, plus EAGLE3 auxiliary-hidden-state hooks."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        self.model = MiMoV2Model(
            vllm_config=vllm_config,
            prefix=maybe_prefix(prefix, "model"),
        )
        # LM head lives only on the last pipeline-parallel rank.
        if get_pp_group().is_last_rank:
            self.lm_head = ParallelLMHead(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "lm_head"),
            )
        else:
            self.lm_head = PPMissingLayer()
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def set_aux_hidden_state_layers(self, layers: tuple[int, ...]) -> None:
        """Select which layers expose auxiliary hidden states (EAGLE3)."""
        self.model.aux_hidden_state_layers = layers

    def get_eagle3_aux_hidden_state_layers(self) -> tuple[int, ...]:
        """Default EAGLE3 tap points: an early, a middle, and a late layer."""
        num_layers = len(self.model.layers)
        return (2, num_layers // 2, num_layers - 3)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        return self.model.get_expert_mapping()

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/mimo_v2_flash.py",
"license": "Apache License 2.0",
"lines": 607,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/openai/test_serving_chat_stream_harmony.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for harmony streaming delta extraction.
"""
from dataclasses import dataclass, field
from unittest.mock import patch
import pytest
from vllm.entrypoints.openai.chat_completion.stream_harmony import (
TokenState,
extract_harmony_streaming_delta,
)
@dataclass
class MockMessage:
    """Mock message object for testing."""

    # Harmony channel the message was emitted on (tests use "final",
    # "analysis", "commentary").
    channel: str | None = None
    # Tool-call recipient (e.g. "functions.get_weather"); None for plain text.
    recipient: str | None = None
@dataclass
class MockStreamableParser:
    """Mock StreamableParser for testing without openai_harmony dependency."""

    # Previously parsed messages; the tests use these to derive the index of
    # the next streamed tool call.
    messages: list[MockMessage] = field(default_factory=list)
class TestExtractHarmonyStreamingDelta:
"""Tests for extract_harmony_streaming_delta function."""
@pytest.mark.parametrize(
"delta_text,expected_content",
[
("Hello, world!", "Hello, world!"),
("", ""),
],
)
def test_final_channel_returns_content_delta(self, delta_text, expected_content):
"""Test that final channel returns a DeltaMessage with content."""
parser = MockStreamableParser()
# Updated to use TokenState list
token_states = [TokenState(channel="final", recipient=None, text=delta_text)]
delta_message, tools_streamed = extract_harmony_streaming_delta(
harmony_parser=parser,
token_states=token_states,
prev_recipient=None,
include_reasoning=False,
)
assert delta_message is not None
assert delta_message.content == expected_content
assert tools_streamed is False
@pytest.mark.parametrize(
"include_reasoning,expected_has_message",
[
(True, True),
(False, False),
],
)
def test_analysis_channel_reasoning(self, include_reasoning, expected_has_message):
"""Test analysis channel respects include_reasoning flag."""
parser = MockStreamableParser()
text = "Let me think..."
token_states = [TokenState(channel="analysis", recipient=None, text=text)]
delta_message, tools_streamed = extract_harmony_streaming_delta(
harmony_parser=parser,
token_states=token_states,
prev_recipient=None,
include_reasoning=include_reasoning,
)
if expected_has_message:
assert delta_message is not None
assert delta_message.reasoning == text
else:
assert delta_message is None
assert tools_streamed is False
@pytest.mark.parametrize("channel", ["commentary", "analysis"])
@patch("vllm.entrypoints.openai.chat_completion.stream_harmony.make_tool_call_id")
def test_new_tool_call(self, mock_make_tool_call_id, channel):
"""Test new tool call creation when recipient changes."""
mock_make_tool_call_id.return_value = "call_test123"
parser = MockStreamableParser()
token_states = [
TokenState(channel=channel, recipient="functions.get_weather", text="")
]
delta_message, tools_streamed = extract_harmony_streaming_delta(
harmony_parser=parser,
token_states=token_states,
prev_recipient=None,
include_reasoning=False,
)
assert delta_message is not None
assert len(delta_message.tool_calls) == 1
tool_call = delta_message.tool_calls[0]
assert tool_call.id == "call_test123"
assert tool_call.type == "function"
assert tool_call.function.name == "get_weather"
assert tool_call.function.arguments == ""
assert tool_call.index == 0
assert tools_streamed is True
@pytest.mark.parametrize("channel", ["commentary", "analysis"])
def test_tool_call_argument_streaming(self, channel):
"""Test streaming tool call arguments (same recipient)."""
parser = MockStreamableParser()
args_text = '{"location": "Paris"}'
token_states = [
TokenState(
channel=channel, recipient="functions.get_weather", text=args_text
)
]
delta_message, tools_streamed = extract_harmony_streaming_delta(
harmony_parser=parser,
token_states=token_states,
prev_recipient="functions.get_weather",
include_reasoning=False,
)
assert delta_message is not None
tool_call = delta_message.tool_calls[0]
assert tool_call.id is None
assert tool_call.function.arguments == args_text
assert tool_call.index == 0
assert tools_streamed is True
    @pytest.mark.parametrize("channel", ["commentary", "analysis"])
    def test_tool_call_empty_arguments_returns_none(self, channel):
        """Test empty delta_text with same recipient returns None."""
        parser = MockStreamableParser()
        # Recipient matches prev_recipient and there is no new text, so there
        # is nothing to stream for this batch.
        token_states = [
            TokenState(channel=channel, recipient="functions.get_weather", text="")
        ]
        delta_message, tools_streamed = extract_harmony_streaming_delta(
            harmony_parser=parser,
            token_states=token_states,
            prev_recipient="functions.get_weather",
            include_reasoning=False,
        )
        assert delta_message is None
        assert tools_streamed is False
    def test_tool_call_index_from_previous_messages(self):
        """Test tool call index accounts for previous function messages."""
        # Only messages addressed to a function recipient count toward the
        # tool-call index; analysis/final messages are skipped.
        messages = [
            MockMessage(channel="analysis", recipient=None),  # Not counted
            MockMessage(channel="commentary", recipient="functions.tool1"),  # Counted
            MockMessage(channel="final", recipient=None),  # Not counted
        ]
        parser = MockStreamableParser(messages=messages)
        token_states = [
            TokenState(channel="commentary", recipient="functions.tool2", text="args")
        ]
        delta_message, _ = extract_harmony_streaming_delta(
            harmony_parser=parser,
            token_states=token_states,
            prev_recipient="functions.tool2",
            include_reasoning=False,
        )
        # One prior function message -> the streaming call gets index 1.
        assert delta_message.tool_calls[0].index == 1
def test_returns_preambles_as_content(self):
"""Test that commentary with no recipient (preamble) is user content."""
parser = MockStreamableParser()
delta_text = "some text"
token_states = [
TokenState(channel="commentary", recipient=None, text=delta_text)
]
delta_message, tools_streamed = extract_harmony_streaming_delta(
harmony_parser=parser,
token_states=token_states,
prev_recipient=None,
include_reasoning=True,
)
assert delta_message.content == delta_text
assert tools_streamed is False
    @pytest.mark.parametrize(
        "channel,recipient",
        [
            (None, None),
            ("unknown_channel", None),
            ("commentary", "browser.search"),  # non-"functions." recipient
        ],
    )
    def test_returns_none_for_invalid_inputs(self, channel, recipient):
        """Test that invalid channel/recipient combinations return None."""
        parser = MockStreamableParser()
        token_states = [
            TokenState(channel=channel, recipient=recipient, text="some text")
        ]
        delta_message, tools_streamed = extract_harmony_streaming_delta(
            harmony_parser=parser,
            token_states=token_states,
            prev_recipient=None,
            include_reasoning=True,
        )
        # No recognized channel/recipient combination -> no delta at all.
        assert delta_message is None
        assert tools_streamed is False
def test_consecutive_token_grouping(self):
"""
Test that consecutive tokens with the same channel/recipient
are merged into a single processing group.
"""
parser = MockStreamableParser()
token_states = [
TokenState("final", None, "H"),
TokenState("final", None, "el"),
TokenState("final", None, "lo"),
TokenState("final", None, ","),
TokenState("final", None, " World"),
]
delta_message, _ = extract_harmony_streaming_delta(
harmony_parser=parser,
token_states=token_states,
prev_recipient=None,
include_reasoning=False,
)
assert delta_message is not None
assert delta_message.content == "Hello, World"
    @patch("vllm.entrypoints.openai.chat_completion.stream_harmony.make_tool_call_id")
    def test_complex_batch_permutation(self, mock_make_id):
        """
        Test a complex permutation: Reasoning -> Tool Call -> Content.
        This verifies that multiple distinct actions in one batch
        are all captured in the single DeltaMessage.
        """
        mock_make_id.return_value = "call_batch_test"
        parser = MockStreamableParser()
        token_states = [
            # 1. Reasoning
            TokenState("analysis", None, "Reasoning about query..."),
            # 2. Tool Calling
            TokenState("commentary", "functions.search", '{"query":'),
            TokenState("commentary", "functions.search", ' "vllm"}'),
            # 3. Final Content
            TokenState("final", None, "."),
        ]
        delta_message, tools_streamed = extract_harmony_streaming_delta(
            harmony_parser=parser,
            token_states=token_states,
            prev_recipient=None,
            include_reasoning=True,
        )
        assert delta_message is not None
        assert delta_message.reasoning == "Reasoning about query..."
        # We expect 2 objects for 1 logical tool call:
        # 1. The definition (id, name, type)
        # 2. The arguments payload
        assert len(delta_message.tool_calls) == 2
        header = delta_message.tool_calls[0]
        payload = delta_message.tool_calls[1]
        assert header.function.name == "search"
        assert header.id == "call_batch_test"
        assert header.index == 0
        # Both deltas refer to the same logical call, hence the same index.
        assert payload.index == 0
        # The two argument fragments are merged into a single payload delta.
        assert payload.function.arguments == '{"query": "vllm"}'
        assert delta_message.content == "."
        assert tools_streamed is True
    @patch("vllm.entrypoints.openai.chat_completion.stream_harmony.make_tool_call_id")
    def test_tool_call_index_consistency_with_ongoing_call(self, mock_make_id):
        """
        Test that an ongoing tool call continuation and subsequent new calls
        maintain correct indexing when interleaved with content.
        """
        # Two new calls (tool_b, tool_c) each consume one generated id.
        mock_make_id.side_effect = ["id_b", "id_c"]
        # One already-completed function message shifts all indices by 1.
        messages = [
            MockMessage(channel="commentary", recipient="functions.previous_tool")
        ]
        parser = MockStreamableParser(messages=messages)
        token_states = [
            TokenState("commentary", "functions.tool_a", '{"key_a": "val_a"}'),
            TokenState("final", None, "Thinking..."),
            TokenState("commentary", "functions.tool_b", '{"key_b": "val_b"}'),
            TokenState("final", None, " Thinking again..."),
            TokenState("commentary", "functions.tool_c", '{"key_c": "val_c"}'),
        ]
        delta_message, _ = extract_harmony_streaming_delta(
            harmony_parser=parser,
            token_states=token_states,
            prev_recipient="functions.tool_a",
            include_reasoning=False,
        )
        assert delta_message is not None
        # tool_a continues the ongoing call (prev_recipient): args only, no id.
        tool_a_deltas = [t for t in delta_message.tool_calls if t.index == 1]
        assert len(tool_a_deltas) > 0
        assert tool_a_deltas[0].id is None
        assert tool_a_deltas[0].function.arguments == '{"key_a": "val_a"}'
        # tool_b is new: a header delta carrying an id, then its arguments.
        tool_b_header = next(t for t in delta_message.tool_calls if t.id == "id_b")
        assert tool_b_header.index == 2
        tool_b_args = next(
            t for t in delta_message.tool_calls if t.index == 2 and t.id is None
        )
        assert tool_b_args.function.arguments == '{"key_b": "val_b"}'
        tool_c_start = next(t for t in delta_message.tool_calls if t.id == "id_c")
        assert tool_c_start.index == 3
        tool_c_args = next(
            t for t in delta_message.tool_calls if t.index == 3 and t.id is None
        )
        assert tool_c_args.function.arguments == '{"key_c": "val_c"}'
        # Both "final" fragments are concatenated into the content delta.
        assert delta_message.content == "Thinking... Thinking again..."
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/test_serving_chat_stream_harmony.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/entrypoints/openai/test_embedding_shape_validation.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Embedding shape validation in multimodal APIs.
Tests verify that embeddings with correct ndim but incorrect hidden_size
are rejected before they can cause crashes during model inference.
Validation is performed by the parser (MultiModalDataParser) and EmbeddingItems
classes, not by MediaIO classes.
"""
import pytest
import torch
from vllm.multimodal.parse import (
AudioEmbeddingItems,
ImageEmbeddingItems,
MultiModalDataParser,
VideoEmbeddingItems,
)
class TestMultiModalParserShapeValidation:
    """Test hidden_size validation in MultiModalDataParser."""

    EXPECTED_HIDDEN_SIZE = 768

    def _assert_parser_rejects(self, modality, wrong_hidden_size):
        """Parse one mismatched batched embedding; check the error message."""
        parser = MultiModalDataParser(expected_hidden_size=self.EXPECTED_HIDDEN_SIZE)
        bad_embeds = torch.randn(2, 100, wrong_hidden_size)
        with pytest.raises(ValueError) as exc_info:
            parser.parse_mm_data({modality: bad_embeds})
        message = str(exc_info.value).lower()
        assert modality in message
        assert "hidden dimension mismatch" in message

    def test_image_embeddings_correct_hidden_size_accepted(self):
        """Baseline: Image embeddings with correct hidden_size should work."""
        parser = MultiModalDataParser(expected_hidden_size=self.EXPECTED_HIDDEN_SIZE)
        good_embeds = torch.randn(2, 100, self.EXPECTED_HIDDEN_SIZE)
        parsed = parser.parse_mm_data({"image": good_embeds})
        assert "image" in parsed
        assert isinstance(parsed["image"], ImageEmbeddingItems)
        assert parsed["image"].get_count() == 2

    def test_image_embeddings_wrong_hidden_size_rejected(self):
        """Security: Image embeddings with wrong hidden_size should be rejected."""
        self._assert_parser_rejects("image", wrong_hidden_size=4096)

    def test_audio_embeddings_wrong_hidden_size_rejected(self):
        """Security: Audio embeddings with wrong hidden_size should be rejected."""
        self._assert_parser_rejects("audio", wrong_hidden_size=2048)

    def test_video_embeddings_wrong_hidden_size_rejected(self):
        """Security: Video embeddings with wrong hidden_size should be rejected."""
        self._assert_parser_rejects("video", wrong_hidden_size=512)

    def test_list_of_embeddings_validates_each(self):
        """Security: Each embedding in list should be validated."""
        parser = MultiModalDataParser(expected_hidden_size=self.EXPECTED_HIDDEN_SIZE)
        # First tensor is valid; the second has a mismatched hidden dim.
        mixed_batch = [
            torch.randn(100, self.EXPECTED_HIDDEN_SIZE),
            torch.randn(100, 1024),
        ]
        with pytest.raises(ValueError) as exc_info:
            parser.parse_mm_data({"image": mixed_batch})
        # The error should identify which list entry failed.
        assert "[1]" in str(exc_info.value)

    def test_validation_disabled_allows_any_size(self):
        """When validation disabled (legacy), any hidden_size allowed."""
        parser = MultiModalDataParser(expected_hidden_size=None)
        embeds = torch.randn(2, 100, 12345)
        # Should not raise with validation turned off.
        parsed = parser.parse_mm_data({"image": embeds})
        assert "image" in parsed
        assert isinstance(parsed["image"], ImageEmbeddingItems)
class TestEmbeddingItemsDirectValidation:
    """Direct tests for EmbeddingItems hidden_size validation.

    These construct the *EmbeddingItems classes directly (bypassing the
    parser) to confirm validation happens at construction time.
    """
    def test_image_embedding_items_validates_batched_tensor(self):
        """Test validation for batched (3D) image embeddings."""
        expected = 768
        wrong = 1024
        # Valid
        valid = torch.randn(2, 100, expected)
        items = ImageEmbeddingItems(valid, expected_hidden_size=expected)
        assert items.get_count() == 2
        # Invalid
        invalid = torch.randn(2, 100, wrong)
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(invalid, expected_hidden_size=expected)
        # The error must report both the received and the expected size.
        assert str(wrong) in str(exc_info.value)
        assert str(expected) in str(exc_info.value)
    def test_image_embedding_items_validates_list_of_tensors(self):
        """Test validation for list of 2D image embeddings."""
        expected = 768
        wrong = 512
        # Valid list
        valid_list = [torch.randn(100, expected), torch.randn(50, expected)]
        items = ImageEmbeddingItems(valid_list, expected_hidden_size=expected)
        assert items.get_count() == 2
        # Invalid list
        invalid_list = [torch.randn(100, expected), torch.randn(50, wrong)]
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(invalid_list, expected_hidden_size=expected)
        # The failing list position should be named in the message.
        assert "[1]" in str(exc_info.value)
    def test_audio_embedding_items_validates(self):
        """Test validation for audio embeddings."""
        expected = 768
        wrong = 256
        invalid = torch.randn(2, 100, wrong)
        with pytest.raises(ValueError) as exc_info:
            AudioEmbeddingItems(invalid, expected_hidden_size=expected)
        assert "audio" in str(exc_info.value).lower()
    def test_video_embedding_items_validates(self):
        """Test validation for video embeddings."""
        expected = 768
        wrong = 384
        invalid = torch.randn(2, 100, wrong)
        with pytest.raises(ValueError) as exc_info:
            VideoEmbeddingItems(invalid, expected_hidden_size=expected)
        assert "video" in str(exc_info.value).lower()
class TestShapeValidationIntegration:
    """Integration tests verifying attack scenarios are blocked."""

    def _run_attack(self, modality, wrong_hidden_size):
        """Feed a deliberately mis-sized embedding through the parser path."""
        parser = MultiModalDataParser(expected_hidden_size=768)
        forged = torch.randn(1, 100, wrong_hidden_size)
        with pytest.raises(ValueError):
            parser.parse_mm_data({modality: forged})

    def test_attack_scenario_multimodal_image(self):
        """
        Simulate attack through Chat API with image embeddings.
        Verifies validation occurs in multimodal parser path.
        """
        self._run_attack("image", 4096)

    def test_attack_scenario_multimodal_audio(self):
        """
        Simulate attack through Chat API with audio embeddings.
        Verifies validation occurs in multimodal parser path.
        """
        self._run_attack("audio", 2048)

    def test_attack_scenario_multimodal_video(self):
        """
        Simulate attack through Chat API with video embeddings.
        Verifies validation occurs in multimodal parser path.
        """
        self._run_attack("video", 1024)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/test_embedding_shape_validation.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/multimodal/test_embedding_shape_validation_unit.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for embedding shape validation.
Simple, fast unit tests that can run without server fixtures.
Run with: pytest tests/multimodal/test_embedding_shape_validation_unit.py -v
"""
import pytest
import torch
from vllm.multimodal.parse import (
AudioEmbeddingItems,
ImageEmbeddingItems,
)
class TestImageEmbedBasicValidation:
    """Test basic ndim validation in image embeddings via ImageEmbeddingItems."""
    def test_valid_2d_tensor_accepted(self):
        """Baseline: 2D tensors should be accepted."""
        valid_tensor = torch.randn(10, 768, dtype=torch.float32)
        # Should not raise - 2D is valid; each row counts as one item.
        items = ImageEmbeddingItems(valid_tensor)
        assert items.get_count() == 10
    def test_valid_3d_tensor_accepted(self):
        """Baseline: 3D tensors should be accepted."""
        valid_tensor = torch.randn(2, 10, 768, dtype=torch.float32)
        # Should not raise - 3D is valid; the leading batch dim is the count.
        items = ImageEmbeddingItems(valid_tensor)
        assert items.get_count() == 2
    def test_valid_list_of_2d_tensors_accepted(self):
        """Baseline: List of 2D tensors should be accepted."""
        tensors = [
            torch.randn(10, 768, dtype=torch.float32),
            torch.randn(15, 768, dtype=torch.float32),
        ]
        # Should not raise; each list entry is one item.
        items = ImageEmbeddingItems(tensors)
        assert items.get_count() == 2
    def test_1d_tensor_rejected(self):
        """Security: 1D tensors should be rejected (invalid ndim)."""
        invalid_tensor = torch.randn(768, dtype=torch.float32)  # 1D
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(invalid_tensor)
        assert "must be 2D" in str(exc_info.value) or "3D" in str(exc_info.value)
    def test_4d_tensor_rejected(self):
        """Security: 4D tensors should be rejected (invalid ndim)."""
        invalid_tensor = torch.randn(1, 2, 10, 768, dtype=torch.float32)  # 4D
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(invalid_tensor)
        assert "must be 2D" in str(exc_info.value) or "3D" in str(exc_info.value)
    def test_hidden_size_validation_correct_size(self):
        """Embeddings with correct hidden size should be accepted."""
        expected_hidden_size = 768
        valid_tensor = torch.randn(10, expected_hidden_size, dtype=torch.float32)
        # Should not raise
        items = ImageEmbeddingItems(
            valid_tensor, expected_hidden_size=expected_hidden_size
        )
        assert items.get_count() == 10
    def test_hidden_size_validation_wrong_size_rejected(self):
        """Embeddings with wrong hidden size should be rejected."""
        expected_hidden_size = 768
        wrong_hidden_size = 4096
        invalid_tensor = torch.randn(10, wrong_hidden_size, dtype=torch.float32)
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(
                invalid_tensor, expected_hidden_size=expected_hidden_size
            )
        # Error message must name both sizes involved in the mismatch.
        error_msg = str(exc_info.value)
        assert "hidden dimension mismatch" in error_msg.lower()
        assert str(wrong_hidden_size) in error_msg
        assert str(expected_hidden_size) in error_msg
class TestAudioEmbedBasicValidation:
    """Test basic ndim validation in audio embeddings via AudioEmbeddingItems."""
    def test_valid_2d_tensor_accepted(self):
        """Baseline: 2D tensors should be accepted."""
        valid_tensor = torch.randn(10, 768, dtype=torch.float32)
        # Should not raise - 2D is valid; each row counts as one item.
        items = AudioEmbeddingItems(valid_tensor)
        assert items.get_count() == 10
    def test_valid_3d_tensor_accepted(self):
        """Baseline: 3D tensors should be accepted."""
        valid_tensor = torch.randn(2, 10, 768, dtype=torch.float32)
        # Should not raise - 3D is valid; the leading batch dim is the count.
        items = AudioEmbeddingItems(valid_tensor)
        assert items.get_count() == 2
    def test_valid_list_of_2d_tensors_accepted(self):
        """Baseline: List of 2D tensors should be accepted."""
        tensors = [
            torch.randn(10, 768, dtype=torch.float32),
            torch.randn(15, 768, dtype=torch.float32),
        ]
        # Should not raise; each list entry is one item.
        items = AudioEmbeddingItems(tensors)
        assert items.get_count() == 2
    def test_1d_tensor_rejected(self):
        """Security: 1D tensors should be rejected (invalid ndim)."""
        invalid_tensor = torch.randn(768, dtype=torch.float32)  # 1D
        with pytest.raises(ValueError) as exc_info:
            AudioEmbeddingItems(invalid_tensor)
        assert "must be 2D" in str(exc_info.value) or "3D" in str(exc_info.value)
    def test_scalar_rejected(self):
        """Security: Scalar tensors should be rejected."""
        invalid_tensor = torch.tensor(1.0)  # 0D (scalar)
        with pytest.raises(ValueError):
            AudioEmbeddingItems(invalid_tensor)
    def test_hidden_size_validation_correct_size(self):
        """Embeddings with correct hidden size should be accepted."""
        expected_hidden_size = 768
        valid_tensor = torch.randn(10, expected_hidden_size, dtype=torch.float32)
        # Should not raise
        items = AudioEmbeddingItems(
            valid_tensor, expected_hidden_size=expected_hidden_size
        )
        assert items.get_count() == 10
    def test_hidden_size_validation_wrong_size_rejected(self):
        """Embeddings with wrong hidden size should be rejected."""
        expected_hidden_size = 768
        wrong_hidden_size = 4096
        invalid_tensor = torch.randn(10, wrong_hidden_size, dtype=torch.float32)
        with pytest.raises(ValueError) as exc_info:
            AudioEmbeddingItems(
                invalid_tensor, expected_hidden_size=expected_hidden_size
            )
        # Error message must name both sizes involved in the mismatch.
        error_msg = str(exc_info.value)
        assert "hidden dimension mismatch" in error_msg.lower()
        assert str(wrong_hidden_size) in error_msg
        assert str(expected_hidden_size) in error_msg
class TestShapeValidationDoSPrevention:
    """
    Tests for DoS prevention through shape validation.
    Verifies that embeddings with incorrect shapes are rejected early,
    preventing crashes during model inference.
    """
    def test_prevent_crash_from_wrong_shape_image_embeds(self):
        """
        Prevent crash scenario: wrong hidden size in image embeddings.
        Without validation, this would pass initial checks but crash later
        during model forward pass when dimensions don't match.
        """
        expected_hidden_size = 768  # Typical model hidden size
        wrong_hidden_size = 4096  # Wrong size (e.g., Llama-sized)
        wrong_embedding = torch.randn(100, wrong_hidden_size, dtype=torch.float32)
        # Should be rejected at instantiation time, not during inference
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(
                wrong_embedding, expected_hidden_size=expected_hidden_size
            )
        error_msg = str(exc_info.value)
        assert "hidden dimension mismatch" in error_msg.lower()
        assert str(expected_hidden_size) in error_msg  # Expected
        assert str(wrong_hidden_size) in error_msg  # Received
    def test_prevent_crash_from_wrong_shape_audio_embeds(self):
        """
        Prevent crash scenario: wrong hidden size in audio embeddings.
        """
        expected_hidden_size = 768
        wrong_hidden_size = 4096
        wrong_embedding = torch.randn(100, wrong_hidden_size, dtype=torch.float32)
        with pytest.raises(ValueError) as exc_info:
            AudioEmbeddingItems(
                wrong_embedding, expected_hidden_size=expected_hidden_size
            )
        error_msg = str(exc_info.value)
        assert "hidden dimension mismatch" in error_msg.lower()
    def test_extremely_large_hidden_size_rejected(self):
        """Security: Prevent DoS from extremely large embeddings."""
        expected_hidden_size = 768
        huge_hidden_size = 100000  # Large but not extreme to avoid test OOM
        invalid_tensor = torch.randn(10, huge_hidden_size, dtype=torch.float32)
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(
                invalid_tensor, expected_hidden_size=expected_hidden_size
            )
        assert "hidden dimension mismatch" in str(exc_info.value).lower()
    def test_batch_with_mixed_hidden_sizes_rejected(self):
        """All embeddings in a list must have the same hidden size."""
        expected_hidden_size = 768
        # One correct, one wrong
        batch = [
            torch.randn(10, expected_hidden_size, dtype=torch.float32),
            torch.randn(10, expected_hidden_size + 100, dtype=torch.float32),  # Wrong!
        ]
        # Should fail on the second one
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(batch, expected_hidden_size=expected_hidden_size)
        assert "hidden dimension mismatch" in str(exc_info.value).lower()
# Allow running this module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__, "-v", "--tb=short"])
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/test_embedding_shape_validation_unit.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/engine/test_preprocess_error_handling.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch.cuda
from vllm import LLM, SamplingParams
from vllm.platforms import current_platform
from vllm.v1.engine import EngineCoreRequest
from vllm.v1.engine.core import EngineCore
MODEL_NAME = "hmellor/tiny-random-LlamaForCausalLM"
def test_preprocess_error_handling(monkeypatch: pytest.MonkeyPatch):
    """Test that preprocessing errors are handled gracefully.

    A request whose preprocessing raises must come back finished with
    ``finish_reason == "error"`` instead of crashing the engine, and the
    engine must keep serving subsequent requests.
    """
    if current_platform.is_rocm() or current_platform.is_xpu():
        pytest.skip(
            "Skipped on ROCm/XPU: this test only works with 'fork', "
            "but ROCm/XPU uses 'spawn'."
        )
    assert not torch.cuda.is_initialized(), (
        "fork needs to be used for the engine "
        "core process and this isn't possible if cuda is already initialized"
    )
    # Store original method to call for non-failing requests
    original_preprocess = EngineCore.preprocess_add_request
    # Monkeypatch to make preprocess_add_request raise an exception only for
    # requests whose first prompt token is the sentinel value 333.
    def conditional_failing_preprocess(self, request: EngineCoreRequest):
        # Fail if the first token id is 333
        if request.prompt_token_ids and request.prompt_token_ids[0] == 333:
            raise ValueError("Simulated preprocessing error!")
        return original_preprocess(self, request)
    monkeypatch.setattr(
        EngineCore, "preprocess_add_request", conditional_failing_preprocess
    )
    llm = LLM(model=MODEL_NAME)
    # Create a failing request by crafting a request with an invalid token
    # We need to use a direct approach since LLM.generate tokenizes for us
    from vllm.inputs import TokensPrompt
    # Special token id to trigger the failure
    failing_prompt = TokensPrompt(prompt_token_ids=[333])
    # The preprocessing failure must NOT propagate as an exception here: the
    # request returns finished, with no tokens and finish_reason "error".
    outputs = llm.generate(failing_prompt, SamplingParams(max_tokens=10)) # type: ignore
    assert len(outputs) == 1
    assert len(outputs[0].outputs[0].token_ids) == 0
    assert outputs[0].finished
    assert outputs[0].outputs[0].finish_reason == "error"
    # Verify the engine is still functional with a normal request
    outputs = llm.generate("Hello, my name is", SamplingParams(max_tokens=10))
    assert len(outputs) == 1
    assert len(outputs[0].outputs[0].token_ids) > 0
    assert outputs[0].outputs[0].finish_reason in ("stop", "length")
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/engine/test_preprocess_error_handling.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/jais2.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Jais2 model compatible with HuggingFace weights."""
from collections.abc import Iterable
import torch
from torch import nn
from transformers import Jais2Config
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig
from vllm.distributed import (
get_pp_group,
get_tensor_model_parallel_world_size,
)
from vllm.model_executor.layers.activation import ReLUSquaredActivation
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.sequence import IntermediateTensors
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (
AutoWeightsLoader,
PPMissingLayer,
extract_layer_index,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
class Jais2MLP(nn.Module):
    """Feed-forward block for Jais2: up-projection, ReLU², down-projection.

    Note: ``hidden_act`` is accepted for interface compatibility, but the
    activation used is always ReLU-squared.
    """

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
        bias: bool = False,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Column-parallel expansion paired with a row-parallel contraction,
        # the usual vLLM tensor-parallel MLP layout.
        self.up_proj = ColumnParallelLinear(
            input_size=hidden_size,
            output_size=intermediate_size,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.up_proj",
        )
        self.down_proj = RowParallelLinear(
            input_size=intermediate_size,
            output_size=hidden_size,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.down_proj",
        )
        self.act_fn = ReLUSquaredActivation()

    def forward(self, x):
        expanded, _ = self.up_proj(x)
        activated = self.act_fn(expanded)
        contracted, _ = self.down_proj(activated)
        return contracted
class Jais2Attention(nn.Module):
    """Multi-head self-attention for Jais2 with rotary position embeddings.

    Q/K/V projections are fused into a tensor-parallel linear layer; query
    and KV heads are partitioned (or replicated) across TP ranks, and an
    optional per-layer sliding window is read from
    ``config.interleaved_sliding_window``.
    """

    def __init__(
        self,
        config: Jais2Config,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        max_position_embeddings: int = 8192,
        quant_config: QuantizationConfig | None = None,
        bias: bool = False,
        cache_config: CacheConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Layer index is parsed from the prefix (e.g. "model.layers.3.self_attn")
        # and used below to pick this layer's sliding-window setting.
        layer_idx = extract_layer_index(prefix)
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        # MistralConfig has an optional head_dim introduced by Mistral-Nemo
        self.head_dim = getattr(
            config, "head_dim", self.hidden_size // self.total_num_heads
        )
        # Per-rank widths of the Q and K/V slices of the fused QKV output.
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        # Conventional 1/sqrt(head_dim) attention scale.
        self.scaling = self.head_dim**-0.5
        self.max_position_embeddings = max_position_embeddings
        self.qkv_proj = QKVParallelLinear(
            hidden_size=hidden_size,
            head_size=self.head_dim,
            total_num_heads=self.total_num_heads,
            total_num_kv_heads=self.total_num_kv_heads,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            input_size=self.total_num_heads * self.head_dim,
            output_size=hidden_size,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        # GGUF-quantized checkpoints use the non-NeoX rotary layout.
        is_neox_style = True
        if quant_config is not None and quant_config.get_name() == "gguf":
            is_neox_style = False
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=max_position_embeddings,
            rope_parameters=getattr(config, "rope_parameters", None),
            is_neox_style=is_neox_style,
        )
        # Sliding window: a single int applies to every layer; a list is an
        # interleaved schedule indexed (mod its length) by the layer index.
        if hasattr(config, "interleaved_sliding_window"):
            interleaved_sliding_window = config.interleaved_sliding_window
            if isinstance(interleaved_sliding_window, int):
                sliding_window = interleaved_sliding_window
            elif isinstance(interleaved_sliding_window, list):
                sw_idx = layer_idx % len(interleaved_sliding_window)
                sliding_window = interleaved_sliding_window[sw_idx]
            else:
                raise ValueError(
                    f"{type(interleaved_sliding_window)} is not supported."
                )
        else:
            sliding_window = None
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            per_layer_sliding_window=sliding_window,
            prefix=f"{prefix}.attn",
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Fused QKV projection, RoPE, attention, then output projection."""
        qkv, _ = self.qkv_proj(hidden_states)
        # Split the fused projection into this rank's Q, K, V slices.
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output
class Jais2DecoderLayer(nn.Module):
    """One Jais2 transformer decoder layer (self-attention + ReLU² MLP).

    Uses standard ``nn.LayerNorm`` (not RMSNorm). Residual scheme: each
    sub-layer's LayerNorm is applied to the running sum
    ``hidden_states + residual``, and that same sum is carried forward as
    the next residual.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        config: Jais2Config,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Fall back to the HF config from vllm_config if `config` is falsy.
        config = config or vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = self.get_quant_config(vllm_config)
        self.hidden_size = config.hidden_size
        max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
        # Support abacusai/Smaug-72B-v0.1 with attention_bias
        # Support internlm/internlm-7b with bias
        attention_bias = getattr(config, "attention_bias", False) or getattr(
            config, "bias", False
        )
        self.self_attn = Jais2Attention(
            config=config,
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=getattr(
                config, "num_key_value_heads", config.num_attention_heads
            ),
            max_position_embeddings=max_position_embeddings,
            quant_config=quant_config,
            bias=attention_bias,
            cache_config=cache_config,
            prefix=f"{prefix}.self_attn",
        )
        self.mlp = Jais2MLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
            bias=getattr(config, "mlp_bias", False),
            prefix=f"{prefix}.mlp",
        )
        self.input_layernorm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps
        )
        self.post_attention_layernorm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run one decoder layer.

        Args:
            positions: Position ids forwarded to the attention sub-layer.
            hidden_states: Current hidden states.
            residual: Running residual from the previous layer, or ``None``
                for the first layer.

        Returns:
            Tuple of (MLP output, updated residual).
        """
        # Self Attention.
        # Form the residual sum exactly once per sub-layer; the previous
        # implementation evaluated `hidden_states + residual` twice.
        if residual is None:
            residual = hidden_states
        else:
            residual = hidden_states + residual
        hidden_states = self.input_layernorm(residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        # Fully Connected
        residual = hidden_states + residual
        hidden_states = self.post_attention_layernorm(residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual

    def get_quant_config(self, vllm_config: VllmConfig) -> QuantizationConfig | None:
        """Get quantization config for this layer. Override in subclasses."""
        return vllm_config.quant_config
@support_torch_compile
class Jais2Model(nn.Module):
    """Jais2 transformer backbone: token embedding, this PP rank's slice of
    decoder layers, and a final LayerNorm on the last rank."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
        layer_type: type[nn.Module] = Jais2DecoderLayer,
    ):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        self.vocab_size = config.vocab_size
        self.org_vocab_size = config.vocab_size
        # Embeddings live on the first PP rank; with tied word embeddings the
        # last rank also needs them so lm_head can share the weight.
        if get_pp_group().is_first_rank or (
            config.tie_word_embeddings and get_pp_group().is_last_rank
        ):
            self.embed_tokens = VocabParallelEmbedding(
                self.vocab_size,
                config.hidden_size,
                org_num_embeddings=config.vocab_size,
                quant_config=quant_config,
            )
        else:
            self.embed_tokens = PPMissingLayer()
        # make_layers instantiates only this PP rank's share of the stack.
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: layer_type(
                config=config,
                vllm_config=vllm_config,
                prefix=prefix,
            ),
            prefix=f"{prefix}.layers",
        )
        if get_pp_group().is_last_rank:
            self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        else:
            self.norm = PPMissingLayer()
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings for the given ids."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors | tuple[torch.Tensor, list[torch.Tensor]]:
        """Run this pipeline rank's share of the forward pass."""
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            # Non-first ranks resume from the previous rank's activations.
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        for i in range(self.start_layer, self.end_layer):
            layer = self.layers[i]
            hidden_states, residual = layer(positions, hidden_states, residual)
        if not get_pp_group().is_last_rank:
            # Hand activations off to the next pipeline stage.
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        # Final residual add + LayerNorm on the last rank.
        hidden_states, _ = self.norm(hidden_states + residual), residual
        return hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, fusing q/k/v shards into qkv_proj; returns
        the names of all parameters that were loaded."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            (".qkv_proj", ".q_proj", "q"),
            (".qkv_proj", ".k_proj", "k"),
            (".qkv_proj", ".v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            # Rotary embedding caches are recomputed at runtime; never load them.
            if "rotary_emb.inv_freq" in name:
                continue
            if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name:
                # Models trained using ColossalAI may include these tensors in
                # the checkpoint. Skip them.
                continue
            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(name)
            ):
                # Loading kv cache scales for compressed-tensors quantization
                param = params_dict[scale_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                loaded_weight = loaded_weight[0]
                weight_loader(param, loaded_weight)
                loaded_params.add(scale_name)
                continue
            if "scale" in name:
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Remapping the name of FP8 kv-scale.
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class Jais2ForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
    """Jais2 causal LM: Jais2Model backbone plus a (last-PP-rank-only)
    language-model head."""

    # q/k/v checkpoint shards load into the single fused qkv_proj parameter.
    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
    }
    embedding_modules = {
        "embed_tokens": "input_embeddings",
        "lm_head": "output_embeddings",
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.model = self._init_model(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        # Only the last PP rank computes logits, so only it owns lm_head and
        # the logits processor.
        if get_pp_group().is_last_rank:
            self.lm_head = ParallelLMHead(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "lm_head"),
            )
            if config.tie_word_embeddings:
                self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens)
            logit_scale = getattr(config, "logit_scale", 1.0)
            self.logits_processor = LogitsProcessor(
                config.vocab_size, scale=logit_scale
            )
        else:
            self.lm_head = PPMissingLayer()
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def _init_model(self, vllm_config: VllmConfig, prefix: str = ""):
        """Build the backbone; override point for subclasses."""
        return Jais2Model(vllm_config=vllm_config, prefix=prefix)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings via the backbone."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Delegate the forward pass to the backbone."""
        model_output = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return model_output

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        # NOTE(review): only valid on the last PP rank, where lm_head and
        # logits_processor exist (earlier ranks get a PPMissingLayer).
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        # With tied embeddings the lm_head weight is shared with embed_tokens,
        # so any lm_head entries in the checkpoint are skipped.
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None),
        )
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/jais2.py",
"license": "Apache License 2.0",
"lines": 454,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_cpu_fused_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
from tests.kernels.allclose_default import get_default_atol, get_default_rtol
from vllm._custom_ops import cpu_fused_moe, cpu_prepack_moe_weight
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.model_executor.layers.fused_moe.cpu_fused_moe import _CPU_MOE_ACT_FN
from vllm.platforms import current_platform
from vllm.utils.torch_utils import set_random_seed
# These kernels are CPU-only; skip the whole module elsewhere.
if not current_platform.is_cpu():
    pytest.skip("skipping CPU-only tests", allow_module_level=True)

# Parameter grids for the test matrix below.
EXPERT_NUM = [
    8,
]
HIDDEN_DIM = [128, 2880]
INTERMEDIATE_DIM = [128, 2880]
BATCH_SIZE = [1, 64, 256]
ACT = [MoEActivation.SILU, MoEActivation.SWIGLUOAI]
USE_BIAS = [True, False]
# Exercise the AMX kernel path only on CPUs that support AMX tiles.
ISA = ["amx", "vec"] if torch._C._cpu._is_amx_tile_supported() else ["vec"]
DTYPE = [torch.bfloat16]
def ref_fused_moe(
    input: torch.Tensor,
    w13: torch.Tensor,
    w2: torch.Tensor,
    w13_bias: torch.Tensor | None,
    w2_bias: torch.Tensor | None,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    activation: MoEActivation,
) -> torch.Tensor:
    """Pure-PyTorch reference for the fused-MoE kernel.

    Groups tokens by expert, runs each expert's gate/up projection,
    activation, and down projection in float32, then scatters the results
    back to token order and combines them with the top-k routing weights.
    """
    len_experts = w13.size(0)
    # One-hot scatter: count how many token slots route to each expert.
    cnts = topk_ids.new_zeros((topk_ids.shape[0], len_experts))
    cnts.scatter_(1, topk_ids.to(torch.int64), 1)
    tokens_per_expert = cnts.sum(dim=0)
    # Sort the flattened (token, k) slots by expert id so each expert's
    # tokens are contiguous; idxs // top_k recovers the source token row.
    idxs = topk_ids.view(-1).argsort()
    sorted_tokens = input[idxs // topk_ids.shape[1]]
    tokens_per_expert = tokens_per_expert.cpu().numpy()
    outputs = []
    start_idx = 0
    for i, num_tokens in enumerate(tokens_per_expert):
        end_idx = start_idx + num_tokens
        if num_tokens == 0:
            continue
        tokens_for_this_expert = sorted_tokens[start_idx:end_idx].float()
        curr_w13 = w13[i].float()
        curr_w2 = w2[i].float()
        curr_w13_bias = None
        if w13_bias is not None:
            curr_w13_bias = w13_bias[i].float()
        curr_w2_bias = None
        if w2_bias is not None:
            curr_w2_bias = w2_bias[i].float()
        gate_up = torch.nn.functional.linear(
            tokens_for_this_expert, curr_w13, curr_w13_bias
        )
        # Note: to simulate the kernel implementation
        gate_up = _CPU_MOE_ACT_FN[activation](gate_up).to(dtype=input.dtype).float()
        expert_out = torch.nn.functional.linear(gate_up, curr_w2, curr_w2_bias)
        outputs.append(expert_out)
        start_idx = end_idx
    # Undo the expert-major sort, then apply routing weights and reduce
    # over the top-k dimension.
    outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)
    new_x = torch.empty_like(outs)
    new_x[idxs] = outs
    final_out = (
        new_x.view(*topk_ids.shape, -1)
        .mul_(topk_weights.unsqueeze(dim=-1))
        .sum(dim=1)
        .type(input.dtype)
    )
    return final_out
@pytest.mark.parametrize("batch_size", BATCH_SIZE)
@pytest.mark.parametrize("expert_num", EXPERT_NUM)
@pytest.mark.parametrize("hidden_size", HIDDEN_DIM)
@pytest.mark.parametrize("intermediate_size", INTERMEDIATE_DIM)
@pytest.mark.parametrize("use_bias", USE_BIAS)
@pytest.mark.parametrize("dtype", DTYPE)
@pytest.mark.parametrize("act", ACT)
@pytest.mark.parametrize("isa", ISA)
def test_cpu_fused_moe(
    default_vllm_config,
    batch_size: int,
    expert_num: int,
    hidden_size: int,
    intermediate_size: int,
    use_bias: bool,
    dtype: torch.dtype,
    act: MoEActivation,
    isa: str,
):
    """Compare the CPU fused-MoE kernel against the pure-PyTorch reference."""
    set_random_seed(0)
    topk_num = max(expert_num // 2, 1)
    up_dim = 2 * intermediate_size
    # Scale random tensors down so accumulated sums stay well-conditioned
    # in bf16.
    input = torch.randn((batch_size, hidden_size), dtype=dtype) / (
        0.5 * hidden_size**0.5
    )
    w13 = torch.randn((expert_num, up_dim, hidden_size), dtype=dtype) / (
        0.5 * hidden_size**0.5
    )
    w2 = torch.randn((expert_num, hidden_size, intermediate_size), dtype=dtype) / (
        0.5 * intermediate_size**0.5
    )
    router_logits = torch.randn((batch_size, expert_num), dtype=dtype)
    w13_bias = None
    w2_bias = None
    if use_bias:
        w13_bias = torch.randn((expert_num, up_dim), dtype=dtype) / (0.5 * up_dim**0.5)
        w2_bias = torch.randn((expert_num, hidden_size), dtype=dtype) / (
            0.5 * hidden_size**0.5
        )
    score = torch.softmax(router_logits, dim=-1, dtype=torch.float32)
    topk_weight, topk_ids = torch.topk(score, topk_num)
    topk_ids = topk_ids.to(torch.int32)
    ref_output = ref_fused_moe(
        input,
        w13,
        w2,
        w13_bias,
        w2_bias,
        topk_weight,
        topk_ids,
        act,
    )
    packed_w13 = cpu_prepack_moe_weight(w13, isa)
    packed_w2 = cpu_prepack_moe_weight(w2, isa)
    output = cpu_fused_moe(
        input,
        packed_w13,
        packed_w2,
        w13_bias,
        w2_bias,
        topk_weight,
        topk_ids,
        act.value,
        isa,
    )
    atol, rtol = get_default_atol(output), get_default_rtol(output)
    # Fix: the previous code wrapped assert_close in a (call, f-string) tuple,
    # so the message was computed but never attached to anything. Call it
    # directly; assert_close's failure message already reports the greatest
    # absolute difference.
    torch.testing.assert_close(output, ref_output, atol=atol, rtol=rtol)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_cpu_fused_moe.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/metrics/test_perf_metrics.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for the analytic estimators in metrics/flops.py.
"""
import types
from types import SimpleNamespace
from transformers.models.deepseek_v3.configuration_deepseek_v3 import DeepseekV3Config
from transformers.models.llama4.configuration_llama4 import (
Llama4Config,
Llama4TextConfig,
)
from transformers.models.qwen3.configuration_qwen3 import Qwen3Config
from transformers.models.qwen3_moe.configuration_qwen3_moe import Qwen3MoeConfig
from vllm.config.model import ModelConfig, get_hf_text_config
from vllm.transformers_utils.model_arch_config_convertor import (
MODEL_ARCH_CONFIG_CONVERTORS,
ModelArchConfigConvertorBase,
)
from vllm.v1.metrics.perf import (
AttentionMetrics,
BaseConfigParser,
ExecutionContext,
FfnMetrics,
ModelMetrics,
ParsedArgs,
UnembedMetrics,
)
class MockModelConfig:
    """Mock ModelConfig exposing the getter methods the parsers rely on."""

    def __init__(self, hf_config, dtype):
        self.hf_config = hf_config
        self.hf_text_config = get_hf_text_config(hf_config)
        convertor_cls = MODEL_ARCH_CONFIG_CONVERTORS.get(
            hf_config.model_type, ModelArchConfigConvertorBase
        )
        self.model_arch_config = convertor_cls(
            hf_config, self.hf_text_config
        ).convert()
        self.dtype = dtype
        self.is_attention_free = False

    def __getattr__(self, name):
        # Only fall back to attributes the real ModelConfig class defines.
        if not hasattr(ModelConfig, name):
            raise AttributeError(
                f"'{type(self).__name__}' object has no attribute '{name}' "
                f"and neither does 'ModelConfig'."
            )
        attr = getattr(ModelConfig, name)
        if isinstance(attr, property):
            # Run the property getter with this mock standing in as the
            # instance.
            return attr.__get__(self, self.__class__)
        if isinstance(attr, types.FunctionType):
            # Bind plain functions to this mock so they act like methods.
            return types.MethodType(attr, self)
        # Anything else is a plain class attribute / static value.
        return attr
def create_mock_vllm_config(
    hf_config,
    model_dtype="bfloat16",
    cache_dtype="auto",
    quant_config=None,
    data_parallel_size=1,
    tensor_parallel_size=1,
    pipeline_parallel_size=1,
    enable_expert_parallel=False,
) -> SimpleNamespace:
    """Assemble a SimpleNamespace that stands in for a VllmConfig."""
    parallel = SimpleNamespace(
        data_parallel_size=data_parallel_size,
        tensor_parallel_size=tensor_parallel_size,
        pipeline_parallel_size=pipeline_parallel_size,
        enable_expert_parallel=enable_expert_parallel,
    )
    return SimpleNamespace(
        model_config=MockModelConfig(hf_config, model_dtype),
        cache_config=SimpleNamespace(cache_dtype=cache_dtype),
        quant_config=quant_config,
        parallel_config=parallel,
    )
#### Parser Tests ####
def test_base_config_parser():
    """BaseConfigParser should pick up the core model attributes."""
    cfg = create_mock_vllm_config(
        Qwen3Config(
            vocab_size=50000,
            hidden_size=2048,
            num_attention_heads=16,
            num_hidden_layers=24,
        ),
        model_dtype="float16",
    )
    parsed = BaseConfigParser().parse(ParsedArgs(), cfg)
    assert parsed.vocab_size == 50000
    assert parsed.hidden_size == 2048
    assert parsed.num_attention_heads == 16
    assert parsed.num_hidden_layers == 24
    # float16 weights are 2 bytes; activations default to 2 bytes as well.
    assert parsed.weight_byte_size == 2
    assert parsed.activation_byte_size == 2
def test_base_attention_config_parser_with_gqa():
    """The attention parser should propagate GQA head counts and head_dim."""
    cfg = create_mock_vllm_config(
        Qwen3Config(
            hidden_size=4096,
            num_attention_heads=32,
            num_key_value_heads=8,  # 4 query heads per KV head
            head_dim=128,
        )
    )
    parsed = AttentionMetrics.get_parser().parse(cfg)
    assert parsed.num_key_value_heads == 8
    assert parsed.head_dim == 128
def test_base_attention_config_parser_without_gqa():
    """Without num_key_value_heads, the parser should fall back to MHA."""
    cfg = create_mock_vllm_config(
        Qwen3Config(hidden_size=4096, num_attention_heads=32)
    )
    parsed = AttentionMetrics.get_parser().parse(cfg)
    # MHA fallback: one KV head per attention head.
    assert parsed.num_key_value_heads == 32
def test_base_ffn_config_parser_dense():
    """Dense FFN parsing: intermediate size set, all MoE fields zeroed."""
    cfg = create_mock_vllm_config(
        Qwen3Config(
            num_hidden_layers=32,
            hidden_size=4096,
            intermediate_size=11008,
        )
    )
    parsed = FfnMetrics.get_parser().parse(cfg)
    assert parsed.intermediate_size == 11008
    assert parsed.num_experts == 0
    assert parsed.num_experts_per_tok == 0
    assert parsed.num_moe_layers == 0
def test_base_ffn_config_parser_moe():
    """MoE FFN parsing: expert counts, sizes, and MoE layer count."""
    cfg = create_mock_vllm_config(
        Qwen3MoeConfig(
            num_hidden_layers=32,
            hidden_size=4096,
            intermediate_size=11008,
            num_experts=64,
            num_experts_per_tok=8,
            moe_intermediate_size=14336,
            n_shared_experts=2,
        )
    )
    parsed = FfnMetrics.get_parser().parse(cfg)
    assert parsed.num_experts == 64
    assert parsed.num_experts_per_tok == 8
    assert parsed.moe_intermediate_size == 14336
    assert parsed.num_shared_experts == 2
    # Qwen3-MoE makes every layer MoE by default.
    assert parsed.num_moe_layers == 32
def test_interleave_moe_layer_step_parser():
    """interleave_moe_layer_step should determine how many layers are MoE."""
    cfg = create_mock_vllm_config(
        Llama4Config(
            text_config=Llama4TextConfig(
                num_hidden_layers=32,
                num_local_experts=64,
                interleave_moe_layer_step=4,  # one MoE layer in every four
            ),
        )
    )
    parsed = FfnMetrics.get_parser().parse(cfg)
    assert parsed.num_moe_layers == 8  # 32 layers / step 4
def test_moe_layer_freq_parser():
    """moe_layer_freq and first_k_dense_replace should set the MoE layer count."""
    cfg = create_mock_vllm_config(
        DeepseekV3Config(
            num_hidden_layers=30,
            n_routed_experts=64,
            moe_layer_freq=3,  # every 3rd layer after the dense prefix
            first_k_dense_replace=6,  # first 6 layers stay dense
        )
    )
    parsed = FfnMetrics.get_parser().parse(cfg)
    # MoE layers are indices >= 6 that are multiples of 3: 6, 9, ..., 27.
    expected_moe_layers = sum(
        1 for layer in range(30) if layer >= 6 and layer % 3 == 0
    )
    assert expected_moe_layers == 8
    assert parsed.num_moe_layers == expected_moe_layers
#### ComponentMetrics Tests ####
def test_attention_metrics_scaling():
    """Attention FLOPs and bytes should scale linearly with layer count."""
    common = dict(
        hidden_size=2048,
        num_attention_heads=16,
        num_key_value_heads=16,
        head_dim=128,
    )
    base = AttentionMetrics.from_vllm_config(
        create_mock_vllm_config(Qwen3Config(num_hidden_layers=12, **common))
    )
    doubled = AttentionMetrics.from_vllm_config(
        create_mock_vllm_config(Qwen3Config(num_hidden_layers=24, **common))
    )
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    # Twice the layers -> exactly twice the FLOPs and twice the bytes moved.
    assert doubled.get_num_flops(ctx) == 2 * base.get_num_flops(ctx)
    assert doubled.get_read_bytes(ctx) == 2 * base.get_read_bytes(ctx)
    assert doubled.get_write_bytes(ctx) == 2 * base.get_write_bytes(ctx)
def test_attention_metrics_grouped_query():
    """GQA should read fewer KV-cache bytes than MHA during decode."""
    mha = AttentionMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=4096,
                num_attention_heads=32,
                num_key_value_heads=32,  # full multi-head attention
                num_hidden_layers=1,
            )
        )
    )
    gqa = AttentionMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=4096,
                num_attention_heads=32,
                num_key_value_heads=8,  # 4:1 grouped-query attention
                num_hidden_layers=1,
            )
        )
    )
    ctx = ExecutionContext.from_single_request(
        num_tokens=1, context_len=1024, is_prefill=False
    )
    # Fewer KV heads -> fewer KV-cache bytes read per decoded token.
    assert gqa.get_read_bytes(ctx) < mha.get_read_bytes(ctx)
def test_ffn_metrics_scaling():
    """FFN FLOPs should scale linearly with the intermediate size."""
    base = FfnMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=2048, intermediate_size=8192, num_hidden_layers=12
            )
        )
    )
    wider = FfnMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=2048, intermediate_size=16384, num_hidden_layers=12
            )
        )
    )
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    # Doubling intermediate_size doubles the matmul work.
    assert wider.get_num_flops(ctx) == 2 * base.get_num_flops(ctx)
def test_moe_metrics_vs_dense():
    """Two routed experts should cost twice the FLOPs of one dense FFN."""
    dense = FfnMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=2048, intermediate_size=8192, num_hidden_layers=12
            )
        )
    )
    moe = FfnMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3MoeConfig(
                hidden_size=2048,
                intermediate_size=8192,
                num_hidden_layers=12,
                num_experts=64,
                num_experts_per_tok=2,  # two routed experts per token
                moe_intermediate_size=8192,
                n_shared_experts=0,
            )
        )
    )
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    # Two routed experts (same width as the dense FFN) -> double the FLOPs.
    assert moe.get_num_flops(ctx) == 2 * dense.get_num_flops(ctx)
def test_unembed_metrics_scaling():
    """Unembedding FLOPs should scale linearly with vocabulary size."""
    small = UnembedMetrics.from_vllm_config(
        create_mock_vllm_config(Qwen3Config(hidden_size=2048, vocab_size=32000))
    )
    large = UnembedMetrics.from_vllm_config(
        create_mock_vllm_config(Qwen3Config(hidden_size=2048, vocab_size=64000))
    )
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    # Twice the vocab -> twice the logit-projection FLOPs.
    assert large.get_num_flops(ctx) == 2 * small.get_num_flops(ctx)
def test_prefill_vs_decode_differences():
    """Prefill and decode should not share the same read-byte profile."""
    metrics = AttentionMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=2048,
                num_attention_heads=16,
                num_key_value_heads=16,
                num_hidden_layers=1,
            )
        )
    )
    prefill = ExecutionContext.from_single_request(
        num_tokens=512, context_len=512, is_prefill=True
    )
    decode = ExecutionContext.from_single_request(
        num_tokens=1, context_len=512, is_prefill=False
    )
    assert metrics.get_read_bytes(prefill) != metrics.get_read_bytes(decode)
def test_model_metrics_aggregation():
    """ModelMetrics totals should equal the sum of the component breakdown."""
    cfg = create_mock_vllm_config(
        Qwen3Config(
            hidden_size=2048,
            num_attention_heads=16,
            num_hidden_layers=12,
            vocab_size=32000,
            intermediate_size=8192,
        )
    )
    model_metrics = ModelMetrics(cfg)
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    breakdown = model_metrics.get_num_flops_breakdown(ctx)
    # Attention + FFN + unembed contributions must add up to the total.
    assert model_metrics.get_num_flops(ctx) == sum(breakdown.values())
def test_moe_expert_activation_proportional_scaling():
    """Test that routed expert metrics scale proportionally with num_experts_per_tok."""
    # Three configs identical except num_experts_per_tok (1, 2, 3); shared
    # experts are held constant so only the routed-expert cost varies.
    base_moe_config = Qwen3MoeConfig(
        hidden_size=2048,
        intermediate_size=8192,
        num_hidden_layers=12,
        num_experts=64,
        num_experts_per_tok=1,  # 1 expert per token
        moe_intermediate_size=8192,
        n_shared_experts=2,
    )
    double_experts_config = Qwen3MoeConfig(
        hidden_size=2048,
        intermediate_size=8192,
        num_hidden_layers=12,
        num_experts=64,
        num_experts_per_tok=2,  # 2 experts per token (double)
        moe_intermediate_size=8192,
        n_shared_experts=2,  # Same shared experts
    )
    triple_experts_config = Qwen3MoeConfig(
        hidden_size=2048,
        intermediate_size=8192,
        num_hidden_layers=12,
        num_experts=64,
        num_experts_per_tok=3,  # 3 experts per token (triple)
        moe_intermediate_size=8192,
        n_shared_experts=2,  # Same shared experts
    )
    base_vllm_config = create_mock_vllm_config(base_moe_config)
    double_vllm_config = create_mock_vllm_config(double_experts_config)
    triple_vllm_config = create_mock_vllm_config(triple_experts_config)
    base_metrics = FfnMetrics.from_vllm_config(base_vllm_config)
    double_metrics = FfnMetrics.from_vllm_config(double_vllm_config)
    triple_metrics = FfnMetrics.from_vllm_config(triple_vllm_config)
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    # Get total metrics - the key insight is that differences should be proportional
    base_flops = base_metrics.get_num_flops(ctx)
    double_flops = double_metrics.get_num_flops(ctx)
    triple_flops = triple_metrics.get_num_flops(ctx)
    # The difference between double and base should equal one additional expert
    one_expert_diff = double_flops - base_flops
    # The difference between triple and base should equal two additional experts
    two_expert_diff = triple_flops - base_flops
    # Proportional scaling: 2 * (1 expert diff) should equal (2 expert diff)
    assert two_expert_diff == 2 * one_expert_diff
    # Same logic applies to memory operations
    base_read = base_metrics.get_read_bytes(ctx)
    double_read = double_metrics.get_read_bytes(ctx)
    triple_read = triple_metrics.get_read_bytes(ctx)
    one_expert_read_diff = double_read - base_read
    two_expert_read_diff = triple_read - base_read
    assert two_expert_read_diff == 2 * one_expert_read_diff
    # Same for write bytes
    base_write = base_metrics.get_write_bytes(ctx)
    double_write = double_metrics.get_write_bytes(ctx)
    triple_write = triple_metrics.get_write_bytes(ctx)
    one_expert_write_diff = double_write - base_write
    two_expert_write_diff = triple_write - base_write
    assert two_expert_write_diff == 2 * one_expert_write_diff
def test_quantization_config_parser_fp8():
    """fp8 quantization should yield 1-byte weights for attention and FFN."""

    class MockQuantConfig:
        def get_name(self):
            return "fp8"

    cfg = create_mock_vllm_config(
        Qwen3Config(hidden_size=2048, num_attention_heads=16, num_hidden_layers=1),
        quant_config=MockQuantConfig(),
    )
    # fp8 -> one byte per weight element in both components.
    assert AttentionMetrics.get_parser().parse(cfg).weight_byte_size == 1
    assert FfnMetrics.get_parser().parse(cfg).weight_byte_size == 1
def test_quantization_config_parser_mxfp4():
    """mxfp4 quantization should yield half-byte FFN weights."""

    class MockQuantConfig:
        def get_name(self):
            return "mxfp4"

    cfg = create_mock_vllm_config(
        Qwen3Config(hidden_size=2048, intermediate_size=8192, num_hidden_layers=1),
        quant_config=MockQuantConfig(),
    )
    # mxfp4 -> 4 bits, i.e. half a byte, per weight element.
    assert FfnMetrics.get_parser().parse(cfg).weight_byte_size == 0.5
#### Per-GPU Tests ####
def test_attention_per_gpu_with_tensor_parallelism():
    """TP=4 should split attention FLOPs (and shrink bytes) per GPU."""
    metrics = AttentionMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=4096,
                num_attention_heads=32,
                num_key_value_heads=8,
                num_hidden_layers=24,
            ),
            tensor_parallel_size=4,
        )
    )
    ctx = ExecutionContext.from_single_request(
        num_tokens=128, context_len=1024, is_prefill=True
    )
    # Heads are sharded across 4 GPUs, so per-GPU FLOPs are a quarter.
    assert metrics.get_num_flops(ctx, per_gpu=False) == 4 * metrics.get_num_flops(
        ctx, per_gpu=True
    )
    # Weight reads/writes are sharded too, so each GPU moves fewer bytes.
    assert metrics.get_read_bytes(ctx, per_gpu=False) > metrics.get_read_bytes(
        ctx, per_gpu=True
    )
    assert metrics.get_write_bytes(ctx, per_gpu=False) > metrics.get_write_bytes(
        ctx, per_gpu=True
    )
def test_attention_per_gpu_with_pipeline_parallelism():
    """PP=4 should split attention layers (FLOPs and bytes) 4-ways per GPU."""
    metrics = AttentionMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=2048,
                num_attention_heads=16,
                num_hidden_layers=32,
            ),
            pipeline_parallel_size=4,
        )
    )
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=False
    )
    # Each pipeline stage holds a quarter of the layers.
    assert metrics.get_num_flops(ctx, per_gpu=False) == 4 * metrics.get_num_flops(
        ctx, per_gpu=True
    )
    assert metrics.get_read_bytes(ctx, per_gpu=False) == 4 * metrics.get_read_bytes(
        ctx, per_gpu=True
    )
def test_ffn_per_gpu_with_tensor_parallelism():
    """DP*TP without EP should shard the FFN eight ways."""
    metrics = FfnMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=4096,
                intermediate_size=14336,
                num_hidden_layers=32,
            ),
            data_parallel_size=2,
            tensor_parallel_size=4,
        )
    )
    # Without expert parallelism, ffn_tp_size = dp_size * tp_size = 8.
    assert metrics.ffn_tp_size == 8
    ctx = ExecutionContext.from_single_request(
        num_tokens=128, context_len=2048, is_prefill=True
    )
    # Per-GPU FLOPs are one eighth of the global count.
    assert metrics.get_num_flops(ctx, per_gpu=False) == 8 * metrics.get_num_flops(
        ctx, per_gpu=True
    )
def test_ffn_per_gpu_with_pipeline_parallelism():
    """PP=6 should split FFN layers six ways per GPU."""
    metrics = FfnMetrics.from_vllm_config(
        create_mock_vllm_config(
            Qwen3Config(
                hidden_size=2048,
                intermediate_size=8192,
                num_hidden_layers=24,
            ),
            pipeline_parallel_size=6,
        )
    )
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    # 24 layers across 6 pipeline stages -> 4 layers' worth of work per GPU.
    assert metrics.get_num_flops(ctx, per_gpu=False) == 6 * metrics.get_num_flops(
        ctx, per_gpu=True
    )
def test_moe_per_gpu_with_expert_parallelism():
    """MoE metrics under expert parallelism (num_activated_experts accounting)."""
    hf_config = Qwen3MoeConfig(
        hidden_size=2048,
        intermediate_size=8192,
        num_hidden_layers=24,
        num_experts=64,
        num_experts_per_tok=8,
        moe_intermediate_size=14336,
        n_shared_experts=2,
    )
    # DP=2 x TP=4 with EP enabled -> experts are sharded 8 ways.
    vllm_config = create_mock_vllm_config(
        hf_config,
        data_parallel_size=2,
        tensor_parallel_size=4,
        enable_expert_parallel=True,
    )
    metrics = FfnMetrics.from_vllm_config(vllm_config)
    # With EP on, all parallelism is attributed to EP and none to FFN-TP.
    assert metrics.ffn_ep_size == 8
    assert metrics.ffn_tp_size == 1
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    reads_one_gpu = metrics.get_read_bytes_breakdown(ctx, per_gpu=True)
    reads_global = metrics.get_read_bytes_breakdown(ctx, per_gpu=False)
    # Per GPU: 64/8 = 8 resident experts; T=100 tokens each route to
    # 8/8 = 1 local expert, so min(100, 8) = 8 experts become active.
    # Globally all 64 experts exist, so weight reads must be strictly larger.
    if "routed_up_gate_weights" in reads_one_gpu:
        weights_one_gpu = reads_one_gpu["routed_up_gate_weights"]
        weights_global = reads_global["routed_up_gate_weights"]
        assert weights_one_gpu < weights_global
        # The exact factor depends on the activated-expert calculation; we
        # only require the global view to read strictly more expert weights.
        assert weights_global / weights_one_gpu > 1
def test_moe_per_gpu_expert_activation_accounting():
    """MoE expert-activation accounting for small vs. large batches."""
    hf_config = Qwen3MoeConfig(
        hidden_size=2048,
        intermediate_size=8192,
        num_hidden_layers=12,
        num_experts=64,
        num_experts_per_tok=8,
        moe_intermediate_size=14336,
        n_shared_experts=0,  # No shared experts for this test
    )
    # Pure expert parallelism across 8 ranks.
    vllm_config = create_mock_vllm_config(
        hf_config,
        data_parallel_size=8,
        enable_expert_parallel=True,
    )
    metrics = FfnMetrics.from_vllm_config(vllm_config)
    # Each GPU hosts 64/8 = 8 experts; every token activates 8/8 = 1 of them.
    # T=10 gives 10 activations, so min(10, 8) = 8 experts are hot.
    small_ctx = ExecutionContext.from_single_request(
        num_tokens=10, context_len=512, is_prefill=True
    )
    small_read = metrics.get_read_bytes_breakdown(small_ctx, per_gpu=True)
    # T=1000 gives 1000 activations; still min(1000, 8) = 8 experts hot.
    large_ctx = ExecutionContext.from_single_request(
        num_tokens=1000, context_len=512, is_prefill=True
    )
    large_read = metrics.get_read_bytes_breakdown(large_ctx, per_gpu=True)
    if "routed_up_gate_weights" in small_read:
        # Both batches touch all 8 resident experts -> identical weight reads.
        assert (
            small_read["routed_up_gate_weights"]
            == large_read["routed_up_gate_weights"]
        )
        # Input-activation reads scale linearly with tokens: 1000/10 = 100x.
        assert (
            large_read["routed_up_gate_input"]
            == 100 * small_read["routed_up_gate_input"]
        )
def test_unembed_per_gpu_with_tensor_parallelism():
    """Unembed (LM head) metrics under tensor parallelism."""
    hf_config = Qwen3Config(
        hidden_size=4096,
        vocab_size=128000,
    )
    vllm_config = create_mock_vllm_config(hf_config, tensor_parallel_size=8)
    metrics = UnembedMetrics.from_vllm_config(vllm_config)
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    flops_global = metrics.get_num_flops(ctx, per_gpu=False)
    flops_one_gpu = metrics.get_num_flops(ctx, per_gpu=True)
    # The vocabulary is sharded 8 ways, so FLOPs divide evenly by TP.
    assert flops_global == 8 * flops_one_gpu
    reads_global = metrics.get_read_bytes_breakdown(ctx, per_gpu=False)
    reads_one_gpu = metrics.get_read_bytes_breakdown(ctx, per_gpu=True)
    # Hidden-state input is replicated on every TP rank...
    assert reads_global["input"] == reads_one_gpu["input"]
    # ...while the weight matrix is sharded, so global reads are 8x larger.
    assert reads_global["weight"] == 8 * reads_one_gpu["weight"]
def test_model_metrics_per_gpu_aggregation():
    """ModelMetrics aggregation of per-GPU metrics across components."""
    hf_config = Qwen3Config(
        hidden_size=2048,
        num_attention_heads=16,
        num_hidden_layers=12,
        vocab_size=32000,
        intermediate_size=8192,
    )
    # Mixed parallelism: TP=2 together with PP=2.
    vllm_config = create_mock_vllm_config(
        hf_config,
        tensor_parallel_size=2,
        pipeline_parallel_size=2,
    )
    model_metrics = ModelMetrics(vllm_config)
    ctx = ExecutionContext.from_single_request(
        num_tokens=100, context_len=512, is_prefill=True
    )
    breakdown_one_gpu = model_metrics.get_num_flops_breakdown(ctx, per_gpu=True)
    breakdown_global = model_metrics.get_num_flops_breakdown(ctx, per_gpu=False)
    total_one_gpu = model_metrics.get_num_flops(ctx, per_gpu=True)
    total_global = model_metrics.get_num_flops(ctx, per_gpu=False)
    # Totals must equal the sum of their component breakdowns.
    assert total_one_gpu == sum(breakdown_one_gpu.values())
    assert total_global == sum(breakdown_global.values())
    # Parallelism shrinks the per-GPU share.
    assert total_global > total_one_gpu
    # The exact factor depends on which of TP/PP applies to each component,
    # so only require the global/per-GPU ratio to exceed 1.
    assert total_global / total_one_gpu > 1
def test_attention_per_gpu_heads_not_evenly_divisible():
    """Attention metrics when head counts do not divide evenly by TP."""
    hf_config = Qwen3Config(
        hidden_size=2048,
        num_attention_heads=17,  # Not divisible by 4
        num_key_value_heads=5,  # Not divisible by 4
        num_hidden_layers=8,
    )
    vllm_config = create_mock_vllm_config(hf_config, tensor_parallel_size=4)
    metrics = AttentionMetrics.from_vllm_config(vllm_config)
    ctx = ExecutionContext.from_single_request(
        num_tokens=64, context_len=256, is_prefill=True
    )
    # Uneven division must not crash (the max(1, ...) guard) and must still
    # yield positive values with global work exceeding per-GPU work.
    flops_one_gpu = metrics.get_num_flops(ctx, per_gpu=True)
    flops_global = metrics.get_num_flops(ctx, per_gpu=False)
    assert flops_one_gpu > 0
    assert flops_global > 0
    assert flops_global > flops_one_gpu
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/metrics/test_perf_metrics.py",
"license": "Apache License 2.0",
"lines": 714,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/entrypoints/serve/cache/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from fastapi import APIRouter, FastAPI, Query, Request
from fastapi.responses import Response
import vllm.envs as envs
from vllm.engine.protocol import EngineClient
from vllm.logger import init_logger
logger = init_logger(__name__)
router = APIRouter()
def engine_client(request: Request) -> EngineClient:
    """Fetch the shared engine client stashed on the FastAPI app state."""
    app_state = request.app.state
    return app_state.engine_client
@router.post("/reset_prefix_cache")
async def reset_prefix_cache(
    raw_request: Request,
    reset_running_requests: bool = Query(default=False),
    reset_external: bool = Query(default=False),
):
    """
    Reset the local prefix cache.

    If the query parameter ``reset_external=true`` is given, the external
    (connector-managed) prefix cache is reset as well; both flags are
    forwarded to the engine client as-is. The API server does not verify
    that the reset actually succeeded.

    Example:
    POST /reset_prefix_cache?reset_external=true
    """
    logger.info("Resetting prefix cache...")
    client = engine_client(raw_request)
    await client.reset_prefix_cache(reset_running_requests, reset_external)
    return Response(status_code=200)
@router.post("/reset_mm_cache")
async def reset_mm_cache(raw_request: Request):
    """
    Reset the multi-modal cache. The API server does not check whether the
    reset actually succeeded on the engine side.
    """
    logger.info("Resetting multi-modal cache...")
    client = engine_client(raw_request)
    await client.reset_mm_cache()
    return Response(status_code=200)
@router.post("/reset_encoder_cache")
async def reset_encoder_cache(raw_request: Request):
    """
    Reset the encoder cache. The API server does not check whether the
    reset actually succeeded on the engine side.
    """
    logger.info("Resetting encoder cache...")
    client = engine_client(raw_request)
    await client.reset_encoder_cache()
    return Response(status_code=200)
def attach_router(app: FastAPI):
    """Mount the cache-management endpoints, but only in dev mode."""
    if envs.VLLM_SERVER_DEV_MODE:
        app.include_router(router)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/serve/cache/api_router.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/serve/instrumentator/server_info.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import functools
from typing import Annotated, Literal
import pydantic
from fastapi import APIRouter, Query, Request
from fastapi.responses import JSONResponse
import vllm.envs as envs
from vllm.collect_env import get_env_info
from vllm.config import VllmConfig
from vllm.logger import init_logger
logger = init_logger(__name__)
router = APIRouter()
PydanticVllmConfig = pydantic.TypeAdapter(VllmConfig)
def _get_vllm_env_vars():
    """Collect the values of all ``VLLM_*`` environment settings.

    Keys containing "KEY" are skipped so that secrets (e.g. API keys) are
    never exposed through the /server_info endpoint.
    """
    from vllm.config.utils import normalize_value

    vllm_envs = {}
    for key in dir(envs):
        if key.startswith("VLLM_") and "KEY" not in key:
            value = getattr(envs, key, None)
            if value is not None:
                # Convert to a JSON-friendly representation.
                value = normalize_value(value)
            vllm_envs[key] = value
    return vllm_envs
@functools.lru_cache(maxsize=1)
def _get_system_env_info_cached():
    """Return the system environment report as a dict, computed only once.

    The report is collected via ``get_env_info()`` and cached for the
    lifetime of the process, so repeated /server_info requests reuse it.
    """
    return get_env_info()._asdict()
@router.get("/server_info")
async def show_server_info(
    raw_request: Request,
    config_format: Annotated[Literal["text", "json"], Query()] = "text",
):
    """Report the server's vLLM config, VLLM_* env vars, and system info.

    ``config_format`` selects between a plain-text dump of the config and a
    structured JSON serialization of it.
    """
    vllm_config: VllmConfig = raw_request.app.state.vllm_config
    if config_format == "text":
        dumped_config = str(vllm_config)
    else:
        # fallback=str handles values with no native JSON form, e.g. torch.dtype.
        dumped_config = PydanticVllmConfig.dump_python(
            vllm_config, mode="json", fallback=str
        )
    server_info = {
        "vllm_config": dumped_config,
        "vllm_env": _get_vllm_env_vars(),
        # Env-info collection runs in a thread to keep the event loop free.
        "system_env": await asyncio.to_thread(_get_system_env_info_cached),
    }
    return JSONResponse(content=server_info)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/serve/instrumentator/server_info.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/serve/rpc/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
from http import HTTPStatus
from typing import Any
from fastapi import APIRouter, FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse, Response
import vllm.envs as envs
from vllm.engine.protocol import EngineClient
from vllm.logger import init_logger
logger = init_logger(__name__)
router = APIRouter()
def engine_client(request: Request) -> EngineClient:
    """Resolve the shared EngineClient from the FastAPI application state."""
    state = request.app.state
    return state.engine_client
@router.post("/collective_rpc")
async def collective_rpc(raw_request: Request):
    """Invoke a named RPC method on all engine workers.

    The JSON body must contain ``method``; ``args``, ``kwargs`` and
    ``timeout`` are optional. Results are returned as JSON, with values
    that are not dicts/lists/None stringified.
    """
    try:
        body = await raw_request.json()
    except json.JSONDecodeError as e:
        raise HTTPException(
            status_code=HTTPStatus.BAD_REQUEST.value,
            detail=f"JSON decode error: {e}",
        ) from e
    method = body.get("method")
    if method is None:
        raise HTTPException(
            status_code=HTTPStatus.BAD_REQUEST.value,
            detail="Missing 'method' in request body",
        )
    # For security reason, only serialized string args/kwargs are passed.
    # User-defined `method` is responsible for deserialization if needed.
    args: list[str] = body.get("args", [])
    kwargs: dict[str, str] = body.get("kwargs", {})
    timeout: float | None = body.get("timeout")
    results = await engine_client(raw_request).collective_rpc(
        method=method, timeout=timeout, args=tuple(args), kwargs=kwargs
    )
    if results is None:
        return Response(status_code=200)
    # JSON-native values pass through untouched; everything else is
    # stringified so the response is always serializable.
    response: list[Any] = [
        item if item is None or isinstance(item, (dict, list)) else str(item)
        for item in results
    ]
    return JSONResponse(content={"results": response})
def attach_router(app: FastAPI):
    """Expose the collective_rpc endpoint only when dev mode is enabled."""
    if envs.VLLM_SERVER_DEV_MODE:
        app.include_router(router)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/serve/rpc/api_router.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/tool_use/test_minimax_m2_tool_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
import pytest
from vllm.tool_parsers.minimax_m2_tool_parser import (
MinimaxM2ToolParser,
)
pytestmark = pytest.mark.cpu_test
class FakeTokenizer:
    """Bare-bones stand-in for a real tokenizer.

    Provides exactly what the parser touches: a truthy ``model_tokenizer``
    attribute and a vocabulary mapping for the tool-call sentinel tokens.
    """

    def __init__(self):
        self.model_tokenizer = True
        # The parser resolves these literal token strings to ids at init time.
        self.vocab = {
            "<minimax:tool_call>": 1,
            "</minimax:tool_call>": 2,
        }

    def get_vocab(self):
        return self.vocab
@pytest.fixture
def minimax_m2_tool_parser():
    """Provide a fresh MinimaxM2ToolParser wired to the fake tokenizer."""
    return MinimaxM2ToolParser(FakeTokenizer())
def test_extract_tool_calls_streaming_incremental(minimax_m2_tool_parser):
    """Stream a single tool call chunk-by-chunk and verify the parsed call."""
    parser = minimax_m2_tool_parser
    parser._reset_streaming_state()
    chunks = [
        "<minimax:tool_call>",
        '<invoke name="get_weather">',
        '<parameter name="city">',
        "Seattle</parameter>",
        "</invoke></minimax:tool_call>",
    ]
    streamed = ""
    for piece in chunks:
        parser.extract_tool_calls_streaming(
            previous_text=streamed,
            current_text=streamed + piece,
            delta_text=piece,
            previous_token_ids=[],
            current_token_ids=[],
            delta_token_ids=[],
            request=None,
        )
        streamed += piece
    assert len(parser.prev_tool_call_arr) == 1
    call = parser.prev_tool_call_arr[0]
    assert call["name"] == "get_weather"
    assert call["arguments"]["city"] == "Seattle"
def test_streaming_minimax_m2_multiple_invokes(minimax_m2_tool_parser):
    """Stream two sequential <invoke> blocks inside one tool-call region."""
    parser = minimax_m2_tool_parser
    parser._reset_streaming_state()
    chunks = [
        "<minimax:tool_call>",
        '<invoke name="search_web">',
        '<parameter name="query_tag">',
        '["technology", "events"]</parameter>',
        '<parameter name="query_list">',
        '["OpenAI", "latest", "release"]</parameter>',
        "</invoke>",
        '<invoke name="search_web">',
        '<parameter name="query_tag">',
        '["technology", "events"]</parameter>',
        '<parameter name="query_list">',
        '["Gemini", "latest", "release"]</parameter>',
        "</invoke>",
        "</minimax:tool_call>",
    ]
    streamed = ""
    for piece in chunks:
        parser.extract_tool_calls_streaming(
            previous_text=streamed,
            current_text=streamed + piece,
            delta_text=piece,
            previous_token_ids=[],
            current_token_ids=[],
            delta_token_ids=[],
            request=None,
        )
        streamed += piece
    assert len(parser.prev_tool_call_arr) == 2
    for call, expected_model in zip(parser.prev_tool_call_arr, ["OpenAI", "Gemini"]):
        assert call["name"] == "search_web"
        serialized = json.dumps(call["arguments"])
        assert "technology" in serialized and "events" in serialized
        assert expected_model in serialized
    # serving_chat.py consumes streamed_args_for_tool; for each call the
    # streamed arguments must match the final parsed arguments exactly.
    for index in range(2):
        expected = json.dumps(parser.prev_tool_call_arr[index].get("arguments", {}))
        assert expected == parser.streamed_args_for_tool[index]
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/tool_use/test_minimax_m2_tool_parser.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/kernels/core/test_apply_rotary_emb.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for ApplyRotaryEmb CustomOp dispatch behavior.
This test ensures that RotaryEmbedding classes correctly call the appropriate
ApplyRotaryEmb methods based on the calling context:
1. RotaryEmbedding.forward_native() -> ApplyRotaryEmb.forward_native()
2. RotaryEmbedding.forward_cuda() -> ApplyRotaryEmb.forward() (auto-dispatch)
3. RotaryEmbedding.forward_hip() -> ApplyRotaryEmb.forward() (auto-dispatch)
"""
from dataclasses import dataclass
import pytest
import torch
from vllm.config import (
CompilationConfig,
VllmConfig,
get_cached_compilation_config,
set_current_vllm_config,
)
from vllm.platforms import current_platform
CUDA_DEVICES = ["cuda:0"]
@dataclass
class RotaryEmbeddingTestCase:
    """Test case configuration for RotaryEmbedding dispatch tests."""

    # Human-readable id used for pytest parametrization.
    name: str
    # RotaryEmbedding subclass under test.
    rope_class: type
    # Keyword arguments used to construct the rope instance.
    rope_kwargs: dict
    method_name: str  # forward_native, forward_cuda, forward
    positions_shape: tuple  # (num_tokens,) or (3, num_tokens) or (4, num_tokens)
    expect_forward_native: bool  # Should call ApplyRotaryEmb.forward_native()
    expect_forward: bool  # Should call ApplyRotaryEmb.forward()
def get_test_cases() -> list[RotaryEmbeddingTestCase]:
    """Generate test cases for all RotaryEmbedding classes."""
    # Imported lazily so collecting this module does not require the models.
    from vllm.model_executor.layers.rotary_embedding.ernie45_vl_rope import (
        Ernie4_5_VLRotaryEmbedding,
    )
    from vllm.model_executor.layers.rotary_embedding.mrope import MRotaryEmbedding
    from vllm.model_executor.layers.rotary_embedding.xdrope import XDRotaryEmbedding

    common_kwargs = {
        "head_size": 128,
        "rotary_dim": 128,
        "max_position_embeddings": 4096,
        "base": 10000,
        "is_neox_style": True,
        "dtype": torch.bfloat16,
    }
    cases = [
        # MRotaryEmbedding: native entry point must stay on the native path.
        RotaryEmbeddingTestCase(
            name="MRotaryEmbedding.forward_native",
            rope_class=MRotaryEmbedding,
            rope_kwargs={**common_kwargs, "mrope_section": [16, 24, 24]},
            method_name="forward_native",
            positions_shape=(3, 32),  # 2D for multimodal
            expect_forward_native=True,
            expect_forward=False,
        ),
        # MRotaryEmbedding: 1D positions take the auto-dispatched kernel path.
        RotaryEmbeddingTestCase(
            name="MRotaryEmbedding.forward_cuda_1d",
            rope_class=MRotaryEmbedding,
            rope_kwargs={**common_kwargs, "mrope_section": [16, 24, 24]},
            method_name="forward_cuda",
            positions_shape=(32,),  # 1D triggers apply_rotary_emb path
            expect_forward_native=False,
            expect_forward=True,
        ),
        # XDRotaryEmbedding: forward() auto-dispatches.
        RotaryEmbeddingTestCase(
            name="XDRotaryEmbedding.forward",
            rope_class=XDRotaryEmbedding,
            rope_kwargs={
                **common_kwargs,
                "scaling_alpha": 1.0,
                "xdrope_section": [16, 16, 16, 16],
            },
            method_name="forward",
            positions_shape=(4, 32),  # 4D for P/W/H/T
            expect_forward_native=False,
            expect_forward=True,
        ),
        # Ernie4_5_VLRotaryEmbedding: native entry point stays native.
        RotaryEmbeddingTestCase(
            name="Ernie4_5_VLRotaryEmbedding.forward_native",
            rope_class=Ernie4_5_VLRotaryEmbedding,
            rope_kwargs={**common_kwargs, "mrope_section": [22, 22, 20]},
            method_name="forward_native",
            positions_shape=(3, 32),  # 2D for multimodal
            expect_forward_native=True,
            expect_forward=False,
        ),
    ]
    return cases
def run_dispatch_test(
    test_case: RotaryEmbeddingTestCase,
    device: str,
):
    """Run a dispatch test for a RotaryEmbedding class.

    Builds the rope under a config that enables the ApplyRotaryEmb custom op,
    wraps the op's forward/forward_native methods to record which one gets
    invoked, runs the method under test, and asserts the expected dispatch.
    """
    vllm_config = VllmConfig(
        compilation_config=CompilationConfig(custom_ops=["all", "+apply_rotary_emb"])
    )
    # The compilation config is cached process-wide; clear it so the
    # custom_ops selection above actually takes effect here.
    get_cached_compilation_config.cache_clear()
    with set_current_vllm_config(vllm_config):
        rope = test_case.rope_class(**test_case.rope_kwargs).to(device=device)
        apply_rotary_emb = rope.apply_rotary_emb
        # Verify custom op is enabled
        if test_case.expect_forward_native:
            assert (
                apply_rotary_emb._forward_method != apply_rotary_emb.forward_native
            ), "Test setup error: ApplyRotaryEmb custom op should be enabled"
        # Setup call tracking: wrap both candidate methods so we can record
        # which one the rope implementation actually calls.
        call_tracker = {"forward_native_called": False, "forward_called": False}
        original_forward_native = apply_rotary_emb.forward_native
        original_forward = apply_rotary_emb.forward

        def tracked_forward_native(*args, **kwargs):
            call_tracker["forward_native_called"] = True
            return original_forward_native(*args, **kwargs)

        def tracked_forward(*args, **kwargs):
            call_tracker["forward_called"] = True
            return original_forward(*args, **kwargs)

        apply_rotary_emb.forward_native = tracked_forward_native
        apply_rotary_emb.forward = tracked_forward
        try:
            num_tokens = test_case.positions_shape[-1]
            num_q_heads = 8
            num_kv_heads = 2
            head_size = test_case.rope_kwargs["head_size"]
            max_position = test_case.rope_kwargs["max_position_embeddings"]
            positions = torch.randint(
                0, max_position // 4, test_case.positions_shape, device=device
            )
            query = torch.randn(
                num_tokens, num_q_heads * head_size, dtype=torch.bfloat16, device=device
            )
            key = torch.randn(
                num_tokens,
                num_kv_heads * head_size,
                dtype=torch.bfloat16,
                device=device,
            )
            # Call the method under test
            method = getattr(rope, test_case.method_name)
            method(positions, query.clone(), key.clone())
            # Verify expectations
            if test_case.expect_forward_native:
                assert call_tracker["forward_native_called"], (
                    f"{test_case.name} should call ApplyRotaryEmb.forward_native()"
                )
                if not test_case.expect_forward:
                    assert not call_tracker["forward_called"], (
                        f"{test_case.name} should NOT call ApplyRotaryEmb.forward(). "
                        "Bug: when +apply_rotary_emb is enabled, forward_native() "
                        "incorrectly dispatches to CUDA/HIP kernels."
                    )
            if test_case.expect_forward:
                assert call_tracker["forward_called"], (
                    f"{test_case.name} should call ApplyRotaryEmb.forward()"
                )
        finally:
            # Always restore the real methods so later tests see a clean op.
            apply_rotary_emb.forward_native = original_forward_native
            apply_rotary_emb.forward = original_forward
@pytest.mark.skipif(
    not current_platform.is_cuda_alike(), reason="Skipping CUDA/ROCm only tests."
)
@pytest.mark.parametrize("test_case", get_test_cases(), ids=lambda tc: tc.name)
@pytest.mark.parametrize("device", CUDA_DEVICES)
def test_rotary_embedding_dispatch(
    test_case: RotaryEmbeddingTestCase,
    device: str,
):
    """
    Test that RotaryEmbedding classes dispatch to the correct ApplyRotaryEmb method.

    - forward_native methods should call ApplyRotaryEmb.forward_native()
    - forward_cuda/forward methods should call ApplyRotaryEmb.forward()

    All setup, tracking, and assertions live in run_dispatch_test.
    """
    run_dispatch_test(test_case, device)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/core/test_apply_rotary_emb.py",
"license": "Apache License 2.0",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/bagel.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 Bytedance Ltd. and/or its affiliates.
"""Inference-only BAGEL model compatible with HuggingFace weights.
BAGEL is a unified multimodal model for image understanding and generation.
For vLLM, we focus on the image understanding (vision-to-text) capabilities.
"""
from collections.abc import Iterable, Mapping, Sequence
from typing import Any, Literal, TypeAlias
import torch
import torch.nn as nn
from vllm.config import VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import MultiModalDataItems
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
)
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.processors.bagel import BagelProcessor
from vllm.utils.tensor_schema import TensorSchema
from .interfaces import (
MultiModalEmbeddings,
SupportsLoRA,
SupportsMultiModal,
SupportsPP,
)
from .siglip import SiglipVisionModel
from .utils import (
AutoWeightsLoader,
StageMissingLayer,
WeightsMapper,
init_vllm_registered_model,
maybe_prefix,
)
logger = init_logger(__name__)
class BagelImagePixelInputs(TensorSchema):
    """
    Pixel-value inputs for the BAGEL vision tower.

    Dimensions:
        - bn: Batch size * number of images
        - c: Number of channels (3)
        - h: Height of each image
        - w: Width of each image
    """

    type: Literal["pixel_values"]
    pixel_values: torch.Tensor  # Shape: (bn, 3, h, w)


# Currently the only supported image input form for BAGEL.
BagelImageInputs: TypeAlias = BagelImagePixelInputs
class BagelVisionMLP(nn.Module):
    """Two-layer MLP connector that projects vision features for the LLM."""

    def __init__(
        self,
        in_features: int,
        hidden_features: int,
        out_features: int,
        act_layer: str = "gelu_pytorch_tanh",
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        # Column-parallel up-projection followed by a row-parallel
        # down-projection keeps the hidden activation sharded across TP ranks.
        self.fc1 = ColumnParallelLinear(
            in_features,
            hidden_features,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.fc1",
        )
        self.act = get_act_fn(act_layer)
        self.fc2 = RowParallelLinear(
            hidden_features,
            out_features,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.fc2",
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, _ = self.fc1(x)
        hidden = self.act(hidden)
        out, _ = self.fc2(hidden)
        return out
class PositionEmbedding(nn.Module):
    """Frozen 2D sin-cos position embedding table for vision tokens."""

    def __init__(self, max_num_patch_per_side: int, hidden_size: int):
        super().__init__()
        self.max_num_patch_per_side = max_num_patch_per_side
        self.hidden_size = hidden_size
        # The table is a fixed sin-cos grid, computed once and stored as a
        # non-persistent buffer (it is not part of the checkpoint).
        table = self._get_2d_sincos_pos_embed(hidden_size, max_num_patch_per_side)
        self.register_buffer(
            "pos_embed",
            torch.from_numpy(table).float(),
            persistent=False,
        )

    @staticmethod
    def _get_2d_sincos_pos_embed(embed_dim: int, grid_size: int):
        """Build the full (grid_size**2, embed_dim) sin-cos table."""
        import numpy as np

        coords = np.arange(grid_size, dtype=np.float32)
        # meshgrid with w first, then h, matching the flattening order below.
        grid = np.stack(np.meshgrid(coords, coords), axis=0)
        grid = grid.reshape([2, 1, grid_size, grid_size])
        return PositionEmbedding._get_2d_sincos_pos_embed_from_grid(embed_dim, grid)

    @staticmethod
    def _get_2d_sincos_pos_embed_from_grid(embed_dim: int, grid):
        """Encode h and w coordinates with half of the channels each."""
        import numpy as np

        assert embed_dim % 2 == 0
        half = embed_dim // 2
        emb_h = PositionEmbedding._get_1d_sincos_pos_embed_from_grid(half, grid[0])
        emb_w = PositionEmbedding._get_1d_sincos_pos_embed_from_grid(half, grid[1])
        return np.concatenate([emb_h, emb_w], axis=1)

    @staticmethod
    def _get_1d_sincos_pos_embed_from_grid(embed_dim: int, pos):
        """1D sin-cos encoding: first half sin, second half cos."""
        import numpy as np

        assert embed_dim % 2 == 0
        omega = 1.0 / 10000 ** (
            np.arange(embed_dim // 2, dtype=np.float64) / (embed_dim / 2.0)
        )
        # Outer product: one row of angles per flattened position.
        angles = np.outer(pos.reshape(-1), omega)
        return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)

    def forward(self, position_ids: torch.Tensor) -> torch.Tensor:
        """
        Args:
            position_ids: Flattened position IDs, shape (N,) where each ID
                corresponds to a position in the flattened grid

        Returns:
            Position embeddings of shape (N, hidden_size)
        """
        # Indices may arrive on a different device than the buffer.
        return self.pos_embed[position_ids.to(self.pos_embed.device)]
class BagelProcessingInfo(BaseProcessingInfo):
    """Processing information for BAGEL model."""

    def get_hf_processor(self, **kwargs: object) -> BagelProcessor:
        from vllm.transformers_utils.processor import cached_get_image_processor

        model_config = self.ctx.model_config
        image_processor = cached_get_image_processor(
            model_config.model,
            revision=model_config.revision,
            trust_remote_code=model_config.trust_remote_code,
        )
        return BagelProcessor(
            image_processor=image_processor,
            tokenizer=self.get_tokenizer(),
            **kwargs,
        )

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # No fixed cap on the number of images per prompt.
        return {"image": None}

    def get_mm_max_tokens_per_item(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> Mapping[str, int]:
        # BAGEL budgets every image at vit_max_num_patch_per_side**2 tokens.
        side = self.get_hf_config().vit_max_num_patch_per_side
        return {"image": side**2}

    def get_num_image_tokens(
        self,
        *,
        image_width: int,
        image_height: int,
    ) -> int:
        # One token per ViT patch along each dimension.
        patch_size = self.get_hf_config().vit_config.patch_size
        return (image_height // patch_size) * (image_width // patch_size)
class BagelDummyInputsBuilder(BaseDummyInputsBuilder[BagelProcessingInfo]):
    """Build dummy inputs for BAGEL model profiling."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        # One placeholder token per requested image.
        return "<|image_pad|>" * mm_counts.get("image", 0)

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        num_images = mm_counts.get("image", 0)
        # Profile with the ViT's native square resolution.
        image_size = self.info.get_hf_config().vit_config.image_size
        return {
            "image": self._get_dummy_images(
                width=image_size,
                height=image_size,
                num_images=num_images,
                overrides=mm_options.get("image"),
            ),
        }
class BagelMultiModalProcessor(BaseMultiModalProcessor[BagelProcessingInfo]):
    """Multimodal processor for BAGEL model."""

    def _hf_processor_applies_updates(
        self,
        prompt_text: str,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
    ) -> bool:
        # Prompt updates are applied by vLLM (see _get_prompt_updates),
        # not by the HF processor.
        return False

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, Any],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptReplacement]:
        """Expand each image placeholder to the padded image token count."""
        hf_config = self.info.get_hf_config()
        tokenizer = self.info.get_tokenizer()
        image_token_id = tokenizer.get_vocab().get("<|image_pad|>")
        if image_token_id is None:
            raise ValueError(
                "Image token '<|image_pad|>' not found in tokenizer vocabulary"
            )

        def get_replacement_bagel(item_idx: int):
            # BAGEL always pads images to the maximum patch grid.
            return [image_token_id] * hf_config.vit_max_num_patch_per_side**2

        return [
            PromptReplacement(
                modality="image",
                target=[image_token_id],
                replacement=get_replacement_bagel,
            )
        ]

    def _get_mm_fields_config(
        self,
        hf_inputs: Any,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return {"pixel_values": MultiModalFieldConfig.batched("image")}
@MULTIMODAL_REGISTRY.register_processor(
    BagelMultiModalProcessor,
    info=BagelProcessingInfo,
    dummy_inputs=BagelDummyInputsBuilder,
)
class BagelForConditionalGeneration(
    nn.Module, SupportsMultiModal, SupportsLoRA, SupportsPP
):
    """
    BAGEL: A unified multimodal model for image understanding and generation.
    For vLLM, we focus on the image understanding (vision-to-text) capabilities.
    The image generation part is not supported in vLLM.
    """

    # Weight mapping from HF to vLLM. The prefixes map 1:1; the mapper is kept
    # explicit so the supported checkpoint layout is documented in one place.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            "language_model.": "language_model.",
            "vit_model.": "vit_model.",
            "connector.": "connector.",
            "vit_pos_embed.": "vit_pos_embed.",
        }
    )

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Return the prompt placeholder string for *modality*.

        Raises:
            ValueError: for any modality other than image.
        """
        if modality.startswith("image"):
            return "<|image_pad|>"
        raise ValueError("Only image modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        # Ensure we have a BagelConfig (check by name to handle trust_remote_code)
        # When trust_remote_code=True, the config comes from transformers_modules
        if type(config).__name__ != "BagelConfig":
            raise ValueError(
                f"Expected BagelConfig, got {type(config).__name__}. "
                "Make sure the model config is properly loaded."
            )
        self.config = config
        self.multimodal_config = multimodal_config
        # Initialize language model (Qwen2)
        # Pass the llm_config from BagelConfig to initialize Qwen2 properly
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.llm_config,
                prefix=maybe_prefix(prefix, "language_model"),
                architectures=["Qwen2ForCausalLM"],
            )
        # Initialize vision model (SigLIP) if visual understanding is enabled
        if config.visual_und:
            # Fix vit_config: checkpoint has 26 layers (0-25) but config says 27
            # Also disable head as it's not in checkpoint
            vit_config = config.vit_config
            if vit_config.num_hidden_layers == 27:
                logger.warning(
                    "Overriding vit_config.num_hidden_layers from 27 to 26 "
                    "to match the Bagel model checkpoint."
                )
                vit_config.num_hidden_layers = 26
            if not hasattr(vit_config, "vision_use_head"):
                logger.warning(
                    "Setting vit_config.vision_use_head to False as it is not "
                    "present in the Bagel model checkpoint."
                )
                vit_config.vision_use_head = False
            with self._mark_tower_model(vllm_config, "image"):
                self.vit_model = SiglipVisionModel(
                    config=vit_config,
                    quant_config=quant_config,
                    prefix=maybe_prefix(prefix, "vit_model"),
                )
            # Initialize connector (MLP) projecting ViT features into the
            # LLM's hidden space
            vit_hidden_size = config.vit_config.hidden_size
            llm_hidden_size = config.llm_config.hidden_size
            self.connector = BagelVisionMLP(
                in_features=vit_hidden_size,
                hidden_features=llm_hidden_size,
                out_features=llm_hidden_size,
                act_layer=config.connector_act,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "connector"),
            )
            # Position embedding for vision tokens
            self.vit_pos_embed = PositionEmbedding(
                max_num_patch_per_side=config.vit_max_num_patch_per_side,
                hidden_size=llm_hidden_size,
            )
        else:
            # Visual understanding disabled: install placeholders so attribute
            # access fails loudly if vision components are used anyway.
            self.vit_model = StageMissingLayer("image_tower")
            self.connector = StageMissingLayer("image_tower")
            self.vit_pos_embed = StageMissingLayer("image_tower")
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def _parse_and_validate_image_input(
        self, **kwargs: object
    ) -> BagelImageInputs | None:
        """Extract pixel_values from processor kwargs, or None if absent."""
        pixel_values = kwargs.pop("pixel_values", None)
        if pixel_values is None:
            return None
        return BagelImagePixelInputs(
            type="pixel_values",
            pixel_values=pixel_values,
        )

    def _process_image_input(
        self, image_input: BagelImageInputs
    ) -> tuple[torch.Tensor, ...]:
        """Process image inputs through vision encoder and connector."""
        pixel_values = image_input["pixel_values"]
        # Handle potential extra batch dimension
        # Expected shape: (batch_size * num_images, 3, H, W)
        # But might receive: (batch_size, num_images, 3, H, W)
        if pixel_values.ndim == 5:
            # Flatten batch and num_images dimensions
            batch_size, num_images, channels, height, width = pixel_values.shape
            pixel_values = pixel_values.reshape(
                batch_size * num_images, channels, height, width
            )
        # Get vision features from SigLIP
        # pixel_values shape: (batch_size * num_images, 3, H, W)
        vision_features = self.vit_model(pixel_values)
        # Pass through connector
        vision_embeds = self.connector(vision_features)
        # Add position embeddings
        batch_size, num_patches, hidden_size = vision_embeds.shape
        patch_size = self.config.vit_config.patch_size
        image_size = self.config.vit_config.image_size
        # Calculate grid dimensions
        num_patches_per_side = image_size // patch_size
        # Create flattened position IDs (0 to num_patches-1)
        # For BAGEL, we use extrapolate mode by default
        h_coords = torch.arange(num_patches_per_side, device=vision_embeds.device)
        w_coords = torch.arange(num_patches_per_side, device=vision_embeds.device)
        # Row-major IDs into the (max_patch_per_side x max_patch_per_side)
        # embedding table; the stride is the configured max side length.
        position_ids = (
            h_coords[:, None] * self.config.vit_max_num_patch_per_side + w_coords
        ).flatten()
        position_ids = position_ids.unsqueeze(0).expand(batch_size, -1).flatten()
        # Add position embeddings
        pos_embeds = self.vit_pos_embed(position_ids)
        pos_embeds = pos_embeds.reshape(batch_size, num_patches, hidden_size)
        # Ensure pos_embeds are on the same device as vision_embeds
        pos_embeds = pos_embeds.to(vision_embeds.device)
        vision_embeds = vision_embeds + pos_embeds
        # Split by image: one (num_patches, hidden_size) tensor per image
        return tuple(vision_embeds)

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
        """Get multimodal embeddings from input."""
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return []
        return self._process_image_input(image_input)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        """Run forward pass for BAGEL.
        Args:
            input_ids: Flattened (concatenated) input_ids corresponding to a batch.
            positions: Flattened (concatenated) position ids corresponding to a batch.
            intermediate_tensors: Intermediate tensors from prior forward pass.
            inputs_embeds: Optional tensor of input embeddings.
        """
        # Non-first pipeline stages consume intermediate_tensors, not embeddings.
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model.model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        # Delegate to the wrapped Qwen2 language model's LM head.
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights from checkpoint."""
        # Skip generation-related weights since we only support text2text and image2text
        # Filter out all image generation components:
        # - 'moe_gen': MoE generation weights
        # - 'latent_pos_embed': Latent position embeddings for VAE
        # - 'llm2vae', 'vae2llm': LLM-VAE projections
        # - 'time_embedder': Timestep embeddings for diffusion
        # - VAE encoder/decoder: Use specific prefixes to avoid matching vision encoder
        generation_keywords = [
            "moe_gen",
            "latent_pos_embed",
            "llm2vae",
            "vae2llm",
            "time_embedder",
        ]
        vae_prefixes = [
            "decoder.",
            "encoder.",
        ]  # VAE encoder/decoder, not vision encoder
        filtered_weights = []
        for name, tensor in weights:
            # Skip generation-related keywords
            if any(skip in name for skip in generation_keywords):
                continue
            if any(name.startswith(prefix) for prefix in vae_prefixes):
                continue
            # Checkpoint may store the patch embedding as a 2-D linear weight;
            # reshape it into the 4-D conv layout (out, in, kH, kW) vLLM expects.
            if "patch_embedding.weight" in name and tensor.ndim == 2:
                out_channels = tensor.shape[0]
                in_features = tensor.shape[1]
                patch_size = self.config.vit_config.patch_size
                in_channels = self.config.vit_config.num_channels
                if in_features == in_channels * patch_size * patch_size:
                    tensor = tensor.reshape(
                        out_channels, patch_size, patch_size, in_channels
                    )
                    tensor = tensor.permute(0, 3, 1, 2).contiguous()
            filtered_weights.append((name, tensor))
        # Skip vit_pos_embed.pos_embed as it's handled by PositionEmbedding module
        loader = AutoWeightsLoader(self, skip_prefixes=["vit_pos_embed.pos_embed"])
        return loader.load_weights(filtered_weights, mapper=self.hf_to_vllm_mapper)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/bagel.py",
"license": "Apache License 2.0",
"lines": 493,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/bagel.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from transformers import PretrainedConfig, SiglipVisionConfig
from transformers.models.qwen2 import Qwen2Config
class BagelConfig(PretrainedConfig):
    """Configuration class for BAGEL model."""

    model_type = "bagel"

    def __init__(
        self,
        visual_gen: bool = True,
        visual_und: bool = True,
        llm_config: dict | Qwen2Config | None = None,
        vit_config: dict | SiglipVisionConfig | None = None,
        vae_config: dict | None = None,
        latent_patch_size: int = 2,
        max_latent_size: int = 32,
        vit_max_num_patch_per_side: int = 70,
        connector_act: str = "gelu_pytorch_tanh",
        interpolate_pos: bool = False,
        timestep_shift: float = 1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.visual_gen = visual_gen
        self.visual_und = visual_und

        # Sub-configs may arrive as plain dicts (e.g. deserialized JSON);
        # normalize them to config objects, falling back to defaults.
        self.llm_config = (
            Qwen2Config(**llm_config)
            if isinstance(llm_config, dict)
            else (llm_config or Qwen2Config())
        )
        self.vit_config = (
            SiglipVisionConfig(**vit_config)
            if isinstance(vit_config, dict)
            else (vit_config or SiglipVisionConfig())
        )
        self.vae_config = vae_config or {"z_channels": 16, "downsample": 8}

        self.latent_patch_size = latent_patch_size
        self.max_latent_size = max_latent_size
        self.vit_max_num_patch_per_side = vit_max_num_patch_per_side
        self.connector_act = connector_act
        self.interpolate_pos = interpolate_pos
        self.timestep_shift = timestep_shift

    @property
    def hidden_size(self) -> int:
        """Return the hidden size of the language model."""
        return self.llm_config.hidden_size
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/bagel.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/processors/bagel.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 Bytedance Ltd. and/or its affiliates.
"""BAGEL processor for image and text inputs."""
from transformers import AutoProcessor
from transformers.feature_extraction_utils import BatchFeature
from transformers.image_utils import ImageInput
from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
class BagelProcessorKwargs(ProcessingKwargs, total=False):  # type: ignore[call-arg]
    # Default processing kwargs: the image processor returns PyTorch tensors.
    _defaults = {
        "images_kwargs": {
            "return_tensors": "pt",
        },
    }
class BagelProcessor(ProcessorMixin):
    """
    Constructs a BAGEL processor which wraps a
    SigLIP image processor and a Qwen2 tokenizer.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "SiglipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __call__(
        self,
        text: TextInput
        | PreTokenizedInput
        | list[TextInput]
        | list[PreTokenizedInput] = None,
        images: ImageInput = None,
        **kwargs: Unpack[BagelProcessorKwargs],
    ):
        """
        Prepare one or several text sequence(s) and image(s) for the model.
        """
        merged_kwargs = self._merge_kwargs(
            BagelProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        data: dict = {}
        if images is not None:
            # Run the SigLIP image processor to obtain pixel values.
            data.update(self.image_processor(images, **merged_kwargs["images_kwargs"]))
        if text is not None:
            # Tokenize the text with the wrapped Qwen2 tokenizer.
            data.update(self.tokenizer(text, **merged_kwargs["text_kwargs"]))
        return BatchFeature(data=data)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Order-preserving union of tokenizer and image-processor input names."""
        combined = (
            self.tokenizer.model_input_names + self.image_processor.model_input_names
        )
        return list(dict.fromkeys(combined))
# Make the processor discoverable through AutoProcessor for configs whose
# processor_class is "BagelProcessor".
AutoProcessor.register("BagelProcessor", BagelProcessor)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/processors/bagel.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/multimodal/generation/test_vit_backend_functionality.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Consolidated test for ViT attention backend functionality across multiple models.
This test validates that each multimodal model can successfully generate outputs
using different ViT attention backends. Tests are parametrized by model and backend.
"""
from dataclasses import asdict
from typing import Any
import pytest
from transformers import AutoProcessor
from vllm import LLM, EngineArgs, SamplingParams
from vllm.multimodal.utils import encode_image_url
from vllm.multimodal.video import sample_frames_from_video
from vllm.platforms import current_platform
from vllm.v1.attention.backends.registry import AttentionBackendEnum
from ....utils import create_new_process_for_each_test
from ...utils import dummy_hf_overrides
# Dots.OCR prompt from official repository
# https://github.com/rednote-hilab/dots.ocr/blob/d72d1d8c5bdd0362eb264f714cdbd1e5daa7cdff/dots_ocr/utils/prompts.py#L3
# ruff: noqa: E501
DOTS_OCR_PROMPT = """Please output the layout information from the PDF image, including each layout element's bbox, its category, and the corresponding text content within the bbox.
1. Bbox format: [x1, y1, x2, y2]
2. Layout Categories: The possible categories are ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title'].
3. Text Extraction & Formatting Rules:
- Picture: For the 'Picture' category, the text field should be omitted.
- Formula: Format its text as LaTeX.
- Table: Format its text as HTML.
- All Others (Text, Title, etc.): Format their text as Markdown.
4. Constraints:
- The output text must be the original text from the image, with no translation.
- All layout elements must be sorted according to human reading order.
5. Final Output: The entire output must be a single JSON object.
"""
# Qwen-style placeholder marking where sampled video frames go in the prompt.
VIDEO_PLACEHOLDER = "<|vision_start|><|video_pad|><|vision_end|>"
# Model configurations
#
# Each entry drives one parametrized test case. Keys used by the handlers
# (not every entry sets all of them):
#   model_name          - HF model identifier passed to the engine
#   interface           - "llm_generate", "llm_chat", or "vllm_runner"
#   media_type          - "video" routes the case to the video handler
#   max_model_len / max_num_seqs / limit_mm_per_prompt - engine arguments
#   sampling_params     - kwargs for SamplingParams
#   supported_backends  - restricts which ViT attention backends are tested
#   use_processor / prompt_builder / question - how the prompt is built
#   output_validator    - predicate applied to each generated text
MODEL_CONFIGS: dict[str, dict[str, Any]] = {
    "dots_ocr": {
        "model_name": "rednote-hilab/dots.ocr",
        "interface": "llm_chat",
        "max_model_len": 32768,
        "max_num_seqs": 1,
        "limit_mm_per_prompt": {"image": 1},
        "sampling_params": {
            "temperature": 0.1,
            "max_tokens": 16384,
            "top_p": 0.9,
            "stop_token_ids": None,
        },
        "use_specific_image": "stop_sign",
        "prompt_builder": "build_dots_ocr_prompt",
        "output_validator": lambda x: len(x) > 10 and "stop" in x.lower(),
    },
    "ernie45_vl": {
        "model_name": "baidu/ERNIE-4.5-VL-28B-A3B-PT",
        "interface": "llm_generate",
        "max_model_len": 16384,
        "max_num_seqs": 2,
        "sampling_params": {
            "temperature": 0.0,
            "max_tokens": 256,
            "stop_token_ids": None,
        },
        "use_processor": True,
        "question": "What is the content of each image?",
    },
    "glm4_1v": {
        "model_name": "zai-org/GLM-4.1V-9B-Thinking",
        "interface": "llm_generate",
        "max_model_len": 32768,
        "max_num_seqs": 2,
        "sampling_params": {
            "temperature": 0.0,
            "max_tokens": 256,
            "stop_token_ids": None,
        },
        "use_processor": True,
        "question": "What is the content of each image?",
    },
    "glm_ocr": {
        "model_name": "zai-org/GLM-OCR",
        "interface": "llm_generate",
        "max_model_len": 131072,
        "max_num_seqs": 2,
        "sampling_params": {
            "temperature": 0.0,
            "max_tokens": 256,
            "stop_token_ids": None,
        },
        "use_processor": True,
        "question": "Text Recognition:",
    },
    "keye_vl": {
        "model_name": "Kwai-Keye/Keye-VL-8B-Preview",
        "interface": "llm_generate",
        "max_model_len": 8192,
        "max_num_seqs": 5,
        "sampling_params": {
            "temperature": 0.0,
            "max_tokens": 256,
            "stop_token_ids": None,
        },
        # Keye-VL only works with FlashAttention-style ViT backends.
        "supported_backends": {
            AttentionBackendEnum.FLASH_ATTN,
            AttentionBackendEnum.ROCM_AITER_FA,
        },
        "use_processor": True,
        "question": "What is the content of each image?",
    },
    "ovis2_5": {
        "model_name": "AIDC-AI/Ovis2.5-2B",
        "interface": "llm_generate",
        "max_model_len": 8192,
        "max_num_seqs": 2,
        "sampling_params": {
            "temperature": 0.0,
            "max_tokens": 256,
            "stop_token_ids": None,
        },
        "prompt_builder": "build_ovis_prompt",
        "question": "What is the content of each image?",
    },
    "qwen2_5_vl": {
        "model_name": "Qwen/Qwen2.5-VL-3B-Instruct",
        "interface": "vllm_runner",
        "media_type": "video",
        "max_model_len": 4000,
        "max_num_seqs": 1,
        "limit_mm_per_prompt": {"video": 1},
        "sampling_params": {
            "max_tokens": 128,
        },
        "runner_kwargs": {
            "runner": "generate",
            "dtype": "bfloat16",
        },
        "video_params": {
            "num_frames": 16,
            "pruning_rates": [0.0, 0.75],
        },
    },
    "qwen2_5_omni": {
        "model_name": "Qwen/Qwen2.5-Omni-3B",
        "interface": "llm_generate",
        "max_model_len": 32768,
        "max_num_seqs": 2,
        "limit_mm_per_prompt": {"image": 3, "video": 3, "audio": 3},
        "sampling_params": {
            "temperature": 0.6,
            "top_p": 0.95,
            "top_k": 20,
            "max_tokens": 16384,
        },
        "use_processor": True,
        "question": "What is the content of each image?",
    },
    "qwen3_omni": {
        "model_name": "Qwen/Qwen3-Omni-30B-A3B-Instruct",
        "interface": "llm_generate",
        "max_model_len": 32768,
        "max_num_seqs": 2,
        "limit_mm_per_prompt": {"image": 3, "video": 3, "audio": 3},
        "sampling_params": {
            "temperature": 0.6,
            "top_p": 0.95,
            "top_k": 20,
            "max_tokens": 16384,
        },
        "use_processor": True,
        "question": "What is the content of each image?",
    },
}
# Prompt builder functions
def build_dots_ocr_prompt(images, config):
    """Build Dots.OCR specific prompt with OCR instructions."""
    # Dots.OCR is exercised with a single image (already filtered to stop_sign).
    image_url = encode_image_url(images[0])
    content = [
        {"type": "image_url", "image_url": {"url": image_url}},
        {
            "type": "text",
            "text": f"<|img|><|imgpad|><|endofimg|>{DOTS_OCR_PROMPT}",
        },
    ]
    return [{"role": "user", "content": content}]
def build_processor_prompt(images, config):
    """Build prompt using AutoProcessor.apply_chat_template()."""
    processor = AutoProcessor.from_pretrained(
        config["model_name"], trust_remote_code=True
    )
    # One image entry per asset, followed by the question text.
    content = [{"type": "image", "image": encode_image_url(img)} for img in images]
    content.append({"type": "text", "text": config["question"]})
    messages = [{"role": "user", "content": content}]
    return processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
def build_ovis_prompt(images, config):
    """Build Ovis2.5 specific prompt with custom format.

    Args:
        images: Sequence of image assets; only the count is needed to emit the
            numbered ``<image>`` placeholders.
        config: Model config dict; ``config["question"]`` is appended after
            the placeholders.

    Returns:
        The fully formatted chat prompt string.
    """
    # The placeholders only depend on how many images there are, so count the
    # images directly instead of base64-encoding each one just to enumerate
    # the resulting URLs (which the original did, wasting work).
    placeholders = "\n".join(
        f"Image-{i}: <image>\n" for i in range(1, len(images) + 1)
    )
    return (
        f"<|im_start|>user\n\n{placeholders}\n{config['question']}<|im_end|>\n"
        "<|im_start|>assistant\n"
    )
def build_qwen2_5_video_prompt():
    """Build Qwen2.5-VL video prompt with EVS placeholder."""
    system_turn = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    user_turn = (
        f"<|im_start|>user\n{VIDEO_PLACEHOLDER}"
        "Describe this video with a short sentence (no more than 20 words)"
        "<|im_end|><|im_start|>assistant\n"
    )
    return system_turn + user_turn
# Handler functions
def run_llm_generate_test(config, mm_encoder_attn_backend, image_assets):
    """Standard LLM.generate() interface handler.

    Builds a prompt over all image assets, starts an engine with the given
    ViT attention backend, generates once, and applies the configured output
    validator to every completion.
    """
    images = [asset.pil_image for asset in image_assets]
    # Build prompt
    if config.get("use_processor"):
        prompt = build_processor_prompt(images, config)
    else:
        # Resolve the prompt builder function by name from this module.
        prompt_builder_name = config.get("prompt_builder", "build_ovis_prompt")
        prompt_builder = globals()[prompt_builder_name]
        prompt = prompt_builder(images, config)
    # Determine limit_mm_per_prompt (default: one slot per provided image)
    limit_mm_per_prompt = config.get("limit_mm_per_prompt", {"image": len(images)})
    # Create engine
    engine_args = EngineArgs(
        model=config["model_name"],
        trust_remote_code=True,
        max_model_len=config["max_model_len"],
        max_num_seqs=config["max_num_seqs"],
        limit_mm_per_prompt=limit_mm_per_prompt,
        mm_encoder_attn_backend=mm_encoder_attn_backend,
        hf_overrides=dummy_hf_overrides,
        load_format="dummy",
    )
    # Fixed seed keeps the run reproducible across backends.
    engine_dict = asdict(engine_args) | {"seed": 42}
    llm = LLM(**engine_dict)
    # Generate
    sampling_params = SamplingParams(**config["sampling_params"])
    outputs = llm.generate(
        {
            "prompt": prompt,
            "multi_modal_data": {"image": images},
        },
        sampling_params=sampling_params,
    )
    # Validate
    for o in outputs:
        generated_text = o.outputs[0].text
        # Default validator: any non-trivial output counts as success.
        validator = config.get("output_validator", lambda x: len(x) > 10)
        assert validator(generated_text), (
            f"Validation failed for {config['model_name']}: {generated_text}"
        )
def run_llm_chat_test(config, mm_encoder_attn_backend, image_assets):
    """LLM.chat() interface handler for Dots.OCR.

    Uses only the stop_sign asset, builds the Dots.OCR chat messages, runs a
    single chat generation, and applies the configured output validator.
    """
    # Filter to stop_sign image only
    stop_sign_image = [
        asset.pil_image for asset in image_assets if asset.name == "stop_sign"
    ][0]
    # Build messages
    messages = build_dots_ocr_prompt([stop_sign_image], config)
    # Create engine
    engine_args = EngineArgs(
        model=config["model_name"],
        trust_remote_code=True,
        max_model_len=config["max_model_len"],
        max_num_seqs=config["max_num_seqs"],
        limit_mm_per_prompt=config["limit_mm_per_prompt"],
        mm_encoder_attn_backend=mm_encoder_attn_backend,
        hf_overrides=dummy_hf_overrides,
        load_format="dummy",
    )
    # Fixed seed keeps the run reproducible across backends.
    engine_dict = asdict(engine_args) | {"seed": 42}
    llm = LLM(**engine_dict)
    # Generate using chat
    sampling_params = SamplingParams(**config["sampling_params"])
    outputs = llm.chat(messages=messages, sampling_params=sampling_params)
    # Validate
    for o in outputs:
        generated_text = o.outputs[0].text
        validator = config.get("output_validator", lambda x: len(x) > 10)
        assert validator(generated_text), (
            f"Validation failed for {config['model_name']}: {generated_text}"
        )
def run_video_test(config, mm_encoder_attn_backend, video_assets, vllm_runner):
    """Video test with EVS (Efficient Video Sampling) handler.

    Runs one greedy generation per configured pruning rate and checks the
    output is a non-empty string.
    """
    # One engine run per EVS pruning rate.
    for pruning_rate in config["video_params"]["pruning_rates"]:
        num_frames = config["video_params"]["num_frames"]
        # Sample frames from video
        sampled_vids = [
            sample_frames_from_video(asset.np_ndarrays, num_frames)
            for asset in video_assets
        ]
        # Build prompt and prepare video (single prompt, first video only)
        prompt = build_qwen2_5_video_prompt()
        prompts = [prompt]
        videos = [sampled_vids[0]]
        # Run with vllm_runner context manager
        with vllm_runner(
            config["model_name"],
            max_model_len=config["max_model_len"],
            max_num_seqs=config["max_num_seqs"],
            limit_mm_per_prompt=config["limit_mm_per_prompt"],
            tensor_parallel_size=1,
            video_pruning_rate=pruning_rate,
            mm_encoder_attn_backend=mm_encoder_attn_backend,
            hf_overrides=dummy_hf_overrides,
            load_format="dummy",
            **config["runner_kwargs"],
        ) as vllm_model:
            outputs = vllm_model.generate_greedy(
                prompts,
                config["sampling_params"]["max_tokens"],
                videos=videos,
            )
        # Validate output
        assert len(outputs) == 1, f"Expected 1 output, got {len(outputs)}"
        output_ids, output_text = outputs[0]
        assert len(output_ids) > 0, "Generated no output IDs"
        assert len(output_text) > 0, "Generated empty text"
        assert isinstance(output_text, str), (
            f"Output is not string: {type(output_text)}"
        )
# Main test function
@pytest.mark.parametrize("model_key", list(MODEL_CONFIGS.keys()))
@pytest.mark.parametrize(
    "mm_encoder_attn_backend",
    [None] + current_platform.get_supported_vit_attn_backends(),
)
@pytest.mark.skip(reason="Broken test due to memory segmentation fault")
@create_new_process_for_each_test()
def test_vit_backend_functionality(
    model_key: str,
    mm_encoder_attn_backend: AttentionBackendEnum | None,
    image_assets,
    video_assets,
    vllm_runner,
    request,
):
    """Test ViT attention backend functionality for multimodal models.
    This test validates that each model can successfully generate outputs
    using different ViT attention backends. The test:
    1. Filters unsupported backends per model
    2. Applies appropriate GPU marks
    3. Routes to the correct test handler based on interface
    4. Validates output meets minimum requirements
    """
    config = MODEL_CONFIGS[model_key]
    # Step 1: Backend filtering (None means "platform default" and always runs)
    if (
        "supported_backends" in config
        and mm_encoder_attn_backend is not None
        and mm_encoder_attn_backend not in config["supported_backends"]
    ):
        pytest.skip(
            f"{model_key} does not support {mm_encoder_attn_backend} backend now."
        )
    # Step 2: Apply GPU marks dynamically
    if "gpu_marks" in config:
        for mark in config["gpu_marks"]:
            request.applymarker(mark)
    # Step 3: Route to appropriate handler
    if config.get("media_type") == "video":
        run_video_test(config, mm_encoder_attn_backend, video_assets, vllm_runner)
    elif config["interface"] == "llm_chat":
        run_llm_chat_test(config, mm_encoder_attn_backend, image_assets)
    elif config["interface"] == "llm_generate":
        run_llm_generate_test(config, mm_encoder_attn_backend, image_assets)
    else:
        raise ValueError(f"Unknown interface: {config['interface']}")
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/multimodal/generation/test_vit_backend_functionality.py",
"license": "Apache License 2.0",
"lines": 388,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/multimodal/test_sparse_tensor_validation_unit.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for sparse tensor validation.
Simple, fast unit tests that can run without server fixtures.
Run with: pytest tests/multimodal/test_sparse_tensor_validation_unit.py -v
"""
import io
import pytest
import torch
class TestSparseTensorValidationContextManager:
"""Test that torch.sparse.check_sparse_tensor_invariants() works as expected."""
def test_valid_sparse_tensor_passes(self):
"""Valid sparse tensors should pass validation."""
indices = torch.tensor([[0, 1], [0, 1]])
values = torch.tensor([1.0, 2.0])
shape = (2, 2)
with torch.sparse.check_sparse_tensor_invariants():
tensor = torch.sparse_coo_tensor(indices, values, shape)
dense = tensor.to_dense()
assert dense.shape == shape
def test_out_of_bounds_indices_rejected(self):
"""Sparse tensors with out-of-bounds indices should be rejected."""
indices = torch.tensor([[5], [5]]) # Out of bounds for 2x2
values = torch.tensor([1.0])
shape = (2, 2)
with pytest.raises(RuntimeError) as exc_info: # noqa: SIM117
with torch.sparse.check_sparse_tensor_invariants():
tensor = torch.sparse_coo_tensor(indices, values, shape)
tensor.to_dense()
assert (
"index" in str(exc_info.value).lower()
or "bound" in str(exc_info.value).lower()
)
def test_negative_indices_rejected(self):
"""Sparse tensors with negative indices should be rejected."""
indices = torch.tensor([[-1], [0]])
values = torch.tensor([1.0])
shape = (2, 2)
with pytest.raises(RuntimeError): # noqa: SIM117
with torch.sparse.check_sparse_tensor_invariants():
tensor = torch.sparse_coo_tensor(indices, values, shape)
tensor.to_dense()
def test_without_context_manager_allows_invalid(self):
"""
WITHOUT validation, invalid tensors may not immediately error.
This demonstrates the vulnerability: PyTorch 2.8.0+ doesn't validate
by default, which can lead to memory corruption.
"""
indices = torch.tensor([[100], [100]]) # Way out of bounds
values = torch.tensor([1.0])
shape = (2, 2)
# Without validation context, this might create an invalid tensor
# (actual behavior depends on PyTorch version)
tensor = torch.sparse_coo_tensor(indices, values, shape)
# The tensor object is created, but it's invalid
assert tensor.is_sparse
class TestTorchLoadWithValidation:
"""Test torch.load() with sparse tensor validation."""
def test_load_valid_sparse_tensor_with_validation(self):
"""Valid sparse tensors should load successfully with validation."""
# Create and save a valid sparse tensor
indices = torch.tensor([[0, 1], [0, 1]])
values = torch.tensor([1.0, 2.0])
tensor = torch.sparse_coo_tensor(indices, values, (2, 2))
buffer = io.BytesIO()
torch.save(tensor, buffer)
buffer.seek(0)
# Load with validation
with torch.sparse.check_sparse_tensor_invariants():
loaded = torch.load(buffer, weights_only=True)
dense = loaded.to_dense()
assert dense.shape == (2, 2)
def test_load_invalid_sparse_tensor_rejected(self):
"""Invalid sparse tensors should be caught when loaded with validation."""
# Create an invalid sparse tensor (out of bounds)
indices = torch.tensor([[10], [10]])
values = torch.tensor([1.0])
tensor = torch.sparse_coo_tensor(indices, values, (2, 2))
buffer = io.BytesIO()
torch.save(tensor, buffer)
buffer.seek(0)
# Load with validation - should fail on to_dense()
with pytest.raises(RuntimeError): # noqa: SIM117
with torch.sparse.check_sparse_tensor_invariants():
loaded = torch.load(buffer, weights_only=True)
loaded.to_dense()
def test_load_dense_tensor_unaffected(self):
"""Dense tensors should work normally with the validation context."""
# Create and save a dense tensor
tensor = torch.randn(10, 20)
buffer = io.BytesIO()
torch.save(tensor, buffer)
buffer.seek(0)
# Load with validation (should have no effect on dense tensors)
with torch.sparse.check_sparse_tensor_invariants():
loaded = torch.load(buffer, weights_only=True)
assert loaded.shape == (10, 20)
assert not loaded.is_sparse
if __name__ == "__main__":
    # Allow running directly for quick testing (bypasses pytest discovery)
    pytest.main([__file__, "-v", "--tb=short"])
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/test_sparse_tensor_validation_unit.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/benchmarks/startup.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Benchmark the cold and warm startup time of vLLM models.
This script measures total startup time (including model loading, compilation,
and cache operations) for both cold and warm scenarios:
- Cold startup: Fresh start with no caches (temporary cache directories)
- Warm startup: Using cached compilation and model info
"""
import argparse
import dataclasses
import json
import multiprocessing
import os
import shutil
import tempfile
import time
from contextlib import contextmanager
from typing import Any
import numpy as np
from tqdm import tqdm
from vllm.benchmarks.lib.utils import (
convert_to_pytorch_benchmark_format,
write_to_json,
)
from vllm.engine.arg_utils import EngineArgs
@contextmanager
def cold_startup():
    """
    Context manager to measure cold startup time:
    1. Uses a temporary directory for vLLM cache to avoid any pollution
       between cold startup iterations.
    2. Uses inductor's fresh_cache to clear torch.compile caches.
    """
    from torch._inductor.utils import fresh_cache

    # Point VLLM_CACHE_ROOT at a throwaway directory for the duration.
    saved_cache_root = os.environ.get("VLLM_CACHE_ROOT")
    scratch_dir = tempfile.mkdtemp(prefix="vllm_startup_bench_cold_")
    try:
        os.environ["VLLM_CACHE_ROOT"] = scratch_dir
        with fresh_cache():
            yield
    finally:
        # Remove the throwaway cache and restore the previous env setting.
        shutil.rmtree(scratch_dir, ignore_errors=True)
        if saved_cache_root:
            os.environ["VLLM_CACHE_ROOT"] = saved_cache_root
        else:
            os.environ.pop("VLLM_CACHE_ROOT", None)
def run_startup_in_subprocess(engine_args, result_queue):
    """
    Run LLM startup in a subprocess and return timing metrics via a queue.
    This ensures complete isolation between iterations.

    On success a single dict with total_startup_time and compilation_time
    is enqueued; on failure a None sentinel followed by the error string.
    """
    try:
        # Import inside the subprocess to avoid issues with forking
        from vllm import LLM

        # Measure total startup time
        start_time = time.perf_counter()
        llm = LLM(**dataclasses.asdict(engine_args))
        total_startup_time = time.perf_counter() - start_time
        # Extract compilation time if available
        compilation_time = 0.0
        if hasattr(llm.llm_engine, "vllm_config"):
            vllm_config = llm.llm_engine.vllm_config
            if (
                hasattr(vllm_config, "compilation_config")
                and vllm_config.compilation_config is not None
            ):
                compilation_time = vllm_config.compilation_config.compilation_time
        result_queue.put(
            {
                "total_startup_time": total_startup_time,
                "compilation_time": compilation_time,
            }
        )
    except Exception as e:
        # NOTE(review): the failure path puts TWO items on the queue while the
        # success path puts one; confirm the consumer drains both, otherwise
        # the stray error string could be read as the next iteration's result.
        result_queue.put(None)
        result_queue.put(str(e))
def save_to_pytorch_benchmark_format(
    args: argparse.Namespace, results: dict[str, Any]
) -> None:
    """Write benchmark results as PyTorch-benchmark-format JSON files.

    One file per metric family (cold/warm x startup/compilation) is written
    next to ``args.output_json`` as ``<stem>.<family>.pytorch.json``.
    Families whose conversion yields no records are skipped. Assumes
    ``args.output_json`` is set (the caller only invokes this when it is).
    """
    base_name = os.path.splitext(args.output_json)[0]
    # Each family maps 1:1 onto keys in `results`; a table-driven loop
    # replaces four copies of the same convert/write boilerplate.
    families = (
        "cold_startup",
        "cold_compilation",
        "warm_startup",
        "warm_compilation",
    )
    for family in families:
        records = convert_to_pytorch_benchmark_format(
            args=args,
            metrics={f"avg_{family}_time": [results[f"avg_{family}_time"]]},
            extra_info={
                f"{family}_times": results[f"{family}_times"],
                f"{family}_percentiles": results[f"{family}_percentiles"],
            },
        )
        if records:
            write_to_json(f"{base_name}.{family}.pytorch.json", records)
def add_cli_args(parser: argparse.ArgumentParser):
    """Register startup-benchmark CLI options plus all engine arguments."""
    # Integer iteration-count flags share type/shape; register from a table.
    iteration_flags = (
        ("--num-iters-cold", 3, "Number of cold startup iterations."),
        (
            "--num-iters-warmup",
            1,
            "Number of warmup iterations before benchmarking warm startups.",
        ),
        ("--num-iters-warm", 3, "Number of warm startup iterations."),
    )
    for flag, default, help_text in iteration_flags:
        parser.add_argument(flag, type=int, default=default, help=help_text)
    parser.add_argument(
        "--output-json",
        type=str,
        default=None,
        help="Path to save the startup time results in JSON format.",
    )
    # Engine arguments are appended last, as in the original registration order.
    return EngineArgs.add_cli_args(parser)
def _compute_stats(
    times: list[float], percentages: list[int]
) -> tuple[float, np.ndarray]:
    """Return (mean, percentile array) for a list of timings in seconds."""
    arr = np.array(times)
    return float(np.mean(arr)), np.percentile(arr, percentages)


def _print_phase_stats(
    title: str,
    avg_startup: float,
    avg_compilation: float,
    startup_percentiles: np.ndarray,
    compilation_percentiles: np.ndarray,
    percentages: list[int],
) -> None:
    """Print one phase's (cold or warm) timing summary to stdout."""
    print(f"\n{title}:")
    print(f"Avg total startup time: {avg_startup:.2f} seconds")
    print(f"Avg compilation time: {avg_compilation:.2f} seconds")
    print("Startup time percentiles:")
    for percentage, percentile in zip(percentages, startup_percentiles):
        print(f"  {percentage}%: {percentile:.2f} seconds")
    print("Compilation time percentiles:")
    for percentage, percentile in zip(percentages, compilation_percentiles):
        print(f"  {percentage}%: {percentile:.2f} seconds")


def main(args: argparse.Namespace):
    """Benchmark vLLM cold and warm startup times.

    Each startup runs in a spawned subprocess for isolation; cold iterations
    additionally clear the vLLM cache and torch.compile caches via
    ``cold_startup``. Results are printed and optionally written to JSON.
    """
    # Set multiprocessing start method to 'spawn' for clean process isolation.
    # This ensures each subprocess starts fresh without inheriting state.
    multiprocessing.set_start_method("spawn", force=True)
    engine_args = EngineArgs.from_cli_args(args)

    def create_llm_and_measure_startup():
        """Run one LLM startup in a subprocess and return its timing metrics.

        Raises RuntimeError if the subprocess fails or reports nothing.
        """
        result_queue = multiprocessing.Queue()
        process = multiprocessing.Process(
            target=run_startup_in_subprocess,
            args=(engine_args, result_queue),
        )
        process.start()
        process.join()
        if result_queue.empty():
            raise RuntimeError("Subprocess did not return a result")
        result = result_queue.get()
        if result is not None:
            return result
        # Failure protocol: the subprocess enqueues None followed by the
        # error message (see run_startup_in_subprocess).
        if not result_queue.empty():
            raise RuntimeError(f"Subprocess failed: {result_queue.get()}")
        raise RuntimeError("Subprocess failed with unknown error")

    os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0"
    print("Setting VLLM_ENABLE_V1_MULTIPROCESSING=0 to collect startup metrics.\n")

    print("Measuring cold startup time...\n")
    cold_startup_times: list[float] = []
    cold_compilation_times: list[float] = []
    for _ in tqdm(range(args.num_iters_cold), desc="Cold startup iterations"):
        with cold_startup():
            metrics = create_llm_and_measure_startup()
            cold_startup_times.append(metrics["total_startup_time"])
            cold_compilation_times.append(metrics["compilation_time"])

    # Warmup for warm startup
    print("\nWarming up for warm startup measurement...\n")
    for _ in tqdm(range(args.num_iters_warmup), desc="Warmup iterations"):
        create_llm_and_measure_startup()

    print("\nMeasuring warm startup time...\n")
    warm_startup_times: list[float] = []
    warm_compilation_times: list[float] = []
    for _ in tqdm(range(args.num_iters_warm), desc="Warm startup iterations"):
        metrics = create_llm_and_measure_startup()
        warm_startup_times.append(metrics["total_startup_time"])
        warm_compilation_times.append(metrics["compilation_time"])

    # Summary statistics (mean + percentiles) for each metric family.
    percentages = [10, 25, 50, 75, 90, 99]
    avg_cold_startup, cold_startup_percentiles = _compute_stats(
        cold_startup_times, percentages
    )
    avg_cold_compilation, cold_compilation_percentiles = _compute_stats(
        cold_compilation_times, percentages
    )
    avg_warm_startup, warm_startup_percentiles = _compute_stats(
        warm_startup_times, percentages
    )
    avg_warm_compilation, warm_compilation_percentiles = _compute_stats(
        warm_compilation_times, percentages
    )

    print("\n" + "=" * 60)
    print("STARTUP TIME BENCHMARK RESULTS")
    print("=" * 60)
    _print_phase_stats(
        "COLD STARTUP",
        avg_cold_startup,
        avg_cold_compilation,
        cold_startup_percentiles,
        cold_compilation_percentiles,
        percentages,
    )
    _print_phase_stats(
        "WARM STARTUP",
        avg_warm_startup,
        avg_warm_compilation,
        warm_startup_percentiles,
        warm_compilation_percentiles,
        percentages,
    )
    print("=" * 60)

    # Output JSON results if specified
    if args.output_json:
        results = {
            "avg_cold_startup_time": avg_cold_startup,
            "avg_cold_compilation_time": avg_cold_compilation,
            "cold_startup_times": cold_startup_times,
            "cold_compilation_times": cold_compilation_times,
            "cold_startup_percentiles": dict(
                zip(percentages, cold_startup_percentiles.tolist())
            ),
            "cold_compilation_percentiles": dict(
                zip(percentages, cold_compilation_percentiles.tolist())
            ),
            "avg_warm_startup_time": avg_warm_startup,
            "avg_warm_compilation_time": avg_warm_compilation,
            "warm_startup_times": warm_startup_times,
            "warm_compilation_times": warm_compilation_times,
            "warm_startup_percentiles": dict(
                zip(percentages, warm_startup_percentiles.tolist())
            ),
            "warm_compilation_percentiles": dict(
                zip(percentages, warm_compilation_percentiles.tolist())
            ),
        }
        with open(args.output_json, "w") as f:
            json.dump(results, f, indent=4)
        save_to_pytorch_benchmark_format(args, results)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/benchmarks/startup.py",
"license": "Apache License 2.0",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/cli/benchmark/startup.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
from vllm.benchmarks.startup import add_cli_args, main
from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase
class BenchmarkStartupSubcommand(BenchmarkSubcommandBase):
    """The `startup` subcommand for `vllm bench`."""

    # Registration metadata consumed by the benchmark CLI machinery.
    name = "startup"
    help = "Benchmark the startup time of vLLM models."

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
        # Delegates to vllm.benchmarks.startup so the CLI flags stay defined
        # next to the code that consumes them.
        add_cli_args(parser)

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        # Entry point invoked by the CLI dispatcher with the parsed args.
        main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/cli/benchmark/startup.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/multimodal/generation/test_audioflamingo3.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 The vLLM team.
# Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pytest
from tests.models.registry import HF_EXAMPLE_MODELS
from vllm import LLM, SamplingParams
MODEL_NAME = "nvidia/audio-flamingo-3-hf"
def get_fixture_path(filename):
    """Resolve *filename* inside the shared audioflamingo3 fixture directory."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, "../../fixtures/audioflamingo3", filename)
@pytest.fixture(scope="module")
def llm():
    """Module-scoped LLM instance for the AudioFlamingo3 generation tests."""
    # Skip the whole module when transformers lacks support for this model.
    model_info = HF_EXAMPLE_MODELS.get_hf_info("AudioFlamingo3ForConditionalGeneration")
    model_info.check_transformers_version(on_fail="skip")

    engine_kwargs = dict(
        model=MODEL_NAME,
        trust_remote_code=True,
        dtype="bfloat16",
        enforce_eager=True,
        limit_mm_per_prompt={"audio": 1},
    )
    try:
        return LLM(**engine_kwargs)
    except Exception as e:
        # Treat load failures (weights unavailable, OOM, ...) as a skip.
        pytest.skip(f"Failed to load model {MODEL_NAME}: {e}")
def test_single_generation(llm):
    """Greedy transcription of a single remote audio clip matches the fixture."""
    fixture_path = get_fixture_path("expected_results_single.json")
    if not os.path.exists(fixture_path):
        pytest.skip(f"Fixture not found: {fixture_path}")
    with open(fixture_path) as f:
        expected = json.load(f)

    audio_url = "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/Why_do_we_ask_questions_converted.wav"
    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "audio_url", "audio_url": {"url": audio_url}},
                {"type": "text", "text": "Transcribe the input speech."},
            ],
        }
    ]

    outputs = llm.chat(
        messages=conversation,
        sampling_params=SamplingParams(temperature=0.0, max_tokens=128),
    )

    produced = outputs[0].outputs[0].text.strip()
    reference = expected["transcriptions"][0]
    # Containment in either direction tolerates minor punctuation drift.
    assert reference in produced or produced in reference
def test_batched_generation(llm):
    """Batched chat over two audio clips, each checked against fixture text."""
    fixture_path = get_fixture_path("expected_results_batched.json")
    if not os.path.exists(fixture_path):
        pytest.skip(f"Fixture not found: {fixture_path}")
    with open(fixture_path) as f:
        expected = json.load(f)
    # Each item pairs a remote audio URL with its question; expected_idx
    # records the position of the matching reference transcription.
    items = [
        {
            "audio_url": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/dogs_barking_in_sync_with_the_music.wav",
            "question": "What is surprising about the relationship "
            "between the barking and the music?",
            "expected_idx": 0,
        },
        {
            "audio_url": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/Ch6Ae9DT6Ko_00-04-03_00-04-31.wav",
            "question": (
                "Why is the philosopher's name mentioned in the lyrics? "
                "(A) To express a sense of nostalgia "
                "(B) To indicate that language cannot express clearly, "
                "satirizing the inversion of black and white in the world "
                "(C) To add depth and complexity to the lyrics "
                "(D) To showcase the wisdom and influence of the philosopher"
            ),
            "expected_idx": 1,
        },
    ]
    # Build one single-turn conversation per item for the batched chat call.
    conversations = []
    for item in items:
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "audio_url", "audio_url": {"url": item["audio_url"]}},
                    {"type": "text", "text": item["question"]},
                ],
            }
        ]
        conversations.append(messages)
    # temperature=0.0 keeps generation deterministic for comparison.
    sampling_params = SamplingParams(temperature=0.0, max_tokens=128)
    outputs = llm.chat(
        messages=conversations,
        sampling_params=sampling_params,
    )
    # Outputs come back in input order, matching expected["transcriptions"].
    for i, output in enumerate(outputs):
        generated_text = output.outputs[0].text.strip()
        expected_text = expected["transcriptions"][i]
        # Containment in either direction tolerates minor phrasing drift.
        assert expected_text in generated_text or generated_text in expected_text
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/multimodal/generation/test_audioflamingo3.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/models/multimodal/processing/test_audioflamingo3.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 The vLLM team.
# Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock
import numpy as np
import pytest
import torch
from transformers import PretrainedConfig
from tests.models.registry import HF_EXAMPLE_MODELS
class MockAudioFlamingo3Config(PretrainedConfig):
    # Minimal stand-in for the AudioFlamingo3 HF config: only model_type and
    # the nested sub-configs that the processing code reads are provided.
    model_type = "audioflamingo3"

    def __init__(self, **kwargs):
        """Create a config with empty audio/text sub-configs."""
        super().__init__(**kwargs)
        # The real model config nests audio and text configs; bare
        # PretrainedConfig instances suffice for these tests.
        self.audio_config = PretrainedConfig()
        self.text_config = PretrainedConfig()
class MockAudioFlamingo3Processor:
    """Processor stub returning canned tokenization and feature outputs."""

    def __init__(self):
        # Only the attributes the code under test reads are provided.
        self.audio_token = "<sound>"
        self.audio_token_id = 12345
        self.feature_extractor = MockFeatureExtractor()

    def __call__(self, text=None, audios=None, **kwargs):
        # Fixed outputs regardless of input; shape (3000, 80) matches what
        # the processing tests expect from one feature window.
        canned_features = np.zeros((3000, 80))
        return {"input_ids": [1, 2, 3], "input_features": [canned_features]}
class MockFeatureExtractor:
    """Feature-extractor stub carrying just the audio geometry attributes."""

    def __init__(self):
        self.sampling_rate = 16_000  # Hz
        self.chunk_length = 30  # seconds per audio chunk
@pytest.fixture
def mock_ctx():
    """Mocked processing context wired up to the AudioFlamingo3 stubs."""
    fake_config = MockAudioFlamingo3Config()
    context = MagicMock()
    # Route config/processor lookups to the stubs; everything else on the
    # context stays a permissive MagicMock.
    context.get_hf_config.return_value = fake_config
    context.get_hf_processor.return_value = MockAudioFlamingo3Processor()
    context.model_config.hf_config = fake_config
    return context
@pytest.fixture(autouse=True)
def check_transformers_version():
    """Skip every test in this module when transformers lacks this model."""
    HF_EXAMPLE_MODELS.get_hf_info(
        "AudioFlamingo3ForConditionalGeneration"
    ).check_transformers_version(on_fail="skip")
def test_audio_chunk_counting(mock_ctx):
    """Audios longer than one chunk window should yield extra chunks."""
    from vllm.model_executor.models.audioflamingo3 import (
        AudioFlamingo3DummyInputsBuilder,
        AudioFlamingo3MultiModalProcessor,
        AudioFlamingo3ProcessingInfo,
    )

    info = AudioFlamingo3ProcessingInfo(mock_ctx)
    processor = AudioFlamingo3MultiModalProcessor(
        info, AudioFlamingo3DummyInputsBuilder(info)
    )
    # 30 s of 16 kHz audio fits one chunk; 45 s should spill into a second
    # (the mocked feature extractor advertises chunk_length == 30).
    sr = 16000
    audio_1 = np.zeros(30 * sr)
    audio_2 = np.zeros(45 * sr)
    mm_data = {"audio": [audio_1, audio_2]}
    prompt = "<|user|>Listen.<|end|>"

    from vllm.multimodal.processing import BaseMultiModalProcessor

    # Stub out the base HF-processor call so only the AudioFlamingo3
    # chunk-counting logic layered on top of it is exercised.
    def mock_base_call(self, prompt, mm_data, mm_kwargs, tok_kwargs):
        return {"input_ids": [1, 2, 3], "input_features": torch.randn(1, 80, 3000)}

    with pytest.MonkeyPatch.context() as mp:
        mp.setattr(BaseMultiModalProcessor, "_call_hf_processor", mock_base_call)
        processed = processor._call_hf_processor(prompt, mm_data, {}, {})

    # One count per input audio: 1 chunk for the 30 s clip, 2 for the 45 s clip.
    chunk_counts = processed["chunk_counts"]
    assert chunk_counts[0].item() == 1
    assert chunk_counts[1].item() == 2
    assert len(chunk_counts) == 2
def test_dummy_data_generation(mock_ctx):
    """Dummy audio inputs honour the requested count and expected length."""
    from vllm.model_executor.models.audioflamingo3 import (
        AudioFlamingo3DummyInputsBuilder,
        AudioFlamingo3ProcessingInfo,
    )

    builder = AudioFlamingo3DummyInputsBuilder(
        AudioFlamingo3ProcessingInfo(mock_ctx)
    )
    dummy_data = builder.get_dummy_mm_data(100, {"audio": 2}, {})

    assert "audio" in dummy_data
    audios = dummy_data["audio"]
    assert len(audios) == 2
    # 600 seconds of 16 kHz samples per dummy clip.
    assert len(audios[0]) == 600 * 16000
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/multimodal/processing/test_audioflamingo3.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.