repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/pool/metadata.py
vllm/v1/pool/metadata.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass

import numpy as np
import torch

from vllm.pooling_params import PoolingParams
from vllm.tasks import PoolingTask
from vllm.utils.platform_utils import is_pin_memory_available

# Resolved once at import time; pinned host memory speeds up H2D copies.
pin_memory = is_pin_memory_available()


@dataclass
class PoolingCursor:
    """Per-request bookkeeping that locates each request's tokens in a batch."""

    index: list[int]
    first_token_indices_gpu: torch.Tensor
    last_token_indices_gpu: torch.Tensor
    prompt_lens_cpu: torch.Tensor
    seq_lens_cpu: torch.Tensor
    num_scheduled_tokens_cpu: torch.Tensor

    def __getitem__(self, indices: slice):
        """Slice every per-request field in lockstep."""
        return PoolingCursor(
            index=self.index[indices],
            first_token_indices_gpu=self.first_token_indices_gpu[indices],
            last_token_indices_gpu=self.last_token_indices_gpu[indices],
            prompt_lens_cpu=self.prompt_lens_cpu[indices],
            seq_lens_cpu=self.seq_lens_cpu[indices],
            num_scheduled_tokens_cpu=self.num_scheduled_tokens_cpu[indices],
        )

    def is_partial_prefill(self):
        """True when any request still has unscheduled prompt tokens."""
        return not torch.all(self.prompt_lens_cpu == self.num_scheduled_tokens_cpu)

    def is_finished(self):
        """Element-wise mask of requests whose full prompt has been consumed."""
        return self.prompt_lens_cpu == self.seq_lens_cpu


class PoolingStates:
    """Mutable per-request state carried across scheduling steps."""

    def __init__(self):
        # Hidden states accumulated for chunked prefill with ALL pooling.
        self.hidden_states_cache: list[torch.Tensor] = []

    def clean(self):
        """Drop every cached hidden-state tensor."""
        self.hidden_states_cache.clear()


@dataclass
class PoolingMetadata:
    """Tensors for pooling."""

    prompt_lens: torch.Tensor  # CPU Tensor
    prompt_token_ids: torch.Tensor | None
    pooling_params: list[PoolingParams]
    pooling_states: list[PoolingStates]
    pooling_cursor: PoolingCursor | None = None

    def __post_init__(self) -> None:
        # Every pooling request is expected to carry a task; collect them
        # up front so downstream code can index `self.tasks` directly.
        params = self.pooling_params
        tasks: list[PoolingTask] = [
            task for p in params if (task := p.task) is not None
        ]
        assert len(params) == len(tasks)
        self.tasks = tasks

    def __getitem__(self, indices: slice):
        """Return a PoolingMetadata restricted to the sliced requests."""
        token_ids = self.prompt_token_ids
        cursor = self.pooling_cursor
        return PoolingMetadata(
            prompt_lens=self.prompt_lens[indices],
            prompt_token_ids=None if token_ids is None else token_ids[indices],
            pooling_params=self.pooling_params[indices],
            pooling_states=self.pooling_states[indices],
            pooling_cursor=None if cursor is None else cursor[indices],
        )

    def get_prompt_token_ids(self) -> list[torch.Tensor]:
        """Per-request prompt token ids, each trimmed to its prompt length."""
        prompt_token_ids = self.prompt_token_ids
        assert prompt_token_ids is not None, (
            "Please set `requires_token_ids=True` in `get_pooling_updates`"
        )
        return [prompt_token_ids[i, :num] for i, num in enumerate(self.prompt_lens)]

    def build_pooling_cursor(
        self,
        num_scheduled_tokens_np: np.ndarray,
        seq_lens_cpu: torch.Tensor,
        device: torch.device,
    ):
        """Compute this step's token boundaries and attach a PoolingCursor."""
        num_seqs = len(num_scheduled_tokens_np)
        assert len(self.prompt_lens) == num_seqs

        scheduled_cpu = torch.from_numpy(num_scheduled_tokens_np)
        # Exclusive prefix sums mark where each request's tokens begin.
        boundaries = torch.zeros(
            num_seqs + 1, dtype=torch.int64, pin_memory=pin_memory, device="cpu"
        )
        torch.cumsum(scheduled_cpu, dim=0, out=boundaries[1:])
        boundaries = boundaries.to(device, non_blocking=True)
        self.pooling_cursor = PoolingCursor(
            index=list(range(num_seqs)),
            first_token_indices_gpu=boundaries[:num_seqs],
            last_token_indices_gpu=boundaries[1:] - 1,
            prompt_lens_cpu=self.prompt_lens,
            seq_lens_cpu=seq_lens_cpu,
            num_scheduled_tokens_cpu=scheduled_cpu,
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/pool/__init__.py
vllm/v1/pool/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/structured_output/backend_guidance.py
vllm/v1/structured_output/backend_guidance.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
import json
import os
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any

import torch

from vllm.logger import init_logger
from vllm.sampling_params import SamplingParams
from vllm.utils.import_utils import LazyLoader
from vllm.v1.structured_output.backend_types import (
    StructuredOutputBackend,
    StructuredOutputGrammar,
    StructuredOutputOptions,
)
from vllm.v1.structured_output.request import get_structured_output_key

if TYPE_CHECKING:
    import llguidance
    import llguidance.hf as llguidance_hf
    import llguidance.torch as llguidance_torch
else:
    llguidance = LazyLoader("llguidance", globals(), "llguidance")
    llguidance_hf = LazyLoader("llguidance.hf", globals(), "llguidance.hf")
    llguidance_torch = LazyLoader("llguidance.torch", globals(), "llguidance.torch")

logger = init_logger(__name__)


def _walk_json_for_additional_properties(data: object):
    """Recursively force `additionalProperties: false` on object schemas."""
    if isinstance(data, dict):
        for child in data.values():
            _walk_json_for_additional_properties(child)
        declares_props = "properties" in data or "patternProperties" in data
        if declares_props and "additionalProperties" not in data:
            data["additionalProperties"] = False
    elif isinstance(data, list):
        for child in data:
            _walk_json_for_additional_properties(child)


def has_guidance_unsupported_json_features(schema: dict[str, Any]) -> bool:
    """Check if JSON schema contains features unsupported by guidance/llguidance."""

    def _scan(node: dict[str, Any]) -> bool:
        if not isinstance(node, dict):
            return False
        # llguidance has no support for patternProperties.
        if "patternProperties" in node:
            return True
        # Descend into nested objects and into dicts inside arrays.
        for value in node.values():
            if isinstance(value, dict) and _scan(value):
                return True
            if isinstance(value, list) and any(
                isinstance(item, dict) and _scan(item) for item in value
            ):
                return True
        return False

    return _scan(schema)


def process_for_additional_properties(
    guide_json: str | dict[str, Any],
) -> dict[str, Any]:
    """Parse/copy a JSON schema and disable additionalProperties throughout."""
    if isinstance(guide_json, str):
        schema = json.loads(guide_json)
    else:
        # Deep-copy so the caller's schema object is never mutated.
        schema = copy.deepcopy(guide_json)
    _walk_json_for_additional_properties(schema)
    return schema


@dataclass
class GuidanceBackend(StructuredOutputBackend):
    """Engine-level structured-output backend built on llguidance."""

    def __post_init__(self):
        so_config = self.vllm_config.structured_outputs_config
        self.disable_any_whitespace = so_config.disable_any_whitespace
        self.disable_additional_properties = so_config.disable_additional_properties
        self.ll_tokenizer = llguidance_hf.from_tokenizer(
            self.tokenizer, self.vocab_size
        )

    def compile_grammar(
        self, request_type: StructuredOutputOptions, grammar_spec: str
    ) -> StructuredOutputGrammar:
        """Serialize and compile one request's grammar into a GuidanceGrammar."""
        self.serialized_grammar = serialize_guidance_grammar(
            request_type,
            grammar_spec,
            self.disable_any_whitespace,
            self.disable_additional_properties,
        )

        matcher = llguidance.LLMatcher(
            self.ll_tokenizer,
            self.serialized_grammar,
            log_level=int(os.environ.get("LLGUIDANCE_LOG_LEVEL", "1")),
        )
        grammar = GuidanceGrammar(
            ll_matcher=matcher,
            ll_tokenizer=self.ll_tokenizer,
            vocab_size=self.vocab_size,
        )
        # Surface compile-time matcher errors immediately.
        grammar.check_error()
        return grammar

    def allocate_token_bitmask(self, max_num_seqs: int):
        return llguidance_torch.allocate_token_bitmask(
            max_num_seqs, self.ll_tokenizer.vocab_size
        )

    def destroy(self):
        pass


@dataclass
class GuidanceGrammar(StructuredOutputGrammar):
    """Request-level matcher state wrapping llguidance's LLMatcher."""

    ll_matcher: llguidance.LLMatcher
    ll_tokenizer: llguidance.LLTokenizer
    vocab_size: int
    printed_error: bool = False
    terminated: bool = False
    rollback_lag: int = 0

    def check_error(self):
        # Log only the first matcher error; later calls stay quiet.
        if self.printed_error:
            return
        err = self.ll_matcher.get_error()
        if err:
            self.printed_error = True
            logger.warning("LLMatcher error: %s", err)

    def accept_tokens(self, request_id: str, tokens: list[int]) -> bool:
        """Accepts a list of tokens and advances the parser.

        Returns True if the parser was advanced successfully,
        False if it failed to advance.
        """
        if self.ll_tokenizer.eos_token in tokens:
            if self.ll_matcher.is_stopped() and not self.terminated:
                # EOS arrived while the matcher was already stopped: a later
                # rollback must be shortened by one token to compensate.
                self.rollback_lag = 1
            self.terminated = True

        if self.ll_matcher.is_stopped():
            return True

        # TODO - Add jump decoding support in the future:
        #   self.ll_matcher.compute_ff_bytes() - this should always work
        #   self.ll_matcher.compute_ff_tokens() - this only works for
        #   "canonical" tokenizers
        # For conversion between the two, see
        # https://github.com/guidance-ai/llguidance/blob/main/docs/fast_forward.md
        consumed = self.ll_matcher.consume_tokens(tokens)
        self.check_error()
        return consumed

    def validate_tokens(self, tokens: list[int]) -> list[int]:
        """Return the prefix of `tokens` the parser accepts; does not advance."""
        if not tokens or self.ll_matcher.is_stopped():
            return []

        accepted = self.ll_matcher.validate_tokens(tokens)
        self.check_error()
        return tokens[:accepted]

    def rollback(self, num_tokens: int) -> None:
        """Undo the last `num_tokens` tokens, compensating for the EOS lag."""
        if num_tokens > 0:
            self.ll_matcher.rollback(num_tokens - self.rollback_lag)
            self.terminated = False
            self.rollback_lag = 0
            self.check_error()

    def fill_bitmask(self, bitmask: torch.Tensor, idx: int) -> None:
        # this will automatically return [EOS] mask if the matcher is stopped
        # or otherwise in an error state
        llguidance_torch.fill_next_token_bitmask(self.ll_matcher, bitmask, idx)
        self.check_error()

    def is_terminated(self) -> bool:
        return self.terminated

    def reset(self):
        # This method may be not needed anymore? TODO
        self.ll_matcher.reset()


def serialize_guidance_grammar(
    request_type: StructuredOutputOptions,
    grammar_spec: str | dict[str, Any],
    disable_any_whitespace: bool = False,
    disable_additional_properties: bool = False,
) -> str:
    """Translate a structured-output request into an llguidance grammar string."""

    def _schema_to_grammar(spec: str | dict[str, Any]) -> str:
        if disable_additional_properties:
            spec = process_for_additional_properties(spec)
        return llguidance.LLMatcher.grammar_from_json_schema(
            spec,
            defaults={
                "whitespace_flexible": not disable_any_whitespace,
            },
        )

    if request_type == StructuredOutputOptions.JSON:
        return _schema_to_grammar(grammar_spec)
    if request_type == StructuredOutputOptions.JSON_OBJECT:
        return llguidance.LLMatcher.grammar_from_json_schema(
            '{"type": "object"}',
            defaults={
                "whitespace_flexible": not disable_any_whitespace,
            },
        )

    if request_type == StructuredOutputOptions.REGEX:
        tp = "regex"
    elif request_type == StructuredOutputOptions.GRAMMAR:
        tp = "grammar"
    elif request_type == StructuredOutputOptions.CHOICE:
        tp = "choice"
    elif request_type == StructuredOutputOptions.STRUCTURAL_TAG:
        tag_spec = (
            json.loads(grammar_spec)
            if isinstance(grammar_spec, str)
            else grammar_spec
        )
        triggers: list[str] = tag_spec["triggers"]
        tags: list[llguidance.StructTag] = []
        for structure in tag_spec["structures"]:
            begin: str = structure["begin"]
            # Each structure's begin-text must start with one of the triggers.
            trig = next((t for t in triggers if begin.startswith(t)), None)
            if trig is None:
                raise ValueError(
                    f"Trigger {begin} not found in triggers {triggers}"
                )
            tags.append(
                llguidance.StructTag(
                    trigger=trig,
                    begin=structure["begin"],
                    grammar=_schema_to_grammar(structure["schema"]),
                    end=structure["end"],
                )
            )
        if not tags:
            raise ValueError("No structural tags found in the grammar spec.")
        return llguidance.StructTag.to_grammar(tags)
    else:
        logger.error(
            "Validation should have already occurred. Please file an issue."
        )
        raise ValueError(
            f"grammar is not of valid supported types. ({request_type!s})"
        )
    return llguidance.grammar_from(tp, grammar_spec)


def validate_guidance_grammar(
    sampling_params: SamplingParams, tokenizer: llguidance.LLTokenizer | None = None
) -> None:
    """Eagerly validate a request's grammar; raises ValueError if invalid."""
    tp, grm = get_structured_output_key(sampling_params.structured_outputs)
    guidance_grm = serialize_guidance_grammar(tp, grm)
    err = llguidance.LLMatcher.validate_grammar(guidance_grm, tokenizer)
    if err:
        raise ValueError(f"Grammar error: {err}")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/structured_output/request.py
vllm/v1/structured_output/request.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import dataclasses
import functools
import json

# FIX: import TimeoutError from the public package namespace instead of the
# private `concurrent.futures._base` module. It is the same class (an alias
# of the builtin TimeoutError since Python 3.8), but `_base` is not a stable
# import path.
from concurrent.futures import Future, TimeoutError
from typing import cast

from vllm.sampling_params import SamplingParams, StructuredOutputsParams
from vllm.v1.structured_output.backend_types import (
    StructuredOutputGrammar,
    StructuredOutputKey,
    StructuredOutputOptions,
)


@dataclasses.dataclass
class StructuredOutputRequest:
    """Tracks the (possibly still-compiling) grammar for one request."""

    params: StructuredOutputsParams
    # Holds the compilation Future until it resolves, then the grammar itself.
    _grammar: Future[StructuredOutputGrammar] | StructuredOutputGrammar | None = None
    reasoning_ended: bool | None = None

    @staticmethod
    def from_sampling_params(
        sampling_params: SamplingParams | None,
    ) -> "StructuredOutputRequest | None":
        """Build a request wrapper, or None when no constraints are set."""
        if sampling_params is None:
            return None
        params = sampling_params.structured_outputs
        if not params or params.all_constraints_none():
            return None
        return StructuredOutputRequest(params=params)

    def _check_grammar_completion(self) -> bool:
        """Swap a resolved Future for its grammar; returns readiness."""
        # NOTE: We have to lazy import to gate circular imports
        from vllm.v1.request import RequestStatus

        if isinstance(self._grammar, Future):
            try:
                # We will check whether the future is ready within 100 us
                self._grammar = self._grammar.result(timeout=0.0001)
                # NOTE(review): `status` is not a declared field of this
                # dataclass — presumably read by the scheduler once the
                # grammar is ready; confirm against callers.
                self.status = RequestStatus.WAITING
            except TimeoutError:
                return False
        return True

    @property
    def is_grammar_ready(self) -> bool:
        """True once the compiled grammar is available."""
        return self._check_grammar_completion()

    @property
    def grammar(self) -> StructuredOutputGrammar | None:
        """The compiled grammar, or None while compilation is in flight."""
        completed = self._check_grammar_completion()
        return (
            cast(StructuredOutputGrammar | None, self._grammar) if completed else None
        )

    @grammar.setter
    def grammar(
        self, grammar: StructuredOutputGrammar | Future[StructuredOutputGrammar]
    ) -> None:
        self._grammar = grammar

    @functools.cached_property
    def structured_output_key(self) -> StructuredOutputKey:
        return get_structured_output_key(self.params)


def get_structured_output_key(params: StructuredOutputsParams) -> StructuredOutputKey:
    """Map structured-output params to a hashable (option, spec-string) key.

    Raises:
        ValueError: if no constraint field is set on `params`.
    """
    if params.json is not None:
        # Non-string schemas are serialized so the key stays hashable.
        if not isinstance(params.json, str):
            json_str = json.dumps(params.json)
        else:
            json_str = params.json
        return StructuredOutputOptions.JSON, json_str
    if params.json_object:
        return StructuredOutputOptions.JSON_OBJECT, ""
    if params.regex is not None:
        return StructuredOutputOptions.REGEX, params.regex
    if params.choice is not None:
        if not isinstance(params.choice, str):
            json_str = json.dumps(params.choice)
        else:
            json_str = params.choice
        return StructuredOutputOptions.CHOICE, json_str
    if params.grammar is not None:
        return StructuredOutputOptions.GRAMMAR, params.grammar
    if params.structural_tag is not None:
        return StructuredOutputOptions.STRUCTURAL_TAG, params.structural_tag
    raise ValueError("No valid structured output parameter found")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/structured_output/backend_types.py
vllm/v1/structured_output/backend_types.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import enum
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import torch

    from vllm.config import VllmConfig
    from vllm.tokenizers import TokenizerLike
else:
    # Runtime placeholders keep the dataclass annotations resolvable without
    # importing the heavyweight config/tokenizer modules.
    VllmConfig = object
    TokenizerLike = object


class StructuredOutputOptions(enum.Enum):
    """The kinds of structured-output constraints a request may carry."""

    JSON = enum.auto()
    JSON_OBJECT = enum.auto()
    REGEX = enum.auto()
    GRAMMAR = enum.auto()
    CHOICE = enum.auto()
    STRUCTURAL_TAG = enum.auto()


# (option kind, serialized spec) — identifies one compiled grammar.
StructuredOutputKey = tuple[StructuredOutputOptions, str]


class StructuredOutputGrammar(ABC):
    """Request-level backend for structured output requests."""

    @abstractmethod
    def accept_tokens(self, request_id: str, tokens: list[int]) -> bool:
        """Advance the grammar with `tokens` for request `request_id`.

        Returns:
            True if all tokens were accepted, False otherwise.
        """

    @abstractmethod
    def validate_tokens(self, tokens: list[int]) -> list[int]:
        """Check `tokens` against the grammar without advancing it.

        Returns:
            The longest accepted prefix of `tokens`; empty when none
            are accepted.
        """

    @abstractmethod
    def rollback(self, num_tokens: int) -> None:
        """Undo the last `num_tokens` accepted tokens.

        Also reverts any counters of processed tokens.
        """

    @abstractmethod
    def fill_bitmask(self, bitmask: "torch.Tensor", batch_index: int) -> None:
        """Write the allowed-token bitmask into row `batch_index` of `bitmask`."""

    @abstractmethod
    def is_terminated(self) -> bool:
        """Whether structured-output generation has finished for this grammar."""

    @abstractmethod
    def reset(self):
        """Return the grammar to its initial state."""


@dataclass
class StructuredOutputBackend(ABC):
    """Engine-level backend for structured output requests."""

    vllm_config: VllmConfig
    tokenizer: TokenizerLike
    vocab_size: int

    @abstractmethod
    def compile_grammar(
        self, request_type: StructuredOutputOptions, grammar_spec: str
    ) -> StructuredOutputGrammar:
        """Compile `grammar_spec` of kind `request_type` into a grammar object."""

    @abstractmethod
    def allocate_token_bitmask(self, max_num_seqs: int) -> "torch.Tensor":
        """Allocate a token bitmask large enough for `max_num_seqs` sequences."""

    @abstractmethod
    def destroy(self):
        """Backend-specific cleanup."""
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/structured_output/backend_lm_format_enforcer.py
vllm/v1/structured_output/backend_lm_format_enforcer.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import ast
import json
from dataclasses import dataclass, field
from functools import lru_cache
from typing import TYPE_CHECKING

import torch
from transformers import PreTrainedTokenizerBase

from vllm.sampling_params import SamplingParams
from vllm.utils.import_utils import LazyLoader
from vllm.v1.structured_output.backend_types import (
    StructuredOutputBackend,
    StructuredOutputGrammar,
    StructuredOutputOptions,
)

if TYPE_CHECKING:
    import lmformatenforcer
    import lmformatenforcer.integrations.vllm as lmfe_vllm
else:
    lmformatenforcer = LazyLoader("lmformatenforcer", globals(), "lmformatenforcer")
    lmfe_vllm = LazyLoader(
        "lmformatenforcer.integrations.vllm",
        globals(),
        "lmformatenforcer.integrations.vllm",
    )


@lru_cache
def _cached_build_vllm_token_enforcer_tokenizer_data(
    tokenizer: PreTrainedTokenizerBase, vocab_size: int
) -> "lmfe_vllm.TokenEnforcerTokenizerData":
    """Build (and memoize) LMFE tokenizer data; construction is expensive."""
    return lmfe_vllm.build_vllm_token_enforcer_tokenizer_data(
        tokenizer, use_bitmask=True, vocab_size=vocab_size
    )


@dataclass
class LMFormatEnforcerGrammar(StructuredOutputGrammar):
    """Request-level grammar state backed by an LMFE TokenEnforcer."""

    token_enforcer: lmformatenforcer.TokenEnforcer
    current_tokens_prefix: list[int] = field(default_factory=list)

    def accept_tokens(self, request_id: str, tokens: list[int]) -> bool:
        """Append `tokens` to the accepted prefix iff every one is allowed."""
        original_len = len(self.current_tokens_prefix)
        for token in tokens:
            if not self.token_enforcer.get_allowed_tokens(
                self.current_tokens_prefix
            ).is_token_allowed(token):
                # Rollback partial updates to ensure atomicity.
                del self.current_tokens_prefix[original_len:]
                return False
            self.current_tokens_prefix.append(token)
        return True

    def validate_tokens(self, tokens: list[int]) -> list[int]:
        """Return the longest allowed prefix of `tokens`; does not advance."""
        for prefix_length in range(len(tokens)):
            prefix = tokens[:prefix_length]
            next_token = tokens[prefix_length]
            if not self.token_enforcer.get_allowed_tokens(
                self.current_tokens_prefix + prefix
            ).is_token_allowed(next_token):
                break
        else:
            return tokens
        return tokens[:prefix_length]

    def rollback(self, num_tokens: int) -> None:
        """Drop the last `num_tokens` accepted tokens.

        FIX: guard against num_tokens == 0. The unguarded slice
        `lst[:-0]` evaluates to `lst[:0]` (an empty list), so a zero-token
        rollback used to wipe the entire accepted prefix. The guidance
        backend applies the same `num_tokens > 0` guard.
        """
        if num_tokens > 0:
            self.current_tokens_prefix = self.current_tokens_prefix[:-num_tokens]

    def fill_bitmask(self, bitmask: torch.Tensor, batch_index: int) -> None:
        """Write this request's allowed-token bitmask into `bitmask`."""
        allowed_tokens = self.token_enforcer.get_allowed_tokens(
            self.current_tokens_prefix
        )
        bitmask[batch_index] = allowed_tokens.allowed_tokens

    def is_terminated(self) -> bool:
        # We are considered terminated if the prefix ends with eos_token_id
        return (
            len(self.current_tokens_prefix) > 0
            and self.current_tokens_prefix[-1] == self.token_enforcer.eos_token_id
        )

    def reset(self):
        """Forget all accepted tokens."""
        self.current_tokens_prefix = []


@dataclass
class LMFormatEnforcerBackend(StructuredOutputBackend):
    """Engine-level structured-output backend using LM Format Enforcer."""

    def __post_init__(self):
        self.tokenizer_data = _cached_build_vllm_token_enforcer_tokenizer_data(
            self.tokenizer, self.vocab_size
        )

    def compile_grammar(
        self, request_type: StructuredOutputOptions, grammar_spec: str
    ) -> StructuredOutputGrammar:
        """Build the character-level parser for the request and wrap it."""
        character_level_parser: lmformatenforcer.CharacterLevelParser
        if request_type == StructuredOutputOptions.JSON:
            spec_dict = json.loads(grammar_spec)
            character_level_parser = lmformatenforcer.JsonSchemaParser(spec_dict)
        elif request_type == StructuredOutputOptions.JSON_OBJECT:
            # A None schema accepts any JSON object.
            character_level_parser = lmformatenforcer.JsonSchemaParser(None)
        elif request_type == StructuredOutputOptions.REGEX:
            character_level_parser = lmformatenforcer.RegexParser(grammar_spec)
        elif request_type == StructuredOutputOptions.CHOICE:
            choices = ast.literal_eval(grammar_spec)
            character_level_parser = lmformatenforcer.UnionParser(
                [lmformatenforcer.StringParser(choice) for choice in choices]
            )
        else:
            raise ValueError(
                f"Invalid request type for LM Format Enforcer backend({request_type!s})"
            )
        max_rollback_tokens = (
            self.vllm_config.speculative_config.num_speculative_tokens
            if self.vllm_config.speculative_config is not None
            else 0
        )
        if max_rollback_tokens > 0:
            raise ValueError(
                "LM Format Enforcer backend does not support speculative tokens"
            )
        token_enforcer = lmformatenforcer.TokenEnforcer(
            tokenizer_data=self.tokenizer_data,
            parser=character_level_parser,
        )
        return LMFormatEnforcerGrammar(token_enforcer)

    def allocate_token_bitmask(self, max_num_seqs: int) -> torch.Tensor:
        # 32 vocab entries per int32 word; -1 (all bits set) = all allowed.
        return torch.full(
            (max_num_seqs, (self.vocab_size + 31) // 32),
            -1,
            dtype=torch.int32,
            pin_memory=torch.cuda.is_available(),
        )

    def destroy(self):
        pass


def validate_structured_output_request_lm_format_enforcer(params: SamplingParams):
    """Reject structured-output settings this backend cannot honor."""
    if params.structured_outputs is None:
        return

    so_params = params.structured_outputs
    if so_params.regex:
        return
    elif so_params.json:
        if isinstance(so_params.json, str):
            try:
                # make sure schema is valid json
                json.loads(so_params.json)
            except json.JSONDecodeError as e:
                raise ValueError("Invalid JSON grammar specification.") from e
        else:
            try:
                json.dumps(so_params.json)
            except Exception as e:
                raise ValueError(
                    f"Error serializing structured outputs jsonschema: {e}"
                ) from e
        return
    elif so_params.choice:
        return
    elif so_params.grammar:
        raise ValueError(
            "LM Format Enforcer structured outputs backend "
            "does not support grammar specifications"
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/structured_output/utils.py
vllm/v1/structured_output/utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from __future__ import annotations import hashlib import importlib.metadata import os from typing import TYPE_CHECKING import numpy as np import regex as re import torch from cachetools import LRUCache from diskcache import Cache import vllm.envs as envs from vllm.logger import init_logger from vllm.utils.import_utils import LazyLoader from vllm.v1.core.sched.output import GrammarOutput, SchedulerOutput if TYPE_CHECKING: import outlines_core as oc import transformers.convert_slow_tokenizer as convert_slow_tokenizer import transformers.file_utils as file_utils import xgrammar as xgr from vllm.tokenizers import TokenizerLike from vllm.v1.worker.gpu_input_batch import InputBatch else: xgr = LazyLoader("xgr", globals(), "xgrammar") oc = LazyLoader("oc", globals(), "outlines_core") file_utils = LazyLoader("file_utils", globals(), "transformers.file_utils") convert_slow_tokenizer = LazyLoader( "convert_slow_tokenizer", globals(), "transformers.convert_slow_tokenizer" ) TokenizerLike = object SchedulerOutput = object InputBatch = object logger = init_logger(__name__) CACHE = None def apply_grammar_bitmask( scheduler_output: SchedulerOutput, grammar_output: GrammarOutput, input_batch: InputBatch, logits: torch.Tensor, ) -> None: """ Apply grammar bitmask to output logits of the model with xgrammar function. Args: scheduler_output (SchedulerOutput): The result of engine scheduling. input_batch (InputBatch): The input of model runner. logits (torch.Tensor): The output logits of model forward. """ # Serialization of np.ndarray is much more efficient than a tensor, # so we receive it in that format. grammar_bitmask = grammar_output.grammar_bitmask # We receive the structured output bitmask from the scheduler, # compacted to contain bitmasks only for structured output requests. 
# The order of the requests in the bitmask is not guaranteed to be the # same as the order of the requests in the gpu runner's batch. We need # to sort the bitmask to match the order of the requests used here. # Get the batch indices of the structured output requests. # Keep track of the number of speculative tokens scheduled for every # request in the batch, as the logit indices are offset by this amount. struct_out_req_batch_indices: dict[str, int] = {} cumulative_offset = 0 seq = sorted(input_batch.req_id_to_index.items(), key=lambda x: x[1]) for req_id, batch_index in seq: logit_index = batch_index + cumulative_offset cumulative_offset += len( scheduler_output.scheduled_spec_decode_tokens.get(req_id, []) ) if req_id in grammar_output.structured_output_request_ids: struct_out_req_batch_indices[req_id] = logit_index out_indices = [] # Reorder the bitmask to match the order of the requests in the batch. sorted_bitmask = np.full( shape=(logits.shape[0], grammar_bitmask.shape[1]), fill_value=-1, dtype=grammar_bitmask.dtype, ) cumulative_index = 0 for req_id in grammar_output.structured_output_request_ids: num_spec_tokens = len( scheduler_output.scheduled_spec_decode_tokens.get(req_id, []) ) if req_id in struct_out_req_batch_indices: logit_index = struct_out_req_batch_indices[req_id] for i in range(1 + num_spec_tokens): sorted_bitmask[logit_index + i] = grammar_bitmask[cumulative_index + i] out_indices.append(logit_index + i) cumulative_index += 1 + num_spec_tokens # Copy async to device as tensor. grammar_bitmask = torch.from_numpy(sorted_bitmask).to( logits.device, non_blocking=True ) # If the length of out indices and the logits have the same shape # we don't need to pass indices to the kernel, # since the bitmask is already aligned with the logits. skip_out_indices = len(out_indices) == logits.shape[0] index_tensor = None if not skip_out_indices: # xgrammar expects a python list of indices but it will actually work with # a tensor. 
If we copy the tensor ourselves here we can do it in a non_blocking # manner and there should be no cpu sync within xgrammar. index_tensor = torch.tensor( out_indices, dtype=torch.int32, device="cpu", pin_memory=True ) index_tensor = index_tensor.to(logits.device, non_blocking=True) xgr.apply_token_bitmask_inplace(logits, grammar_bitmask, indices=index_tensor) class OutlinesVocabulary: """ Wrapper class for `outlines_core.Vocabulary`, which allows us to store a hash with the vocabulary """ def __init__(self, vocabulary: oc.Vocabulary) -> None: # Actual vocabulary object self.inner = vocabulary # Have to do abs(hash()) because python hashes can # be negative, and we are using hash as a cache key. hex_str = hashlib.sha256(vocabulary.__repr__().encode("utf-8")).hexdigest() hash_int = int(hex_str, 16) self._hash = hash_int def get_outlines_cache_path() -> str: """Get the context object that contains previously-computed return values""" outlines_cache_dir = os.getenv("OUTLINES_CACHE_DIR") xdg_cache_home = os.getenv("XDG_CACHE_HOME") home_dir = os.path.expanduser("~") if outlines_cache_dir: # OUTLINES_CACHE_DIR takes precedence return outlines_cache_dir elif xdg_cache_home: return os.path.join(xdg_cache_home, ".cache", "outlines") # If homedir is "/", we may be inside a container, and thus writing to # root would be problematic, so we fall back to using a tempfile. # Also validate the path exists, since os.path.expanduser does # not guarantee existence. elif os.path.isdir(home_dir) and home_dir != "/": # Default Unix fallback: ~/.cache/outlines return os.path.join(home_dir, ".cache", "outlines") else: import tempfile # home_dir may be / inside a docker container without existing user tempdir = tempfile.gettempdir() return os.path.join(tempdir, ".cache", "outlines") def get_outlines_cache(): """Get the Cache instance to be used for index caching""" cache_dir = get_outlines_cache_path() if envs.VLLM_V1_USE_OUTLINES_CACHE: logger.warning( "Enabling outlines cache. 
This is an unbounded on-disk "
            "cache. It may consume a lot of disk space and should "
            "not be used with untrusted clients."
        )
        cache = Cache(cache_dir, eviction_policy="none", cull_limit=0)
        outlines_version = importlib.metadata.version("outlines_core")

        # Invalidate the whole on-disk cache whenever outlines_core changes
        # version, since cached indexes may not be compatible across versions.
        cached_version = cache.get("__version__", None)
        if cached_version != outlines_version:
            cache.clear()
        cache.set("__version__", outlines_version)
        return cache
    else:
        return LRUCache(maxsize=128)


# Matches Llama-style byte-fallback tokens such as "<0x0A>".
re_llama_byte_token = re.compile(r"^<0x[0-9A-F]{2}>$")
# Matches short strings containing the Unicode replacement character
# (U+FFFD), i.e. tokens whose decoded text contains invalid UTF-8.
re_replacement_seq = re.compile(r"^.{0,6}�+.{0,6}$")


def _reduced_vocabulary(
    tokenizer: TokenizerLike,
    eos_token_id: int,
) -> dict[bytes, list[int]]:
    """Create a map from vocabulary tokens to lists of equivalent token ids.

    Returns:
        A Dict of token string -> equivalent token ids
    """
    # Inverse of the GPT-2 byte<->unicode table, used to map printable
    # surrogate characters back to their raw byte values.
    unicode_to_bytes = {
        v: k for k, v in convert_slow_tokenizer.bytes_to_unicode().items()
    }

    def convert_token_to_string(token: str) -> str:
        string = tokenizer.convert_tokens_to_string([token])

        # A hack to handle missing spaces to HF's Llama tokenizers
        if (
            type(token) is str
            and token.startswith(file_utils.SPIECE_UNDERLINE)
            or token == "<0x20>"
        ):
            return " " + string

        return string

    vocabulary: dict[bytes, list[int]] = {}
    # Tokens whose string form is empty; collected but not returned.
    empty_token_ids: list[int] = []
    for token, token_idx in tokenizer.get_vocab().items():
        # Special tokens are never part of the guided vocabulary.
        if token in tokenizer.all_special_tokens:
            continue

        token_str = convert_token_to_string(token)
        if token_str:
            if isinstance(token, (bytes, bytearray)):
                # For BPE tokenizers where tokens are stored as bytes.

                # safe to ignore since token_str is of type (bytearray, bytes)
                # by this point.
                token_bytes = bytes(token_str)  # type: ignore[arg-type]

            elif "\ufffd" in token_str and not re_replacement_seq.match(token_str):
                # Handle tokens with invalid UTF-8 sequences.
                if re_llama_byte_token.match(token):
                    # Llama-like tokenizers use <0xXX> for incomplete sequences.
                    token_bytes = bytes([int(token[3:5], 16)])
                else:
                    # GPT2 tokenizers: map each byte back using unicode_to_bytes
                    byte_vals = [unicode_to_bytes.get(c) for c in token]
                    if None in byte_vals:
                        raise RuntimeError(
                            f"Cannot convert token `{token}`"
                            f" ({token_idx}) to bytes: {token_str}"
                        )
                    # safe to ignore, since if None in byte_vals,
                    # an error is thrown.
                    token_bytes = bytes(byte_vals)  # type: ignore[arg-type]
            else:
                token_bytes = token_str.encode("utf-8")

            # The EOS token is handled separately by outlines_core's
            # Vocabulary constructor, so it is excluded here.
            if token_idx != eos_token_id:
                vocabulary.setdefault(token_bytes, []).append(token_idx)
        else:
            empty_token_ids.append(token_idx)

    return vocabulary


def get_outlines_vocabulary(tokenizer: TokenizerLike) -> oc.Vocabulary:
    """Get the `Vocabulary` object for a given tokenizer."""
    # Memoized on the tokenizer instance itself so repeated requests
    # reuse the same (expensive to build) vocabulary.
    if hasattr(tokenizer, "_outlines_vocabulary"):
        return tokenizer._outlines_vocabulary  # type: ignore

    try:
        if (
            hasattr(
                tokenizer,
                "eos_token_id",
            )
            and tokenizer.eos_token_id is not None
        ):
            eos_token_id = tokenizer.eos_token_id
        else:
            raise ValueError(
                f"Error during structured outputs setup for outlines: Tokenizer ({type(tokenizer)}) has no `eos_token_id` property, but `eos_token_id` is required for structured outputs to work properly."  # noqa: E501
            )

        reduced_vocab = _reduced_vocabulary(
            tokenizer,
            eos_token_id,  # type: ignore
        )
        vocabulary = OutlinesVocabulary(oc.Vocabulary(eos_token_id, reduced_vocab))
        tokenizer._outlines_vocabulary = vocabulary  # type: ignore

        return vocabulary
    except AttributeError as e:
        raise ValueError(
            f"Cannot get the vocabulary of the tokenizer "
            f"({type(tokenizer)}). The tokenizer should have a "
            "get_vocab method."
        ) from e


def grammar_is_likely_lark(grammar_str: str) -> bool:
    """
    Check if grammar appears to use Lark syntax.

    Args:
        grammar_str: Input grammar string

    Returns:
        bool: True if grammar appears to be in Lark format, False otherwise

    Examples:
        >>> grammar_is_likely_lark("rule: 'abc'")
        True
        >>> grammar_is_likely_lark("rule ::= 'abc'")
        False
    """
    if not grammar_str or not isinstance(grammar_str, str):
        return False

    for line in grammar_str.split("\n"):
        # Remove both comment styles
        line = re.sub(r"(#|//).*$", "", line).strip()
        if not line:
            continue

        # Look for EBNF rule definition; "::=" is EBNF-only syntax.
        if "::=" in line:
            return False

    return True


def convert_lark_to_ebnf(grammar_str: str) -> str:
    """
    Convert a Lark grammar string to EBNF format.

    EBNF reference:
    https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md
    Lark grammar reference:
    https://lark-parser.readthedocs.io/en/latest/grammar.html

    Args:
        grammar_str: Input grammar in Lark format

    Returns:
        str: Converted grammar in EBNF format

    Examples:
        >>> print(convert_lark_to_ebnf("rule: 'hello'"))
        root ::= rule
        rule ::= "hello"
    """
    if not isinstance(grammar_str, str):
        raise ValueError(f"Grammar must be a string, got {type(grammar_str)}")
    if not grammar_str.strip():
        raise ValueError("Grammar string cannot be empty")

    defined_rules = set()
    referenced_rules = set()
    output_lines = []

    def clean_line(line: str) -> str:
        """Remove comments and whitespace from line."""
        return re.sub(r"(#|//).*$", "", line).strip()

    def check_quotes(text: str, rule_name: str, line_num: int) -> None:
        """Validate quote matching in text."""
        if text.count("'") % 2 != 0 or text.count('"') % 2 != 0:
            raise ValueError(f"Mismatched quotes in {rule_name} on line {line_num}")

    def extract_references(text: str) -> set[str]:
        """Extract rule references from text."""
        # Remove quoted strings and special characters
        text = re.sub(r'"[^"]*"', "", text)
        text = re.sub(r"[+*?()|\[\]{}]", " ", text)
        return set(re.findall(r"\b[a-zA-Z_][a-zA-Z0-9_]*\b", text))

    # First pass: Find root rule and validate rule definitions
    lines = [clean_line(line) for line in grammar_str.split("\n")]
    first_rule = None

    for line_num, line in enumerate(lines, 1):
        if not line or line.startswith("|"):
            continue

        if ":" in line:
            try:
                name = line.split(":", 1)[0].strip().strip("?")
                defined_rules.add(name)
                if first_rule is None:
                    first_rule = name
                # An explicit "start" rule always wins as the root.
                if name == "start":
                    first_rule = "start"
            except IndexError as e:
                raise ValueError(
                    f"Invalid rule format on line {line_num}. "
                    "Expected 'rule_name: definition'"
                ) from e

    if not defined_rules:
        raise ValueError("No valid rules found in grammar")

    # Add root rule
    output_lines.append(f"root ::= {first_rule}")

    # Second pass: Process rule definitions and alternatives
    current_rule = None
    current_definition = []

    for line_num, line in enumerate(lines, 1):
        if not line:
            continue

        try:
            if ":" in line and not line.startswith("|"):
                # Save previous rule if exists
                if current_rule:
                    output_lines.append(
                        f"{current_rule} ::= {' | '.join(current_definition)}"
                    )

                # Process new rule
                name, definition = line.split(":", 1)
                current_rule = name.strip().strip("?")

                check_quotes(definition, f"rule '{current_rule}'", line_num)
                # Lark uses single-quoted terminals; EBNF uses double quotes.
                definition = re.sub(r"'([^']*)'", r'"\1"', definition)
                referenced_rules.update(extract_references(definition))
                current_definition = [definition.strip()]

            elif line.startswith("|"):
                if not current_rule:
                    raise ValueError(
                        f"Alternative '|' on line {line_num} "
                        "without a preceding rule definition"
                    )

                alt_def = line[1:].strip()
                check_quotes(
                    alt_def, f"alternative for rule '{current_rule}'", line_num
                )
                alt_def = re.sub(r"'([^']*)'", r'"\1"', alt_def)
                referenced_rules.update(extract_references(alt_def))
                current_definition.append(alt_def)

        except ValueError as e:
            raise ValueError(f"Error on line {line_num}: {str(e)}") from e

    # Add final rule if exists
    if current_rule:
        output_lines.append(f"{current_rule} ::= {' | '.join(current_definition)}")

    # Validate all rules are defined
    undefined_rules = referenced_rules - defined_rules - {"root"}
    if undefined_rules:
        raise ValueError(
            f"Referenced rules are not defined: {', '.join(sorted(undefined_rules))}"
        )

    return "\n".join(output_lines)


def choice_as_grammar(choice: list[str]) -> str:
    """Build an EBNF grammar whose root matches exactly one of *choice*."""

    def escape_ebnf_string(s: str) -> str:
        """Escape special characters in a EBNF string."""
        # Escape double quotes and backslashes
        return re.sub(r'(["\\])', r"\\\1", s)

    escaped_choices = (escape_ebnf_string(c) for c in choice)
    grammar = "root ::= " + " | ".join(f'"{c}"' for c in escaped_choices)

    return grammar
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/structured_output/backend_outlines.py
vllm/v1/structured_output/backend_outlines.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright 2025-present the Outlines developers
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from __future__ import annotations

import ast
import importlib
import json
import sys
from dataclasses import dataclass, field
from typing import TYPE_CHECKING

import torch
from regex import escape as regex_escape

from vllm.sampling_params import SamplingParams
from vllm.utils.import_utils import LazyLoader
from vllm.v1.structured_output.backend_types import (
    StructuredOutputBackend,
    StructuredOutputGrammar,
    StructuredOutputOptions,
)
from vllm.v1.structured_output.utils import (
    OutlinesVocabulary,
    get_outlines_cache,
    get_outlines_vocabulary,
)

if TYPE_CHECKING:
    import outlines_core as oc
    import outlines_core.json_schema as json_schema
else:
    oc = LazyLoader("oc", globals(), "outlines_core")
    json_schema = LazyLoader("json_schema", globals(), "outlines_core.json_schema")

# Python 3.11+ sre_parse and sre_constants
# are deprecated, so we must import them from re
if sys.version_info >= (3, 11):
    # Hack to get around pre-commit regex module rule
    # because going through re is the only way to get sre_parse
    # and sre_constants in Python 3.11+
    _re = importlib.import_module("re")
    sre_parse = _re._parser
    sre_constants = _re._constants
else:
    import sre_constants
    import sre_parse


@dataclass
class OutlinesBackend(StructuredOutputBackend):
    """Structured-output backend built on outlines_core DFA guides."""

    def __post_init__(self):
        self.vocabulary = get_outlines_vocabulary(self.tokenizer)
        self.cache = get_outlines_cache()

    def _compile_index(
        self, regex_string: str, vocabulary: OutlinesVocabulary
    ) -> oc.Index:
        """Compile (or fetch from cache) the DFA index for *regex_string*."""
        # The vocabulary hash is part of the key so indexes are never
        # shared across different tokenizers.
        cache_key = f"{vocabulary._hash}_{regex_string}"
        if cache_key in self.cache:
            return self.cache[cache_key]

        index = oc.Index(regex_string, vocabulary.inner)
        self.cache[cache_key] = index

        return index

    def compile_grammar(
        self, request_type: StructuredOutputOptions, grammar_spec: str
    ) -> StructuredOutputGrammar:
        """Lower a request spec (JSON schema / regex / choice) to a grammar."""
        if request_type == StructuredOutputOptions.JSON:
            regex = json_schema.build_regex_from_schema(grammar_spec)
        elif request_type == StructuredOutputOptions.REGEX:
            regex = grammar_spec
        elif request_type == StructuredOutputOptions.CHOICE:
            # grammar_spec is a Python-literal list of strings here.
            choices = ast.literal_eval(grammar_spec)
            choices = [regex_escape(c) for c in choices]
            regex = "(" + "|".join(choices) + ")"
        else:
            raise ValueError(
                f"Invalid request type for Outlines backend ({request_type!s})"
            )
        index = self._compile_index(regex, self.vocabulary)
        max_rollback_tokens = (
            self.vllm_config.speculative_config.num_speculative_tokens
            if self.vllm_config.speculative_config is not None
            else 0
        )
        return OutlinesGrammar(
            vocab_size=self.vocab_size,
            guide=oc.Guide(index, max_rollback=max_rollback_tokens),
        )

    def allocate_token_bitmask(self, max_num_seqs: int) -> torch.Tensor:
        # One int32 holds 32 vocabulary bits; -1 means "all tokens allowed".
        return torch.full(
            (max_num_seqs, (self.vocab_size + 31) // 32),
            -1,
            dtype=torch.int32,
            pin_memory=torch.cuda.is_available(),
        )

    def destroy(self):
        pass


@dataclass
class OutlinesGrammar(StructuredOutputGrammar):
    """Per-request FSM state wrapping an outlines_core Guide."""

    vocab_size: int
    guide: oc.Guide = field(hash=False)
    num_processed_tokens: int = field(
        default_factory=lambda: 0, repr=False, hash=False, init=False
    )

    # outlines_core signals done on DFA accept; vLLM expects done after EOS.
    # We delay the finished flag by one step so EOS can still be emitted.
    _prev_finished: bool = field(default=False, init=False, repr=False, hash=False)

    def accept_tokens(self, request_id: str, tokens: list[int]) -> bool:
        """Accepts a list of tokens and advances the FSM.

        Returns True if the FSM was advanced successfully.
        Returns False if the FSM failed to advance.
        """
        if self.guide.accepts_tokens(tokens):
            # Advance cannot fail because we checked Guide.accepts_tokens()
            for t in tokens:
                self.guide.advance(t)
                self.num_processed_tokens += 1
            return True
        return False

    def rollback(self, num_tokens: int) -> None:
        """Undo the last *num_tokens* advances (speculative decoding)."""
        self.guide.rollback_state(num_tokens)
        self.num_processed_tokens -= num_tokens

    def validate_tokens(self, tokens: list[int]) -> list[int]:
        """Return the longest prefix of *tokens* the FSM would accept."""
        accepted: list[int] = []
        for tok in tokens:
            accepted.append(tok)
            if not self.guide.accepts_tokens(accepted):
                accepted.pop()
                break
        return accepted

    def fill_bitmask(self, bitmask: torch.Tensor, idx: int) -> None:
        # Writes the allowed-token mask directly into row idx's buffer.
        mask = bitmask[idx]
        self.guide.write_mask_into(mask.data_ptr(), mask.numel(), mask.element_size())

    def is_terminated(self) -> bool:
        # Report termination one step late (see _prev_finished above).
        curr = self.guide.is_finished()
        prev = self._prev_finished
        self._prev_finished = curr
        return prev

    def reset(self):
        self.num_processed_tokens = 0
        self._prev_finished = False
        self.guide.reset()


def validate_structured_output_request_outlines(params: SamplingParams):
    """Validate a request up-front; raises ValueError on unsupported specs."""
    if params.structured_outputs is None:
        return

    so_params = params.structured_outputs

    if so_params.regex:
        validate_regex_is_buildable(so_params.regex)
    elif so_params.json:
        if isinstance(so_params.json, str):
            try:
                # make sure schema is valid json
                json.loads(so_params.json)
                schema = so_params.json
            except json.JSONDecodeError as e:
                raise ValueError("Invalid JSON grammar specification.") from e
        else:
            try:
                schema = json.dumps(so_params.json)
            except Exception as e:
                raise ValueError(
                    f"Error serializing structured outputs jsonschema: {e}"
                ) from e
        pattern = json_schema.build_regex_from_schema(schema)
        validate_regex_is_buildable(pattern)
    elif so_params.choice:
        choices = [regex_escape(str(choice)) for choice in so_params.choice]
        regex = "(" + "|".join(choices) + ")"
        validate_regex_is_buildable(regex)
    elif so_params.grammar:
        raise ValueError(
            "Outlines structured outputs backend "
            "does not support grammar specifications"
        )


def _prefix_needs_context(parsed) -> bool:
    """Return True if there's a look-around/anchor before any consumer."""

    def subpattern_consumes(parsed) -> bool:
        """Return True if subpattern can consume at least one character."""
        tokens = parsed.data if hasattr(parsed, "data") else parsed
        for ttype, tval in tokens:
            # literal, character class, or dot always consumes
            if ttype in (sre_parse.LITERAL, sre_parse.IN, sre_parse.ANY):
                return True
            # quantified subpattern: check inner pattern
            elif ttype == sre_parse.MAX_REPEAT:
                _, mx, sub = tval
                if mx != 0 and subpattern_consumes(sub):
                    return True
            # alternation: if any branch consumes, the whole does
            elif ttype == sre_parse.BRANCH:
                _, branches = tval
                if any(subpattern_consumes(br) for br in branches):
                    return True
            # grouped subpattern: recurse into its contents
            elif ttype == sre_parse.SUBPATTERN and subpattern_consumes(tval[3]):
                return True
        # No consumers, return False
        return False

    tokens = parsed.data if hasattr(parsed, "data") else parsed
    for ttype, tval in tokens:
        # Direct anchors or look-around
        if ttype == sre_parse.AT or ttype in (
            sre_constants.ASSERT,
            sre_constants.ASSERT_NOT,
        ):
            return True

        # Nested subpattern: check
        if ttype == sre_parse.SUBPATTERN:
            # tval: (group, add_flags, del_flags, subpattern)
            if _prefix_needs_context(tval[3]):
                return True
            if subpattern_consumes(tval[3]):
                return False

        # if any branch has a prefix anchor => True,
        # else if at least one branch consumes => prefix ends => False
        elif ttype == sre_parse.BRANCH:
            saw_consumer = False
            for br in tval[1]:
                if _prefix_needs_context(br):
                    return True
                if subpattern_consumes(br):
                    saw_consumer = True
            if saw_consumer:
                return False

        # Immediate consumer tokens
        elif ttype in (sre_parse.LITERAL, sre_parse.IN, sre_parse.ANY):
            return False

        # if subpattern has anchor => True, if it can consume => stop
        elif ttype == sre_parse.MAX_REPEAT:
            if _prefix_needs_context(tval[2]):
                return True
            if subpattern_consumes(tval[2]):
                return False

    return False


def _check_unsupported(parsed) -> None:
    """Check for regex features unsupported by regex-automata"""
    tokens = parsed.data if hasattr(parsed, "data") else parsed
    for ttype, tval in tokens:
        # backreference
        if ttype in (sre_parse.GROUPREF, sre_parse.GROUPREF_EXISTS):
            raise ValueError("Backreferences are unsupported.")

        # look-around assertion
        elif ttype in (sre_constants.ASSERT, sre_constants.ASSERT_NOT):
            raise ValueError("Look-Around assertion are unsupported.")

        # unicode word boundaries
        elif ttype == sre_parse.AT:
            if tval in (sre_constants.AT_BOUNDARY, sre_constants.AT_NON_BOUNDARY):
                raise ValueError("Unicode word boundaries are unsupported.")

        elif ttype == sre_parse.BRANCH:
            # tval is (None, branches)
            for branch in tval[1]:
                _check_unsupported(branch)

        # tval is (min, max, subpattern)
        elif ttype == sre_parse.MAX_REPEAT:
            _check_unsupported(tval[2])


def validate_regex_is_buildable(pattern: str) -> None:
    """
    Validates that the input regex is not using unsupported features
    of the `regex-automata` crate (outlines_core regex engine) and has a
    universal start state.
    definition of universal start state used can be found at:
    https://docs.rs/regex-automata/latest/regex_automata/dfa/trait.Automaton.html#method.universal_start_state
    """
    try:
        parsed = sre_parse.parse(pattern)
    except sre_constants.error as e:
        raise ValueError(f"Error parsing regex: {e}") from e

    try:
        _check_unsupported(parsed)
    except ValueError as e:
        raise ValueError(
            f"Regex uses unsupported feature for structured outputs: {e}. "
            "Only basic matching constructs are supported—lookarounds, "
            "backreferences, and unicode boundaries are not."
        ) from e

    if _prefix_needs_context(parsed):
        raise ValueError(
            "Regex does not have a anchored universal start state"
            "This means that the Regex uses anchors (^) or look-arounds "
            "in a way which requires context before any token is matched."
            "structured outputs needs regexes that can match without needing "
            "that context. Try rewriting the pattern without using these "
            f"constructs. Pattern:\n{pattern}"
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/structured_output/__init__.py
vllm/v1/structured_output/__init__.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import itertools
import multiprocessing
from collections.abc import Iterable
from concurrent.futures import Future, ThreadPoolExecutor
from typing import TYPE_CHECKING

from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.reasoning import ReasoningParserManager
from vllm.tokenizers import cached_tokenizer_from_config
from vllm.utils.import_utils import LazyLoader
from vllm.v1.structured_output.backend_guidance import GuidanceBackend
from vllm.v1.structured_output.backend_types import (
    StructuredOutputBackend,
    StructuredOutputGrammar,
)
from vllm.v1.structured_output.backend_xgrammar import XgrammarBackend

if TYPE_CHECKING:
    import numpy as np
    import numpy.typing as npt
    import torch

    from vllm.reasoning import ReasoningParser
    from vllm.v1.request import Request
else:
    torch = LazyLoader("torch", globals(), "torch")
    ReasoningParser = object
    Request = object

logger = init_logger(__name__)


class StructuredOutputManager:
    """Engine-level manager for structured output requests."""

    def __init__(self, vllm_config: VllmConfig):
        self.backend: StructuredOutputBackend | None = None
        self.reasoner: ReasoningParser | None = None
        self.vllm_config = vllm_config

        # When in external_launcher mode, async grammar compilation causes deadlocks
        # due to external_launcher mode having a scheduler for each TP rank.
        # Async grammar compilation causes the WAITING_FOR_FSM → WAITING transition to
        # happen at different times on different TP ranks,
        # breaking the determinism assumption that external_launcher relies on.
        self._use_async_grammar_compilation = (
            vllm_config.parallel_config.distributed_executor_backend
            != "external_launcher"
        )

        self._grammar_bitmask: torch.Tensor | None = None
        self._full_mask = torch.tensor(-1, dtype=torch.int32)

        max_batch_size = self.vllm_config.scheduler_config.max_num_seqs
        self.fill_bitmask_parallel_threshold = 128
        if self.fill_bitmask_parallel_threshold < max_batch_size:
            self.fill_bitmask_parallel_batch_size = 16
            # Use:
            # - at least 1 CPU
            # - at most half the number of CPUs or 8, whichever is less
            max_workers = max(1, min(multiprocessing.cpu_count() // 2, 8))
            self.executor_for_fillmask = ThreadPoolExecutor(max_workers=max_workers)

        if not self.vllm_config.model_config.skip_tokenizer_init:
            # The default max_workers if not specified is the number of
            # CPUs * 5, which is way too high since these tasks are CPU-bound,
            # not I/O bound. We also know we would never dominate CPU usage
            # with just grammar compilation, so we set it to half the number
            # of CPUs.
            max_workers = max(1, (multiprocessing.cpu_count() + 1) // 2)
            self.executor = ThreadPoolExecutor(max_workers=max_workers)
            self.tokenizer = cached_tokenizer_from_config(
                model_config=self.vllm_config.model_config
            )
            reasoning_parser = (
                self.vllm_config.structured_outputs_config.reasoning_parser
            )
            reasoning_parser_plugin = (
                self.vllm_config.structured_outputs_config.reasoning_parser_plugin
            )
            if reasoning_parser_plugin and len(reasoning_parser_plugin) > 3:
                ReasoningParserManager.import_reasoning_parser(reasoning_parser_plugin)
                reasoning_parser = (
                    self.vllm_config.structured_outputs_config.reasoning_parser
                )

            if reasoning_parser:
                reasoner_cls = ReasoningParserManager.get_reasoning_parser(
                    reasoning_parser
                )
                self.reasoner = reasoner_cls(tokenizer=self.tokenizer)

        self.enable_in_reasoning = (
            self.vllm_config.structured_outputs_config.enable_in_reasoning
        )

    def grammar_init(self, request: Request) -> None:
        """Kick off (possibly async) grammar compilation for *request*."""
        if request.structured_output_request is None:
            return

        if TYPE_CHECKING:
            assert (
                request.sampling_params is not None
                and request.sampling_params.structured_outputs is not None
            )

        # Initialize the backend the first time it is needed.
        #
        # NOTE: We only support a single backend. We do NOT support different
        # backends on a per-request basis in V1 (for now, anyway...).
        # _backend is set in Processor._validate_structured_output
        if self.backend is None:
            assert request.sampling_params is not None
            backend = request.sampling_params.structured_outputs._backend
            vocab_size = self.vllm_config.model_config.get_vocab_size()
            if backend == "xgrammar":
                self.backend = XgrammarBackend(
                    self.vllm_config,
                    tokenizer=self.tokenizer,
                    vocab_size=vocab_size,
                )
            elif backend == "guidance":
                self.backend = GuidanceBackend(
                    self.vllm_config,
                    tokenizer=self.tokenizer,
                    vocab_size=vocab_size,
                )
            elif backend == "outlines":
                from vllm.v1.structured_output.backend_outlines import OutlinesBackend

                self.backend = OutlinesBackend(
                    self.vllm_config,
                    tokenizer=self.tokenizer,
                    vocab_size=vocab_size,
                )
            elif backend == "lm-format-enforcer":
                from vllm.v1.structured_output.backend_lm_format_enforcer import (  # noqa: E501
                    LMFormatEnforcerBackend,
                )

                self.backend = LMFormatEnforcerBackend(
                    self.vllm_config,
                    tokenizer=self.tokenizer,
                    vocab_size=vocab_size,
                )
            else:
                raise ValueError(f"Unsupported structured output backend: {backend}")

        if self._use_async_grammar_compilation:
            grammar = self.executor.submit(self._create_grammar, request)
        else:
            grammar = self._create_grammar(request)  # type: ignore[assignment]
        request.structured_output_request.grammar = grammar  # type: ignore[assignment]

    def _create_grammar(
        self,
        request: Request,
    ) -> StructuredOutputGrammar:
        """Compile the grammar for *request* on the current backend."""
        key = request.structured_output_request.structured_output_key  # type: ignore[union-attr]

        # Note that the request was validated in the engine core client,
        # so at this point we know it is a supported type of request.
        #
        # TODO: we still need to handle xgrammar compilation failures,
        # though it should be unlikely as we test that up front as well.
        request_type, grammar_spec = key

        assert self.backend is not None
        return self.backend.compile_grammar(request_type, grammar_spec)

    def _fill_bitmasks(
        self,
        batch: Iterable[tuple[StructuredOutputGrammar, int, bool]],
    ) -> None:
        """Fill one bitmask row per (grammar, row index, apply?) triple."""
        assert self._grammar_bitmask is not None
        for grammar, index, apply_bitmask in batch:
            if apply_bitmask and not grammar.is_terminated():
                grammar.fill_bitmask(self._grammar_bitmask, index)
            else:
                # Note that for thinking support, we will need to
                # reset the relevant part of the bitmask for consequent
                # requests here.
                self._grammar_bitmask[index].fill_(self._full_mask)

    def _async_submit_fill_bitmask(
        self,
        batch: list[tuple[StructuredOutputGrammar, int, bool]],
    ) -> Future:
        return self.executor_for_fillmask.submit(self._fill_bitmasks, batch)

    def grammar_bitmask(
        self,
        requests: dict[str, Request],
        structured_output_request_ids: list[str],
        scheduled_spec_decode_tokens: dict[str, list[int]],
    ) -> "npt.NDArray[np.int32] | None":
        """Build the batched allowed-token bitmask for this scheduling step.

        Returns None when no request in the batch uses structured output.
        """
        # Prepare the structured output bitmask for this batch.
        if not structured_output_request_ids:
            return None

        max_num_spec_tokens = 0
        if self.vllm_config.speculative_config is not None:
            max_num_spec_tokens = (
                self.vllm_config.speculative_config.num_speculative_tokens
            )

        if self._grammar_bitmask is None:
            assert self.backend is not None
            max_batch_size = self.vllm_config.scheduler_config.max_num_seqs
            # Allocate a bitmask for each token needing to be checked:
            # one for each speculative position, and one more for the
            # bonus token / non-speculative token.
            self._grammar_bitmask = self.backend.allocate_token_bitmask(
                max_batch_size * (1 + max_num_spec_tokens)
            )

        # Generate a batched bitmask for all structured output requests.
        # When speculative decoding is enabled, we need to include multiple
        # masks for each request, one for each possible bonus token position.
        # These are stored inline in the tensor and unpacked by the gpu runner.
        cumulative_index = 0

        # Optimized parallel filling of bitmasks for
        # non-spec, large-batch-size cases
        if (
            len(structured_output_request_ids) > self.fill_bitmask_parallel_threshold
            and max_num_spec_tokens == 0
        ):
            promises = []
            batch = []
            for req_id in structured_output_request_ids:
                request = requests[req_id]
                structured_output_request = request.structured_output_request
                if TYPE_CHECKING:
                    assert structured_output_request is not None
                    assert structured_output_request.grammar is not None

                apply_bitmask = self.should_fill_bitmask(request)
                batch.append(
                    (structured_output_request.grammar, cumulative_index, apply_bitmask)
                )
                if len(batch) == self.fill_bitmask_parallel_batch_size:
                    promises.append(self._async_submit_fill_bitmask(batch))
                    batch = []
                cumulative_index += 1
            if batch:
                promises.append(self._async_submit_fill_bitmask(batch))
            # Wait for all bitmask filling tasks to complete.
            for promise in promises:
                promise.result()
        else:
            # Fallback to serial filling of bitmasks for small-batch-size cases
            for req_id in structured_output_request_ids:
                request = requests[req_id]
                structured_output_request = request.structured_output_request
                if TYPE_CHECKING:
                    assert structured_output_request is not None
                    assert structured_output_request.grammar is not None

                apply_bitmask = self.should_fill_bitmask(request)

                state_advancements = 0
                req_tokens = scheduled_spec_decode_tokens.get(req_id, ())
                # One mask per scheduled speculative token plus a trailing
                # None sentinel for the bonus/non-speculative position.
                for token in itertools.chain(req_tokens, (None,)):
                    self._fill_bitmasks(
                        (
                            (
                                structured_output_request.grammar,
                                cumulative_index,
                                apply_bitmask,
                            ),
                        )
                    )

                    if (
                        apply_bitmask
                        and token is not None
                        and not structured_output_request.grammar.is_terminated()
                    ):
                        accepted = structured_output_request.grammar.accept_tokens(
                            req_id, [token]
                        )
                        assert accepted, (token, req_id, scheduled_spec_decode_tokens)
                        state_advancements += 1
                    cumulative_index += 1
                if state_advancements > 0:
                    structured_output_request.grammar.rollback(state_advancements)

        bitmask_tensor = self._grammar_bitmask
        if cumulative_index < bitmask_tensor.shape[0]:
            bitmask_tensor = bitmask_tensor[:cumulative_index]

        # After finishing with the xgrammar operations, we convert to
        # np.ndarray, because that is much more efficient for serialization
        # and deserialization when sending this to the GPU workers.
        return bitmask_tensor.numpy()

    def should_fill_bitmask(self, request: Request) -> bool:
        # NOTE (Hanchen) if enable_in_reasoning is True, it means that
        # the model needs to be constrained in reasoning. So we should always
        # enable the bitmask filling.
        if self.reasoner is not None:
            if self.enable_in_reasoning:
                return True

            assert request.structured_output_request is not None
            if request.structured_output_request.reasoning_ended is None:
                request.structured_output_request.reasoning_ended = (
                    self.reasoner.is_reasoning_end(request.prompt_token_ids)
                )

            return request.structured_output_request.reasoning_ended
        return True

    def should_advance(self, request: Request) -> bool:
        """Return whether the request's FSM should advance this step."""
        if not request.use_structured_output:
            return False

        # To determine whether we can advance the FSM.
        # Supports thinking usage where we skip the reasoning components.
        if TYPE_CHECKING:
            assert request.structured_output_request is not None
            assert request.structured_output_request.grammar is not None
        # by default, we should always advance
        # for cases that don't use thinking mode.
        if self.reasoner is None:
            return True

        # if the model needs structured in reasoning, we should advance
        if self.enable_in_reasoning:
            return True

        structured_req = request.structured_output_request
        if structured_req.reasoning_ended:
            return True

        # Check if reasoning ends in *this* step
        delta_from = request.num_computed_tokens - request.num_output_placeholders
        if self.reasoner.is_reasoning_end_streaming(
            request.all_token_ids, request.all_token_ids[delta_from:]
        ):
            # Reasoning just ended, so we shouldn't advance til
            # next pass
            structured_req.reasoning_ended = True

        # FIX: the function is annotated `-> bool` but previously fell off
        # the end and implicitly returned None here. Still mid-reasoning
        # (or reasoning just ended this step), so do not advance.
        return False

    def clear_backend(self) -> None:
        if self.backend is not None:
            self.backend.destroy()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/structured_output/backend_xgrammar.py
vllm/v1/structured_output/backend_xgrammar.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import json from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any import torch import vllm.envs from vllm.logger import init_logger from vllm.sampling_params import SamplingParams from vllm.tokenizers.deepseek_v32 import DeepseekV32Tokenizer from vllm.tokenizers.mistral import MistralTokenizer from vllm.utils.import_utils import LazyLoader from vllm.v1.structured_output.backend_types import ( StructuredOutputBackend, StructuredOutputGrammar, StructuredOutputOptions, ) from vllm.v1.structured_output.utils import ( choice_as_grammar, convert_lark_to_ebnf, grammar_is_likely_lark, ) if TYPE_CHECKING: import xgrammar as xgr else: xgr = LazyLoader("xgr", globals(), "xgrammar") logger = init_logger(__name__) @dataclass class XgrammarBackend(StructuredOutputBackend): def __post_init__(self): self.disable_any_whitespace = ( self.vllm_config.structured_outputs_config.disable_any_whitespace ) if isinstance(self.tokenizer, MistralTokenizer): # NOTE: ideally, xgrammar should handle this accordingly. # refer to https://github.com/mlc-ai/xgrammar/blob/d77c0a0173ef14779c918e3be7966ba852f7910f/python/xgrammar/tokenizer_info.py#L98 stop_token_ids = [self.tokenizer.eos_token_id] # not self.tokenizer.vocab_size as self.tokenizer.vocab # collapses all decoded errors into a single token. 
self.vocab_size = len(self.tokenizer.vocab) tokenizer_info = xgr.TokenizerInfo( # type: ignore encoded_vocab=self.tokenizer.vocab, # NOTE: https://github.com/mlc-ai/xgrammar/blob/5e141f6ff1ca02bc31f9e512e68b61f2a8ae88e5/tests/python/test_tokenizer_info.py#L43 # noqa: E501 vocab_type=xgr.VocabType.RAW if self.tokenizer.is_tekken else xgr.VocabType.BYTE_FALLBACK, vocab_size=self.vocab_size, stop_token_ids=stop_token_ids, add_prefix_space=True, ) elif isinstance(self.tokenizer, DeepseekV32Tokenizer): # copy from xgr.TokenizerInfo.from_huggingface() # because we are using a custom tokenizer wrapper here. vocab_dict = self.tokenizer.get_vocab() tokenizer_vocab_size = max(len(vocab_dict), self.tokenizer.max_token_id + 1) vocab_size = self.vocab_size or tokenizer_vocab_size # maintain tokenizer's indexing encoded_vocab = [""] * vocab_size for token, idx in vocab_dict.items(): if idx < vocab_size: encoded_vocab[idx] = token stop_token_ids = [self.tokenizer.eos_token_id] backend_str = self.tokenizer.tokenizer.backend_tokenizer.to_str() metadata = xgr.TokenizerInfo._detect_metadata_from_hf(backend_str) tokenizer_info = xgr.TokenizerInfo( encoded_vocab=encoded_vocab, vocab_type=metadata["vocab_type"], vocab_size=vocab_size, stop_token_ids=stop_token_ids, add_prefix_space=metadata["add_prefix_space"], ) else: tokenizer_info = xgr.TokenizerInfo.from_huggingface( self.tokenizer, vocab_size=self.vocab_size, ) self.compiler = xgr.GrammarCompiler( tokenizer_info, max_threads=8, cache_enabled=True, cache_limit_bytes=vllm.envs.VLLM_XGRAMMAR_CACHE_MB * 1024 * 1024, ) self.num_speculative_tokens = 0 if self.vllm_config.speculative_config is not None: self.num_speculative_tokens = ( self.vllm_config.speculative_config.num_speculative_tokens ) def compile_grammar( self, request_type: StructuredOutputOptions, grammar_spec: str ) -> StructuredOutputGrammar: if request_type == StructuredOutputOptions.JSON: ctx = self.compiler.compile_json_schema( grammar_spec, any_whitespace=not 
self.disable_any_whitespace ) elif request_type == StructuredOutputOptions.JSON_OBJECT: ctx = self.compiler.compile_json_schema( '{"type": "object"}', any_whitespace=not self.disable_any_whitespace ) elif request_type == StructuredOutputOptions.GRAMMAR: ctx = self.compiler.compile_grammar(grammar_spec) elif request_type == StructuredOutputOptions.REGEX: ctx = self.compiler.compile_regex(grammar_spec) elif request_type == StructuredOutputOptions.STRUCTURAL_TAG: s_tag = json.loads(grammar_spec) if "structures" in s_tag: # Falling back to deprecated method of compiling structural tag tags = [ xgr.StructuralTagItem( begin=s["begin"], schema=json.dumps(s["schema"]), end=s["end"], ) for s in s_tag["structures"] ] ctx = self.compiler.compile_structural_tag(tags, s_tag["triggers"]) else: ctx = self.compiler.compile_structural_tag(grammar_spec) else: logger.error( "Validation should have already occurred. Please file an issue." ) raise ValueError( f"grammar is not of valid supported types. ({request_type!s})" ) return XgrammarGrammar( matcher=xgr.GrammarMatcher( ctx, max_rollback_tokens=self.num_speculative_tokens, ), vocab_size=self.vocab_size, ctx=ctx, ) def allocate_token_bitmask(self, max_num_seqs: int): return xgr.allocate_token_bitmask(max_num_seqs, self.vocab_size) def destroy(self): del self.compiler @dataclass class XgrammarGrammar(StructuredOutputGrammar): # NOTE: This would be a generic-enough class for # supporting different backends, in the future. # For now, just xgrammar. 
# # https://xgrammar.mlc.ai/docs/api/python/index.html#xgrammar.GrammarMatcher.find_jump_forward_string # for jump-forward decoding vocab_size: int matcher: xgr.GrammarMatcher = field(hash=False) ctx: xgr.CompiledGrammar = field(hash=False) num_processed_tokens: int = field( default_factory=lambda: 0, repr=False, hash=False, init=False ) _is_terminated: bool = field(default=False, repr=False, hash=False) def accept_tokens(self, request_id: str, tokens: list[int]) -> bool: """Accepts a list of tokens and advances the FSM. Returns True if the FSM was advanced successfully. Returns False if the FSM failed to advance. """ if self._is_terminated: return False for token in tokens: if not self.matcher.accept_token(token): logger.error( "Failed to advance FSM for request %s " "for tokens %s. Please file an issue.", request_id, token, ) return False self.num_processed_tokens += 1 self._is_terminated = self.matcher.is_terminated() return True def validate_tokens(self, tokens: list[int]) -> list[int]: """Checks if the list of tokens are accepted by the FSM in sequence. Will not advance the FSM. Returns the prefix list of tokens that are accepted by the FSM. 
""" accepted_tokens = [] for token in tokens: if self.matcher.accept_token(token): accepted_tokens.append(token) else: break if len(accepted_tokens) > 0: # Rollback the FSM to the initial state self.matcher.rollback(len(accepted_tokens)) return accepted_tokens def rollback(self, num_tokens: int) -> None: self.matcher.rollback(num_tokens) self.num_processed_tokens -= num_tokens self._is_terminated = self.matcher.is_terminated() def fill_bitmask(self, bitmask: torch.Tensor, idx: int) -> None: self.matcher.fill_next_token_bitmask(bitmask, idx) def is_terminated(self) -> bool: return self._is_terminated def reset(self): self.num_processed_tokens = 0 self.matcher.reset() # cf https://github.com/mlc-ai/xgrammar/blob/a32ac892676d2eedc0327416105b9b06edfb94b2/cpp/json_schema_converter.cc STRING_SUPPORTED_FORMATS = { "email", "date", "time", "date-time", "duration", "ipv4", "ipv6", "hostname", "uuid", "uri", "uri-reference", "uri-template", "json-pointer", "relative-json-pointer", } def has_xgrammar_unsupported_json_features(schema: dict[str, Any]) -> bool: """Check if JSON schema contains features unsupported by xgrammar.""" def check_object(obj: dict[str, Any]) -> bool: if not isinstance(obj, dict): return False # Check for numeric ranges if obj.get("type") in ("integer", "number") and ("multipleOf" in obj): return True # Check for array unsupported keywords if obj.get("type") == "array" and any( key in obj for key in ("uniqueItems", "contains", "minContains", "maxContains") ): return True # Unsupported keywords for strings if ( obj.get("type") == "string" and "format" in obj and obj["format"] not in STRING_SUPPORTED_FORMATS ): return True # Unsupported keywords for objects if obj.get("type") == "object" and any( key in obj for key in ("patternProperties", "propertyNames") ): return True # Recursively check all nested objects and arrays for value in obj.values(): if isinstance(value, dict): if check_object(value): return True elif isinstance(value, list): for item in 
value: if isinstance(item, dict) and check_object(item): return True return False return check_object(schema) def validate_xgrammar_grammar(sampling_params: SamplingParams) -> None: """Validate that the request is supported by structured output. Raises ValueError if the request is not supported. """ if sampling_params.structured_outputs is None: return so_params = sampling_params.structured_outputs if so_params.regex: try: xgr.Grammar.from_regex(so_params.regex) except Exception as err: raise ValueError( f"Failed to transform regex into a grammar: {err}" ) from err if so_params.choice: choice_grammar = choice_as_grammar(so_params.choice) try: xgr.Grammar.from_ebnf(choice_grammar) except Exception as err: raise ValueError( "Failed to transform choices into a grammar: {err}" ) from err so_params.choice = None so_params.grammar = choice_grammar return if so_params.json: if isinstance(so_params.json, str): try: schema = json.loads(so_params.json) except json.JSONDecodeError as e: raise ValueError("Invalid JSON grammar specification.") from e else: schema = so_params.json try: xgr.Grammar.from_json_schema(schema) except Exception as err: raise ValueError( f"Failed to transform json schema into a grammar: {err}" ) from err if has_xgrammar_unsupported_json_features(schema): raise ValueError( "The provided JSON schema contains features not supported by xgrammar." ) return if so_params.grammar: if grammar_is_likely_lark(so_params.grammar): # xgrammar supports EBNF grammars only try: so_params.grammar = convert_lark_to_ebnf(so_params.grammar) except ValueError as e: raise ValueError( "Failed to convert the grammar from Lark to EBNF. " ) from e # Test parsing EBNF grammar, possibly already converted from Lark try: # parse the grammar, but we aren't compiling it. 
xgr.Grammar.from_ebnf(so_params.grammar) except Exception as e: raise ValueError("Invalid grammar specification.") from e return if so_params.structural_tag: try: s_tag = json.loads(so_params.structural_tag) # Using the deprecated method of compiling structural tag if "structures" in s_tag: tags = [ xgr.StructuralTagItem( begin=s["begin"], schema=json.dumps(s["schema"]), end=s["end"], ) for s in s_tag["structures"] ] xgr.Grammar.from_structural_tag(tags, s_tag["triggers"]) else: xgr.Grammar.from_structural_tag(so_params.structural_tag) except Exception as e: raise ValueError("Invalid structural tag specification.") from e
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/metrics/prometheus.py
vllm/v1/metrics/prometheus.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os import tempfile from prometheus_client import REGISTRY, CollectorRegistry, multiprocess from vllm.logger import init_logger logger = init_logger(__name__) # Global temporary directory for prometheus multiprocessing _prometheus_multiproc_dir: tempfile.TemporaryDirectory | None = None def setup_multiprocess_prometheus(): """Set up prometheus multiprocessing directory if not already configured.""" global _prometheus_multiproc_dir if "PROMETHEUS_MULTIPROC_DIR" not in os.environ: # Make TemporaryDirectory for prometheus multiprocessing # Note: global TemporaryDirectory will be automatically # cleaned up upon exit. _prometheus_multiproc_dir = tempfile.TemporaryDirectory() os.environ["PROMETHEUS_MULTIPROC_DIR"] = _prometheus_multiproc_dir.name logger.debug( "Created PROMETHEUS_MULTIPROC_DIR at %s", _prometheus_multiproc_dir.name ) else: logger.warning( "Found PROMETHEUS_MULTIPROC_DIR was set by user. " "This directory must be wiped between vLLM runs or " "you will find inaccurate metrics. Unset the variable " "and vLLM will properly handle cleanup." ) def get_prometheus_registry() -> CollectorRegistry: """Get the appropriate prometheus registry based on multiprocessing configuration. Returns: Registry: A prometheus registry """ if os.getenv("PROMETHEUS_MULTIPROC_DIR") is not None: logger.debug("Using multiprocess registry for prometheus metrics") registry = CollectorRegistry() multiprocess.MultiProcessCollector(registry) return registry return REGISTRY def unregister_vllm_metrics(): """Unregister any existing vLLM collectors from the prometheus registry. This is useful for testing and CI/CD where metrics may be registered multiple times across test runs. Also, in case of multiprocess, we need to unregister the metrics from the global registry. 
""" registry = REGISTRY # Unregister any existing vLLM collectors for collector in list(registry._collector_to_names): if hasattr(collector, "_name") and "vllm" in collector._name: registry.unregister(collector) def shutdown_prometheus(): """Shutdown prometheus metrics.""" path = _prometheus_multiproc_dir if path is None: return try: pid = os.getpid() multiprocess.mark_process_dead(pid, path) logger.debug("Marked Prometheus metrics for process %d as dead", pid) except Exception as e: logger.error("Error during metrics cleanup: %s", str(e))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/metrics/perf.py
vllm/v1/metrics/perf.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Analytic flops/memory estimation module for transformer components, to help derive MFU (Model Flops Utilization) stats for a running model. """ import json import time from abc import ABC, abstractmethod from collections.abc import Iterable from dataclasses import asdict, dataclass from typing import Any, Protocol import torch from pydantic import BaseModel, Field, ValidationError, model_validator from typing_extensions import Self import vllm.envs as envs from vllm.config import VllmConfig from vllm.logger import init_logger from vllm.utils.torch_utils import ( STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size, get_kv_cache_torch_dtype, ) from vllm.v1.core.sched.output import SchedulerOutput logger = init_logger(__name__) class InvalidComponent(Exception): """ Custom exception to indicate that a certain ComponentMetric is not applicable to the given VllmConfig. """ pass #### Basic Data Types #### @dataclass class DebugPerfStats: ## Stats for debugging the metrics calculation calc_duration: float = 0.0 # time spent calculating these stats num_prefill_requests: int = 0 num_decode_requests: int = 0 context_breakdown: dict[str, int] | None = None num_flops_per_gpu_breakdown: dict[str, int] | None = None num_read_bytes_per_gpu_breakdown: dict[str, int] | None = None num_write_bytes_per_gpu_breakdown: dict[str, int] | None = None @dataclass class PerfStats: num_flops_per_gpu: int = 0 num_read_bytes_per_gpu: int = 0 num_write_bytes_per_gpu: int = 0 debug_stats: DebugPerfStats | None = None @dataclass class ExecutionContext: """ Represents an execution context for a batch of requests. This class aggregates statistics across multiple requests in a batch, separately tracking prefill and decode phases. 
Example) - Batch with one full prefill (2048 tokens) and one decode (1 token, 8192 context): ctx = ExecutionContext() ctx.add(2048, 2048, is_prefill=True) ctx.add(1, 8192, is_prefill=False) """ # Prefill phase statistics num_prefill_requests: int = 0 prefill_num_tokens: int = 0 # sum of num_tokens for prefill requests prefill_context_len: int = 0 # sum of context_len for prefill requests prefill_token_context_product: int = 0 # sum of (num_tokens * context_len) # Decode phase statistics num_decode_requests: int = 0 decode_num_tokens: int = 0 # sum of num_tokens for decode requests decode_context_len: int = 0 # sum of context_len for decode requests decode_token_context_product: int = 0 # sum of (num_tokens * context_len) def add(self, num_tokens: int, context_len: int, is_prefill: bool) -> None: """Add a single request's statistics to this batch context.""" if is_prefill: self.num_prefill_requests += 1 self.prefill_num_tokens += num_tokens self.prefill_context_len += context_len self.prefill_token_context_product += num_tokens * context_len else: self.num_decode_requests += 1 self.decode_num_tokens += num_tokens self.decode_context_len += context_len self.decode_token_context_product += num_tokens * context_len def total_num_tokens(self) -> int: """Total number of tokens across all requests in the batch.""" return self.prefill_num_tokens + self.decode_num_tokens def total_token_context_product(self) -> int: """Total sum of (num_tokens * context_len) across all requests.""" return self.prefill_token_context_product + self.decode_token_context_product @classmethod def from_single_request( cls, num_tokens: int, context_len: int, is_prefill: bool ) -> "ExecutionContext": """Create an ExecutionContext from a single request. This is a convenience method primarily for testing. """ ctx = cls() ctx.add(num_tokens, context_len, is_prefill) return ctx class ParsedArgs: """ Syntactic sugar so that Parsers can use dot notations to access/update the parsed arguments. e.g.) 
args = ParsedArgs() args.x = 3 args.y = args.x + 1 """ def __getattr__(self, name: str) -> Any: raise AttributeError(f"'{type(self).__name__}' has no attribute '{name}'") def __setattr__(self, name: str, value: Any) -> None: object.__setattr__(self, name, value) def model_dump(self) -> dict[str, Any]: return vars(self).copy() #### Abstract #### class Parser(Protocol): def parse(self, args: ParsedArgs, vllm_config: VllmConfig) -> ParsedArgs: """ Parse the vllm config and update the current ParsedArgs and pass it on. If the parser isn't applicable to the vllm_config, it will do nothing. """ ... class ParserChain: """ Applies chain of parser in a sequential order. Later parsers might overwrite results from previous parsers, so parsers should be chained in the appropriate order if they are not mutually exclusive. """ def __init__(self, *parsers: Parser) -> None: self.parsers = list(parsers) def add_parser(self, parser: Parser) -> None: self.parsers.append(parser) def parse(self, vllm_config: VllmConfig) -> ParsedArgs: args = ParsedArgs() for parser in self.parsers: args = parser.parse(args, vllm_config) return args _COMPONENT_METRICS_REGISTRY: dict[str, type["ComponentMetrics"]] = {} class ComponentMetrics(BaseModel, ABC): """ Each concrete ComponentMetrics class is associated with: - fields that are required for metric derivation (fields are specified/validated through pydantic model) - parser to parse VllmConfig into fields - metric methods that derive flops/bytes for a given execution context """ @classmethod @abstractmethod def component_type(cls) -> str: ... @classmethod @abstractmethod def get_parser(cls) -> ParserChain: """ Return a ParserChain that provides values for all required fields. The returned parser chain must populate ParsedArgs with values for every field defined on this ComponentMetrics class. Missing fields will cause a ValidationError when from_vllm_config() is called. 
See individual Parser docstrings for which args they provide, and field comments on ComponentMetrics subclasses for which parser provides each field. """ ... def __init_subclass__(cls): _COMPONENT_METRICS_REGISTRY[cls.component_type()] = cls @classmethod def from_vllm_config(cls, vllm_config: VllmConfig) -> Self: """ Instantiate this class from VllmConfig. Raises ValidationError if parsing fails. """ parser = cls.get_parser() parsed_args = parser.parse(vllm_config) try: return cls.model_validate(parsed_args.model_dump()) except ValidationError as e: raise InvalidComponent(f"Invalid {cls.component_type()} config: {e}") from e @classmethod def registered_metrics(cls) -> Iterable[type["ComponentMetrics"]]: return iter(_COMPONENT_METRICS_REGISTRY.values()) @abstractmethod def get_num_flops_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: ... @abstractmethod def get_read_bytes_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: ... @abstractmethod def get_write_bytes_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: ... def get_num_flops(self, ctx: ExecutionContext, per_gpu: bool = True) -> int: return sum(self.get_num_flops_breakdown(ctx, per_gpu).values()) def get_read_bytes(self, ctx: ExecutionContext, per_gpu: bool = True) -> int: return sum(self.get_read_bytes_breakdown(ctx, per_gpu).values()) def get_write_bytes(self, ctx: ExecutionContext, per_gpu: bool = True) -> int: return sum(self.get_write_bytes_breakdown(ctx, per_gpu).values()) #### parsers #### class BaseConfigParser(Parser): """ Parses base model configuration. 
Provides: vocab_size, hidden_size, num_attention_heads, num_hidden_layers, weight_byte_size, activation_byte_size, dp_size, tp_size, pp_size, enable_ep """ def parse(self, args: ParsedArgs, vllm_config: VllmConfig) -> ParsedArgs: model_config = vllm_config.model_config args.vocab_size = model_config.get_vocab_size() args.hidden_size = model_config.get_hidden_size() # NOTE: model_config.get_attention_heads() divide by TP # so we access field manually here to get total num_heads args.num_attention_heads = get_required( model_config.hf_text_config, "num_attention_heads" ) args.num_hidden_layers = get_required( model_config.hf_text_config, "num_hidden_layers" ) model_dtype = vllm_config.model_config.dtype if isinstance(model_dtype, torch.dtype): torch_dtype = model_dtype elif isinstance(model_dtype, str) and model_dtype in STR_DTYPE_TO_TORCH_DTYPE: torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[model_dtype] else: # FIXME: handle this better logger.warning( "Unknown model_dtype %s, defaulting to bfloat16", model_dtype, ) torch_dtype = torch.bfloat16 args.weight_byte_size = get_dtype_size(torch_dtype) # FIXME: handle this better by parsing whether activations use # bf16, fp32, etc... args.activation_byte_size = 2 args.dp_size = vllm_config.parallel_config.data_parallel_size args.tp_size = vllm_config.parallel_config.tensor_parallel_size args.pp_size = vllm_config.parallel_config.pipeline_parallel_size args.enable_ep = vllm_config.parallel_config.enable_expert_parallel return args #### Attention #### class BaseAttentionConfigParser(Parser): """ Parses attention-specific configuration. 
Provides: num_key_value_heads, head_dim, cache_byte_size """ def parse(self, args: ParsedArgs, vllm_config: VllmConfig) -> ParsedArgs: model_config = vllm_config.model_config args.num_key_value_heads = model_config.get_total_num_kv_heads() args.head_dim = model_config.get_head_size() model_dtype = vllm_config.model_config.dtype cache_dtype = vllm_config.cache_config.cache_dtype kv_cache_torch_dtype = get_kv_cache_torch_dtype(cache_dtype, model_dtype) args.cache_byte_size = get_dtype_size(kv_cache_torch_dtype) return args class AttentionQuantizationConfigParser(Parser): """ Parses quantization configuration for attention layers. Overrides: weight_byte_size """ def parse(self, args: ParsedArgs, vllm_config: VllmConfig) -> ParsedArgs: cfg = vllm_config.quant_config if cfg is None: return args quant_method = cfg.get_name() if quant_method in ["fp8", "fbgemm_fp8"]: # FIXME: This is a hacky coarse-grained fp8 quantization detection. # FIXME: These configs also have concept of "ignored layers" and we # need to solve the same problem as above. args.weight_byte_size = 1 elif quant_method == "mxfp4": # FIXME: Also has "ignored layers" issue above args.weight_byte_size = 0.5 else: # FIXME: Add more parsing logic for different quant methods. raise InvalidComponent return args class AttentionMetrics(ComponentMetrics): # From BaseConfigParser num_hidden_layers: int = Field(..., gt=0) hidden_size: int = Field(..., gt=0) num_attention_heads: int = Field(..., gt=0) activation_byte_size: int = Field(..., gt=0) tp_size: int = Field(..., gt=0) pp_size: int = Field(..., gt=0) # From BaseAttentionConfigParser num_key_value_heads: int = Field(..., gt=0) head_dim: int = Field(..., gt=0) cache_byte_size: int = Field(..., gt=0) # From BaseConfig Parser, overridden by AttentionQuantizationConfigParser weight_byte_size: int | float = Field(..., gt=0) # TODO: discern cases where we have mixture of different attention layer types # such as SWA, MLA, etc. 
@classmethod def component_type(cls) -> str: return "attn" @classmethod def get_parser(cls) -> ParserChain: return ParserChain( BaseConfigParser(), BaseAttentionConfigParser(), AttentionQuantizationConfigParser(), ) def get_num_flops_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: L, D, q, kv, d = ( self.num_hidden_layers, self.hidden_size, self.num_attention_heads, self.num_key_value_heads, self.head_dim, ) T = ctx.total_num_tokens() TC = ctx.total_token_context_product() if per_gpu: L //= self.pp_size # tensor parallel along heads q = max(1, q // self.tp_size) kv = max(1, kv // self.tp_size) return { "qkv_proj": 2 * T * D * (q + 2 * kv) * d * L, "attn_qk": 2 * q * TC * d * L, "attn_av": 2 * q * TC * d * L, "out_proj": 2 * T * D * q * d * L, } def get_read_bytes_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: L, D, q, kv, d = ( self.num_hidden_layers, self.hidden_size, self.num_attention_heads, self.num_key_value_heads, self.head_dim, ) T = ctx.total_num_tokens() if per_gpu: L //= self.pp_size # tensor parallel along heads q = max(1, q // self.tp_size) kv = max(1, kv // self.tp_size) read_bytes = {} read_bytes["qkv_input"] = T * D * self.activation_byte_size * L read_bytes["qkv_weight"] = int(D * (q + 2 * kv) * d * self.weight_byte_size * L) # Attention input reads differ between prefill and decode # Prefill: read Q, K, V activations (all in activation_byte_size) if ctx.prefill_num_tokens > 0: read_bytes["attn_input"] = ( (ctx.prefill_num_tokens * q + 2 * ctx.prefill_context_len * kv) * d * self.activation_byte_size * L ) # Decode: read Q activations + read K, V from cache (in cache_byte_size) if ctx.decode_num_tokens > 0: read_bytes["attn_input"] = read_bytes.get("attn_input", 0) + ( ctx.decode_num_tokens * q * d * self.activation_byte_size * L + 2 * ctx.decode_context_len * kv * d * self.cache_byte_size * L ) read_bytes["out_input"] = T * q * d * self.activation_byte_size * L 
read_bytes["out_weight"] = int(q * d * D * self.weight_byte_size * L) return read_bytes def get_write_bytes_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: """Calculate write memory traffic for attention layers.""" L, D, q, kv, d = ( self.num_hidden_layers, self.hidden_size, self.num_attention_heads, self.num_key_value_heads, self.head_dim, ) T = ctx.total_num_tokens() if per_gpu: L //= self.pp_size # tensor parallel along heads q = max(1, q // self.tp_size) kv = max(1, kv // self.tp_size) return { "qkv_output": T * (q + 2 * kv) * d * self.activation_byte_size * L, "kv_cache": 2 * T * kv * d * self.cache_byte_size * L, "out_output": T * D * self.activation_byte_size * L, } #### Ffn #### class BaseFfnConfigParser(Parser): """ Parses FFN and MoE configuration. Provides: intermediate_size, num_experts, num_experts_per_tok, moe_intermediate_size, num_shared_experts, num_moe_layers """ def parse(self, args: ParsedArgs, vllm_config: VllmConfig) -> ParsedArgs: cfg = vllm_config.model_config.hf_config if hasattr(cfg, "text_config") and cfg.text_config is not None: cfg = cfg.text_config args.intermediate_size = getattr(cfg, "intermediate_size", args.hidden_size * 4) # Try different naming conventions. args.num_experts = vllm_config.model_config.get_num_experts() args.num_experts_per_tok = getattr_from_list( cfg, ["num_experts_per_tok", "moe_topk"], 0 ) args.moe_intermediate_size = getattr_from_list( cfg, ["moe_intermediate_size", "intermediate_size"], 0 ) args.num_shared_experts = getattr_from_list( cfg, ["n_shared_experts", "num_shared_experts"], 0 ) is_moe = args.num_experts != 0 # Assume all MoE layers by default args.num_moe_layers = args.num_hidden_layers if is_moe else 0 return args class FfnParallelParser(Parser): """ Parses FFN parallelism configuration. Provides: ffn_tp_size, ffn_ep_size """ def parse(self, args: ParsedArgs, vllm_config: VllmConfig) -> ParsedArgs: # NOTE: ffn tp_size does not equal the tp_size parameter directly. 
# e.g.) If we use DP2TP4, ffn will use TP8 (or EP8 if EP is enabled.) if args.enable_ep: ffn_tp_size, ffn_ep_size = 1, args.dp_size * args.tp_size else: ffn_tp_size, ffn_ep_size = args.dp_size * args.tp_size, 1 args.ffn_tp_size = ffn_tp_size args.ffn_ep_size = ffn_ep_size return args class InterleaveMoeLayerStepParser(Parser): """ Parses interleave_moe_layer_step field for models like Llama4. Overrides: num_moe_layers """ def parse(self, args: ParsedArgs, vllm_config: VllmConfig) -> ParsedArgs: cfg = vllm_config.model_config.hf_config if hasattr(cfg, "text_config") and cfg.text_config is not None: cfg = cfg.text_config if ( hasattr(cfg, "interleave_moe_layer_step") and cfg.interleave_moe_layer_step > 0 ): args.num_moe_layers = len( [ layer for layer in range(args.num_hidden_layers) if (layer + 1) % cfg.interleave_moe_layer_step == 0 ] ) return args class MoeLayerFreqParser(Parser): """ Parses moe_layer_freq and first_k_dense_replace fields for models like Deepseek. Overrides: num_moe_layers """ def parse(self, args: ParsedArgs, vllm_config: VllmConfig) -> ParsedArgs: cfg = vllm_config.model_config.hf_config if hasattr(cfg, "text_config") and cfg.text_config is not None: cfg = cfg.text_config if hasattr(cfg, "moe_layer_freq") and hasattr(cfg, "first_k_dense_replace"): args.num_moe_layers = len( [ layer for layer in range(args.num_hidden_layers) if layer >= cfg.first_k_dense_replace and layer % cfg.moe_layer_freq == 0 ] ) return args class FfnQuantizationConfigParser(Parser): """ Parses quantization configuration for FFN layers. Overrides: weight_byte_size """ def parse(self, args: ParsedArgs, vllm_config: VllmConfig) -> ParsedArgs: cfg = vllm_config.quant_config if cfg is None: return args quant_method = cfg.get_name() if quant_method in ["fp8", "fbgemm_fp8"]: # FIXME: This is a hacky coarse-grained fp8 quantization detection. # (there might be more quantization methods for fp8). 
# FIXME: These configs also have concept of "ignored layers" and we # need to solve the same problem as above. args.weight_byte_size = 1 pass elif quant_method == "mxfp4": # FIXME: Also has "ignored layers" issue above args.weight_byte_size = 0.5 else: # FIXME: Add more parsing logic for different quant methods. raise InvalidComponent return args class FfnMetrics(ComponentMetrics): # From BaseConfigParser num_hidden_layers: int = Field(..., gt=0) hidden_size: int = Field(..., gt=0) activation_byte_size: int = Field(..., gt=0) pp_size: int = Field(..., gt=0) # From FfnParallelParser ffn_tp_size: int = Field(..., gt=0) ffn_ep_size: int = Field(..., gt=0) # From BaseFfnConfigParser intermediate_size: int = Field(..., gt=0) num_experts: int = Field(0) num_experts_per_tok: int = Field(1) moe_intermediate_size: int = Field(0) num_shared_experts: int = Field(0) # From BaseConfigParser, can be overridden InterleaveMoeLayerStep or MoeLayerFreq num_moe_layers: int = Field(..., ge=0) # FIXME: might have to make this more granular # (i.e. dense_weight_byte_size, moe_routed_weight_byte_size, # moe_shared_weight_byte_size) # since it can differ from byte size of other components (e.g. attn) # and can differ even from each other. 
# From BaseConfigParser, can be overridden by FfnQuantizationConfigParser weight_byte_size: int | float = Field(..., gt=0) @model_validator(mode="after") def validate_moe_fields(self) -> Self: """Validate that MoE-related fields are properly set when num_moe_layers > 0.""" if self.num_moe_layers > 0: assert self.num_experts, f"{self.num_experts=}" assert self.num_experts_per_tok, f"{self.num_experts_per_tok=}" assert self.moe_intermediate_size, f"{self.moe_intermediate_size=}" return self @classmethod def component_type(cls) -> str: return "ffn" @classmethod def get_parser(cls) -> ParserChain: return ParserChain( BaseConfigParser(), FfnParallelParser(), BaseFfnConfigParser(), InterleaveMoeLayerStepParser(), MoeLayerFreqParser(), FfnQuantizationConfigParser(), ) def get_num_flops_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: """Calculate flops breakdown for FFN layers.""" L, D, DI = self.num_hidden_layers, self.hidden_size, self.intermediate_size Lm, E, MI, S = ( self.num_moe_layers, self.num_experts_per_tok, self.moe_intermediate_size, self.num_shared_experts, ) T = ctx.total_num_tokens() Ld = L - Lm num_activated_tokens = T * E if E else 0 if per_gpu: Ld //= self.pp_size Lm //= self.pp_size DI //= self.ffn_tp_size if MI is not None: MI //= self.ffn_tp_size if E: num_activated_tokens //= self.ffn_ep_size flops = {} # Dense FFN layers (SwiGLU: 3 linear layers: up, gate, down) if Ld: flops["dense_ffn"] = 2 * D * 3 * DI * T * Ld # MoE routed experts (each token activates E experts) if Lm and E: flops["routed_ffn"] = 2 * D * 3 * MI * num_activated_tokens * Lm # MoE shared experts (all S shared experts run for every token) if Lm and S: flops["shared_ffn"] = 2 * D * 3 * MI * S * T * Lm return flops def get_read_bytes_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: """Calculate read memory traffic for FFN layers.""" L, D, DI = self.num_hidden_layers, self.hidden_size, self.intermediate_size Lm, E, MI, S 
= ( self.num_moe_layers, self.num_experts_per_tok, self.moe_intermediate_size, self.num_shared_experts, ) T = ctx.total_num_tokens() num_experts = self.num_experts Ld = L - Lm num_activated_tokens = T * E if E else 0 if per_gpu: Ld //= self.pp_size Lm //= self.pp_size DI //= self.ffn_tp_size if MI is not None: MI //= self.ffn_tp_size if E: num_activated_tokens //= self.ffn_ep_size if num_experts is not None: num_experts //= self.ffn_ep_size read_bytes = {} # Dense FFN layers (3 GEMMs: up, gate, down projections + SiLU activation) if Ld: read_bytes["dense_up_gate_input"] = int( T * D * self.activation_byte_size * Ld ) read_bytes["dense_up_gate_weights"] = int( 2 * D * DI * self.weight_byte_size * Ld ) read_bytes["dense_silu_input"] = int( 2 * T * DI * self.activation_byte_size * Ld ) read_bytes["dense_down_input"] = int( T * DI * self.activation_byte_size * Ld ) read_bytes["dense_down_weights"] = int(D * DI * self.weight_byte_size * Ld) if Lm: # MoE routed expert reads if E: # FIXME: Assume perfect load balancing for now. 
num_activated_experts = min(num_activated_tokens, num_experts) read_bytes["routed_up_gate_input"] = int( num_activated_tokens * D * self.activation_byte_size * Lm ) read_bytes["routed_up_gate_weights"] = int( 2 * D * MI * num_activated_experts * self.weight_byte_size * Lm ) read_bytes["routed_silu_input"] = int( 2 * num_activated_tokens * MI * self.activation_byte_size * Lm ) read_bytes["routed_down_input"] = int( num_activated_tokens * MI * self.activation_byte_size * Lm ) read_bytes["routed_down_weights"] = int( D * MI * num_activated_experts * self.weight_byte_size * Lm ) # MoE shared expert reads if S: read_bytes["shared_up_gate_input"] = int( T * D * self.activation_byte_size * Lm ) read_bytes["shared_up_gate_weights"] = int( 2 * D * MI * S * self.weight_byte_size * Lm ) read_bytes["shared_silu_input"] = int( 2 * T * MI * S * self.activation_byte_size * Lm ) read_bytes["shared_down_input"] = int( T * MI * self.activation_byte_size * Lm ) read_bytes["shared_down_weights"] = int( D * MI * S * self.weight_byte_size * Lm ) return read_bytes def get_write_bytes_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: """Calculate write memory traffic for FFN layers.""" L, D, DI = self.num_hidden_layers, self.hidden_size, self.intermediate_size Lm, E, MI, S = ( self.num_moe_layers, self.num_experts_per_tok, self.moe_intermediate_size, self.num_shared_experts, ) T = ctx.total_num_tokens() Ld = L - Lm num_activated_tokens = T * E if E else 0 if per_gpu: Ld //= self.pp_size Lm //= self.pp_size DI //= self.ffn_tp_size if MI is not None: MI //= self.ffn_tp_size if E: num_activated_tokens //= self.ffn_ep_size write_bytes = {} # Dense FFN layers if Ld: write_bytes["dense_up_gate_output"] = int( 2 * T * DI * self.activation_byte_size * Ld ) write_bytes["dense_silu_output"] = int( T * DI * self.activation_byte_size * Ld ) write_bytes["dense_down_output"] = int( T * D * self.activation_byte_size * Ld ) # MoE outputs if Lm: if E: 
write_bytes["routed_up_gate_output"] = int( 2 * num_activated_tokens * MI * self.activation_byte_size * Lm ) write_bytes["routed_silu_output"] = int( num_activated_tokens * MI * self.activation_byte_size * Lm ) write_bytes["routed_down_output"] = int( num_activated_tokens * D * self.activation_byte_size * Lm ) if S: write_bytes["shared_up_gate_output"] = int( 2 * T * S * MI * self.activation_byte_size * Lm ) write_bytes["shared_silu_output"] = int( T * S * MI * self.activation_byte_size * Lm ) write_bytes["shared_down_output"] = int( T * S * D * self.activation_byte_size * Lm ) return write_bytes #### Unembed #### class UnembedMetrics(ComponentMetrics): # From BaseConfigParser hidden_size: int = Field(..., gt=0) vocab_size: int = Field(..., gt=0) weight_byte_size: int = Field(..., gt=0) activation_byte_size: int = Field(..., gt=0) tp_size: int @classmethod def component_type(cls) -> str: return "unembed" @classmethod def get_parser(cls) -> ParserChain: return ParserChain( BaseConfigParser(), ) def get_num_flops_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: """Calculate flops breakdown for unembedding layer.""" D, V = self.hidden_size, self.vocab_size T = ctx.total_num_tokens() if per_gpu: V //= self.tp_size return { "unembed": 2 * T * D * V, } def get_read_bytes_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: """Calculate read memory traffic for unembedding layer.""" D, V = self.hidden_size, self.vocab_size T = ctx.total_num_tokens() if per_gpu: V //= self.tp_size return { "input": T * D * self.activation_byte_size, "weight": D * V * self.weight_byte_size, } def get_write_bytes_breakdown( self, ctx: ExecutionContext, per_gpu: bool = True ) -> dict[str, int]: """Calculate write memory traffic for unembedding layer.""" V = self.vocab_size T = ctx.total_num_tokens() if per_gpu: V //= self.tp_size return { "output": T * V * self.activation_byte_size, } #### ModelMetrics #### class ModelMetrics: def 
__init__(self, vllm_config: VllmConfig) -> None: """ Parse vllm_config to instantiate metrics for each component. is_enabled() will return False if no component metrics could be instantiated. """ self.vllm_config = vllm_config self.metrics: list[ComponentMetrics] = [] for metric_cls in ComponentMetrics.registered_metrics(): try: metric = metric_cls.from_vllm_config(vllm_config) self.metrics.append(metric) logger.info( "Instantiated ComponentMetrics [%s] with (%s)", metric.component_type(), str(metric), ) except InvalidComponent as e: logger.debug( "Failed to instantiate %s from %s", metric_cls.component_type(), str(e), ) def is_enabled(self) -> bool: return len(self.metrics) > 0 def get_num_flops(self, ctx: ExecutionContext, per_gpu: bool = True) -> int: return sum(metric.get_num_flops(ctx, per_gpu) for metric in self.metrics) def get_read_bytes(self, ctx: ExecutionContext, per_gpu: bool = True) -> int:
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/metrics/ray_wrappers.py
vllm/v1/metrics/ray_wrappers.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import time from vllm.distributed.kv_transfer.kv_connector.v1.metrics import KVConnectorPrometheus from vllm.v1.metrics.loggers import PrometheusStatLogger from vllm.v1.spec_decode.metrics import SpecDecodingProm try: from ray import serve as ray_serve from ray.util import metrics as ray_metrics from ray.util.metrics import Metric except ImportError: ray_metrics = None ray_serve = None import regex as re def _get_replica_id() -> str | None: """Get the current Ray Serve replica ID, or None if not in a Serve context.""" if ray_serve is None: return None try: return ray_serve.get_replica_context().replica_id.unique_id except ray_serve.exceptions.RayServeException: return None class RayPrometheusMetric: def __init__(self): if ray_metrics is None: raise ImportError("RayPrometheusMetric requires Ray to be installed.") self.metric: Metric = None @staticmethod def _get_tag_keys(labelnames: list[str] | None) -> tuple[str, ...]: labels = list(labelnames) if labelnames else [] labels.append("ReplicaId") return tuple(labels) def labels(self, *labels, **labelskwargs): if labels: # -1 because ReplicaId was added automatically expected = len(self.metric._tag_keys) - 1 if len(labels) != expected: raise ValueError( "Number of labels must match the number of tag keys. " f"Expected {expected}, got {len(labels)}" ) labelskwargs.update(zip(self.metric._tag_keys, labels)) labelskwargs["ReplicaId"] = _get_replica_id() or "" if labelskwargs: for k, v in labelskwargs.items(): if not isinstance(v, str): labelskwargs[k] = str(v) self.metric.set_default_tags(labelskwargs) return self @staticmethod def _get_sanitized_opentelemetry_name(name: str) -> str: """ For compatibility with Ray + OpenTelemetry, the metric name must be sanitized. In particular, this replaces disallowed character (e.g., ':') with '_' in the metric name. 
Allowed characters: a-z, A-Z, 0-9, _ # ruff: noqa: E501 Ref: https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/src/metrics/instrument_metadata_validator.cc#L22-L23 Ref: https://github.com/ray-project/ray/blob/master/src/ray/stats/metric.cc#L107 """ return re.sub(r"[^a-zA-Z0-9_]", "_", name) class RayGaugeWrapper(RayPrometheusMetric): """Wraps around ray.util.metrics.Gauge to provide same API as prometheus_client.Gauge""" def __init__( self, name: str, documentation: str | None = "", labelnames: list[str] | None = None, multiprocess_mode: str | None = "", ): # All Ray metrics are keyed by WorkerId, so multiprocess modes like # "mostrecent", "all", "sum" do not apply. This logic can be manually # implemented at the observability layer (Prometheus/Grafana). del multiprocess_mode tag_keys = self._get_tag_keys(labelnames) name = self._get_sanitized_opentelemetry_name(name) self.metric = ray_metrics.Gauge( name=name, description=documentation, tag_keys=tag_keys, ) def set(self, value: int | float): return self.metric.set(value) def set_to_current_time(self): # ray metrics doesn't have set_to_current time, https://docs.ray.io/en/latest/_modules/ray/util/metrics.html return self.metric.set(time.time()) class RayCounterWrapper(RayPrometheusMetric): """Wraps around ray.util.metrics.Counter to provide same API as prometheus_client.Counter""" def __init__( self, name: str, documentation: str | None = "", labelnames: list[str] | None = None, ): tag_keys = self._get_tag_keys(labelnames) name = self._get_sanitized_opentelemetry_name(name) self.metric = ray_metrics.Counter( name=name, description=documentation, tag_keys=tag_keys, ) def inc(self, value: int | float = 1.0): if value == 0: return return self.metric.inc(value) class RayHistogramWrapper(RayPrometheusMetric): """Wraps around ray.util.metrics.Histogram to provide same API as prometheus_client.Histogram""" def __init__( self, name: str, documentation: str | None = "", labelnames: list[str] | None = None, 
buckets: list[float] | None = None, ): tag_keys = self._get_tag_keys(labelnames) name = self._get_sanitized_opentelemetry_name(name) boundaries = buckets if buckets else [] self.metric = ray_metrics.Histogram( name=name, description=documentation, tag_keys=tag_keys, boundaries=boundaries, ) def observe(self, value: int | float): return self.metric.observe(value) class RaySpecDecodingProm(SpecDecodingProm): """ RaySpecDecodingProm is used by RayMetrics to log to Ray metrics. Provides the same metrics as SpecDecodingProm but uses Ray's util.metrics library. """ _counter_cls = RayCounterWrapper class RayKVConnectorPrometheus(KVConnectorPrometheus): """ RayKVConnectorPrometheus is used by RayMetrics to log Ray metrics. Provides the same metrics as KV connectors but uses Ray's util.metrics library. """ _gauge_cls = RayGaugeWrapper _counter_cls = RayCounterWrapper _histogram_cls = RayHistogramWrapper class RayPrometheusStatLogger(PrometheusStatLogger): """RayPrometheusStatLogger uses Ray metrics instead.""" _gauge_cls = RayGaugeWrapper _counter_cls = RayCounterWrapper _histogram_cls = RayHistogramWrapper _spec_decoding_cls = RaySpecDecodingProm _kv_connector_cls = RayKVConnectorPrometheus @staticmethod def _unregister_vllm_metrics(): # No-op on purpose pass
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/metrics/loggers.py
vllm/v1/metrics/loggers.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import logging import time from abc import ABC, abstractmethod from collections.abc import Callable from typing import TypeAlias from prometheus_client import Counter, Gauge, Histogram import vllm.envs as envs from vllm.compilation.cuda_graph import CUDAGraphLogging from vllm.config import SupportsMetricsInfo, VllmConfig from vllm.distributed.kv_transfer.kv_connector.v1.metrics import ( KVConnectorLogging, KVConnectorPrometheus, ) from vllm.logger import init_logger from vllm.plugins import STAT_LOGGER_PLUGINS_GROUP, load_plugins_by_group from vllm.v1.engine import FinishReason from vllm.v1.metrics.perf import PerfMetricsLogging from vllm.v1.metrics.prometheus import unregister_vllm_metrics from vllm.v1.metrics.stats import ( CachingMetrics, IterationStats, MultiModalCacheStats, SchedulerStats, ) from vllm.v1.spec_decode.metrics import SpecDecodingLogging, SpecDecodingProm logger = init_logger(__name__) PerEngineStatLoggerFactory = Callable[[VllmConfig, int], "StatLoggerBase"] AggregateStatLoggerFactory = type["AggregateStatLoggerBase"] StatLoggerFactory = AggregateStatLoggerFactory | PerEngineStatLoggerFactory class StatLoggerBase(ABC): """Interface for logging metrics. API users may define custom loggers that implement this interface. However, note that the `SchedulerStats` and `IterationStats` classes are not considered stable interfaces and may change in future versions. """ @abstractmethod def __init__(self, vllm_config: VllmConfig, engine_index: int = 0): ... @abstractmethod def record( self, scheduler_stats: SchedulerStats | None, iteration_stats: IterationStats | None, mm_cache_stats: MultiModalCacheStats | None = None, engine_idx: int = 0, ): ... @abstractmethod def log_engine_initialized(self): ... 
def log(self): # noqa pass def record_sleep_state(self, is_awake: int, level: int): # noqa pass def load_stat_logger_plugin_factories() -> list[StatLoggerFactory]: factories: list[StatLoggerFactory] = [] for name, plugin_class in load_plugins_by_group(STAT_LOGGER_PLUGINS_GROUP).items(): if not isinstance(plugin_class, type) or not issubclass( plugin_class, StatLoggerBase ): raise TypeError( f"Stat logger plugin {name!r} must be a subclass of " f"StatLoggerBase (got {plugin_class!r})." ) factories.append(plugin_class) return factories class AggregateStatLoggerBase(StatLoggerBase): """Abstract base class for loggers that aggregate across multiple DP engines.""" @abstractmethod def __init__(self, vllm_config: VllmConfig, engine_indexes: list[int]): ... class LoggingStatLogger(StatLoggerBase): def __init__(self, vllm_config: VllmConfig, engine_index: int = 0): self.engine_index = engine_index self.vllm_config = vllm_config self._reset(time.monotonic()) self.last_scheduler_stats = SchedulerStats() # Caching metrics. This cannot be reset. # TODO: Make the interval configurable. self.prefix_caching_metrics = CachingMetrics() self.connector_prefix_caching_metrics = CachingMetrics() self.mm_caching_metrics = CachingMetrics() self.spec_decoding_logging = SpecDecodingLogging() kv_transfer_config = self.vllm_config.kv_transfer_config self.kv_connector_logging = KVConnectorLogging(kv_transfer_config) self.cudagraph_logging = None if self.vllm_config.observability_config.cudagraph_metrics: self.cudagraph_logging = CUDAGraphLogging( self.vllm_config.compilation_config.cudagraph_mode, self.vllm_config.compilation_config.cudagraph_capture_sizes, ) self.last_prompt_throughput: float = 0.0 self.last_generation_throughput: float = 0.0 self.engine_is_idle = False self.aggregated = False if self._enable_perf_stats(): self.perf_metrics_logging = PerfMetricsLogging(vllm_config) def _reset(self, now): self.last_log_time = now # Tracked stats over current local logging interval. 
self.num_prompt_tokens: int = 0 self.num_generation_tokens: int = 0 self.num_corrupted_reqs: int = 0 self.num_preemptions: int = 0 def _enable_perf_stats(self) -> bool: return self.vllm_config.observability_config.enable_mfu_metrics def _track_iteration_stats(self, iteration_stats: IterationStats): # Save tracked stats for token counters. self.num_prompt_tokens += iteration_stats.num_prompt_tokens self.num_generation_tokens += iteration_stats.num_generation_tokens self.num_corrupted_reqs += iteration_stats.num_corrupted_reqs self.num_preemptions += iteration_stats.num_preempted_reqs def _get_throughput(self, tracked_stats: int, now: float) -> float: # Compute summary metrics for tracked stats delta_time = now - self.last_log_time if delta_time <= 0.0: return 0.0 return float(tracked_stats / delta_time) @property def log_prefix(self): return "Engine {:03d}: ".format(self.engine_index) def record( self, scheduler_stats: SchedulerStats | None, iteration_stats: IterationStats | None, mm_cache_stats: MultiModalCacheStats | None = None, engine_idx: int = 0, ): """Log Stats to standard output.""" if iteration_stats: self._track_iteration_stats(iteration_stats) if scheduler_stats is not None: self.prefix_caching_metrics.observe(scheduler_stats.prefix_cache_stats) if scheduler_stats.connector_prefix_cache_stats is not None: self.connector_prefix_caching_metrics.observe( scheduler_stats.connector_prefix_cache_stats ) if scheduler_stats.spec_decoding_stats is not None: self.spec_decoding_logging.observe(scheduler_stats.spec_decoding_stats) if kv_connector_stats := scheduler_stats.kv_connector_stats: self.kv_connector_logging.observe(kv_connector_stats) if ( self.cudagraph_logging is not None and scheduler_stats.cudagraph_stats is not None ): self.cudagraph_logging.observe(scheduler_stats.cudagraph_stats) if not self.aggregated: self.last_scheduler_stats = scheduler_stats if (perf_stats := scheduler_stats.perf_stats) and self._enable_perf_stats(): 
self.perf_metrics_logging.observe(perf_stats) if mm_cache_stats: self.mm_caching_metrics.observe(mm_cache_stats) def _update_stats(self): now = time.monotonic() prompt_throughput = self._get_throughput(self.num_prompt_tokens, now) generation_throughput = self._get_throughput(self.num_generation_tokens, now) self._reset(now) self.engine_is_idle = not any( ( prompt_throughput, generation_throughput, self.last_prompt_throughput, self.last_generation_throughput, ) ) self.last_generation_throughput = generation_throughput self.last_prompt_throughput = prompt_throughput def aggregate_scheduler_stats(self): # noop for per engine loggers return def log(self): self._update_stats() self.aggregate_scheduler_stats() # Avoid log noise on an idle production system log_fn = logger.debug if self.engine_is_idle else logger.info # Format and print output. log_parts = [ "Avg prompt throughput: %.1f tokens/s", "Avg generation throughput: %.1f tokens/s", "Running: %d reqs", "Waiting: %d reqs", ] log_args: list[int | float | str] = [ self.last_prompt_throughput, self.last_generation_throughput, self.last_scheduler_stats.num_running_reqs, self.last_scheduler_stats.num_waiting_reqs, ] if self.num_preemptions > 0: log_parts.append("Preemptions: %d") log_args.append(self.num_preemptions) log_parts.extend( [ "GPU KV cache usage: %.1f%%", "Prefix cache hit rate: %.1f%%", ] ) log_args.extend( [ self.last_scheduler_stats.kv_cache_usage * 100, self.prefix_caching_metrics.hit_rate * 100, ] ) if envs.VLLM_COMPUTE_NANS_IN_LOGITS: log_parts.append("Corrupted: %d reqs") log_args.append(self.num_corrupted_reqs) if not self.connector_prefix_caching_metrics.empty: log_parts.append("External prefix cache hit rate: %.1f%%") log_args.append(self.connector_prefix_caching_metrics.hit_rate * 100) if not self.mm_caching_metrics.empty: log_parts.append("MM cache hit rate: %.1f%%") log_args.append(self.mm_caching_metrics.hit_rate * 100) log_fn( self.log_prefix + ", ".join(log_parts), *log_args, ) 
self.spec_decoding_logging.log(log_fn=log_fn) self.kv_connector_logging.log(log_fn=log_fn) if self.cudagraph_logging is not None: self.cudagraph_logging.log(log_fn=log_fn) if self._enable_perf_stats(): self.perf_metrics_logging.log(log_fn=log_fn, log_prefix=self.log_prefix) def log_engine_initialized(self): if self.vllm_config.cache_config.num_gpu_blocks: logger.debug( "Engine %03d: vllm cache_config_info with initialization " "after num_gpu_blocks is: %d", self.engine_index, self.vllm_config.cache_config.num_gpu_blocks, ) class AggregatedLoggingStatLogger(LoggingStatLogger, AggregateStatLoggerBase): def __init__( self, vllm_config: VllmConfig, engine_indexes: list[int], ): self.engine_indexes = engine_indexes self.last_scheduler_stats_dict: dict[int, SchedulerStats] = { idx: SchedulerStats() for idx in self.engine_indexes } LoggingStatLogger.__init__(self, vllm_config, engine_index=-1) self.aggregated = True @property def log_prefix(self): return "{} Engines Aggregated: ".format(len(self.engine_indexes)) def _enable_perf_stats(self) -> bool: # Adding per_gpu perf stats across engines can lead to misleading numbers. 
return False def record( self, scheduler_stats: SchedulerStats | None, iteration_stats: IterationStats | None, mm_cache_stats: MultiModalCacheStats | None = None, engine_idx: int = 0, ): if engine_idx not in self.engine_indexes: logger.warning("Unexpected engine_idx: %d", engine_idx) return LoggingStatLogger.record( self, scheduler_stats, iteration_stats, mm_cache_stats=mm_cache_stats, engine_idx=engine_idx, ) if scheduler_stats is not None: self.last_scheduler_stats_dict[engine_idx] = scheduler_stats def aggregate_scheduler_stats(self): self.last_scheduler_stats = SchedulerStats() for last_scheduler_stats in self.last_scheduler_stats_dict.values(): self.last_scheduler_stats.num_waiting_reqs += ( last_scheduler_stats.num_waiting_reqs ) self.last_scheduler_stats.num_running_reqs += ( last_scheduler_stats.num_running_reqs ) self.last_scheduler_stats.kv_cache_usage += ( last_scheduler_stats.kv_cache_usage ) self.last_scheduler_stats.kv_cache_usage /= len(self.last_scheduler_stats_dict) def log(self): LoggingStatLogger.log(self) def log_engine_initialized(self): if self.vllm_config.cache_config.num_gpu_blocks: logger.info( "%d Engines: vllm cache_config_info with initialization " "after num_gpu_blocks is: %d", len(self.engine_indexes), self.vllm_config.cache_config.num_gpu_blocks, ) class PerEngineStatLoggerAdapter(AggregateStatLoggerBase): def __init__( self, vllm_config: VllmConfig, engine_indexes: list[int], per_engine_stat_logger_factory: PerEngineStatLoggerFactory, ) -> None: self.per_engine_stat_loggers = {} self.engine_indexes = engine_indexes for engine_index in engine_indexes: self.per_engine_stat_loggers[engine_index] = per_engine_stat_logger_factory( vllm_config, engine_index ) def record( self, scheduler_stats: SchedulerStats | None, iteration_stats: IterationStats | None, mm_cache_stats: MultiModalCacheStats | None = None, engine_idx: int = 0, ): if engine_idx not in self.per_engine_stat_loggers: logger.warning("Unexpected engine_idx: %d", engine_idx) 
return self.per_engine_stat_loggers[engine_idx].record( scheduler_stats, iteration_stats, mm_cache_stats=mm_cache_stats, engine_idx=engine_idx, ) def log(self): for per_engine_stat_logger in self.per_engine_stat_loggers.values(): per_engine_stat_logger.log() def log_engine_initialized(self): for per_engine_stat_logger in self.per_engine_stat_loggers.values(): per_engine_stat_logger.log_engine_initialized() class PrometheusStatLogger(AggregateStatLoggerBase): _gauge_cls = Gauge _counter_cls = Counter _histogram_cls = Histogram _spec_decoding_cls = SpecDecodingProm _kv_connector_cls = KVConnectorPrometheus def __init__( self, vllm_config: VllmConfig, engine_indexes: list[int] | None = None ): if engine_indexes is None: engine_indexes = [0] self.engine_indexes = engine_indexes unregister_vllm_metrics() self.vllm_config = vllm_config # Use this flag to hide metrics that were deprecated in # a previous release and which will be removed future self.show_hidden_metrics = vllm_config.observability_config.show_hidden_metrics self.kv_cache_metrics_enabled = ( vllm_config.observability_config.kv_cache_metrics ) labelnames = ["model_name", "engine"] model_name = vllm_config.model_config.served_model_name max_model_len = vllm_config.model_config.max_model_len per_engine_labelvalues: dict[int, list[object]] = { idx: [model_name, str(idx)] for idx in engine_indexes } self.spec_decoding_prom = self._spec_decoding_cls( vllm_config.speculative_config, labelnames, per_engine_labelvalues ) self.kv_connector_prom = self._kv_connector_cls( vllm_config, labelnames, per_engine_labelvalues ) # # Scheduler state # gauge_scheduler_running = self._gauge_cls( name="vllm:num_requests_running", documentation="Number of requests in model execution batches.", multiprocess_mode="mostrecent", labelnames=labelnames, ) self.gauge_scheduler_running = make_per_engine( gauge_scheduler_running, engine_indexes, model_name ) gauge_scheduler_waiting = self._gauge_cls( name="vllm:num_requests_waiting", 
documentation="Number of requests waiting to be processed.", multiprocess_mode="mostrecent", labelnames=labelnames, ) self.gauge_scheduler_waiting = make_per_engine( gauge_scheduler_waiting, engine_indexes, model_name ) gauge_engine_sleep_state = self._gauge_cls( name="vllm:engine_sleep_state", documentation=( "Engine sleep state; awake = 0 means engine is sleeping; " "awake = 1 means engine is awake; " "weights_offloaded = 1 means sleep level 1; " "discard_all = 1 means sleep level 2." ), labelnames=labelnames + ["sleep_state"], multiprocess_mode="mostrecent", ) self.gauge_engine_sleep_state = {} sleep_state = ["awake", "weights_offloaded", "discard_all"] for s in sleep_state: self.gauge_engine_sleep_state[s] = { idx: gauge_engine_sleep_state.labels( engine=idx, model_name=model_name, sleep_state=s ) for idx in engine_indexes } # Setting default values self.record_sleep_state() gauge_kv_cache_usage = self._gauge_cls( name="vllm:kv_cache_usage_perc", documentation="KV-cache usage. 1 means 100 percent usage.", multiprocess_mode="mostrecent", labelnames=labelnames, ) self.gauge_kv_cache_usage = make_per_engine( gauge_kv_cache_usage, engine_indexes, model_name ) if envs.VLLM_COMPUTE_NANS_IN_LOGITS: counter_corrupted_requests = self._counter_cls( name="vllm:corrupted_requests", documentation=( "Corrupted requests, in terms of total number of requests " "with NaNs in logits." ), labelnames=labelnames, ) self.counter_corrupted_requests = make_per_engine( counter_corrupted_requests, engine_indexes, model_name ) counter_prefix_cache_queries = self._counter_cls( name="vllm:prefix_cache_queries", documentation=( "Prefix cache queries, in terms of number of queried tokens." 
), labelnames=labelnames, ) self.counter_prefix_cache_queries = make_per_engine( counter_prefix_cache_queries, engine_indexes, model_name ) counter_prefix_cache_hits = self._counter_cls( name="vllm:prefix_cache_hits", documentation=("Prefix cache hits, in terms of number of cached tokens."), labelnames=labelnames, ) self.counter_prefix_cache_hits = make_per_engine( counter_prefix_cache_hits, engine_indexes, model_name ) # # External - KV connector prefix cache # counter_connector_prefix_cache_queries = self._counter_cls( name="vllm:external_prefix_cache_queries", documentation=( "External prefix cache queries from KV connector " "cross-instance cache sharing, in terms of number of queried tokens." ), labelnames=labelnames, ) self.counter_connector_prefix_cache_queries = make_per_engine( counter_connector_prefix_cache_queries, engine_indexes, model_name ) counter_connector_prefix_cache_hits = self._counter_cls( name="vllm:external_prefix_cache_hits", documentation=( "External prefix cache hits from KV connector " "cross-instance cache sharing, in terms of number of cached tokens." ), labelnames=labelnames, ) self.counter_connector_prefix_cache_hits = make_per_engine( counter_connector_prefix_cache_hits, engine_indexes, model_name ) # # Multi-modal cache # counter_mm_cache_queries = self._counter_cls( name="vllm:mm_cache_queries", documentation=( "Multi-modal cache queries, in terms of number of queried items." ), labelnames=labelnames, ) self.counter_mm_cache_queries = make_per_engine( counter_mm_cache_queries, engine_indexes, model_name ) counter_mm_cache_hits = self._counter_cls( name="vllm:mm_cache_hits", documentation=( "Multi-modal cache hits, in terms of number of cached items." 
), labelnames=labelnames, ) self.counter_mm_cache_hits = make_per_engine( counter_mm_cache_hits, engine_indexes, model_name ) # # Counters # counter_num_preempted_reqs = self._counter_cls( name="vllm:num_preemptions", documentation="Cumulative number of preemption from the engine.", labelnames=labelnames, ) self.counter_num_preempted_reqs = make_per_engine( counter_num_preempted_reqs, engine_indexes, model_name ) counter_prompt_tokens = self._counter_cls( name="vllm:prompt_tokens", documentation="Number of prefill tokens processed.", labelnames=labelnames, ) self.counter_prompt_tokens = make_per_engine( counter_prompt_tokens, engine_indexes, model_name ) counter_generation_tokens = self._counter_cls( name="vllm:generation_tokens", documentation="Number of generation tokens processed.", labelnames=labelnames, ) self.counter_generation_tokens = make_per_engine( counter_generation_tokens, engine_indexes, model_name ) self.counter_request_success: dict[FinishReason, dict[int, Counter]] = {} counter_request_success_base = self._counter_cls( name="vllm:request_success", documentation="Count of successfully processed requests.", labelnames=labelnames + ["finished_reason"], ) for reason in FinishReason: self.counter_request_success[reason] = { idx: counter_request_success_base.labels( model_name, str(idx), str(reason) ) for idx in engine_indexes } # # Histograms of counts # histogram_num_prompt_tokens_request = self._histogram_cls( name="vllm:request_prompt_tokens", documentation="Number of prefill tokens processed.", buckets=build_1_2_5_buckets(max_model_len), labelnames=labelnames, ) self.histogram_num_prompt_tokens_request = make_per_engine( histogram_num_prompt_tokens_request, engine_indexes, model_name ) histogram_num_generation_tokens_request = self._histogram_cls( name="vllm:request_generation_tokens", documentation="Number of generation tokens processed.", buckets=build_1_2_5_buckets(max_model_len), labelnames=labelnames, ) 
self.histogram_num_generation_tokens_request = make_per_engine( histogram_num_generation_tokens_request, engine_indexes, model_name ) # TODO: This metric might be incorrect in case of using multiple # api_server counts which uses prometheus mp. # See: https://github.com/vllm-project/vllm/pull/18053 histogram_iteration_tokens = self._histogram_cls( name="vllm:iteration_tokens_total", documentation="Histogram of number of tokens per engine_step.", buckets=[1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384], labelnames=labelnames, ) self.histogram_iteration_tokens = make_per_engine( histogram_iteration_tokens, engine_indexes, model_name ) histogram_max_num_generation_tokens_request = self._histogram_cls( name="vllm:request_max_num_generation_tokens", documentation="Histogram of maximum number of requested generation tokens.", buckets=build_1_2_5_buckets(max_model_len), labelnames=labelnames, ) self.histogram_max_num_generation_tokens_request = make_per_engine( histogram_max_num_generation_tokens_request, engine_indexes, model_name ) histogram_n_request = self._histogram_cls( name="vllm:request_params_n", documentation="Histogram of the n request parameter.", buckets=[1, 2, 5, 10, 20], labelnames=labelnames, ) self.histogram_n_request = make_per_engine( histogram_n_request, engine_indexes, model_name ) histogram_max_tokens_request = self._histogram_cls( name="vllm:request_params_max_tokens", documentation="Histogram of the max_tokens request parameter.", buckets=build_1_2_5_buckets(max_model_len), labelnames=labelnames, ) self.histogram_max_tokens_request = make_per_engine( histogram_max_tokens_request, engine_indexes, model_name ) # # Histogram of timing intervals # histogram_time_to_first_token = self._histogram_cls( name="vllm:time_to_first_token_seconds", documentation="Histogram of time to first token in seconds.", buckets=[ 0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, 20.0, 40.0, 80.0, 160.0, 640.0, 
2560.0, ], labelnames=labelnames, ) self.histogram_time_to_first_token = make_per_engine( histogram_time_to_first_token, engine_indexes, model_name ) # Deprecated in 0.11 - Renamed as vllm:inter_token_latency_seconds # With 0.12.x you can enable with --show-hidden-metrics-for-version=0.11 # TODO: remove in 0.13.0 if self.show_hidden_metrics: histogram_time_per_output_token = self._histogram_cls( name="vllm:time_per_output_token_seconds", documentation=( "Histogram of time per output token in seconds." "DEPRECATED: Use vllm:inter_token_latency_seconds instead." ), buckets=[ 0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, 20.0, 40.0, 80.0, ], labelnames=labelnames, ) self.histogram_time_per_output_token = make_per_engine( histogram_time_per_output_token, engine_indexes, model_name ) histogram_inter_token_latency = self._histogram_cls( name="vllm:inter_token_latency_seconds", documentation="Histogram of inter-token latency in seconds.", buckets=[ 0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, 20.0, 40.0, 80.0, ], labelnames=labelnames, ) self.histogram_inter_token_latency = make_per_engine( histogram_inter_token_latency, engine_indexes, model_name ) histogram_request_time_per_output_token = self._histogram_cls( name="vllm:request_time_per_output_token_seconds", documentation="Histogram of time_per_output_token_seconds per request.", buckets=[ 0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, 20.0, 40.0, 80.0, ], labelnames=labelnames, ) self.histogram_request_time_per_output_token = make_per_engine( histogram_request_time_per_output_token, engine_indexes, model_name ) request_latency_buckets = [ 0.3, 0.5, 0.8, 1.0, 1.5, 2.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 60.0, 120.0, 240.0, 480.0, 960.0, 1920.0, 7680.0, ] histogram_e2e_time_request = self._histogram_cls( name="vllm:e2e_request_latency_seconds", documentation="Histogram of e2e 
request latency in seconds.", buckets=request_latency_buckets, labelnames=labelnames, ) self.histogram_e2e_time_request = make_per_engine( histogram_e2e_time_request, engine_indexes, model_name ) histogram_queue_time_request = self._histogram_cls( name="vllm:request_queue_time_seconds", documentation="Histogram of time spent in WAITING phase for request.", buckets=request_latency_buckets, labelnames=labelnames, ) self.histogram_queue_time_request = make_per_engine( histogram_queue_time_request, engine_indexes, model_name ) histogram_inference_time_request = self._histogram_cls( name="vllm:request_inference_time_seconds", documentation="Histogram of time spent in RUNNING phase for request.", buckets=request_latency_buckets, labelnames=labelnames, ) self.histogram_inference_time_request = make_per_engine( histogram_inference_time_request, engine_indexes, model_name ) histogram_prefill_time_request = self._histogram_cls( name="vllm:request_prefill_time_seconds", documentation="Histogram of time spent in PREFILL phase for request.", buckets=request_latency_buckets, labelnames=labelnames, ) self.histogram_prefill_time_request = make_per_engine( histogram_prefill_time_request, engine_indexes, model_name ) histogram_decode_time_request = self._histogram_cls( name="vllm:request_decode_time_seconds", documentation="Histogram of time spent in DECODE phase for request.", buckets=request_latency_buckets, labelnames=labelnames, ) self.histogram_decode_time_request = make_per_engine( histogram_decode_time_request, engine_indexes, model_name ) histogram_prefill_kv_computed_request = self._histogram_cls( name="vllm:request_prefill_kv_computed_tokens", documentation=( "Histogram of new KV tokens computed during prefill " "(excluding cached tokens)." 
), buckets=build_1_2_5_buckets(max_model_len), labelnames=labelnames, ) self.histogram_prefill_kv_computed_request = make_per_engine( histogram_prefill_kv_computed_request, engine_indexes, model_name ) # # KV Cache residency metrics # if self.kv_cache_metrics_enabled: kv_cache_residency_buckets = [ 0.001, 0.002, 0.005, 0.01, 0.02,
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/metrics/reader.py
vllm/v1/metrics/reader.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass

from prometheus_client import REGISTRY
from prometheus_client import Metric as PromMetric
from prometheus_client.samples import Sample


@dataclass
class Metric:
    """A base class for prometheus metrics.

    Each metric may be associated with key=value labels, and in some
    cases a single vLLM instance may have multiple metrics with the
    same name but different sets of labels.
    """

    name: str
    labels: dict[str, str]


@dataclass
class Counter(Metric):
    """A monotonically increasing integer counter."""

    value: int


@dataclass
class Vector(Metric):
    """An ordered array of integer counters.

    This type - which doesn't exist in Prometheus - models one very
    specific metric, vllm:spec_decode_num_accepted_tokens_per_pos.
    """

    values: list[int]


@dataclass
class Gauge(Metric):
    """A numerical value that can go up or down."""

    value: float


@dataclass
class Histogram(Metric):
    """Observations recorded in configurable buckets.

    Buckets are represented by a dictionary. The key is the upper limit
    of the bucket, and the value is the observed count in that bucket.
    A '+Inf' key always exists.

    The count property is the total count across all buckets, identical
    to the count of the '+Inf' bucket.

    The sum property is the total sum of all observed values.
    """

    count: int
    sum: float
    buckets: dict[str, int]


def get_metrics_snapshot() -> list[Metric]:
    """An API for accessing in-memory Prometheus metrics.

    Example:
        >>> for metric in llm.get_metrics():
        ...     if isinstance(metric, Counter):
        ...         print(f"{metric} = {metric.value}")
        ...     elif isinstance(metric, Gauge):
        ...         print(f"{metric} = {metric.value}")
        ...     elif isinstance(metric, Histogram):
        ...         print(f"{metric}")
        ...         print(f"    sum = {metric.sum}")
        ...         print(f"    count = {metric.count}")
        ...         for bucket_le, value in metric.buckets.items():
        ...             print(f"    {bucket_le} = {value}")
    """
    collected: list[Metric] = []
    for metric in REGISTRY.collect():
        # Only vLLM's own metrics are part of this API.
        if not metric.name.startswith("vllm:"):
            continue
        if metric.type == "gauge":
            samples = _get_samples(metric)
            for s in samples:
                collected.append(
                    Gauge(name=metric.name, labels=s.labels, value=s.value)
                )
        elif metric.type == "counter":
            samples = _get_samples(metric, "_total")
            if metric.name == "vllm:spec_decode_num_accepted_tokens_per_pos":
                #
                # Ugly vllm:spec_decode_num_accepted_tokens_per_pos
                # special case.
                #
                # This metric is a vector of counters - for each spec
                # decoding token position, we observe the number of
                # accepted tokens using a Counter labeled with 'position'.
                # We convert these into a vector of integer values.
                #
                for labels, values in _digest_num_accepted_by_pos_samples(samples):
                    collected.append(
                        Vector(name=metric.name, labels=labels, values=values)
                    )
            else:
                for s in samples:
                    collected.append(
                        Counter(name=metric.name, labels=s.labels, value=int(s.value))
                    )
        elif metric.type == "histogram":
            #
            # A histogram has a number of '_bucket' samples where
            # the 'le' label represents the upper limit of the bucket.
            # We convert these bucketized values into a dict of values
            # indexed by the value of the 'le' label. The 'le=+Inf'
            # label is a special case, catching all values observed.
            #
            bucket_samples = _get_samples(metric, "_bucket")
            count_samples = _get_samples(metric, "_count")
            sum_samples = _get_samples(metric, "_sum")
            for labels, buckets, count_value, sum_value in _digest_histogram(
                bucket_samples, count_samples, sum_samples
            ):
                collected.append(
                    Histogram(
                        name=metric.name,
                        labels=labels,
                        buckets=buckets,
                        count=count_value,
                        sum=sum_value,
                    )
                )
        else:
            raise AssertionError(f"Unknown metric type {metric.type}")

    return collected


def _get_samples(metric: PromMetric, suffix: str | None = None) -> list[Sample]:
    """Return the samples of *metric* whose name matches name+suffix."""
    name = (metric.name + suffix) if suffix is not None else metric.name
    return [s for s in metric.samples if s.name == name]


def _strip_label(labels: dict[str, str], key_to_remove: str) -> dict[str, str]:
    """Return a copy of *labels* with *key_to_remove* removed."""
    labels_copy = labels.copy()
    labels_copy.pop(key_to_remove)
    return labels_copy


def _digest_histogram(
    bucket_samples: list[Sample], count_samples: list[Sample], sum_samples: list[Sample]
) -> list[tuple[dict[str, str], dict[str, int], int, float]]:
    #
    # In the case of DP, we have an indigestable
    # per-bucket-per-engine count as a list of labelled
    # samples, along with total and sum samples
    #
    # bucket_samples (in):
    #   labels = {bucket: 100, idx: 0}, value = 2
    #   labels = {bucket: 200, idx: 0}, value = 4
    #   labels = {bucket: Inf, idx: 0}, value = 10
    #   labels = {bucket: 100, idx: 1}, value = 1
    #   labels = {bucket: 200, idx: 2}, value = 5
    #   labels = {bucket: Inf, idx: 3}, value = 7
    # count_samples (in):
    #   labels = {idx: 0}, value = 10
    #   labels = {idx: 1}, value = 7
    # sum_samples (in):
    #   labels = {idx: 0}, value = 2000
    #   labels = {idx: 1}, value = 1200
    #
    # output: [
    #   {idx: 0}, {"100": 2, "200": 4, "Inf": 10}, 10, 2000
    #   {idx: 1}, {"100": 1, "200": 5, "Inf": 7}, 7, 1200
    # ]
    # Group the per-bucket counts by their non-'le' label sets; a
    # frozenset of label items gives us a hashable grouping key.
    buckets_by_labels: dict[frozenset[tuple[str, str]], dict[str, int]] = {}
    for s in bucket_samples:
        bucket = s.labels["le"]
        labels_key = frozenset(_strip_label(s.labels, "le").items())
        buckets_by_labels.setdefault(labels_key, {})[bucket] = int(s.value)

    counts_by_labels: dict[frozenset[tuple[str, str]], int] = {}
    for s in count_samples:
        labels_key = frozenset(s.labels.items())
        counts_by_labels[labels_key] = int(s.value)

    sums_by_labels: dict[frozenset[tuple[str, str]], float] = {}
    for s in sum_samples:
        labels_key = frozenset(s.labels.items())
        sums_by_labels[labels_key] = s.value

    # Every label set must have buckets, a count, and a sum.
    assert (
        set(buckets_by_labels.keys())
        == set(counts_by_labels.keys())
        == set(sums_by_labels.keys())
    )

    output = []
    for k, buckets in buckets_by_labels.items():
        labels = dict(k)
        output.append((labels, buckets, counts_by_labels[k], sums_by_labels[k]))
    return output


def _digest_num_accepted_by_pos_samples(
    samples: list[Sample],
) -> list[tuple[dict[str, str], list[int]]]:
    #
    # In the case of DP, we have an indigestable
    # per-position-per-engine count as a list of
    # labelled samples
    #
    # samples (in):
    #   labels = {pos: 0, idx: 0}, value = 10
    #   labels = {pos: 1, idx: 0}, value = 7
    #   labels = {pos: 2, idx: 0}, value = 2
    #   labels = {pos: 0, idx: 1}, value = 5
    #   labels = {pos: 1, idx: 1}, value = 3
    #   labels = {pos: 2, idx: 1}, value = 1
    #
    # output: [
    #   {idx: 0}, [10, 7, 2]
    #   {idx: 1}, [5, 3, 1]
    # ]
    #
    # Track the global maximum position so every output vector is
    # padded to the same length across label sets.
    max_pos = 0
    values_by_labels: dict[frozenset[tuple[str, str]], dict[int, int]] = {}
    for s in samples:
        position = int(s.labels["position"])
        max_pos = max(max_pos, position)
        labels_key = frozenset(_strip_label(s.labels, "position").items())
        values_by_labels.setdefault(labels_key, {})[position] = int(s.value)

    output = []
    for labels_key, values_by_position in values_by_labels.items():
        labels = dict(labels_key)
        # Positions with no observations default to 0.
        values = [0] * (max_pos + 1)
        for pos, val in values_by_position.items():
            values[pos] = val
        output.append((labels, values))
    return output
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/metrics/__init__.py
vllm/v1/metrics/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/metrics/stats.py
vllm/v1/metrics/stats.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import time
from collections import defaultdict, deque
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any

import vllm.envs as envs
from vllm.compilation.cuda_graph import CUDAGraphStat
from vllm.v1.metrics.perf import PerfStats
from vllm.v1.spec_decode.metrics import SpecDecodingStats

if TYPE_CHECKING:
    from vllm.v1.engine import EngineCoreEvent, EngineCoreOutput, FinishReason


@dataclass
class BaseCacheStats:
    """Stores cache hit statistics."""

    reset: bool = False
    """Whether the cache was reset."""

    requests: int = 0
    """The number of requests in this update."""

    queries: int = 0
    """The number of queries in these requests."""

    hits: int = 0
    """The number of hits in these requests."""


class CachingMetrics:
    """Maintain a cache hit rate over a sliding window of recent requests.

    Args:
        max_recent_requests: How many of the most recent requests to
            aggregate over. Defaults to 1000.
    """

    def __init__(self, max_recent_requests: int = 1000) -> None:
        super().__init__()
        self.max_recent_requests = max_recent_requests

        # Running totals across the current window.
        self.aggregated_requests = 0
        self.aggregated_query_total = 0
        self.aggregated_query_hit = 0

        # Sliding window of (requests, queries, hits) tuples, oldest first.
        self.query_queue = deque[tuple[int, int, int]]()

    def observe(self, stats: BaseCacheStats):
        """Fold one batch of cache stats into the sliding window.

        Called with information gathered when newly scheduled requests
        look up computed blocks. Once more than `max_recent_requests`
        requests are aggregated, the oldest entries are evicted.

        Args:
            stats: The prefix cache stats.
        """
        # A cache reset invalidates everything aggregated so far, so
        # clear the window before folding in the new stats.
        if stats.reset:
            self.reset()

        # Skip empty updates so that useful entries are not pushed out
        # of the sliding window by no-ops.
        if stats.requests == 0:
            return

        self.query_queue.append((stats.requests, stats.queries, stats.hits))
        self.aggregated_requests += stats.requests
        self.aggregated_query_total += stats.queries
        self.aggregated_query_hit += stats.hits

        # Evict oldest entries until we are back under the request cap.
        # The newest entry is always preserved, even if it alone exceeds
        # the cap.
        while (
            len(self.query_queue) > 1
            and self.aggregated_requests > self.max_recent_requests
        ):
            evicted_requests, evicted_queries, evicted_hits = (
                self.query_queue.popleft()
            )
            self.aggregated_requests -= evicted_requests
            self.aggregated_query_total -= evicted_queries
            self.aggregated_query_hit -= evicted_hits

    def reset(self):
        """Drop all aggregated state and the window contents."""
        self.aggregated_requests = 0
        self.aggregated_query_total = 0
        self.aggregated_query_hit = 0
        self.query_queue.clear()

    @property
    def empty(self) -> bool:
        """Return True if no requests have been observed yet."""
        return self.aggregated_requests == 0

    @property
    def hit_rate(self) -> float:
        """Hit rate across the current window of requests."""
        total = self.aggregated_query_total
        return self.aggregated_query_hit / total if total else 0.0


@dataclass
class PrefixCacheStats(BaseCacheStats):
    """
    Stores prefix cache hit statistics.

    - `reset`: Whether `reset_prefix_cache` was invoked.
    - `queries`: Refers to the number of tokens that were queried.
    """

    preempted_requests: int = 0
    """The number of previously preempted requests in this update."""

    preempted_queries: int = 0
    """The `queries` number for preempted requests."""

    preempted_hits: int = 0
    """The `hits` number for preempted requests."""

    def record(self, num_tokens: int, num_hits: int, preempted: bool) -> None:
        """Aggregate request information into the stats."""
        if preempted:
            # A request that was preempted earlier and is being resumed.
            self.preempted_requests += 1
            self.preempted_queries += num_tokens
            self.preempted_hits += num_hits
        else:
            # A brand-new request.
            self.requests += 1
            self.queries += num_tokens
            self.hits += num_hits


@dataclass
class MultiModalCacheStats(BaseCacheStats):
    """
    Stores multi-modal cache hit statistics.

    - `reset`: Whether `reset_mm_cache` was invoked.
    - `queries`: Refers to the number of multi-modal data items that
      were queried.
    """


@dataclass
class KVCacheEvictionEvent:
    """Single KV cache block eviction sample."""

    lifetime_seconds: float
    idle_seconds: float
    reuse_gaps_seconds: tuple[float, ...]


@dataclass
class SchedulerStats:
    """Stats associated with the scheduler."""

    num_running_reqs: int = 0
    num_waiting_reqs: int = 0

    # These are used for internal DP load-balancing.
    step_counter: int = 0
    current_wave: int = 0

    kv_cache_usage: float = 0.0

    prefix_cache_stats: PrefixCacheStats = field(default_factory=PrefixCacheStats)
    connector_prefix_cache_stats: PrefixCacheStats | None = None

    kv_cache_eviction_events: list[KVCacheEvictionEvent] = field(default_factory=list)

    spec_decoding_stats: SpecDecodingStats | None = None
    kv_connector_stats: dict[str, Any] | None = None

    waiting_lora_adapters: dict[str, int] = field(default_factory=dict)
    running_lora_adapters: dict[str, int] = field(default_factory=dict)

    cudagraph_stats: CUDAGraphStat | None = None
    perf_stats: PerfStats | None = None


@dataclass
class RequestStateStats:
    """Stats that need to be tracked across delta updates."""

    num_generation_tokens: int = 0

    # This is an engine frontend timestamp (wall-clock).
    arrival_time: float = 0.0

    # These are engine core timestamps (monotonic).
    queued_ts: float = 0.0
    scheduled_ts: float = 0.0
    first_token_ts: float = 0.0
    last_token_ts: float = 0.0

    # Latency from arrival to the first emitted token.
    first_token_latency: float = 0.0

    # Whether NaNs were observed in this request's logits.
    is_corrupted: bool = False


@dataclass
class FinishedRequestStats:
    """Stats associated with a finished request."""

    finish_reason: "FinishReason"
    e2e_latency: float = 0.0
    num_prompt_tokens: int = 0
    num_generation_tokens: int = 0
    max_tokens_param: int | None = None
    queued_time: float = 0.0
    prefill_time: float = 0.0
    inference_time: float = 0.0
    decode_time: float = 0.0
    mean_time_per_output_token: float = 0.0
    is_corrupted: bool = False
    num_cached_tokens: int = 0


class IterationStats:
    """Stats associated with a single set of EngineCoreOutputs."""

    def __init__(self):
        self.iteration_timestamp = time.time()
        self.num_generation_tokens = 0
        self.num_prompt_tokens = 0
        self.num_preempted_reqs = 0
        self.finished_requests: list[FinishedRequestStats] = []
        self.max_num_generation_tokens_iter: list[int] = []
        self.n_params_iter: list[int] = []
        self.time_to_first_tokens_iter: list[float] = []
        self.inter_token_latencies_iter: list[float] = []
        self.num_corrupted_reqs: int = 0

    def __repr__(self) -> str:
        field_to_value_str = ", ".join(f"{k}={v}" for k, v in vars(self).items())
        return f"{self.__class__.__name__}({field_to_value_str})"

    def _time_since(self, start: float) -> float:
        """Calculate an interval relative to this iteration's timestamp."""
        return self.iteration_timestamp - start

    def update_from_output(
        self,
        output: "EngineCoreOutput",
        engine_core_timestamp: float,
        is_prefilling: bool,
        prompt_len: int,
        req_stats: RequestStateStats,
        lora_states: "LoRARequestStates",
        lora_name: str | None,
    ):
        """Fold one request's engine-core output into this iteration's stats."""
        new_generation_tokens = len(output.new_token_ids)

        self.num_generation_tokens += new_generation_tokens
        if is_prefilling:
            self.num_prompt_tokens += prompt_len

            # TTFT is measured from the frontend arrival timestamp.
            first_token_latency = self._time_since(req_stats.arrival_time)
            self.time_to_first_tokens_iter.append(first_token_latency)
            req_stats.first_token_latency = first_token_latency

        req_stats.num_generation_tokens += new_generation_tokens

        # Mark the request as corrupted at most once; once set, skip
        # re-checking to avoid redundant work.
        if (
            envs.VLLM_COMPUTE_NANS_IN_LOGITS
            and not req_stats.is_corrupted
            and output.num_nans_in_logits > 0
        ):
            req_stats.is_corrupted = True

        # Process request-level engine core events.
        if output.events is not None:
            self.update_from_events(
                output.request_id,
                output.events,
                is_prefilling,
                req_stats,
                lora_states,
                lora_name,
            )

        # Process the batch-level "new tokens" engine core event.
        if is_prefilling:
            req_stats.first_token_ts = engine_core_timestamp
        else:
            itl = engine_core_timestamp - req_stats.last_token_ts
            self.inter_token_latencies_iter.append(itl)

        req_stats.last_token_ts = engine_core_timestamp

    def update_from_events(
        self,
        req_id: str,
        events: list["EngineCoreEvent"],
        is_prefilling: bool,
        req_stats: RequestStateStats,
        lora_states: "LoRARequestStates",
        lora_name: str | None,
    ):
        """Apply request-level engine-core lifecycle events."""
        # Avoid circular dependency
        from vllm.v1.engine import EngineCoreEventType

        for event in events:
            if event.type == EngineCoreEventType.QUEUED:
                req_stats.queued_ts = event.timestamp
                lora_states.request_waiting(req_id, lora_name)
            elif event.type == EngineCoreEventType.SCHEDULED:
                if req_stats.scheduled_ts == 0.0:  # ignore preemptions
                    req_stats.scheduled_ts = event.timestamp
                lora_states.request_running(req_id, lora_name)
            elif event.type == EngineCoreEventType.PREEMPTED:
                self.num_preempted_reqs += 1
                lora_states.request_waiting(req_id, lora_name)

    def update_from_finished_request(
        self,
        finish_reason: "FinishReason",
        num_prompt_tokens: int,
        max_tokens_param: int | None,
        req_stats: RequestStateStats,
        num_cached_tokens: int = 0,
    ):
        """Compute final per-request intervals and record the finished request."""
        e2e_latency = self._time_since(req_stats.arrival_time)

        # Queued interval is from first QUEUED event to first SCHEDULED.
        queued_time = req_stats.scheduled_ts - req_stats.queued_ts

        # Prefill interval is from first SCHEDULED to first NEW_TOKEN.
        # Any preemptions during prefill is included in the interval.
        prefill_time = req_stats.first_token_ts - req_stats.scheduled_ts

        # Decode interval is from first NEW_TOKEN to last NEW_TOKEN.
        # Any preemptions during decode are included.
        decode_time = req_stats.last_token_ts - req_stats.first_token_ts

        # Inference interval is from first SCHEDULED to last NEW_TOKEN.
        # Any preemptions during prefill or decode are included.
        inference_time = req_stats.last_token_ts - req_stats.scheduled_ts

        # Do not count the token generated by the prefill phase.
        num_decode_tokens = req_stats.num_generation_tokens - 1
        mean_time_per_output_token = (
            decode_time / num_decode_tokens if num_decode_tokens > 0 else 0
        )

        finished_req = FinishedRequestStats(
            finish_reason=finish_reason,
            e2e_latency=e2e_latency,
            num_prompt_tokens=num_prompt_tokens,
            num_generation_tokens=req_stats.num_generation_tokens,
            max_tokens_param=max_tokens_param,
            queued_time=queued_time,
            prefill_time=prefill_time,
            inference_time=inference_time,
            decode_time=decode_time,
            mean_time_per_output_token=mean_time_per_output_token,
            is_corrupted=req_stats.is_corrupted,
            num_cached_tokens=num_cached_tokens,
        )
        self.finished_requests.append(finished_req)

        # Count corrupted requests when they finish (only once per request).
        if req_stats.is_corrupted:
            self.num_corrupted_reqs += 1


class LoRAStats:
    """Tracks waiting and running request IDs for a single LoRA."""

    def __init__(self):
        self.waiting: set[str] = set()
        self.running: set[str] = set()

    def update(self, req_id: str, waiting: bool, running: bool):
        # A request cannot be waiting and running at the same time.
        assert not (waiting and running)
        if waiting:
            self.waiting.add(req_id)
        else:
            self.waiting.discard(req_id)
        if running:
            self.running.add(req_id)
        else:
            self.running.discard(req_id)

    @property
    def empty(self) -> bool:
        """True when this LoRA has no tracked requests at all."""
        return not (self.waiting or self.running)


class LoRARequestStates:
    """A per-LoRA count of running and waiting requests."""

    def __init__(self, log_stats: bool = False):
        self.log_stats = log_stats
        self.requests: defaultdict[str, LoRAStats] = defaultdict(LoRAStats)

    def _request_update(
        self, req_id: str, lora_name: str | None, waiting: bool, running: bool
    ):
        # No-op when stats logging is off or the request has no LoRA.
        if not self.log_stats or lora_name is None:
            return
        lora_stats = self.requests[lora_name]
        lora_stats.update(req_id, waiting, running)
        # Drop LoRA entries with no remaining requests.
        if lora_stats.empty:
            del self.requests[lora_name]

    def request_waiting(self, req_id: str, lora_name: str | None):
        self._request_update(req_id, lora_name, waiting=True, running=False)

    def request_running(self, req_id: str, lora_name: str | None):
        self._request_update(req_id, lora_name, waiting=False, running=True)

    def request_finished(self, req_id: str, lora_name: str | None):
        self._request_update(req_id, lora_name, waiting=False, running=False)

    def update_scheduler_stats(self, scheduler_stats: SchedulerStats | None):
        """Copy current per-LoRA counts into the scheduler stats object."""
        if not self.log_stats or scheduler_stats is None:
            return
        for lora_name, stats in self.requests.items():
            scheduler_stats.waiting_lora_adapters[lora_name] = len(stats.waiting)
            scheduler_stats.running_lora_adapters[lora_name] = len(stats.running)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/spec_decode/metrics.py
vllm/v1/spec_decode/metrics.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import time
from dataclasses import dataclass, field

import numpy as np
import prometheus_client

from vllm.config import SpeculativeConfig
from vllm.logger import init_logger

logger = init_logger(__name__)


@dataclass
class SpecDecodingStats:
    """Per-step iteration decoding stats from scheduler.

    Each scheduler step, statistics on spec decoding performance are
    aggregated across requests by the scheduler and returned to the
    frontend in EngineCoreOutputs->SchedulerStats.
    """

    num_spec_tokens: int
    num_drafts: int = 0
    num_draft_tokens: int = 0
    num_accepted_tokens: int = 0
    num_accepted_tokens_per_pos: list[int] = field(default_factory=list)

    @classmethod
    def new(cls, num_spec_tokens: int) -> "SpecDecodingStats":
        """Build a zeroed stats object sized for num_spec_tokens positions."""
        return cls(
            num_spec_tokens=num_spec_tokens,
            num_accepted_tokens_per_pos=[0] * num_spec_tokens,
        )

    def observe_draft(self, num_draft_tokens: int, num_accepted_tokens: int):
        """Record one draft: how many tokens were proposed and accepted."""
        self.num_drafts += 1
        self.num_draft_tokens += num_draft_tokens
        self.num_accepted_tokens += num_accepted_tokens
        assert num_accepted_tokens <= self.num_spec_tokens
        # Acceptance is prefix-shaped: accepting k tokens means positions
        # 0..k-1 were each accepted once.
        for pos in range(num_accepted_tokens):
            self.num_accepted_tokens_per_pos[pos] += 1


class SpecDecodingLogging:
    """Aggregate and log spec decoding metrics.

    LoggingStatLogger aggregates per-iteration metrics over a set time
    interval using observe() and then logs them using log() before
    resetting to zero.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear accumulated samples and restart the interval clock."""
        self.num_drafts: list[int] = []
        self.num_draft_tokens: list[int] = []
        self.num_accepted_tokens: list[int] = []
        self.accepted_tokens_per_pos_lists: list[list[int]] = []
        self.last_log_time = time.monotonic()

    def observe(self, spec_decoding_stats: SpecDecodingStats):
        """Record one scheduler step's spec decoding stats."""
        self.num_drafts.append(spec_decoding_stats.num_drafts)
        self.num_draft_tokens.append(spec_decoding_stats.num_draft_tokens)
        self.num_accepted_tokens.append(spec_decoding_stats.num_accepted_tokens)
        self.accepted_tokens_per_pos_lists.append(
            spec_decoding_stats.num_accepted_tokens_per_pos
        )

    def log(self, log_fn=logger.info):
        """Emit aggregated metrics via log_fn, then reset the window."""
        if not self.num_drafts:
            return
        num_drafts = np.sum(self.num_drafts)
        num_draft_tokens = np.sum(self.num_draft_tokens)
        num_accepted_tokens = np.sum(self.num_accepted_tokens)

        draft_throughput = 0
        accepted_throughput = 0
        elapsed_time = time.monotonic() - self.last_log_time
        if elapsed_time > 0:
            draft_throughput = num_draft_tokens / elapsed_time
            accepted_throughput = num_accepted_tokens / elapsed_time

        draft_acceptance_rate = (
            num_accepted_tokens / num_draft_tokens * 100
            if num_draft_tokens > 0
            else float("nan")
        )

        # Conventionally, mean acceptance length includes the bonus token
        mean_acceptance_length = 1 + (num_accepted_tokens / num_drafts)

        # Per-draft-position acceptance frequency across the window.
        pos_matrix = np.array(self.accepted_tokens_per_pos_lists)
        acceptance_rates = np.sum(pos_matrix, axis=0) / num_drafts
        rates_str = ", ".join(f"{p:.3f}" for p in acceptance_rates)

        log_fn(
            "SpecDecoding metrics: "
            "Mean acceptance length: %.2f, "
            "Accepted throughput: %.2f tokens/s, "
            "Drafted throughput: %.2f tokens/s, "
            "Accepted: %d tokens, "
            "Drafted: %d tokens, "
            "Per-position acceptance rate: %s, "
            "Avg Draft acceptance rate: %.1f%%",
            mean_acceptance_length,
            accepted_throughput,
            draft_throughput,
            num_accepted_tokens,
            num_draft_tokens,
            rates_str,
            draft_acceptance_rate,
        )
        self.reset()


class SpecDecodingProm:
    """Record spec decoding metrics in Prometheus.

    The acceptance rate can be calculated using a PromQL query:

      rate(vllm:spec_decode_num_accepted_tokens_total[$interval]) /
      rate(vllm:spec_decode_num_draft_tokens_total[$interval])

    The mean acceptance length (conventionally including bonus tokens)
    can be calculated using:

      1 + (
      rate(vllm:spec_decode_num_accepted_tokens_total[$interval]) /
      rate(vllm:spec_decode_num_drafts[$interval]))

    A per-position acceptance rate vector can be computed using

      vllm:spec_decode_num_accepted_tokens_per_pos[$interval] /
      vllm:spec_decode_num_drafts[$interval]
    """

    _counter_cls = prometheus_client.Counter

    def __init__(
        self,
        speculative_config: SpeculativeConfig | None,
        labelnames: list[str],
        per_engine_labelvalues: dict[int, list[object]],
    ):
        self.spec_decoding_enabled = speculative_config is not None
        # Without a speculative config there is nothing to record.
        if not self.spec_decoding_enabled:
            return

        counter_drafts = self._counter_cls(
            name="vllm:spec_decode_num_drafts",
            documentation="Number of spec decoding drafts.",
            labelnames=labelnames,
        )
        self.counter_spec_decode_num_drafts = make_per_engine(
            counter_drafts, per_engine_labelvalues
        )

        counter_draft_tokens = self._counter_cls(
            name="vllm:spec_decode_num_draft_tokens",
            documentation="Number of draft tokens.",
            labelnames=labelnames,
        )
        self.counter_spec_decode_num_draft_tokens = make_per_engine(
            counter_draft_tokens, per_engine_labelvalues
        )

        counter_accepted_tokens = self._counter_cls(
            name="vllm:spec_decode_num_accepted_tokens",
            documentation="Number of accepted tokens.",
            labelnames=labelnames,
        )
        self.counter_spec_decode_num_accepted_tokens = make_per_engine(
            counter_accepted_tokens, per_engine_labelvalues
        )

        assert speculative_config is not None
        num_spec_tokens = (
            speculative_config.num_speculative_tokens
            if self.spec_decoding_enabled
            else 0
        )

        # Per-position counters carry an extra "position" label.
        pos_labelnames = labelnames + ["position"]
        base_counter = self._counter_cls(
            name="vllm:spec_decode_num_accepted_tokens_per_pos",
            documentation="Accepted tokens per draft position.",
            labelnames=pos_labelnames,
        )
        self.counter_spec_decode_num_accepted_tokens_per_pos: dict[
            int, list[prometheus_client.Counter]
        ] = {
            idx: [base_counter.labels(*lv, str(pos)) for pos in range(num_spec_tokens)]
            for idx, lv in per_engine_labelvalues.items()
        }

    def observe(self, spec_decoding_stats: SpecDecodingStats, engine_idx: int = 0):
        """Increment the Prometheus counters for one stats sample."""
        if not self.spec_decoding_enabled:
            return
        self.counter_spec_decode_num_drafts[engine_idx].inc(
            spec_decoding_stats.num_drafts
        )
        self.counter_spec_decode_num_draft_tokens[engine_idx].inc(
            spec_decoding_stats.num_draft_tokens
        )
        self.counter_spec_decode_num_accepted_tokens[engine_idx].inc(
            spec_decoding_stats.num_accepted_tokens
        )
        for pos, counter in enumerate(
            self.counter_spec_decode_num_accepted_tokens_per_pos[engine_idx]
        ):
            counter.inc(spec_decoding_stats.num_accepted_tokens_per_pos[pos])


def make_per_engine(
    counter: prometheus_client.Counter,
    per_engine_labelvalues: dict[int, list[object]],
):
    """Create a counter for each label value."""
    return {
        idx: counter.labels(*labelvalues)
        for idx, labelvalues in per_engine_labelvalues.items()
    }
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/spec_decode/suffix_decoding.py
vllm/v1/spec_decode/suffix_decoding.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.config import VllmConfig
from vllm.v1.worker.gpu_input_batch import InputBatch


class SuffixDecodingProposer:
    """
    Speculative decoding proposer for Suffix Decoding
    (https://arxiv.org/pdf/2411.04975). This class imports and uses the
    official implementation from Arctic Inference
    (https://github.com/snowflakedb/ArcticInference).
    """

    def __init__(self, vllm_config: VllmConfig):
        config = vllm_config.speculative_config
        self.num_speculative_tokens = config.num_speculative_tokens
        self.max_tree_depth = config.suffix_decoding_max_tree_depth
        self.max_spec_factor = config.suffix_decoding_max_spec_factor
        self.min_token_prob = config.suffix_decoding_min_token_prob
        self.max_model_len = vllm_config.model_config.max_model_len

        # Lazy import to avoid error when Suffix Decoding is not used.
        from arctic_inference.suffix_decoding import SuffixDecodingCache

        # Initialize an empty cache. This object takes care of caching
        # request outputs, evicting old requests, and managing the
        # per-prompt suffix trees.
        self.suffix_cache = SuffixDecodingCache(
            max_tree_depth=config.suffix_decoding_max_tree_depth,
            max_cached_requests=config.suffix_decoding_max_cached_requests,
        )

    def propose(
        self,
        input_batch: InputBatch,
        sampled_token_ids: list[list[int]],
    ) -> list[list[int]]:
        """
        Propose speculative tokens for each request in the input batch.

        Suffix Decoding speculates a dynamic number of tokens per request
        every decoding step, so the entries in the returned list may have
        different lengths.
        """
        draft_token_ids: list[list[int]] = []
        for i, sampled_ids in enumerate(sampled_token_ids):
            # Partial prefills produce no sampled tokens yet; skip them.
            if not sampled_ids:
                draft_token_ids.append([])
                continue

            # Skip requests whose sampling parameters are incompatible
            # with speculative decoding.
            req_id = input_batch.req_ids[i]
            if req_id in input_batch.spec_decode_unsupported_reqs:
                draft_token_ids.append([])
                continue

            # Requests already at the max model length get no drafts.
            num_tokens = input_batch.num_tokens_no_spec[i]
            if num_tokens >= self.max_model_len:
                draft_token_ids.append([])
                continue

            index = input_batch.req_id_to_index[req_id]
            if req_id not in self.suffix_cache.active_requests:
                if req_id in self.suffix_cache.cached_requests:
                    # Reset the suffix cache for this request.
                    self.suffix_cache.evict_cached_response(req_id)
                num_prompt_tokens = input_batch.num_prompt_tokens[index]
                prompt_token_ids = input_batch.token_ids_cpu[index, :num_prompt_tokens]
                # Start a new request; this builds the suffix tree for
                # that prompt.
                self.suffix_cache.start_request(req_id, prompt_token_ids)

            # Append the newly sampled ids to this request's cache entry.
            self.suffix_cache.add_active_response(req_id, sampled_ids)

            # Suffix decoding only matches against the most recent tokens
            # up to max_tree_depth, so take the pattern from the tail.
            # NOTE(review): this row is indexed with the batch position
            # `i` while the prompt above used `index` — presumably they
            # coincide here; verify against InputBatch's ordering.
            start = max(0, num_tokens - self.max_tree_depth)
            pattern = input_batch.token_ids_cpu[i, start:num_tokens]
            draft = self.suffix_cache.speculate(
                req_id,
                pattern,
                max_spec_tokens=min(
                    self.num_speculative_tokens, self.max_model_len - num_tokens - 1
                ),
                max_spec_factor=self.max_spec_factor,
                min_token_prob=self.min_token_prob,
            )

            draft_token_ids.append(draft.token_ids)

        # Stop requests that were not seen in the input batch.
        for req_id in (
            self.suffix_cache.active_requests - input_batch.req_id_to_index.keys()
        ):
            self.suffix_cache.stop_request(req_id)

        return draft_token_ids

    def load_model(self, *args, **kwargs):
        # No model to load.
        pass
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/spec_decode/ngram_proposer.py
vllm/v1/spec_decode/ngram_proposer.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os

import numpy as np
from numba import get_num_threads, jit, njit, prange, set_num_threads

from vllm.config import VllmConfig


class NgramProposer:
    """Prompt-lookup (n-gram) speculative decoding proposer."""

    def __init__(self, vllm_config: VllmConfig):
        assert vllm_config.speculative_config is not None
        assert vllm_config.speculative_config.prompt_lookup_min is not None
        assert vllm_config.speculative_config.prompt_lookup_max is not None

        # Minimum length of the n-gram to match.
        self.min_n = vllm_config.speculative_config.prompt_lookup_min
        # Maximum length of the n-gram to match.
        self.max_n = vllm_config.speculative_config.prompt_lookup_max
        # Number of tokens follow the match. If there are less than k
        # tokens follow the match, we will return the maximum amount of
        # tokens until the end.
        self.k = vllm_config.speculative_config.num_speculative_tokens
        # Maximum length of the model.
        self.max_model_len = vllm_config.model_config.max_model_len

        # Pre-allocate buffers for numba batch propose.
        max_num_seqs = vllm_config.scheduler_config.max_num_seqs
        self.valid_ngram_draft = np.zeros((max_num_seqs, self.k), dtype=np.int32)
        self.valid_ngram_num_drafts = np.zeros((max_num_seqs), dtype=np.int32)

        # Threshold of total number of tokens in the batch to enable
        # multi-threading in numba batch propose.
        self.num_tokens_threshold = 8192

        tp_size = vllm_config.parallel_config.tensor_parallel_size
        cpu_count = os.cpu_count()
        # Max number of threads for numba parallel processing.
        if cpu_count:
            # Divide by 2 to use physical cores
            # and not logical cores (hyper-threading).
            # Cap the number of threads to 8 to avoid using too many threads
            # since other components like frontend (incl tokenization)
            # and Structured Outputs also use multiple threads.
            # TODO(ekagra-ranjan): bump up the cap from 1 to 8
            # when TP parallelization for ngram is implemented.
            self.num_numba_thread_available = min(1, (cpu_count // 2))
            # Divide by tp_size to ensure each tensor parallel rank
            # has some threads since all ranks will run this.
            self.num_numba_thread_available //= tp_size
        else:
            self.num_numba_thread_available = 1

        # Trigger Numba JIT compilation for N-gram proposer.
        # This usually takes less than 1 second.
        self.propose(
            [[]] * 1024,
            [""] * 1024,
            np.zeros(1024, dtype=np.int32),
            np.zeros((1024, self.max_model_len), dtype=np.int32),
            set(),
        )

    def batch_propose(
        self,
        num_requests: int,
        valid_ngram_requests: list,
        num_tokens_no_spec: np.ndarray,
        token_ids_cpu: np.ndarray,
    ) -> list[list[int]]:
        """Batch version of ngram proposer using numba for acceleration.

        Args:
            valid_ngram_requests:
                Set of indices of requests that need ngram proposals.
            num_tokens_no_spec:
                Numpy array of shape (batch_size,) representing the number
                of tokens without speculative tokens for each request.
            token_ids_cpu:
                Numpy array of shape (batch_size, max_model_len)
                representing the token IDs for each request.

        Returns:
            list[list[int]]: A list where each element is a list of proposed
                token IDs for the corresponding request.
        """
        draft_token_ids: list[list[int]] = []

        # Only run batch propose if there are requests needing ngram
        # proposals. Avoid calling the numba function with an empty list,
        # which raises:
        # ValueError: cannot compute fingerprint of empty list
        if num_ngram_requests := len(valid_ngram_requests):
            original_num_numba_threads = get_num_threads()

            # If total tokens is small, multi-threading may slow things
            # down due to overhead, so only parallelize past a threshold;
            # always use at least one thread.
            total_tokens = np.sum(num_tokens_no_spec)
            if total_tokens >= self.num_tokens_threshold:
                final_num_threads = max(
                    1, min(self.num_numba_thread_available, num_ngram_requests)
                )
                set_num_threads(final_num_threads)
            else:
                set_num_threads(1)

            batch_propose_numba(
                valid_ngram_requests,
                num_tokens_no_spec,
                token_ids_cpu,
                self.min_n,
                self.max_n,
                self.max_model_len,
                self.k,
                self.valid_ngram_draft,
                self.valid_ngram_num_drafts,
            )

            # Restore original number of threads.
            set_num_threads(original_num_numba_threads)

        for i in range(num_requests):
            if i in valid_ngram_requests and self.valid_ngram_num_drafts[i] > 0:
                draft_token_ids.append(
                    self.valid_ngram_draft[i, : self.valid_ngram_num_drafts[i]].tolist()
                )
            else:
                draft_token_ids.append([])

        return draft_token_ids

    def propose(
        self,
        sampled_token_ids: list[list[int]],
        req_ids: list[str],
        num_tokens_no_spec: np.ndarray,
        token_ids_cpu: np.ndarray,
        spec_decode_unsupported_reqs: set,
    ) -> list[list[int]]:
        """Select eligible requests and run the batched ngram proposal."""
        # Find which requests need ngram proposals.
        valid_ngram_requests = []
        for i, sampled_ids in enumerate(sampled_token_ids):
            if not len(sampled_ids):
                # Skip speculative decoding.
                continue

            # Skip requests that require sampling parameters that are not
            # supported with speculative decoding.
            if req_ids[i] in spec_decode_unsupported_reqs:
                continue

            # Skip requests that have already reached the max model length.
            if num_tokens_no_spec[i] >= self.max_model_len:
                continue

            valid_ngram_requests.append(i)

        return self.batch_propose(
            len(sampled_token_ids),
            valid_ngram_requests,
            num_tokens_no_spec,
            token_ids_cpu,
        )

    def load_model(self, *args, **kwargs):
        # No model to load.
        pass


@njit(parallel=True)
def batch_propose_numba(
    valid_ngram_requests: list,
    num_tokens_no_spec: np.ndarray,
    token_ids_cpu: np.ndarray,
    min_n: int,
    max_n: int,
    max_model_len: int,
    k: int,
    valid_ngram_draft: np.ndarray,
    valid_ngram_num_drafts: np.ndarray,
):
    """Run the ngram matcher for each eligible request in parallel."""
    for i in prange(len(valid_ngram_requests)):
        idx = valid_ngram_requests[i]
        num_tokens = num_tokens_no_spec[idx]
        context_token_ids = token_ids_cpu[idx, :num_tokens]
        drafter_output = _find_longest_matched_ngram_and_propose_tokens(
            origin_tokens=context_token_ids,
            min_ngram=min_n,
            max_ngram=max_n,
            max_model_len=max_model_len,
            k=k,
        )

        valid_ngram_num_drafts[idx] = drafter_output.shape[0]
        if len(drafter_output):
            valid_ngram_draft[idx, : drafter_output.shape[0]] = drafter_output


@jit(nopython=True)
def _find_longest_matched_ngram_and_propose_tokens(
    origin_tokens: np.ndarray,
    min_ngram: int,
    max_ngram: int,
    max_model_len: int,
    k: int,
) -> np.ndarray:
    """
    Find the longest n-gram which matches the suffix of the given tokens
    whose length is within [min_ngram, max_ngram] (inclusive). If found,
    we will extract k right after the matched ngram.
    """
    # Do not generate draft tokens if context is shorter than minimum n-gram.
    total_token = origin_tokens.shape[0]
    if total_token < min_ngram:
        return np.empty((0,), dtype=origin_tokens.dtype)

    # Do not generate draft tokens beyond the max model length.
    k = min(k, max_model_len - total_token)
    if k <= 0:
        return np.empty((0,), dtype=origin_tokens.dtype)

    # Flip tokens, and the goal becomes to find the longest ngram
    # on the rightmost position which matches the prefix with
    # length [min_n, max_n] (inclusive).
    tokens = origin_tokens[::-1]

    # Longest prefix (not including itself) which is a suffix of
    # the current position.
    # lps[i] = max{v, where tokens[0:v] == tokens[i+1-v:i+1]}
    #
    # As ngram is capped by max_ngram to save memory, we only need to
    # store lps for the first max_ngram prefix.
    lps = np.zeros(max_ngram, dtype=np.int32)

    longest_ngram = 0
    position = 0

    # lps[0] always equals 0, so we start with index 1.
    prev_lps = 0
    i = 1
    while i < total_token:
        # tokens[:prev_lps] is the longest prefix as a suffix of tokens[:i]
        if tokens[prev_lps] == tokens[i]:
            # Token match: tokens[:prev_lps+1] is the longest prefix as
            # a suffix of tokens[:i+1]
            prev_lps += 1
            # Check if we found a longer valid ngram.
            #
            # Update position when longest_ngram matched prev_lps,
            # as we want to get the target n-gram of the earliest position
            # in the original tokens (i.e.
            # latest position in the reversed tokens)
            if prev_lps >= longest_ngram:
                longest_ngram = prev_lps
                position = i
            if i < max_ngram:
                # Store LPS for the first max_ngram prefix
                lps[i] = prev_lps
            if prev_lps == max_ngram:
                # When prev_lps reached max_ngram, update prev_lps
                # to lps[max_ngram-1] to avoid matching ngram
                # longer than max_ngram
                prev_lps = lps[max_ngram - 1]
            i += 1
        elif prev_lps != 0:
            # Token mismatch: try the second-longest prefix
            # among all suffix of tokens[:i],
            # which is the longest prefix of tokens[:prev_lps]
            prev_lps = lps[prev_lps - 1]
        else:
            # Token mismatch, and no more prefix (except empty string)
            # as a suffix of tokens[:i]
            i += 1

    if longest_ngram < min_ngram:
        # No valid ngram is found
        return np.empty((0,), dtype=origin_tokens.dtype)

    # Flip the position back, so in origin_tokens,
    # origin_tokens[total_token-1-position:total_token-1-position+longest_ngram]
    # is the matched ngram, so we should start drafting tokens from
    # total_token-1-position+longest_ngram
    start_position = total_token - 1 - position + longest_ngram
    k = min(k, total_token - start_position)
    return origin_tokens[start_position : start_position + k]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/spec_decode/eagle.py
vllm/v1/spec_decode/eagle.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import ast from dataclasses import replace from importlib.util import find_spec import numpy as np import torch import torch.nn as nn from vllm.attention.backends.registry import AttentionBackendEnum from vllm.config import ( CompilationMode, CUDAGraphMode, VllmConfig, get_layers_from_vllm_config, ) from vllm.distributed.parallel_state import get_pp_group from vllm.forward_context import set_forward_context from vllm.logger import init_logger from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase from vllm.model_executor.model_loader import get_model from vllm.model_executor.models import supports_multimodal from vllm.model_executor.models.deepseek_v2 import DeepseekV32IndexerCache from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.platforms import current_platform from vllm.triton_utils import triton from vllm.utils.platform_utils import is_pin_memory_available from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata from vllm.v1.attention.backends.tree_attn import ( TreeAttentionMetadata, TreeAttentionMetadataBuilder, ) from vllm.v1.attention.backends.triton_attn import TritonAttentionMetadata from vllm.v1.attention.backends.utils import ( AttentionMetadataBuilder, CommonAttentionMetadata, ) from vllm.v1.kv_cache_interface import KVCacheConfig from vllm.v1.sample.metadata import SamplingMetadata from vllm.v1.sample.sampler import _SAMPLING_EPS from vllm.v1.spec_decode.metadata import SpecDecodeMetadata from vllm.v1.spec_decode.utils import ( eagle_prepare_inputs_padded_kernel, eagle_prepare_next_token_padded_kernel, ) from vllm.v1.utils import CpuGpuBuffer from vllm.v1.worker.dp_utils import coordinate_batch_across_dp from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch logger = init_logger(__name__) PADDING_SLOT_ID = 
-1 class EagleProposer: def __init__( self, vllm_config: VllmConfig, device: torch.device, runner=None, ): self.vllm_config = vllm_config self.speculative_config = vllm_config.speculative_config assert self.speculative_config is not None self.draft_model_config = self.speculative_config.draft_model_config self.method = self.speculative_config.method self.runner = runner self.device = device self.dtype = vllm_config.model_config.dtype self.max_model_len = vllm_config.model_config.max_model_len self.dp_rank = vllm_config.parallel_config.data_parallel_rank self.num_speculative_tokens = self.speculative_config.num_speculative_tokens self.max_num_tokens = vllm_config.scheduler_config.max_num_batched_tokens self.token_arange_np = np.arange(self.max_num_tokens) # We need to get the hidden size from the draft model config because # the draft model's hidden size can be different from the target model's # hidden size (e.g., Llama 3.3 70B). self.hidden_size = self.draft_model_config.get_hidden_size() self.inputs_embeds_size = self.draft_model_config.get_inputs_embeds_size() # Multi-modal data support self.mm_registry = MULTIMODAL_REGISTRY self.supports_mm_inputs = self.mm_registry.supports_multimodal_inputs( vllm_config.model_config ) self.attn_metadata_builder: AttentionMetadataBuilder | None = None self.draft_indexer_metadata_builder: AttentionMetadataBuilder | None = None self.attn_layer_names: list[str] = [] self.indexer_layer_names: list[str] = [] self.eagle3_use_aux_hidden_state: bool = ( self._get_eagle3_use_aux_hidden_state_from_config() ) self.use_cuda_graph = False self.compilation_config = self.vllm_config.compilation_config if self.compilation_config.mode == CompilationMode.VLLM_COMPILE: cudagraph_mode = self.compilation_config.cudagraph_mode if cudagraph_mode != CUDAGraphMode.NONE and not cudagraph_mode.has_mode( CUDAGraphMode.PIECEWISE ): logger.warning( "Currently the eagle proposer only supports cudagraph_mode " "PIECEWISE, if you want the drafter to use cuda 
graphs, " "please set compilation_config.cudagraph_mode to PIECEWISE " "or FULL_AND_PIECEWISE" ) self.use_cuda_graph = ( cudagraph_mode.has_mode(CUDAGraphMode.PIECEWISE) and not self.speculative_config.enforce_eager ) # persistent buffers for cuda graph self.input_ids = torch.zeros( self.max_num_tokens, dtype=torch.int32, device=device ) self.uses_mrope = self.vllm_config.model_config.uses_mrope if self.uses_mrope: # NOTE: `mrope_positions` is implemented with one additional dummy # position on purpose to make it non-contiguous so that it can work # with torch compile. # See detailed explanation in https://github.com/vllm-project/vllm/pull/12128#discussion_r1926431923 # NOTE: When M-RoPE is enabled, position ids are 3D regardless of # the modality of inputs. For text-only inputs, each dimension has # identical position IDs, making M-RoPE functionally equivalent to # 1D-RoPE. # See page 5 of https://arxiv.org/abs/2409.12191 self.mrope_positions = torch.zeros( (3, self.max_num_tokens + 1), dtype=torch.int64, device=device ) else: # RoPE need (max_num_tokens,) self.positions = torch.zeros( self.max_num_tokens, dtype=torch.int64, device=device ) self.hidden_states = torch.zeros( (self.max_num_tokens, self.hidden_size), dtype=self.dtype, device=device ) # We need +1 here because the arange is used to set query_start_loc, # which has one more element than batch_size. max_batch_size = vllm_config.scheduler_config.max_num_seqs max_num_slots_for_arange = max(max_batch_size + 1, self.max_num_tokens) self.arange = torch.arange( max_num_slots_for_arange, device=device, dtype=torch.int32 ) self.inputs_embeds = torch.zeros( (self.max_num_tokens, self.inputs_embeds_size), dtype=self.dtype, device=device, ) self.backup_next_token_ids = CpuGpuBuffer( max_batch_size, dtype=torch.int32, pin_memory=is_pin_memory_available(), device=device, with_numpy=True, ) # Determine allowed attention backends once during initialization. 
self.allowed_attn_types: tuple | None = None if current_platform.is_rocm(): rocm_types = [TritonAttentionMetadata, FlashAttentionMetadata] # ROCM_AITER_FA is an optional backend if find_spec( AttentionBackendEnum.ROCM_AITER_FA.get_path(include_classname=False) ): from vllm.v1.attention.backends.rocm_aiter_fa import ( AiterFlashAttentionMetadata, ) rocm_types.append(AiterFlashAttentionMetadata) # TRITON_MLA backend support for MLA models (e.g., DeepSeek) from vllm.v1.attention.backends.mla.common import MLACommonMetadata rocm_types.append(MLACommonMetadata) self.allowed_attn_types = tuple(rocm_types) # Parse the speculative token tree. spec_token_tree = self.speculative_config.speculative_token_tree self.tree_choices: list[tuple[int, ...]] = ast.literal_eval(spec_token_tree) tree_depth = len(self.tree_choices[-1]) # Precompute per-level properties of the tree. num_drafts_per_level = [0] * tree_depth for node in self.tree_choices: num_drafts_per_level[len(node) - 1] += 1 self.cu_drafts_per_level = [num_drafts_per_level[0]] self.child_drafts_per_level = [num_drafts_per_level[0]] for level in range(1, tree_depth): self.cu_drafts_per_level.append( self.cu_drafts_per_level[-1] + num_drafts_per_level[level] ) self.child_drafts_per_level.append( num_drafts_per_level[level] // num_drafts_per_level[level - 1] ) # Precompute draft position offsets in flattened tree. 
self.tree_draft_pos_offsets = torch.arange( 1, len(self.tree_choices) + 1, device=device, dtype=torch.int32, ).repeat(max_batch_size, 1) def _get_positions(self, num_tokens: int): if self.uses_mrope: return self.mrope_positions[:, :num_tokens] return self.positions[:num_tokens] def _set_positions(self, num_tokens: int, positions: torch.Tensor): if self.uses_mrope: self.mrope_positions[:, :num_tokens] = positions else: self.positions[:num_tokens] = positions def propose( self, # [num_tokens] target_token_ids: torch.Tensor, # [num_tokens] or [3, num_tokens] when M-RoPE is enabled target_positions: torch.Tensor, # [num_tokens, hidden_size] target_hidden_states: torch.Tensor, # [batch_size] next_token_ids: torch.Tensor, last_token_indices: torch.Tensor | None, common_attn_metadata: CommonAttentionMetadata, sampling_metadata: SamplingMetadata, mm_embed_inputs: tuple[list[torch.Tensor], torch.Tensor] | None = None, num_rejected_tokens_gpu: torch.Tensor | None = None, ) -> torch.Tensor: num_tokens = target_token_ids.shape[0] batch_size = next_token_ids.shape[0] if last_token_indices is None: last_token_indices = common_attn_metadata.query_start_loc[1:] - 1 if self.method == "eagle3": assert isinstance(self.model, Eagle3LlamaForCausalLM) target_hidden_states = self.model.combine_hidden_states( target_hidden_states ) assert target_hidden_states.shape[-1] == self.hidden_size # Shift the input ids by one token. # E.g., [a1, b1, b2, c1, c2, c3] -> [b1, b2, c1, c2, c3, c3] self.input_ids[: num_tokens - 1] = target_token_ids[1:] # Replace the last token with the next token. 
# E.g., [b1, b2, c1, c2, c3, c3] -> [a2, b2, b3, c2, c3, c4] self.input_ids[last_token_indices] = next_token_ids assert self.runner is not None if self.attn_metadata_builder is None: attn_metadata_builder = self._get_attention_metadata_builder() else: attn_metadata_builder = self.attn_metadata_builder attn_metadata = attn_metadata_builder.build_for_drafting( common_attn_metadata=common_attn_metadata, draft_index=0 ) # FIXME: support hybrid kv for draft model (remove separate indexer) if self.draft_indexer_metadata_builder: draft_indexer_metadata = ( self.draft_indexer_metadata_builder.build_for_drafting( common_attn_metadata=common_attn_metadata, draft_index=0, ) ) else: draft_indexer_metadata = None # At this moment, we assume all eagle layers belong to the same KV # cache group, thus using the same attention metadata. per_layer_attn_metadata = {} for layer_name in self.attn_layer_names: per_layer_attn_metadata[layer_name] = attn_metadata for layer_name in self.indexer_layer_names: assert draft_indexer_metadata is not None per_layer_attn_metadata[layer_name] = draft_indexer_metadata num_tokens_dp_padded, num_tokens_across_dp = self._pad_batch_across_dp( num_tokens_unpadded=num_tokens, num_tokens_padded=num_tokens, ) cudagraph_runtime_mode = CUDAGraphMode.NONE if ( self.use_cuda_graph and num_tokens_dp_padded <= self.compilation_config.max_cudagraph_capture_size ): num_input_tokens = self.vllm_config.pad_for_cudagraph(num_tokens_dp_padded) cudagraph_runtime_mode = CUDAGraphMode.PIECEWISE else: num_input_tokens = num_tokens_dp_padded if num_tokens_across_dp is not None: num_tokens_across_dp[self.dp_rank] = num_input_tokens # copy inputs to buffer for cudagraph self._set_positions(num_tokens, target_positions) self.hidden_states[:num_tokens] = target_hidden_states if self.supports_mm_inputs: mm_embeds, is_mm_embed = mm_embed_inputs or (None, None) self.inputs_embeds[:num_tokens] = self.model.embed_input_ids( self.input_ids[:num_tokens], 
multimodal_embeddings=mm_embeds, is_multimodal=is_mm_embed, ) input_ids = None inputs_embeds = self.inputs_embeds[:num_input_tokens] else: input_ids = self.input_ids[:num_input_tokens] inputs_embeds = None with set_forward_context( per_layer_attn_metadata, self.vllm_config, num_tokens=num_input_tokens, num_tokens_across_dp=num_tokens_across_dp, cudagraph_runtime_mode=cudagraph_runtime_mode, ): ret_hidden_states = self.model( input_ids=input_ids, positions=self._get_positions(num_input_tokens), hidden_states=self.hidden_states[:num_input_tokens], inputs_embeds=inputs_embeds, ) if self.method == "mtp": last_hidden_states = ret_hidden_states hidden_states = last_hidden_states else: last_hidden_states, hidden_states = ret_hidden_states sample_hidden_states = last_hidden_states[last_token_indices] logits = self.model.compute_logits(sample_hidden_states) # Early exit if there is only one draft token to be generated. if self.num_speculative_tokens == 1: draft_token_ids = logits.argmax(dim=-1) return draft_token_ids.view(-1, 1) if self.uses_mrope: positions = target_positions[:, last_token_indices] else: positions = target_positions[last_token_indices] if self.method in ( "deepseek_mtp", "ernie_mtp", "longcat_flash_mtp", "pangu_ultra_moe_mtp", ): hidden_states = self.hidden_states[last_token_indices] else: hidden_states = hidden_states[last_token_indices] if isinstance(attn_metadata, TreeAttentionMetadata): # Draft using tree attention. draft_token_ids_list = self.propose_tree( batch_size=batch_size, logits=logits, positions=positions, hidden_states=hidden_states, common_attn_metadata=common_attn_metadata, ) # [batch_size, num_tree_tokens] return torch.cat(draft_token_ids_list, dim=1) draft_token_ids = logits.argmax(dim=-1) if self.allowed_attn_types is not None and not isinstance( attn_metadata, self.allowed_attn_types ): raise ValueError( f"Unsupported attention metadata type for speculative " "decoding with num_speculative_tokens > 1: " f"{type(attn_metadata)}. 
Supported types are: " f"{self.allowed_attn_types}" ) # Generate the remaining draft tokens. draft_token_ids_list = [draft_token_ids] batch_size_dp_padded, batch_size_across_dp = self._pad_batch_across_dp( num_tokens_unpadded=batch_size, num_tokens_padded=batch_size, ) if ( self.use_cuda_graph and batch_size_dp_padded <= self.compilation_config.max_cudagraph_capture_size ): input_batch_size = self.vllm_config.pad_for_cudagraph(batch_size_dp_padded) cudagraph_runtime_mode = CUDAGraphMode.PIECEWISE else: input_batch_size = batch_size_dp_padded cudagraph_runtime_mode = CUDAGraphMode.NONE if batch_size_across_dp is not None: batch_size_across_dp[self.dp_rank] = input_batch_size common_attn_metadata.num_actual_tokens = batch_size common_attn_metadata.max_query_len = 1 common_attn_metadata.query_start_loc = self.arange[: batch_size + 1] common_attn_metadata.query_start_loc_cpu = torch.from_numpy( self.token_arange_np[: batch_size + 1] ).clone() # In padded drafter batch, we need to adjust the sequence lengths # to remove the "padding" (i.e. rejected tokens). # Only apply this adjustment when we have rejected tokens # (i.e., not the first proposal). if self.num_speculative_tokens > 1 and num_rejected_tokens_gpu is not None: common_attn_metadata.seq_lens -= num_rejected_tokens_gpu # Invalidate the CPU-side shadows to avoid H<>D sync. common_attn_metadata._seq_lens_cpu = None common_attn_metadata._num_computed_tokens_cpu = None for token_index in range(self.num_speculative_tokens - 1): # Update the inputs. # cast to int32 is crucial when eagle model is compiled. # tensor.argmax() returns int64 by default. input_ids = draft_token_ids_list[-1].int() if self.uses_mrope: positions += 1 # NOTE(woosuk): We should handle the case where the draft model # generates tokens beyond the max model length. 
# Since it is complex to remove such requests from the batch, # we keep them in the batch but adjust the position ids # and slot mappings to avoid the # out-of-range access during the model execution. # The draft tokens generated with this adjustment # should be ignored. exceeds_max_model_len = positions[0] >= self.max_model_len # Mask out the position ids that exceed the max model length. # Otherwise, we may get out-of-range error in RoPE. clamped_positions = torch.where( exceeds_max_model_len.unsqueeze(0), torch.zeros_like(positions), positions, ) else: positions += 1 exceeds_max_model_len = positions >= self.max_model_len clamped_positions = torch.where(exceeds_max_model_len, 0, positions) # For data integrity when async scheduling, we shouldn't use in place # operations in case they are modified in next step's `prepare_input` # of main model. # Increment the sequence lengths. common_attn_metadata.seq_lens += 1 # For the requests that exceed the max model length, we set the # sequence length to 1 to minimize their overheads in attention. common_attn_metadata.seq_lens.masked_fill_(exceeds_max_model_len, 1) # Also update the CPU-side shadow; NOTE: this is hacky and should be # removed in when common_attn_metadata.seq_lens_cpu is deprecated. if common_attn_metadata._seq_lens_cpu is not None: common_attn_metadata._seq_lens_cpu += 1 if common_attn_metadata._num_computed_tokens_cpu is not None: common_attn_metadata._num_computed_tokens_cpu += 1 # Compute the slot mapping. 
block_size = attn_metadata_builder.kv_cache_spec.block_size if self.uses_mrope: # all dimensions of positions are the same block_numbers = clamped_positions[0] // block_size else: block_numbers = clamped_positions // block_size block_ids = common_attn_metadata.block_table_tensor.gather( dim=1, index=block_numbers.view(-1, 1) ) block_ids = block_ids.view(-1) if self.uses_mrope: common_attn_metadata.slot_mapping = ( block_ids * block_size + clamped_positions[0] % block_size ) else: common_attn_metadata.slot_mapping = ( block_ids * block_size + clamped_positions % block_size ) # Mask out the slot mappings that exceed the max model length. # Otherwise, the KV cache will be inadvertently updated with the # padding tokens. common_attn_metadata.slot_mapping.masked_fill_( exceeds_max_model_len, PADDING_SLOT_ID ) # Rebuild attention metadata attn_metadata = attn_metadata_builder.build_for_drafting( # type: ignore common_attn_metadata=common_attn_metadata, draft_index=token_index + 1 ) for layer_name in self.attn_layer_names: per_layer_attn_metadata[layer_name] = attn_metadata # copy inputs to buffer for cudagraph self.input_ids[:batch_size] = input_ids self._set_positions(batch_size, clamped_positions) self.hidden_states[:batch_size] = hidden_states if self.supports_mm_inputs: self.inputs_embeds[:batch_size] = self.model.embed_input_ids(input_ids) input_ids = None inputs_embeds = self.inputs_embeds[:input_batch_size] else: input_ids = self.input_ids[:input_batch_size] inputs_embeds = None # Run the model. 
with set_forward_context( per_layer_attn_metadata, self.vllm_config, num_tokens=input_batch_size, num_tokens_across_dp=batch_size_across_dp, cudagraph_runtime_mode=cudagraph_runtime_mode, ): ret_hidden_states = self.model( input_ids=input_ids, positions=self._get_positions(input_batch_size), hidden_states=self.hidden_states[:input_batch_size], inputs_embeds=inputs_embeds, ) if self.method == "mtp": last_hidden_states = ret_hidden_states hidden_states = ret_hidden_states else: last_hidden_states, hidden_states = ret_hidden_states hidden_states = hidden_states[:batch_size] logits = self.model.compute_logits(last_hidden_states[:batch_size]) draft_token_ids = logits.argmax(dim=-1) draft_token_ids_list.append(draft_token_ids) # [batch_size, num_speculative_tokens] draft_token_ids = torch.stack(draft_token_ids_list, dim=1) return draft_token_ids def prepare_next_token_ids_cpu( self, sampled_token_ids: list[list[int]], requests: dict[str, CachedRequestState], gpu_input_batch: InputBatch, num_scheduled_tokens: dict[str, int], ) -> torch.Tensor: """ This function is used to prepare the inputs for speculative decoding. It calculates the next token ids for each request based on the sampled token ids from the CPU. If a request has no sampled token ids (e.g., during the initial decoding steps), it falls back to using the request state to get the next token id. """ req_ids = gpu_input_batch.req_ids next_token_ids: list[int] = [] for i, token_ids in enumerate(sampled_token_ids): if token_ids: # Common case. next_token_id = token_ids[-1] else: # Partial prefill (rare case). # Get the next token id from the request state. 
req_id = req_ids[i] req_state = requests[req_id] seq_len = req_state.num_computed_tokens + num_scheduled_tokens[req_id] next_token_id = req_state.get_token_id(seq_len) next_token_ids.append(next_token_id) next_token_ids = torch.tensor( next_token_ids, dtype=torch.int32, device=self.input_ids.device ) return next_token_ids def prepare_next_token_ids_padded( self, common_attn_metadata: CommonAttentionMetadata, sampled_token_ids: torch.Tensor, requests: dict[str, CachedRequestState], gpu_input_batch: InputBatch, discard_request_mask: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: """ This function is used to prepare the inputs for speculative decoding. It calculates the next token ids and the number of valid sampled tokens for each request, considering the "discarded" requests whose next token is not sampled and comes from `request.get_token_id()` instead. This is denoted the "backup" token id. It also counts rejected tokens via `sampled_token_ids`. """ # Precompute get_token_id for when there is no valid next token num_reqs = gpu_input_batch.num_reqs self.backup_next_token_ids.np[:num_reqs] = np.array( [ requests[gpu_input_batch.req_ids[i]].get_token_id( common_attn_metadata.seq_lens_cpu[i].item() ) for i in range(num_reqs) ], dtype=np.int32, ) self.backup_next_token_ids.copy_to_gpu(num_reqs) backup_tokens_gpu = self.backup_next_token_ids.gpu batch_size, num_tokens = sampled_token_ids.shape device = sampled_token_ids.device assert discard_request_mask.dtype == torch.bool assert backup_tokens_gpu.dtype == torch.int32 next_token_ids = torch.empty((batch_size,), dtype=torch.int32, device=device) valid_sampled_tokens_count = torch.empty( (batch_size,), dtype=torch.int32, device=device ) # Kernel grid: one program per request (row) grid = (batch_size,) # Find the next power of 2 for block sizes BLOCK_SIZE_TOKENS = triton.next_power_of_2(num_tokens) eagle_prepare_next_token_padded_kernel[grid]( sampled_token_ids, discard_request_mask, backup_tokens_gpu, 
next_token_ids, valid_sampled_tokens_count, gpu_input_batch.vocab_size, num_tokens, batch_size, sampled_token_ids.stride(0), BLOCK_SIZE_TOKENS=BLOCK_SIZE_TOKENS, ) return next_token_ids, valid_sampled_tokens_count def prepare_inputs_padded( self, common_attn_metadata: CommonAttentionMetadata, spec_decode_metadata: SpecDecodeMetadata, valid_sampled_tokens_count: torch.Tensor, ) -> tuple[CommonAttentionMetadata, torch.Tensor, torch.Tensor]: """ This function is used to prepare the inputs for speculative decoding It updates the common_attn_metadata for speculative decoding, but does not consider the rejected tokens. Instead, all tokens are included as inputs to the speculator, with the rejected tokens used as padding and filtered out later by `token_indices_to_sample`. No blocking CPU operations should be introduced in this function. """ num_reqs = common_attn_metadata.num_reqs device = valid_sampled_tokens_count.device token_indices_to_sample = torch.empty( (num_reqs,), dtype=torch.int32, device=device ) num_rejected_tokens_gpu = torch.empty( (num_reqs,), dtype=torch.int32, device=device ) grid = (num_reqs,) eagle_prepare_inputs_padded_kernel[grid]( spec_decode_metadata.cu_num_draft_tokens, valid_sampled_tokens_count, common_attn_metadata.query_start_loc, token_indices_to_sample, num_rejected_tokens_gpu, num_reqs, ) query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu new_query_len_per_req = query_start_loc_cpu[1:] - query_start_loc_cpu[:-1] total_num_tokens = query_start_loc_cpu[-1].item() spec_common_attn_metadata = CommonAttentionMetadata( query_start_loc=common_attn_metadata.query_start_loc, seq_lens=common_attn_metadata.seq_lens, query_start_loc_cpu=query_start_loc_cpu, _seq_lens_cpu=common_attn_metadata._seq_lens_cpu, _num_computed_tokens_cpu=common_attn_metadata._num_computed_tokens_cpu, num_reqs=common_attn_metadata.num_reqs, num_actual_tokens=total_num_tokens, max_query_len=new_query_len_per_req.max().item(), 
max_seq_len=common_attn_metadata.seq_lens_cpu.max().item(), block_table_tensor=common_attn_metadata.block_table_tensor, slot_mapping=common_attn_metadata.slot_mapping[:total_num_tokens], causal=True, dcp_local_seq_lens=common_attn_metadata.dcp_local_seq_lens, ) return ( spec_common_attn_metadata, token_indices_to_sample, num_rejected_tokens_gpu, ) def propose_tree( self, batch_size: int, # [num_tokens, vocab_size] logits: torch.Tensor, # [num_tokens] positions: torch.Tensor, # [num_tokens, hidden_size] hidden_states: torch.Tensor, common_attn_metadata: CommonAttentionMetadata, ) -> list[torch.Tensor]: tree_attn_metadata_builder = self.runner.attn_groups[0][ 0 ].get_metadata_builder() assert isinstance(tree_attn_metadata_builder, TreeAttentionMetadataBuilder) total_num_drafts = self.cu_drafts_per_level[0] level_num_drafts = total_num_drafts # Sample a draft token for each child at the tree root level. num_children = self.child_drafts_per_level[0] if num_children == 1: draft_token_ids = logits.argmax(dim=-1).view(batch_size, -1) else: draft_token_ids = torch.topk(logits, num_children, dim=-1).indices.view( batch_size, -1 ) draft_token_ids_list = [draft_token_ids] draft_hidden_states = hidden_states.view(batch_size, 1, -1) # Initialize empty tensors for concatenation with the level outputs. tree_input_ids = torch.empty( 0, device=self.input_ids.device, dtype=self.input_ids.dtype ) tree_positions = torch.empty( 0, device=self.positions.device, dtype=self.positions.dtype ) tree_hidden_states = torch.empty( 0, device=self.hidden_states.device, dtype=self.hidden_states.dtype ) # Precompute the draft token positions. flattened_draft_positions = ( positions.view(batch_size, -1) + self.tree_draft_pos_offsets[:batch_size, :] ) tree_depth = len(self.cu_drafts_per_level) for level in range(tree_depth - 1): # Get draft positions for RoPE. 
draft_positions = positions + (level + 1) exceeds_max_model_len = (positions + total_num_drafts) >= self.max_model_len # Mask out the position ids that exceed the max model length. # Otherwise, we may get out-of-range error in RoPE. draft_positions = torch.where( exceeds_max_model_len, 0, draft_positions, ).view(batch_size, -1) if level_num_drafts > 1: # Repeat the positions for each draft at this level. draft_positions = draft_positions.repeat_interleave( level_num_drafts, dim=1 ) if num_children > 1:
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/spec_decode/metadata.py
vllm/v1/spec_decode/metadata.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
from itertools import chain

import numpy as np
import torch


@dataclass
class SpecDecodeMetadata:
    """Per-step metadata for speculative-decoding verification.

    Holds the flattened draft tokens proposed for the whole batch together
    with the index tensors needed to gather the target-model logits for
    draft verification and bonus-token sampling.
    """

    # [num_tokens] flattened draft token ids across all requests
    draft_token_ids: torch.Tensor
    # [batch_size] number of draft tokens per request
    num_draft_tokens: list[int]
    # [batch_size] inclusive cumulative sum of num_draft_tokens
    cu_num_draft_tokens: torch.Tensor
    # [batch_size] inclusive cumulative sum of (num_draft_tokens + 1)
    cu_num_sampled_tokens: torch.Tensor
    # [num_tokens] indices of logits used to verify the draft tokens
    target_logits_indices: torch.Tensor
    # [batch_size] indices of the bonus-token logits
    bonus_logits_indices: torch.Tensor
    # [num_tokens + batch_size] combined logits indices
    logits_indices: torch.Tensor

    def __post_init__(self):
        # `default=0` makes an empty batch well-defined instead of raising
        # ValueError from max() on an empty sequence.
        self.max_spec_len = max(self.num_draft_tokens, default=0)

    @classmethod
    def make_dummy(
        cls,
        draft_token_ids: list[list[int]],
        device: torch.device,
    ) -> "SpecDecodeMetadata":
        """Build a placeholder instance (e.g. for dummy/profile runs).

        The draft token ids are taken from `draft_token_ids`; all index
        tensors are zero-filled with the correct shapes.
        """
        batch_size = len(draft_token_ids)
        num_draft_tokens = [len(ids) for ids in draft_token_ids]
        num_sampled_tokens = [len(ids) + 1 for ids in draft_token_ids]
        # chain.from_iterable is O(n); sum(list_of_lists, []) is quadratic.
        flattened_draft_token_ids = list(chain.from_iterable(draft_token_ids))
        num_tokens = len(flattened_draft_token_ids)

        draft_token_ids_tensor = torch.tensor(
            flattened_draft_token_ids, dtype=torch.int32, device=device
        )
        cu_num_draft_tokens = np.cumsum(num_draft_tokens, dtype=np.int32)
        cu_num_draft_tokens_tensor = torch.from_numpy(cu_num_draft_tokens).to(device)
        cu_num_sampled_tokens = np.cumsum(num_sampled_tokens, dtype=np.int32)
        cu_num_sampled_tokens_tensor = torch.from_numpy(cu_num_sampled_tokens).to(
            device
        )

        target_logits_indices = torch.zeros(
            num_tokens, dtype=torch.int32, device=device
        )
        bonus_logits_indices = torch.zeros(batch_size, dtype=torch.int32, device=device)
        logits_indices = torch.zeros(
            num_tokens + batch_size, dtype=torch.int32, device=device
        )
        return cls(
            draft_token_ids=draft_token_ids_tensor,
            num_draft_tokens=num_draft_tokens,
            cu_num_draft_tokens=cu_num_draft_tokens_tensor,
            cu_num_sampled_tokens=cu_num_sampled_tokens_tensor,
            target_logits_indices=target_logits_indices,
            bonus_logits_indices=bonus_logits_indices,
            logits_indices=logits_indices,
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/spec_decode/utils.py
vllm/v1/spec_decode/utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.sampling_params import SamplingParams
from vllm.triton_utils import tl, triton

_SAMPLING_EPS = 1e-5


def is_spec_decode_unsupported(sampling_params: SamplingParams) -> bool:
    """True if request is incompatible with speculative decoding"""
    if sampling_params.frequency_penalty != 0.0:
        return True
    if sampling_params.presence_penalty != 0.0:
        return True
    if sampling_params.repetition_penalty != 1.0:
        return True
    if sampling_params.min_p > _SAMPLING_EPS:
        return True
    return sampling_params.logprobs is not None


@triton.jit
def eagle_prepare_inputs_padded_kernel(
    cu_num_draft_tokens_ptr,  # [num_reqs]
    valid_sampled_tokens_count_ptr,  # [num_reqs]
    query_start_loc_gpu_ptr,  # [num_reqs + 1]
    token_indices_to_sample_ptr,  # [num_reqs] (output)
    num_rejected_tokens_gpu_ptr,  # [num_reqs] (output)
    num_reqs,  # tl.int32
):
    """Fused kernel for Eagle's padded prepare-inputs step.

    One program per request: derives the per-request draft-token count from
    the inclusive cumulative sum, counts rejected tokens, and writes both the
    rejection count and the token index to sample from.
    """
    req_idx = tl.program_id(axis=0)
    if req_idx >= num_reqs:
        return

    # cu_num_draft_tokens is an inclusive cumsum: entry 0 already holds the
    # first request's count, so only later entries need a subtraction.
    draft_csum_cur = tl.load(cu_num_draft_tokens_ptr + req_idx)
    n_draft = 0
    if req_idx == 0:
        n_draft = draft_csum_cur
    else:
        draft_csum_prev = tl.load(cu_num_draft_tokens_ptr + req_idx - 1)
        n_draft = draft_csum_cur - draft_csum_prev

    # valid_count = 1 + accepted tokens; rejections only make sense when the
    # request actually had draft tokens.
    valid_count = tl.load(valid_sampled_tokens_count_ptr + req_idx)
    n_rejected = n_draft + 1 - valid_count
    n_rejected = tl.where(n_draft > 0, n_rejected, 0)

    # query_start_loc[req_idx + 1] - 1 is the last token of this request;
    # step back over the rejected (padding) tokens to find the sample index.
    last_token_pos = tl.load(query_start_loc_gpu_ptr + req_idx + 1) - 1
    sample_pos = last_token_pos - n_rejected

    tl.store(token_indices_to_sample_ptr + req_idx, sample_pos)
    tl.store(num_rejected_tokens_gpu_ptr + req_idx, n_rejected)


@triton.jit
def eagle_prepare_next_token_padded_kernel(
    sampled_token_ids_ptr,  # [num_reqs, num_sampled_tokens_per_req]
    discard_request_mask_ptr,  # [num_reqs]
    backup_next_token_ids_ptr,  # [num_reqs]
    next_token_ids_ptr,  # [num_reqs] (output)
    valid_sampled_tokens_count_ptr,  # [num_reqs] (output)
    vocab_size,  # tl.int32
    num_sampled_tokens_per_req,  # tl.int32 (num_spec_tokens + 1)
    num_reqs,  # tl.int32
    stride_sampled_token_ids,  # tl.int32 (stride for dim 0)
    BLOCK_SIZE_TOKENS: tl.constexpr,  # Power-of-2 >= num_sampled_tokens_per_req
):
    """Fused kernel for Eagle's padded next-token preparation.

    One program per request: counts the valid (1 + accepted) sampled tokens
    and emits the last accepted token id as the "next" token, falling back to
    the precomputed backup token when the request is discarded or nothing was
    accepted.
    """
    req_idx = tl.program_id(axis=0)
    if req_idx >= num_reqs:
        return

    # Discarded requests contribute no valid tokens and use the backup id.
    is_discarded = tl.load(discard_request_mask_ptr + req_idx)
    if is_discarded:
        fallback = tl.load(backup_next_token_ids_ptr + req_idx)
        valid_count = tl.full((), 0, dtype=tl.uint32)
        tl.store(next_token_ids_ptr + req_idx, fallback)
        tl.store(valid_sampled_tokens_count_ptr + req_idx, valid_count)
    else:
        offsets = tl.arange(0, BLOCK_SIZE_TOKENS)
        in_bounds = offsets < num_sampled_tokens_per_req
        row_base = sampled_token_ids_ptr + req_idx * stride_sampled_token_ids
        sampled = tl.load(row_base + offsets, mask=in_bounds, other=-1)

        # Rejected slots hold -1; accepted ids lie in [0, vocab_size).
        accept_mask = (sampled != -1) & (sampled < vocab_size) & in_bounds
        valid_count = tl.sum(accept_mask)

        if valid_count > 0:
            # valid_count > 0 guarantees accept_mask is non-empty, so the
            # max over masked offsets is well-defined.
            last_idx = tl.max(tl.where(accept_mask, offsets, -1))
            # Sum-select the token at last_idx to avoid a second load of
            # sampled[last_idx].
            chosen = tl.sum(tl.where(offsets == last_idx, sampled, 0))
            tl.store(next_token_ids_ptr + req_idx, chosen)
        else:
            # Nothing accepted: fall back to the backup token.
            fallback = tl.load(backup_next_token_ids_ptr + req_idx)
            tl.store(next_token_ids_ptr + req_idx, fallback)

        tl.store(valid_sampled_tokens_count_ptr + req_idx, valid_count)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/spec_decode/__init__.py
vllm/v1/spec_decode/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/spec_decode/medusa.py
vllm/v1/spec_decode/medusa.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.nn as nn

from vllm.config import VllmConfig
from vllm.forward_context import set_forward_context
from vllm.logger import init_logger
from vllm.model_executor.model_loader import get_model
from vllm.model_executor.models.interfaces import is_mixture_of_experts
from vllm.v1.sample.metadata import SamplingMetadata

# Initialize logger
logger = init_logger(__name__)


class MedusaProposer:
    """
    Medusa proposer class for generating token sequences
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
    ):
        # Cache the pieces of the config this proposer needs.
        self.vllm_config = vllm_config
        self.device = device

        scheduler_cfg = vllm_config.scheduler_config
        self.max_num_tokens = scheduler_cfg.max_num_batched_tokens

        draft_cfg = vllm_config.speculative_config.draft_model_config
        self.hidden_size = draft_cfg.get_hidden_size()

        self.dtype = vllm_config.model_config.dtype

    def propose(
        self,
        target_hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> torch.Tensor:
        """Return greedy draft tokens from each Medusa head.

        Output shape: [batch_size, num_heads].
        """
        head_outputs = self.model(target_hidden_states)
        per_head_logits = self.model.compute_logits(head_outputs)

        # Greedy pick per head, then stack heads along dim 1.
        per_head_tokens = [head.argmax(dim=-1) for head in per_head_logits]
        return torch.stack(per_head_tokens, dim=1)

    def load_model(self, target_model: nn.Module) -> None:
        """Load the Medusa head weights as this proposer's draft model."""
        from vllm.compilation.backends import set_model_tag

        with set_model_tag("medusa_head"):
            self.model = get_model(
                vllm_config=self.vllm_config,
                model_config=self.vllm_config.speculative_config.draft_model_config,
            )

        assert not (
            is_mixture_of_experts(self.model)
            and self.vllm_config.parallel_config.enable_eplb
        ), "EPLB for Medusa is not supported"

    @torch.inference_mode()
    def dummy_run(self, num_tokens: int) -> None:
        """Run the model on zero-filled hidden states (e.g. for profiling)."""
        dummy_hidden = torch.zeros(
            (self.max_num_tokens, self.hidden_size),
            dtype=self.dtype,
            device=self.device,
        )
        with set_forward_context(None, self.vllm_config, num_tokens=num_tokens):
            self.model(dummy_hidden)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/executor/ray_distributed_executor.py
vllm/v1/executor/ray_distributed_executor.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm.v1.executor.ray_executor import ( RayDistributedExecutor as _RayDistributedExecutor, ) # For backwards compatibility. RayDistributedExecutor = _RayDistributedExecutor
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/executor/ray_utils.py
vllm/v1/executor/ray_utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

# Ray integration helpers: a lazily-initialized Ray worker wrapper, a
# Future adapter over Ray object refs, and placement-group setup/teardown
# utilities. Everything Ray-specific is guarded by the try/except import
# below so the module imports cleanly when Ray is not installed.

import os
import time
from collections import defaultdict
from concurrent.futures import Future
from typing import TYPE_CHECKING, Union

import vllm.platforms
from vllm.config import ParallelConfig
from vllm.distributed import get_pp_group
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
from vllm.logger import init_logger
from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from vllm.utils.network_utils import get_ip
from vllm.v1.outputs import AsyncModelRunnerOutput
from vllm.v1.worker.worker_base import WorkerWrapperBase

if TYPE_CHECKING:
    from vllm.v1.core.sched.output import GrammarOutput, SchedulerOutput
    from vllm.v1.outputs import ModelRunnerOutput

logger = init_logger(__name__)

# Maximum seconds to wait for placement-group creation/removal.
PG_WAIT_TIMEOUT = 1800

try:
    import ray
    from ray.util import placement_group_table
    from ray.util.placement_group import PlacementGroup

    try:
        from ray._private.state import available_resources_per_node
    except ImportError:
        # Ray 2.9.x doesn't expose `available_resources_per_node`
        from ray._private.state import state as _state

        available_resources_per_node = _state._available_resources_per_node

    class RayWorkerWrapper(WorkerWrapperBase):
        """Ray wrapper for vllm.worker.Worker, allowing Worker to be
        lazily initialized after Ray sets CUDA_VISIBLE_DEVICES."""

        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **kwargs)
            # Since the compiled DAG runs a main execution
            # in a different thread that calls cuda.set_device.
            # The flag indicates is set_device is called on
            # that thread.
            self.compiled_dag_cuda_device_set = False

        def get_node_ip(self) -> str:
            # IP of the node this worker actor is running on.
            return get_ip()

        def get_node_and_gpu_ids(self) -> tuple[str, list[int]]:
            # Returns (ray node id, accelerator ids assigned to this actor).
            node_id = ray.get_runtime_context().get_node_id()
            device_key = vllm.platforms.current_platform.ray_device_key
            if not device_key:
                # NOTE(review): RuntimeError does not %-format its args; this
                # raises with a (format-string, value) tuple instead of an
                # interpolated message. Should be formatted explicitly.
                raise RuntimeError(
                    "current platform %s does not support ray.",
                    vllm.platforms.current_platform.device_name,
                )
            gpu_ids = ray.get_runtime_context().get_accelerator_ids()[device_key]
            return node_id, gpu_ids

        def setup_device_if_necessary(self):
            # TODO(swang): This is needed right now because Ray CG executes
            # on a background thread, so we need to reset torch's current
            # device.
            # We can remove this API after it is fixed in compiled graph.
            assert self.worker is not None, "Worker is not initialized"
            if not self.compiled_dag_cuda_device_set:
                if current_platform.is_tpu():
                    # Not needed
                    pass
                else:
                    assert self.worker.device is not None
                    current_platform.set_device(self.worker.device)

                self.compiled_dag_cuda_device_set = True

        def execute_model_ray(
            self,
            execute_model_input: tuple["SchedulerOutput", "GrammarOutput"]
            | tuple["SchedulerOutput", "GrammarOutput", "IntermediateTensors"],
        ) -> Union[
            "ModelRunnerOutput",
            tuple["SchedulerOutput", "GrammarOutput", "IntermediateTensors"],
        ]:
            # This method is used by Ray Compiled Graph to execute the model,
            # and it needs a special logic of self.setup_device_if_necessary()
            self.setup_device_if_necessary()
            assert self.worker is not None, "Worker is not initialized"
            # A 3-tuple input carries intermediate tensors from the previous
            # pipeline-parallel stage; a 2-tuple means this is the first stage.
            if len(execute_model_input) == 3:
                scheduler_output, grammar_output, intermediate_tensors = (
                    execute_model_input
                )
            else:
                scheduler_output, grammar_output = execute_model_input
                intermediate_tensors = None
            assert self.worker.model_runner is not None
            output = self.worker.model_runner.execute_model(
                scheduler_output, intermediate_tensors
            )
            if isinstance(output, IntermediateTensors):
                # Not the last PP rank: forward the tensors to the next stage.
                return scheduler_output, grammar_output, output
            if isinstance(output, AsyncModelRunnerOutput):
                output = output.get_output()
            if not get_pp_group().is_last_rank:
                # Case where there are no scheduled requests
                # but may still be finished requests.
                assert not output or not output.req_ids
                output = scheduler_output, grammar_output, None
            elif output is None:
                output = self.worker.model_runner.sample_tokens(grammar_output)
            # Ensure outputs crossing Ray compiled DAG are serializable.
            # AsyncModelRunnerOutput holds CUDA events and cannot be
            # pickled.
            if isinstance(output, AsyncModelRunnerOutput):
                output = output.get_output()
            return output

        def override_env_vars(self, vars: dict[str, str]):
            # Applied remotely so each Ray actor picks up engine env vars.
            os.environ.update(vars)

    ray_import_err = None

except ImportError as e:
    ray = None  # type: ignore
    # only capture string to avoid variable references in the traceback that can
    # prevent garbage collection in some cases
    ray_import_err = str(e)
    RayWorkerWrapper = None  # type: ignore


class FutureWrapper(Future):
    """A wrapper around Ray output reference to meet the interface
    of .execute_model(): The top level (core busy loop) expects .result() api
    to block and return a single output.

    If aggregator is provided, the outputs from all workers are aggregated
    upon the result() call. If not only the first worker's output is returned.
    """

    def __init__(self, ref_or_refs, aggregator: KVOutputAggregator | None = None):
        super().__init__()
        self.ref_or_refs = ref_or_refs
        self.aggregator = aggregator

    def result(self, timeout=None):
        # Blocks on the underlying Ray object ref(s).
        outputs = ray.get(self.ref_or_refs, timeout=timeout)
        if self.aggregator is None:
            return outputs
        return self.aggregator.aggregate(outputs, output_rank=0)


def ray_is_available() -> bool:
    """Returns True if Ray is available."""
    return ray is not None


def assert_ray_available():
    """Raise an exception if Ray is not available."""
    if ray is None:
        raise ValueError(
            f"Failed to import Ray: {ray_import_err}."
            "Please install Ray with `pip install ray`."
        )


def _verify_bundles(
    placement_group: "PlacementGroup", parallel_config: ParallelConfig, device_str: str
):
    """Verify a given placement group has bundles located in the right place.

    There are 2 rules.
    - Warn if all tensor parallel workers cannot fit in a single node.
    - Fail if driver node is not included in a placement group.
    """
    assert ray.is_initialized(), (
        "Ray is not initialized although distributed-executor-backend is ray."
    )
    pg_data = placement_group_table(placement_group)
    # bundle_idx -> node_id
    bundle_to_node_ids = pg_data["bundles_to_node_id"]
    # bundle_idx -> bundle (e.g., {"GPU": 1})
    bundles = pg_data["bundles"]
    # node_id -> List of bundle (e.g., {"GPU": 1})
    node_id_to_bundle: dict[str, list[dict[str, float]]] = defaultdict(list)

    for bundle_idx, node_id in bundle_to_node_ids.items():
        node_id_to_bundle[node_id].append(bundles[bundle_idx])
    driver_node_id = ray.get_runtime_context().get_node_id()

    if driver_node_id not in node_id_to_bundle:
        raise RuntimeError(
            f"driver node id {driver_node_id} is not included in a placement "
            f"group {placement_group.id}. Node id -> bundles "
            f"{node_id_to_bundle}. "
            "You don't have enough GPUs available in a current node. Check "
            "`ray status` and `ray list nodes` to see if you have available "
            # NOTE(review): the segment below is not an f-string, so
            # `{driver_node_id}` is emitted literally, not interpolated.
            "GPUs in a node `{driver_node_id}` before starting an vLLM engine."
        )

    # NOTE(review): loop variable shadows the outer `bundles` mapping above;
    # harmless here since `bundles` is not used again, but worth renaming.
    for node_id, bundles in node_id_to_bundle.items():
        if len(bundles) < parallel_config.tensor_parallel_size:
            logger.warning(
                "tensor_parallel_size=%d "
                "is bigger than a reserved number of %ss (%d "
                "%ss) in a node %s. Tensor parallel workers can be "
                "spread out to 2+ nodes which can degrade the performance "
                "unless you have fast interconnect across nodes, like "
                "Infiniband. To resolve this issue, make sure you have more "
                "than %d GPUs available at each node.",
                parallel_config.tensor_parallel_size,
                device_str,
                len(bundles),
                device_str,
                node_id,
                parallel_config.tensor_parallel_size,
            )


def _wait_until_pg_ready(current_placement_group: "PlacementGroup"):
    """Wait until a placement group is ready.

    It prints the informative log messages if the placement group is
    not created within time.

    """
    # Wait until PG is ready - this will block until all
    # requested resources are available, and will time out
    # if they cannot be provisioned.
    placement_group_specs = current_placement_group.bundle_specs

    s = time.time()
    pg_ready_ref = current_placement_group.ready()
    wait_interval = 10
    while time.time() - s < PG_WAIT_TIMEOUT:
        # ray.wait itself blocks up to wait_interval, so the loop does not
        # need an explicit sleep.
        ready, _ = ray.wait([pg_ready_ref], timeout=wait_interval)
        if len(ready) > 0:
            break

        # Exponential backoff for warning print.
        wait_interval *= 2
        logger.info(
            "Waiting for creating a placement group of specs for "
            "%d seconds. specs=%s. Check `ray status` and "
            "`ray list nodes` to see if you have enough resources,"
            " and make sure the IP addresses used by ray cluster"
            " are the same as VLLM_HOST_IP environment variable"
            " specified in each node if you are running on a multi-node.",
            int(time.time() - s),
            placement_group_specs,
        )

    try:
        ray.get(pg_ready_ref, timeout=0)
    except ray.exceptions.GetTimeoutError:
        # Provide more helpful error message when GPU count is exceeded
        total_gpu_required = sum(spec.get("GPU", 0) for spec in placement_group_specs)
        # If more than one GPU is required for the placement group, provide a
        # more specific error message.
        # We use >1 here because multi-GPU (tensor parallel) jobs are more
        # likely to fail due to insufficient cluster resources, and users may
        # need to adjust tensor_parallel_size to fit available GPUs.
        if total_gpu_required > 1:
            raise ValueError(
                f"Cannot provide a placement group requiring "
                f"{total_gpu_required} GPUs "
                f"(placement_group_specs={placement_group_specs}) within "
                f"{PG_WAIT_TIMEOUT} seconds.\n"
                f"Tensor parallel size may exceed available GPUs in your "
                f"cluster. Check resources with `ray status` and "
                f"`ray list nodes`.\n"
                f"If running on K8s with limited GPUs, consider reducing "
                f"--tensor-parallel-size to match available GPU resources."
            ) from None
        else:
            raise ValueError(
                "Cannot provide a placement group of "
                f"{placement_group_specs=} within "
                f"{PG_WAIT_TIMEOUT} seconds. See "
                "`ray status` and `ray list nodes` to make sure the cluster "
                "has enough resources."
            ) from None


def _wait_until_pg_removed(current_placement_group: "PlacementGroup"):
    # Request removal, then poll until the current PG is gone or we time out.
    ray.util.remove_placement_group(current_placement_group)
    s = time.time()
    wait_interval = 10
    while time.time() - s < PG_WAIT_TIMEOUT:
        pg = ray.util.get_current_placement_group()
        if pg is None:
            break

        # Exponential backoff for warning print.
        wait_interval *= 2
        logger.info(
            "Waiting for removing a placement group of specs for %d seconds.",
            int(time.time() - s),
        )
        time.sleep(wait_interval)


def initialize_ray_cluster(
    parallel_config: ParallelConfig,
    ray_address: str | None = None,
):
    """Initialize the distributed cluster with Ray.

    it will connect to the Ray cluster and create a placement group
    for the workers, which includes the specification of the resources
    for each distributed worker.

    Args:
        parallel_config: The configurations for parallel execution.
        ray_address: The address of the Ray cluster. If None, uses
            the default Ray cluster address.
    """
    assert_ray_available()
    from vllm.platforms import current_platform

    # Prevalidate GPU requirements before Ray processing
    if current_platform.is_cuda() and parallel_config.world_size > 1:
        from vllm.utils.torch_utils import cuda_device_count_stateless

        available_gpus = cuda_device_count_stateless()
        if parallel_config.world_size > available_gpus:
            # Warn only; the placement group wait below gives the cluster a
            # chance to provide remote GPUs.
            logger.warning(
                "Tensor parallel size (%d) exceeds available GPUs (%d). "
                "This may result in Ray placement group allocation failures. "
                "Consider reducing tensor_parallel_size to %d or less, "
                "or ensure your Ray cluster has %d GPUs available.",
                parallel_config.world_size,
                available_gpus,
                available_gpus,
                parallel_config.world_size,
            )

    if ray.is_initialized():
        logger.info("Ray is already initialized. Skipping Ray initialization.")
    elif current_platform.is_rocm() or current_platform.is_xpu():
        # Try to connect existing ray instance and create a new one if not found
        try:
            ray.init("auto")
        except ConnectionError:
            logger.warning(
                "No existing RAY instance detected. "
                "A new instance will be launched with current node resources."
            )
            ray.init(
                address=ray_address,
                num_gpus=parallel_config.world_size,
                runtime_env=parallel_config.ray_runtime_env,
            )
    else:
        ray.init(address=ray_address, runtime_env=parallel_config.ray_runtime_env)

    device_str = current_platform.ray_device_key
    if not device_str:
        raise ValueError(
            f"current platform {current_platform.device_name} does not support ray."
        )

    # Create or get the placement group for worker processes
    if parallel_config.placement_group:
        current_placement_group = parallel_config.placement_group
    else:
        current_placement_group = ray.util.get_current_placement_group()

    if current_placement_group:
        logger.info("Using the existing placement group")

        # We are in a placement group
        bundles = current_placement_group.bundle_specs
        # Verify that we can use the placement group.
        device_bundles = 0
        for bundle in bundles:
            bundle_devices = bundle.get(device_str, 0)
            if bundle_devices > 1:
                raise ValueError(
                    f"Placement group bundle cannot have more than 1 {device_str}."
                )
            if bundle_devices:
                device_bundles += 1
        if parallel_config.world_size > device_bundles:
            raise ValueError(
                f"The number of required {device_str}s exceeds the total "
                f"number of available {device_str}s in the placement group. "
                f"Required number of devices: {parallel_config.world_size}. "
                f"Total number of devices: {device_bundles}."
            )
    else:
        logger.info("No current placement group found. Creating a new placement group.")
        num_devices_in_cluster = ray.cluster_resources().get(device_str, 0)
        # Log a warning message and delay resource allocation failure response.
        # Avoid immediate rejection to allow user-initiated placement group
        # created and wait cluster to be ready
        if parallel_config.world_size > num_devices_in_cluster:
            logger.warning(
                "The number of required %ss exceeds the total "
                "number of available %ss in the placement group.",
                device_str,
                device_str,
            )
        # Create a new placement group
        placement_group_specs: list[dict[str, float]] = [
            {device_str: 1.0} for _ in range(parallel_config.world_size)
        ]

        # vLLM engine is also a worker to execute model with an accelerator,
        # so it requires to have the device in a current node. Check if
        # the current node has at least one device.
        current_ip = get_ip()
        current_node_id = ray.get_runtime_context().get_node_id()
        current_node_resource = available_resources_per_node()[current_node_id]
        if current_node_resource.get(device_str, 0) < 1:
            raise ValueError(
                f"Current node has no {device_str} available. "
                f"{current_node_resource=}. vLLM engine cannot start without "
                f"{device_str}. Make sure you have at least 1 {device_str} "
                f"available in a node {current_node_id=} {current_ip=}."
            )
        # This way, at least bundle is required to be created in a current
        # node.
        placement_group_specs[0][f"node:{current_ip}"] = 0.001

        # By default, Ray packs resources as much as possible.
        current_placement_group = ray.util.placement_group(
            placement_group_specs, strategy="PACK"
        )
        _wait_until_pg_ready(current_placement_group)

    assert current_placement_group is not None
    _verify_bundles(current_placement_group, parallel_config, device_str)
    # Set the placement group in the parallel config
    parallel_config.placement_group = current_placement_group


def get_num_tpu_nodes() -> int:
    # Infers node count from total cluster TPUs / TPUs per node; assumes a
    # homogeneous TPU cluster (asserted below).
    from ray._private.accelerators import TPUAcceleratorManager

    cluster_resources = ray.cluster_resources()
    total_tpus = int(cluster_resources["TPU"])
    tpus_per_node = TPUAcceleratorManager.get_current_node_num_accelerators()
    assert total_tpus % tpus_per_node == 0
    return total_tpus // tpus_per_node


def get_num_nodes_in_placement_group() -> int:
    # Counts distinct node ids among the bundles of the current PG; 0 when
    # not running inside a placement group.
    pg_table = ray.util.placement_group_table()
    current_pg = ray.util.get_current_placement_group()
    num_nodes = 0

    if current_pg:
        nodes_in_pg = set()
        for pg_key, pg in pg_table.items():
            if pg_key == current_pg.id.hex():
                for _, node in pg["bundles_to_node_id"].items():
                    nodes_in_pg.add(node)
        num_nodes = len(nodes_in_pg)

    return num_nodes
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/executor/abstract.py
vllm/v1/executor/abstract.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import time
from abc import ABC, abstractmethod
from collections.abc import Callable
from concurrent.futures import Future
from functools import cached_property
from typing import TYPE_CHECKING, Literal, TypeVar, overload

from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
    KVConnectorHandshakeMetadata,
)
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.tasks import SupportedTask
from vllm.utils.import_utils import resolve_obj_by_qualname
from vllm.v1.core.sched.output import GrammarOutput, SchedulerOutput
from vllm.v1.engine import ReconfigureDistributedRequest
from vllm.v1.kv_cache_interface import KVCacheConfig, KVCacheSpec
from vllm.v1.outputs import DraftTokenIds, ModelRunnerOutput
from vllm.v1.worker.worker_base import WorkerBase

if TYPE_CHECKING:
    from vllm.distributed.kv_transfer.kv_connector.base import KVConnectorBase

logger = init_logger(__name__)

_R = TypeVar("_R")

# Callback invoked when the executor enters a permanent failed state.
FailureCallback = Callable[[], None]


class Executor(ABC):
    """Abstract base class for vLLM executors.

    An executor is responsible for executing the model on one device,
    or it can be a distributed executor that can execute the model on
    multiple devices.
    """

    uses_ray: bool = False  # whether the executor uses Ray for orchestration.
    supports_pp: bool = False  # whether the executor supports PP

    @staticmethod
    def get_class(vllm_config: VllmConfig) -> type["Executor"]:
        # Resolves the configured backend (a class, a known string name, or a
        # fully-qualified class path) to a concrete Executor subclass.
        executor_class: type[Executor]
        parallel_config = vllm_config.parallel_config
        distributed_executor_backend = parallel_config.distributed_executor_backend
        # distributed_executor_backend must be set in VllmConfig.__post_init__
        if isinstance(distributed_executor_backend, type):
            if not issubclass(distributed_executor_backend, Executor):
                raise TypeError(
                    "distributed_executor_backend must be a subclass of "
                    f"Executor. Got {distributed_executor_backend}."
                )
            executor_class = distributed_executor_backend
        elif distributed_executor_backend == "ray":
            from vllm.v1.executor.ray_executor import RayDistributedExecutor

            executor_class = RayDistributedExecutor
        elif distributed_executor_backend == "mp":
            from vllm.v1.executor.multiproc_executor import MultiprocExecutor

            executor_class = MultiprocExecutor
        elif distributed_executor_backend == "uni":
            from vllm.v1.executor.uniproc_executor import UniProcExecutor

            executor_class = UniProcExecutor
        elif distributed_executor_backend == "external_launcher":
            # TODO: make v1 scheduling deterministic
            # to support external launcher
            executor_class = ExecutorWithExternalLauncher
        elif isinstance(distributed_executor_backend, str):
            # Treat any other string as a "module.ClassName" qualified path.
            executor_class = resolve_obj_by_qualname(distributed_executor_backend)
            if not issubclass(executor_class, Executor):
                raise TypeError(
                    "distributed_executor_backend must be a subclass of "
                    f"Executor. Got {executor_class}."
                )
        else:
            raise ValueError(
                f"Unknown distributed executor backend: {distributed_executor_backend}"
            )
        return executor_class

    def __init__(
        self,
        vllm_config: VllmConfig,
    ) -> None:
        # Unpack sub-configs for convenient access by subclasses.
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.cache_config = vllm_config.cache_config
        self.lora_config = vllm_config.lora_config
        self.load_config = vllm_config.load_config
        self.parallel_config = vllm_config.parallel_config
        self.scheduler_config = vllm_config.scheduler_config
        self.device_config = vllm_config.device_config
        self.speculative_config = vllm_config.speculative_config
        self.observability_config = vllm_config.observability_config
        self._init_executor()
        self.is_sleeping = False
        self.sleeping_tags: set[str] = set()
        self.kv_output_aggregator: KVOutputAggregator | None = None

    @abstractmethod
    def _init_executor(self) -> None:
        raise NotImplementedError

    def initialize_from_config(self, kv_cache_configs: list[KVCacheConfig]) -> None:
        """
        Initialize the KV caches and begin the model execution loop of the
        underlying workers.
        """
        self.collective_rpc("initialize_from_config", args=(kv_cache_configs,))
        self.collective_rpc("compile_or_warm_up_model")

    def register_failure_callback(self, callback: FailureCallback):  # noqa: B027
        """
        Register a function to be called if the executor enters a
        permanent failed state.
        """
        pass

    def determine_available_memory(self) -> list[int]:  # in bytes
        return self.collective_rpc("determine_available_memory")

    def get_kv_cache_specs(self) -> list[dict[str, KVCacheSpec]]:
        return self.collective_rpc("get_kv_cache_spec")

    @overload
    def collective_rpc(
        self,
        method: str | Callable[[WorkerBase], _R],
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict | None = None,
        non_block: Literal[False] = False,
    ) -> list[_R]:
        """
        Execute an RPC call on all workers.

        Args:
            method: Name of the worker method to execute, or a callable that
                is serialized and sent to all workers to execute.

                If the method is a callable, it should accept an additional
                `self` argument, in addition to the arguments passed in `args`
                and `kwargs`. The `self` argument will be the worker object.
            timeout: Maximum time in seconds to wait for execution. Raises a
                [`TimeoutError`][] on timeout. `None` means wait indefinitely.
            args: Positional arguments to pass to the worker method.
            kwargs: Keyword arguments to pass to the worker method.
            non_block: If `True`, returns a list of Futures instead of waiting
                for the results.

        Returns:
            A list containing the results from each worker.

        Note:
            It is recommended to use this API to only pass control messages,
            and set up data-plane communication to pass data.
        """
        pass

    @overload
    def collective_rpc(
        self,
        method: str | Callable[[WorkerBase], _R],
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict | None = None,
        non_block: Literal[True] = True,
    ) -> Future[list[_R]]:
        pass

    @abstractmethod
    def collective_rpc(
        self, method, timeout=None, args=(), kwargs=None, non_block: bool = False
    ):
        raise NotImplementedError

    def get_kv_connector_handshake_metadata(
        self,
    ) -> list[dict[int, KVConnectorHandshakeMetadata]]:
        return self.collective_rpc("get_kv_connector_handshake_metadata")

    @overload
    def execute_model(
        self, scheduler_output: SchedulerOutput, non_block: Literal[False] = False
    ) -> ModelRunnerOutput | None:
        pass

    @overload
    def execute_model(
        self, scheduler_output: SchedulerOutput, non_block: Literal[True] = True
    ) -> Future[ModelRunnerOutput | None]:
        pass

    def execute_model(
        self, scheduler_output: SchedulerOutput, non_block: bool = False
    ) -> ModelRunnerOutput | None | Future[ModelRunnerOutput | None]:
        # All workers run the step; only the first worker's output is returned.
        output = self.collective_rpc(  # type: ignore[call-overload]
            "execute_model", args=(scheduler_output,), non_block=non_block
        )
        return output[0]

    @overload
    def sample_tokens(
        self, grammar_output: GrammarOutput | None, non_block: Literal[False] = False
    ) -> ModelRunnerOutput:
        pass

    @overload
    def sample_tokens(
        self, grammar_output: GrammarOutput | None, non_block: Literal[True] = True
    ) -> Future[ModelRunnerOutput]:
        pass

    def sample_tokens(
        self, grammar_output: GrammarOutput | None, non_block: bool = False
    ) -> ModelRunnerOutput | Future[ModelRunnerOutput]:
        # Mirrors execute_model: broadcast RPC, return rank-0 result.
        output = self.collective_rpc(  # type: ignore[call-overload]
            "sample_tokens", args=(grammar_output,), non_block=non_block
        )
        return output[0]

    def execute_dummy_batch(self) -> None:
        self.collective_rpc("execute_dummy_batch")

    def take_draft_token_ids(self) -> DraftTokenIds | None:
        output: list[DraftTokenIds] = self.collective_rpc("take_draft_token_ids")
        return output[0]

    @property
    def max_concurrent_batches(self) -> int:
        # Default: no in-flight batching; subclasses may override.
        return 1

    def profile(self, is_start: bool = True):
        self.collective_rpc("profile", args=(is_start,))

    def save_sharded_state(
        self,
        path: str,
        pattern: str | None = None,
        max_size: int | None = None,
    ) -> None:
        self.collective_rpc(
            "save_sharded_state",
            kwargs=dict(path=path, pattern=pattern, max_size=max_size),
        )

    @abstractmethod
    def check_health(self) -> None:
        """Checks if the executor is healthy. If not, it should raise an
        exception."""
        raise NotImplementedError

    def shutdown(self) -> None:
        """Shutdown the executor."""
        self.collective_rpc("shutdown")

    def init_kv_output_aggregator(self, connector: "KVConnectorBase") -> None:
        """Init KVOutputAggregator"""
        self.kv_output_aggregator = KVOutputAggregator.from_connector(
            connector, self.parallel_config.world_size
        )

    @cached_property  # Avoid unnecessary RPC calls
    def supported_tasks(self) -> tuple[SupportedTask, ...]:
        output: list[tuple[SupportedTask, ...]]
        output = self.collective_rpc("get_supported_tasks")
        return output[0]

    def add_lora(self, lora_request: LoRARequest) -> bool:
        assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
        return all(self.collective_rpc("add_lora", args=(lora_request,)))

    def remove_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return all(self.collective_rpc("remove_lora", args=(lora_id,)))

    def pin_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return all(self.collective_rpc("pin_lora", args=(lora_id,)))

    def list_loras(self) -> set[int]:
        # Workers must agree on the LoRA set; return the common set.
        sets: list[set[int]] = self.collective_rpc("list_loras")
        for s in sets:
            assert s == sets[0], "All workers should have the same LORAs."
        return sets[0]

    def reset_mm_cache(self) -> None:
        """Reset the multi-modal cache in each worker."""
        self.collective_rpc("reset_mm_cache")

    def sleep(self, level: int = 1):
        # Offload/free worker state; tracked via sleeping_tags for wake_up.
        if self.is_sleeping:
            logger.warning("Executor is already sleeping.")
            return
        time_before_sleep = time.perf_counter()
        self.collective_rpc("sleep", kwargs=dict(level=level))
        time_after_sleep = time.perf_counter()
        self.sleeping_tags = {"weights", "kv_cache"}
        self.is_sleeping = True
        logger.info(
            "It took %.6f seconds to fall asleep.", time_after_sleep - time_before_sleep
        )

    def wake_up(self, tags: list[str] | None = None):
        # Wake the given tags (or all); executor leaves the sleeping state
        # only once every tag has been woken.
        if not self.is_sleeping:
            logger.warning("Executor is not sleeping.")
            return
        if tags:
            for tag in tags:
                if tag not in self.sleeping_tags:
                    logger.warning(
                        "Tag %s is not in sleeping tags %s", tag, self.sleeping_tags
                    )
                    return
        time_before_wakeup = time.perf_counter()
        self.collective_rpc("wake_up", kwargs=dict(tags=tags))
        time_after_wakeup = time.perf_counter()
        logger.info(
            "It took %.6f seconds to wake up tags %s.",
            time_after_wakeup - time_before_wakeup,
            tags if tags is not None else self.sleeping_tags,
        )
        if tags:
            for tag in tags:
                self.sleeping_tags.remove(tag)
        else:
            self.sleeping_tags.clear()
        if not self.sleeping_tags:
            self.is_sleeping = False

    def reinitialize_distributed(
        self, reconfig_request: ReconfigureDistributedRequest
    ) -> None:
        raise NotImplementedError


# Imported at the bottom to avoid a circular import with uniproc_executor,
# which itself imports Executor from this module.
from vllm.v1.executor.uniproc_executor import (  # noqa: E402
    ExecutorWithExternalLauncher as _ExecutorWithExternalLauncher,
)
from vllm.v1.executor.uniproc_executor import (  # noqa: E402
    UniProcExecutor as _UniProcExecutor,
)

# For backwards compatibility.
UniProcExecutor = _UniProcExecutor
ExecutorWithExternalLauncher = _ExecutorWithExternalLauncher
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/executor/multiproc_executor.py
vllm/v1/executor/multiproc_executor.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import multiprocessing import os import pickle import queue import signal import threading import time import traceback import weakref from collections import deque from collections.abc import Callable, Sequence from concurrent.futures import Future, InvalidStateError from contextlib import suppress from dataclasses import dataclass from enum import Enum, auto from functools import cached_property, partial from multiprocessing.connection import Connection from multiprocessing.process import BaseProcess from multiprocessing.synchronize import Lock as LockType from threading import Thread from typing import Any, cast import cloudpickle import torch import vllm.envs as envs from vllm.config import VllmConfig from vllm.distributed import destroy_distributed_environment, destroy_model_parallel from vllm.distributed.device_communicators.shm_broadcast import Handle, MessageQueue from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator from vllm.distributed.parallel_state import ( get_dcp_group, get_dp_group, get_ep_group, get_inner_dp_world_group, get_pcp_group, get_pp_group, get_tp_group, ) from vllm.envs import enable_envs_cache from vllm.logger import init_logger from vllm.utils.network_utils import ( get_distributed_init_method, get_loopback_ip, get_open_port, ) from vllm.utils.system_utils import ( _maybe_force_spawn, decorate_logs, get_mp_context, set_process_title, ) from vllm.v1.core.sched.output import GrammarOutput, SchedulerOutput from vllm.v1.executor.abstract import Executor, FailureCallback from vllm.v1.outputs import AsyncModelRunnerOutput, DraftTokenIds, ModelRunnerOutput from vllm.v1.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) class FutureWrapper(Future): def __init__( self, futures_queue: deque[tuple["FutureWrapper", Callable]], aggregate: Callable = lambda x: x, ): self.futures_queue = 
futures_queue self.aggregate = aggregate super().__init__() def result(self, timeout=None): if timeout is not None: raise RuntimeError("timeout not implemented") # Drain any futures ahead of us in the queue. while not self.done(): future, get_response = self.futures_queue.pop() future.wait_for_response(get_response) return super().result() def wait_for_response(self, get_response: Callable): try: response = self.aggregate(get_response()) with suppress(InvalidStateError): self.set_result(response) except Exception as e: with suppress(InvalidStateError): self.set_exception(e) class MultiprocExecutor(Executor): supports_pp: bool = True def __init__(self, vllm_config: VllmConfig, monitor_workers: bool = True): self.monitor_workers = monitor_workers super().__init__(vllm_config) def _init_executor(self) -> None: # Call self.shutdown at exit to clean up # and ensure workers will be terminated. self._finalizer = weakref.finalize(self, self.shutdown) self.is_failed = False self.shutdown_event = threading.Event() self.failure_callback: FailureCallback | None = None self.world_size = self.parallel_config.world_size assert self.world_size % self.parallel_config.nnodes_within_dp == 0, ( f"global world_size ({self.parallel_config.world_size}) must be " f"divisible by nnodes_within_dp " f"({self.parallel_config.nnodes_within_dp}). " ) self.local_world_size = self.parallel_config.local_world_size tp_size = self.parallel_config.tensor_parallel_size pp_size = self.parallel_config.pipeline_parallel_size pcp_size = self.parallel_config.prefill_context_parallel_size assert self.world_size == tp_size * pp_size * pcp_size, ( f"world_size ({self.world_size}) must be equal to the " f"tensor_parallel_size ({tp_size}) x pipeline" f"_parallel_size ({pp_size}) x prefill_context" f"_parallel_size ({pcp_size}). " ) # Set multiprocessing envs set_multiprocessing_worker_envs() # use the loopback address get_loopback_ip() for communication. 
distributed_init_method = get_distributed_init_method( get_loopback_ip(), get_open_port() ) self.rpc_broadcast_mq: MessageQueue | None = None scheduler_output_handle: Handle | None = None # Initialize worker and set up message queues for SchedulerOutputs # and ModelRunnerOutputs if self.parallel_config.node_rank_within_dp == 0: # For leader node within each dp rank, # each dp will have its own leader multiproc executor. max_chunk_bytes = envs.VLLM_MQ_MAX_CHUNK_BYTES_MB * 1024 * 1024 self.rpc_broadcast_mq = MessageQueue( self.world_size, self.local_world_size, max_chunk_bytes=max_chunk_bytes, connect_ip=self.parallel_config.master_addr, ) scheduler_output_handle = self.rpc_broadcast_mq.export_handle() # Create workers context = get_mp_context() shared_worker_lock = context.Lock() unready_workers: list[UnreadyWorkerProcHandle] = [] success = False try: global_start_rank = ( self.local_world_size * self.parallel_config.node_rank_within_dp ) for local_rank in range(self.local_world_size): global_rank = global_start_rank + local_rank unready_workers.append( WorkerProc.make_worker_process( vllm_config=self.vllm_config, local_rank=local_rank, rank=global_rank, distributed_init_method=distributed_init_method, input_shm_handle=scheduler_output_handle, shared_worker_lock=shared_worker_lock, ) ) # Workers must be created before wait_for_ready to avoid # deadlock, since worker.init_device() does a device sync. # Wait for all local workers to be ready. self.workers = WorkerProc.wait_for_ready(unready_workers) # Start background thread to monitor worker health if not in headless mode. 
if self.monitor_workers: self.start_worker_monitor() self.response_mqs = [] # Only leader node have remote response mqs if self.parallel_config.node_rank_within_dp == 0: for rank in range(self.world_size): if rank < self.local_world_size: local_message_queue = self.workers[rank].worker_response_mq assert local_message_queue is not None self.response_mqs.append(local_message_queue) else: remote_message_queue = self.workers[0].peer_worker_response_mqs[ rank ] assert remote_message_queue is not None self.response_mqs.append(remote_message_queue) # Ensure message queues are ready. Will deadlock if re-ordered # Must be kept consistent with the WorkerProc. # Wait for all input mqs to be ready. if self.rpc_broadcast_mq is not None: self.rpc_broadcast_mq.wait_until_ready() # Wait for all remote response mqs to be ready. for response_mq in self.response_mqs: response_mq.wait_until_ready() success = True finally: if not success: # Clean up the worker procs if there was a failure. # Close death_writers first to signal workers to exit for uw in unready_workers: if uw.death_writer is not None: uw.death_writer.close() self._ensure_worker_termination([uw.proc for uw in unready_workers]) self.futures_queue = deque[tuple[FutureWrapper, Callable]]() self.output_rank = self._get_output_rank() def start_worker_monitor(self, inline=False) -> None: workers = self.workers self_ref = weakref.ref(self) # Monitors worker process liveness. If any die unexpectedly, # logs an error, shuts down the executor and invokes the failure # callback to inform the engine. 
def monitor_workers(): sentinels = [h.proc.sentinel for h in workers] died = multiprocessing.connection.wait(sentinels) _self = self_ref() if not _self or getattr(_self, "shutting_down", False): return _self.is_failed = True proc_name = next(h.proc.name for h in workers if h.proc.sentinel == died[0]) logger.error( "Worker proc %s died unexpectedly, shutting down executor.", proc_name ) _self.shutdown() callback = _self.failure_callback if callback is not None: _self.failure_callback = None callback() if not inline: Thread( target=monitor_workers, daemon=True, name="MultiprocWorkerMonitor" ).start() return monitor_workers() def register_failure_callback(self, callback: FailureCallback): if self.is_failed: callback() else: self.failure_callback = callback def execute_model( # type: ignore[override] self, scheduler_output: SchedulerOutput, non_block: bool = False ) -> ModelRunnerOutput | None | Future[ModelRunnerOutput | None]: return self.collective_rpc( "execute_model", args=(scheduler_output,), unique_reply_rank=self.output_rank, non_block=non_block, timeout=envs.VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS, kv_output_aggregator=self.kv_output_aggregator, ) def sample_tokens( # type: ignore[override] self, grammar_output: GrammarOutput | None, non_block: bool = False ) -> ModelRunnerOutput | Future[ModelRunnerOutput]: return self.collective_rpc( "sample_tokens", args=(grammar_output,), unique_reply_rank=self.output_rank, non_block=non_block, timeout=envs.VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS, kv_output_aggregator=self.kv_output_aggregator, ) def execute_dummy_batch(self) -> None: self.collective_rpc("execute_dummy_batch", unique_reply_rank=self.output_rank) def take_draft_token_ids(self) -> DraftTokenIds | None: # OPTIMIZATION: Get output only from a single worker (output_rank) return self.collective_rpc( "take_draft_token_ids", unique_reply_rank=self.output_rank ) def collective_rpc( # type: ignore[override] self, method: str | Callable, timeout: float | None = None, args: 
tuple = (), kwargs: dict | None = None, non_block: bool = False, unique_reply_rank: int | None = None, kv_output_aggregator: KVOutputAggregator | None = None, ) -> Any: """Returns single result if unique_reply_rank and/or kv_output_aggregator is provided, otherwise list.""" assert self.rpc_broadcast_mq is not None, ( "collective_rpc should not be called on follower node" ) if self.is_failed: raise RuntimeError("Executor failed.") deadline = None if timeout is None else time.monotonic() + timeout kwargs = kwargs or {} if kv_output_aggregator is not None: output_rank = None aggregate: Callable[[Any], Any] = partial( kv_output_aggregator.aggregate, output_rank=unique_reply_rank or 0 ) else: output_rank = unique_reply_rank aggregate = lambda x: x if isinstance(method, str): send_method = method else: send_method = cloudpickle.dumps(method, protocol=pickle.HIGHEST_PROTOCOL) self.rpc_broadcast_mq.enqueue((send_method, args, kwargs, output_rank)) response_mqs: Sequence[MessageQueue] = self.response_mqs if output_rank is not None: response_mqs = (response_mqs[output_rank],) shutdown_event = self.shutdown_event def get_response(): responses = [] for mq in response_mqs: dequeue_timeout = ( None if deadline is None else (deadline - time.monotonic()) ) try: status, result = mq.dequeue( timeout=dequeue_timeout, cancel=shutdown_event ) except TimeoutError as e: raise TimeoutError(f"RPC call to {method} timed out.") from e if status != WorkerProc.ResponseStatus.SUCCESS: raise RuntimeError( f"Worker failed with error '{result}', please check the" " stack trace above for the root cause" ) responses.append(result) return responses[0] if output_rank is not None else responses if non_block: future = FutureWrapper(self.futures_queue, aggregate=aggregate) self.futures_queue.appendleft((future, get_response)) return future # First drain any pending futures in the queue. 
while self.futures_queue: future, get_fut_response = self.futures_queue.pop() future.wait_for_response(get_fut_response) return aggregate(get_response()) @staticmethod def _ensure_worker_termination(worker_procs: list[BaseProcess]): """Ensure that all worker processes are terminated. Assumes workers have received termination requests. Waits for processing, then sends termination and kill signals if needed.""" def wait_for_termination(procs, timeout): if not time: # If we are in late stage shutdown, the interpreter may replace # `time` with `None`. return all(not proc.is_alive() for proc in procs) start_time = time.time() while time.time() - start_time < timeout: if all(not proc.is_alive() for proc in procs): return True time.sleep(0.1) return False # Send SIGTERM if still running active_procs = [proc for proc in worker_procs if proc.is_alive()] for p in active_procs: p.terminate() if not wait_for_termination(active_procs, 4): # Send SIGKILL if still running active_procs = [p for p in active_procs if p.is_alive()] for p in active_procs: p.kill() def shutdown(self): """Properly shut down the executor and its workers""" if not getattr(self, "shutting_down", False): self.shutting_down = True # Make sure all the worker processes are terminated first. if workers := getattr(self, "workers", None): for w in workers: # Close death_writer to signal child processes to exit if w.death_writer is not None: w.death_writer.close() w.death_writer = None w.worker_response_mq = None self._ensure_worker_termination([w.proc for w in workers]) self.shutdown_event.set() self.rpc_broadcast_mq = None def check_health(self) -> None: self.collective_rpc("check_health", timeout=10) return @cached_property def max_concurrent_batches(self) -> int: if self.scheduler_config.async_scheduling: return 2 return self.parallel_config.pipeline_parallel_size def _get_output_rank(self) -> int: # Only returns ModelRunnerOutput from TP rank=0 and PP rank=-1 # (the first TP worker of the last PP stage). 
# Example: # Assuming TP=8, PP=4, then the world_size=32 # 0-7, PP rank 0 # 8-15, PP rank 1 # 16-23, PP rank 2 # 24-31, PP rank 3 # so world_size - tp_size = 32 - 8 = 24 should be PP rank = -1 (i.e. 3) return ( self.world_size - self.parallel_config.tensor_parallel_size * self.parallel_config.prefill_context_parallel_size ) @dataclass class UnreadyWorkerProcHandle: """WorkerProcess handle before READY.""" proc: BaseProcess rank: int ready_pipe: Connection death_writer: Connection | None = None @dataclass class WorkerProcHandle: proc: BaseProcess rank: int # The worker process writes to this MQ in single-node mode worker_response_mq: MessageQueue | None # This is only non empty on driver node, # the peer worker process i writes to MQ # `peer_worker_response_mqs[i]` peer_worker_response_mqs: list[MessageQueue | None] death_writer: Connection | None = None @classmethod def from_unready_handle( cls, unready_handle: UnreadyWorkerProcHandle, worker_response_mq: MessageQueue | None, peer_worker_response_mqs: list[MessageQueue | None], ) -> "WorkerProcHandle": return cls( proc=unready_handle.proc, rank=unready_handle.rank, worker_response_mq=worker_response_mq, peer_worker_response_mqs=peer_worker_response_mqs, death_writer=unready_handle.death_writer, ) class WorkerProc: """Wrapper that runs one Worker in a separate process.""" READY_STR = "READY" rpc_broadcast_mq: MessageQueue | None worker_response_mq: MessageQueue | None def _init_message_queues( self, input_shm_handle: Handle, vllm_config: VllmConfig ) -> None: if vllm_config.parallel_config.nnodes_within_dp == 1: # Initialize MessageQueue for receiving SchedulerOutput self.rpc_broadcast_mq = MessageQueue.create_from_handle( input_shm_handle, self.worker.rank ) # Initializes a message queue for sending the model output self.worker_response_mq = MessageQueue(1, 1) self.peer_response_handles = [] else: # Initialize remote MessageQueue for receiving SchedulerOutput across nodes self.rpc_broadcast_mq = 
get_inner_dp_world_group().create_mq_broadcaster( external_writer_handle=input_shm_handle, # Since there is external_writer_handle from executor proc, # where the ready signal from actual writer is sent out of the # create_mq_broadcaster method and after this setup, we make it # non blocking. The handshake will be triggered when # worker.rpc_broadcast_mq.wait_until_ready() is called blocking=False, ) # Initializes remote message queue for sending the model output to the # driver worker, exposing peer_response_handles for driver worker # that include handles for all ranks self.worker_response_mq, self.peer_response_handles = ( get_inner_dp_world_group().create_single_reader_mq_broadcasters( reader_rank_in_group=0 ) ) def __init__( self, vllm_config: VllmConfig, local_rank: int, rank: int, distributed_init_method: str, input_shm_handle: Handle, shared_worker_lock: LockType, ): self.rank = rank wrapper = WorkerWrapperBase( vllm_config=vllm_config, rpc_rank=local_rank, global_rank=rank ) # TODO: move `init_worker` to executor level as a collective rpc call all_kwargs: list[dict] = [ {} for _ in range(vllm_config.parallel_config.world_size) ] is_driver_worker = rank % vllm_config.parallel_config.tensor_parallel_size == 0 all_kwargs[local_rank] = { "vllm_config": vllm_config, "local_rank": local_rank, "rank": rank, "distributed_init_method": distributed_init_method, "is_driver_worker": is_driver_worker, "shared_worker_lock": shared_worker_lock, } wrapper.init_worker(all_kwargs) self.worker = wrapper scheduler_config = vllm_config.scheduler_config self.use_async_scheduling = scheduler_config.async_scheduling if self.use_async_scheduling: self.async_output_queue: queue.Queue = queue.Queue() self.async_output_copy_thread = Thread( target=self.async_output_busy_loop, daemon=True, name="WorkerAsyncOutputCopy", ) self.async_output_copy_thread.start() # Initialize device self.worker.init_device() # Set process title and log prefix self.setup_proc_title_and_log_prefix( 
enable_ep=vllm_config.parallel_config.enable_expert_parallel ) # Load model self._init_message_queues(input_shm_handle, vllm_config) self.worker.load_model() # Enable environment variable cache (e.g. assume no more # environment variable overrides after this point) enable_envs_cache() @staticmethod def make_worker_process( vllm_config: VllmConfig, local_rank: int, rank: int, distributed_init_method: str, input_shm_handle, # Receive SchedulerOutput shared_worker_lock: LockType, ) -> UnreadyWorkerProcHandle: context = get_mp_context() # (reader, writer) reader, writer = context.Pipe(duplex=False) # Create death pipe to detect parent process exit death_reader, death_writer = context.Pipe(duplex=False) process_kwargs = { "vllm_config": vllm_config, "local_rank": local_rank, "rank": rank, "distributed_init_method": distributed_init_method, "input_shm_handle": input_shm_handle, "ready_pipe": (reader, writer), "death_pipe": death_reader, "shared_worker_lock": shared_worker_lock, } # Run EngineCore busy loop in background process. 
proc = context.Process( target=WorkerProc.worker_main, kwargs=process_kwargs, name=f"VllmWorker-{rank}", daemon=True, ) proc.start() writer.close() # Keep death_writer open in parent - when parent exits, # death_reader in child will get EOFError return UnreadyWorkerProcHandle(proc, rank, reader, death_writer) @staticmethod def wait_for_response_handle_ready( handles: dict[str, Any], proc_handle: UnreadyWorkerProcHandle ) -> WorkerProcHandle: response_handle = handles["handle"] worker_response_mq: MessageQueue | None = None if len(response_handle.local_reader_ranks) > 0: worker_response_mq = MessageQueue.create_from_handle(response_handle, 0) peer_response_handles = handles["peer_response_handles"] peer_worker_response_mqs = [ MessageQueue.create_from_handle(handle, -1) if handle.remote_subscribe_addr is not None else None for handle in peer_response_handles ] return WorkerProcHandle.from_unready_handle( proc_handle, worker_response_mq, peer_worker_response_mqs=peer_worker_response_mqs, ) @staticmethod def wait_for_ready( unready_proc_handles: list[UnreadyWorkerProcHandle], ) -> list[WorkerProcHandle]: e = Exception( "WorkerProc initialization failed due to " "an exception in a background process. " "See stack trace for root cause." ) pipes = {handle.ready_pipe: handle for handle in unready_proc_handles} ready_proc_handles: list[WorkerProcHandle | None] = [None] * len( unready_proc_handles ) while pipes: ready = multiprocessing.connection.wait(pipes.keys()) for pipe in ready: assert isinstance(pipe, Connection) try: # Wait until the WorkerProc is ready. unready_proc_handle = pipes.pop(pipe) response: dict[str, Any] = pipe.recv() if response["status"] != "READY": raise e idx = unready_proc_handle.rank % len(ready_proc_handles) ready_proc_handles[idx] = WorkerProc.wait_for_response_handle_ready( response, unready_proc_handle ) except EOFError: e.__suppress_context__ = True raise e from None finally: # Close connection. 
pipe.close() return cast(list[WorkerProcHandle], ready_proc_handles) def shutdown(self): self.worker.shutdown() self.rpc_broadcast_mq = None self.worker_response_mq = None destroy_model_parallel() destroy_distributed_environment() @staticmethod def worker_main(*args, **kwargs): """Worker initialization and execution loops. This runs a background process""" # Signal handler used for graceful termination. # SystemExit exception is only raised once to allow this and worker # processes to terminate without error shutdown_requested = False def signal_handler(signum, frame): nonlocal shutdown_requested if not shutdown_requested: shutdown_requested = True raise SystemExit() # Either SIGTERM or SIGINT will terminate the worker signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGINT, signal_handler) worker = None # tuple[Connection, Connection] reader, ready_writer = kwargs.pop("ready_pipe") death_pipe: Connection | None = kwargs.pop("death_pipe", None) shutdown_event = threading.Event() # Start death monitoring thread if death_pipe is provided if death_pipe is not None: def monitor_parent_death(): try: # This will block until parent process exits (pipe closes) death_pipe.recv() except EOFError: # Parent process has exited, terminate this worker logger.info_once("Parent process exited, terminating worker") # Send signal to self to trigger clean shutdown shutdown_event.set() except Exception as e: logger.warning("Death monitoring error: %s", e) death_monitor = Thread( target=monitor_parent_death, daemon=True, name="WorkerDeathMonitor" ) death_monitor.start() try: reader.close() worker = WorkerProc(*args, **kwargs) assert worker.worker_response_mq is not None # Send READY once we know everything is loaded ready_writer.send( { "status": WorkerProc.READY_STR, "handle": worker.worker_response_mq.export_handle(), "peer_response_handles": worker.peer_response_handles, } ) # Ensure message queues are ready. Will deadlock if re-ordered. 
# Must be kept consistent with the Executor if worker.rpc_broadcast_mq is not None: worker.rpc_broadcast_mq.wait_until_ready() worker.worker_response_mq.wait_until_ready() ready_writer.close() ready_writer = None worker.worker_busy_loop(cancel=shutdown_event) except Exception: # NOTE: if an Exception arises in busy_loop, we send # a FAILURE message over the MQ RPC to notify the Executor, # which triggers system shutdown. # TODO(rob): handle case where the MQ itself breaks. if ready_writer is not None: logger.exception("WorkerProc failed to start.") elif shutdown_event.is_set(): logger.info("WorkerProc shutting down.") else: logger.exception("WorkerProc failed.") # The parent sends a SIGTERM to all worker processes if # any worker dies. Set this value so we don't re-throw # SystemExit() to avoid zmq exceptions in __del__. shutdown_requested = True finally: if ready_writer is not None: ready_writer.close() if death_pipe is not None: death_pipe.close() # Clean up once worker exits busy loop if worker is not None: worker.shutdown() class ResponseStatus(Enum): SUCCESS = auto() FAILURE = auto() def enqueue_output(self, output: Any): """Prepares output from the worker and enqueues it to the worker_response_mq. If the output is an Exception, it is converted to a FAILURE response. """ if isinstance(output, AsyncModelRunnerOutput): output = output.get_output() if isinstance(output, Exception): result = (WorkerProc.ResponseStatus.FAILURE, str(output)) else: result = (WorkerProc.ResponseStatus.SUCCESS, output) if (response_mq := self.worker_response_mq) is not None: response_mq.enqueue(result) def handle_output(self, output: Any): """Handles output from the worker. If async scheduling is enabled, it is passed to the async_output_busy_loop thread. Otherwise, it is enqueued directly to the worker_response_mq. 
""" if self.use_async_scheduling: self.async_output_queue.put(output) else: self.enqueue_output(output) def async_output_busy_loop(self): """Entrypoint for the thread which handles outputs asynchronously.""" while True: output = self.async_output_queue.get() self.enqueue_output(output) def worker_busy_loop(self, cancel: threading.Event | None = None): """Main busy loop for Multiprocessing Workers""" assert self.rpc_broadcast_mq is not None while True: method, args, kwargs, output_rank = self.rpc_broadcast_mq.dequeue( cancel=cancel, indefinite=True ) try: if isinstance(method, str): func = getattr(self.worker, method) elif isinstance(method, bytes): func = partial(cloudpickle.loads(method), self.worker) output = func(*args, **kwargs) except Exception as e: # Notes have been introduced in python 3.11 if hasattr(e, "add_note"): e.add_note(traceback.format_exc()) logger.exception("WorkerProc hit an exception.") # exception might not be serializable, so we convert it to # string, only for logging purpose. if output_rank is None or self.rank == output_rank: self.handle_output(e) continue if output_rank is None or self.rank == output_rank: self.handle_output(output) @staticmethod def setup_proc_title_and_log_prefix(enable_ep: bool) -> None: dp_size = get_dp_group().world_size dp_rank = get_dp_group().rank_in_group pp_size = get_pp_group().world_size pp_rank = get_pp_group().rank_in_group
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/executor/ray_executor.py
vllm/v1/executor/ray_executor.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os from collections import defaultdict from collections.abc import Callable from concurrent.futures import Future from dataclasses import dataclass from typing import TYPE_CHECKING, Any import cloudpickle import vllm.envs as envs from vllm.logger import init_logger from vllm.platforms import current_platform from vllm.ray.ray_env import get_env_vars_to_copy from vllm.utils.network_utils import ( get_distributed_init_method, get_ip, get_open_port, ) from vllm.v1.core.sched.output import GrammarOutput, SchedulerOutput from vllm.v1.engine import ReconfigureDistributedRequest, ReconfigureRankType from vllm.v1.executor.abstract import Executor from vllm.v1.executor.ray_utils import ( FutureWrapper, RayWorkerWrapper, initialize_ray_cluster, ray, ) from vllm.v1.outputs import ModelRunnerOutput if ray is not None: from ray.actor import ActorHandle from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy else: ActorHandle = None if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup logger = init_logger(__name__) COMPLETED_NONE_FUTURE: Future[ModelRunnerOutput | None] = Future() COMPLETED_NONE_FUTURE.set_result(None) @dataclass class RayWorkerMetaData: """ Metadata for a Ray worker. The order of ray worker creation can be random, and we need to reset the rank after creating all workers. 
""" worker: ActorHandle created_rank: int adjusted_rank: int = -1 ip: str = "" class RayDistributedExecutor(Executor): """Ray-based distributed executor""" # These env vars are worker-specific, therefore are NOT copied # from the driver to the workers WORKER_SPECIFIC_ENV_VARS = { "VLLM_HOST_IP", "VLLM_HOST_PORT", "LOCAL_RANK", "CUDA_VISIBLE_DEVICES", } # These non-vLLM env vars are copied from the driver to workers ADDITIONAL_ENV_VARS = {"HF_TOKEN", "HUGGING_FACE_HUB_TOKEN"} uses_ray: bool = True supports_pp: bool = True def _init_executor(self) -> None: self.forward_dag: ray.dag.CompiledDAG | None = None # For TPU or XPU, avoid compiling NVIDIA's NCCL if current_platform.is_tpu() or current_platform.is_xpu(): os.environ["VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE"] = "shm" assert self.uses_ray initialize_ray_cluster(self.parallel_config) placement_group = self.parallel_config.placement_group # Disable Ray usage stats collection. ray_usage = os.environ.get("RAY_USAGE_STATS_ENABLED", "0") if ray_usage != "1": os.environ["RAY_USAGE_STATS_ENABLED"] = "0" # Create the parallel GPU workers. self._init_workers_ray(placement_group) # KV connector setup self.has_connector = self.vllm_config.kv_transfer_config is not None self.uses_sampler = self.vllm_config.model_config.runner_type != "pooling" and ( self.vllm_config.ec_transfer_config is None or not self.vllm_config.ec_transfer_config.is_ec_producer ) self.scheduler_output: SchedulerOutput | None = None @property def max_concurrent_batches(self) -> int: """Ray distributed executor supports pipeline parallelism, meaning that it allows PP size batches to be executed concurrently. """ if self.scheduler_config.async_scheduling: return 2 return self.parallel_config.pipeline_parallel_size def shutdown(self) -> None: if logger: # Somehow logger can be None here. logger.info( "Shutting down Ray distributed executor. 
If you see error log " "from logging.cc regarding SIGTERM received, please ignore " "because this is the expected termination process in Ray." ) if hasattr(self, "forward_dag") and self.forward_dag is not None: self.forward_dag.teardown() import ray for worker in self.workers: ray.kill(worker) self.forward_dag = None def _configure_ray_workers_use_nsight(self, ray_remote_kwargs) -> dict[str, Any]: # If nsight profiling is enabled, we need to set the profiling # configuration for the ray workers as runtime env. runtime_env = ray_remote_kwargs.setdefault("runtime_env", {}) runtime_env.update( { "nsight": { "t": "cuda,cudnn,cublas", "o": "'worker_process_%p'", "cuda-graph-trace": "node", } } ) return ray_remote_kwargs # child class could overwrite this to return actual env vars. def _get_env_vars_to_be_updated(self): return self._env_vars_for_all_workers def _init_workers_ray(self, placement_group: "PlacementGroup", **ray_remote_kwargs): num_gpus = envs.VLLM_RAY_PER_WORKER_GPUS # The driver dummy worker does not actually use any resources. # It holds the resource for the driver worker. self.driver_dummy_worker: RayWorkerWrapper | None = None # The remaining workers are the actual ray actors. self.workers: list[RayWorkerWrapper] = [] # Used in ray compiled DAG: indexed first by PP rank, # and then TP rank. In other words, the inner list is # the TP group of workers for a PP rank. self.pp_tp_workers: list[list[RayWorkerWrapper]] = [] if self.parallel_config.ray_workers_use_nsight: ray_remote_kwargs = self._configure_ray_workers_use_nsight( ray_remote_kwargs ) # Create the workers. bundle_indices: list[int] if envs.VLLM_RAY_BUNDLE_INDICES: # Use the bundle indices specified by the user. 
bundle_indices = list(map(int, envs.VLLM_RAY_BUNDLE_INDICES.split(","))) assert len(bundle_indices) == self.parallel_config.world_size, ( "VLLM_RAY_BUNDLE_INDICES must have the same size" f" as the world size, but got {bundle_indices=} " f"and {self.parallel_config.world_size=}" ) assert len(set(bundle_indices)) == len(bundle_indices), ( "VLLM_RAY_BUNDLE_INDICES cannot have duplicate values," f" but got {bundle_indices=}" ) else: # use the first N bundles that have GPU resources. bundle_indices = [] for bundle_id, bundle in enumerate(placement_group.bundle_specs): if bundle.get(current_platform.ray_device_key, 0): bundle_indices.append(bundle_id) bundle_indices = bundle_indices[: self.parallel_config.world_size] worker_metadata: list[RayWorkerMetaData] = [] driver_ip = get_ip() for rank, bundle_id in enumerate(bundle_indices): scheduling_strategy = PlacementGroupSchedulingStrategy( placement_group=placement_group, placement_group_capture_child_tasks=True, placement_group_bundle_index=bundle_id, ) if current_platform.ray_device_key == "GPU": # NV+AMD GPUs, and Intel XPUs worker = ray.remote( num_cpus=0, num_gpus=num_gpus, scheduling_strategy=scheduling_strategy, **ray_remote_kwargs, )(RayWorkerWrapper).remote( # type: ignore[attr-defined] vllm_config=self.vllm_config, rpc_rank=rank ) else: worker = ray.remote( num_cpus=0, num_gpus=0, resources={current_platform.ray_device_key: num_gpus}, scheduling_strategy=scheduling_strategy, **ray_remote_kwargs, )(RayWorkerWrapper).remote( # type: ignore[attr-defined] vllm_config=self.vllm_config, rpc_rank=rank ) worker_metadata.append(RayWorkerMetaData(worker=worker, created_rank=rank)) worker_ips = ray.get( [ each.worker.get_node_ip.remote() # type: ignore[attr-defined] for each in worker_metadata ] ) for each, ip in zip(worker_metadata, worker_ips): each.ip = ip logger.debug("workers: %s", worker_metadata) logger.debug("driver_dummy_worker: %s", self.driver_dummy_worker) ip_counts: dict[str, int] = {} for ip in worker_ips: 
ip_counts[ip] = ip_counts.get(ip, 0) + 1 def sort_by_driver_then_worker_ip(item: RayWorkerMetaData): """ Sort the workers based on 3 properties: 1. If the worker is on the same node as the driver (vllm engine), it should be placed first. 2. Then, if the worker is on a node with fewer workers, it should be placed first. 3. Finally, if the work is on a node with smaller IP address, it should be placed first. """ ip = item.ip return 0 if ip == driver_ip else 1, ip_counts[ip], ip # After sorting, the workers on the same node will be # close to each other, and the workers on the driver # node will be placed first. sorted_worker_metadata = sorted( worker_metadata, key=sort_by_driver_then_worker_ip ) for i, item in enumerate(sorted_worker_metadata): item.adjusted_rank = i self.workers = [item.worker for item in sorted_worker_metadata] rerank_mapping = { item.created_rank: item.adjusted_rank for item in sorted_worker_metadata } self.collective_rpc("adjust_rank", args=(rerank_mapping,)) # Get the set of GPU IDs used on each node. worker_node_and_gpu_ids = [] for worker in [self.driver_dummy_worker] + self.workers: if worker is None: # driver_dummy_worker can be None when using ray spmd worker. continue worker_node_and_gpu_ids.append( ray.get(worker.get_node_and_gpu_ids.remote()) ) # type: ignore[attr-defined] node_workers = defaultdict(list) # node id -> list of worker ranks node_gpus = defaultdict(list) # node id -> list of gpu ids for i, (node_id, gpu_ids) in enumerate(worker_node_and_gpu_ids): node_workers[node_id].append(i) # `gpu_ids` can be a list of strings or integers. # convert them to integers for consistency. # NOTE: gpu_ids can be larger than 9 (e.g. 16 GPUs), # string sorting is not sufficient. 
# see https://github.com/vllm-project/vllm/issues/5590 gpu_ids = [int(x) for x in gpu_ids] node_gpus[node_id].extend(gpu_ids) for node_id, gpu_ids in node_gpus.items(): node_gpus[node_id] = sorted(gpu_ids) all_ips = set(worker_ips + [driver_ip]) n_ips = len(all_ips) n_nodes = len(node_workers) if n_nodes != n_ips: raise RuntimeError( f"Every node should have a unique IP address. Got {n_nodes}" f" nodes with node ids {list(node_workers.keys())} and " f"{n_ips} unique IP addresses {all_ips}. Please check your" " network configuration. If you set `VLLM_HOST_IP`" " environment variable, make sure it is unique for" " each node." ) # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [ { current_platform.device_control_env_var: ",".join( map(str, node_gpus[node_id]) ), } for (node_id, _) in worker_node_and_gpu_ids ] # Environment variables to copy from driver to workers env_vars_to_copy = get_env_vars_to_copy( exclude_vars=self.WORKER_SPECIFIC_ENV_VARS, additional_vars=set(current_platform.additional_env_vars).union( self.ADDITIONAL_ENV_VARS ), destination="workers", ) # Copy existing env vars to each worker's args for args in all_args_to_update_environment_variables: # TODO: refactor platform-specific env vars for name in env_vars_to_copy: if name in os.environ: args[name] = os.environ[name] self._env_vars_for_all_workers = all_args_to_update_environment_variables self.collective_rpc( "update_environment_variables", args=(self._get_env_vars_to_be_updated(),) ) if len(node_gpus) == 1: # in single node case, we don't need to get the IP address. # the loopback address is sufficient # NOTE: a node may have several IP addresses, one for each # network interface. `get_ip()` might return any of them, # while they might not work for communication inside the node # if the network setup is complicated. Using the loopback address # solves this issue, as it always works for communication inside # the node. 
        # NOTE(review): this chunk begins mid-method — the enclosing `def`
        # (worker initialization) and the class header lie outside the visible
        # region. Tokens below are unchanged; only formatting and comments
        # were reconstructed from the whitespace-mangled source.
        driver_ip = "127.0.0.1"
        distributed_init_method = get_distributed_init_method(
            driver_ip, get_open_port()
        )

        # Initialize the actual workers inside worker wrapper.
        all_kwargs = []
        for rank, (node_id, _) in enumerate(worker_node_and_gpu_ids):
            local_rank = node_workers[node_id].index(rank)
            kwargs = dict(
                vllm_config=self.vllm_config,
                local_rank=local_rank,
                rank=rank,
                distributed_init_method=distributed_init_method,
                # Rank 0 of each TP group is treated as the driver worker.
                is_driver_worker=(not self.parallel_config)
                or (rank % self.parallel_config.tensor_parallel_size == 0),
            )
            all_kwargs.append(kwargs)
        self.collective_rpc("init_worker", args=(all_kwargs,))
        self.collective_rpc("init_device")
        self.collective_rpc("load_model")

        # Bucket the flat worker list by pipeline stage.
        for pp_rank in range(self.parallel_config.pipeline_parallel_size):
            self.pp_tp_workers.append([])
            for tp_rank in range(self.parallel_config.tensor_parallel_size):
                # PP=2, TP=4
                # pp_tp_workers = [[0, 1, 2, 3], [4, 5, 6, 7]]
                rank = (pp_rank * self.parallel_config.tensor_parallel_size) + tp_rank
                assert len(self.pp_tp_workers[pp_rank]) == tp_rank
                assert pp_rank < len(self.pp_tp_workers)
                self.pp_tp_workers[pp_rank].append(self.workers[rank])

    def reinitialize_distributed(
        self, reconfig_request: ReconfigureDistributedRequest
    ) -> None:
        # Propagate the reconfiguration to all workers, then shut this
        # executor down if the request tells the current rank to exit.
        self.collective_rpc("reinitialize_distributed", args=(reconfig_request,))
        if (
            reconfig_request.new_data_parallel_rank
            == ReconfigureRankType.SHUTDOWN_CURRENT_RANK
        ):
            self.shutdown()

    def execute_model(  # type: ignore[override]
        self,
        scheduler_output: SchedulerOutput,
        non_block: bool = False,
    ) -> ModelRunnerOutput | None | Future[ModelRunnerOutput | None]:
        # execute_model()/sample_tokens() form a two-phase protocol: when the
        # model will actually run and a sampler is in use, the scheduler
        # output is stashed and the DAG is executed by sample_tokens().
        if self.scheduler_output is not None:
            raise RuntimeError(
                "State error: sample_tokens() must be called "
                "after execute_model() returns None."
            )

        if not self.uses_sampler or not scheduler_output.total_num_scheduled_tokens:
            # Model will not execute, call model runner immediately.
            return self._execute_dag(scheduler_output, None, non_block)

        # Model will execute, defer to sample_tokens() call.
        self.scheduler_output = scheduler_output
        return COMPLETED_NONE_FUTURE if non_block else None

    def sample_tokens(  # type: ignore[override]
        self,
        grammar_output: "GrammarOutput | None",
        non_block: bool = False,
    ) -> ModelRunnerOutput | None | Future[ModelRunnerOutput | None]:
        """Execute the model on the Ray workers.

        The scheduler output to use should have been provided in a
        prior call to execute_model().

        Args:
            grammar_output: The structured outputs grammar bitmask, if
                applicable.
            non_block: If True, the method will return a Future.

        Returns:
            The model runner output.
        """
        scheduler_output = self.scheduler_output
        if scheduler_output is None:
            # Nothing was deferred by execute_model(); no-op.
            return COMPLETED_NONE_FUTURE if non_block else None
        self.scheduler_output = None
        return self._execute_dag(scheduler_output, grammar_output, non_block)

    def _execute_dag(
        self,
        scheduler_output: SchedulerOutput,
        grammar_output: "GrammarOutput | None",
        non_block: bool = False,
    ) -> ModelRunnerOutput | None | Future[ModelRunnerOutput | None]:
        # Build the compiled DAG for the first time (lazy, on first execute).
        if self.forward_dag is None:  # type: ignore
            self.forward_dag = self._compiled_ray_dag(enable_asyncio=False)

        refs = self.forward_dag.execute((scheduler_output, grammar_output))  # type: ignore

        if not self.has_connector:
            # Get output only from a single worker (output_rank)
            # When PP is not used, we block here until the result is available.
            if not non_block:
                return refs[0].get()

            # When PP is used, we return a FutureWrapper immediately so that
            # the scheduler can yield to the next batch.
            return FutureWrapper(refs[0])

        # Get output from all workers when connector is present
        assert self.kv_output_aggregator is not None
        if not non_block:
            # Block and get results from all workers
            return self.kv_output_aggregator.aggregate(ray.get(refs))

        # Return a future that will aggregate outputs from all workers
        return FutureWrapper(refs, self.kv_output_aggregator)

    def collective_rpc(  # type: ignore[override]
        self,
        method: str | Callable,
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict[str, Any] | None = None,
        non_block: bool = False,
    ) -> list[Any] | Future[list[Any]]:
        """Runs the given method on all workers."""
        # Serialize callables once here rather than once per worker.
        sent_method = method if isinstance(method, str) else cloudpickle.dumps(method)
        del method
        if kwargs is None:
            kwargs = {}
        ray_worker_outputs = [
            worker.execute_method.remote(  # type: ignore[attr-defined]
                sent_method, *args, **kwargs
            )
            for worker in self.workers
        ]
        # Get the results of the ray workers.
        if non_block:
            return FutureWrapper(ray_worker_outputs)
        return ray.get(ray_worker_outputs, timeout=timeout)

    def _check_ray_cgraph_installation(self):
        # Validate that the installed Ray supports Compiled Graphs and that
        # the optional NCCL channel dependency (cupy) is present when needed.
        import importlib.metadata

        from packaging import version

        required_version = version.parse("2.43.0")
        current_version = version.parse(importlib.metadata.version("ray"))
        if current_version < required_version:
            raise ValueError(
                f"Ray version {required_version} is "
                f"required, but found {current_version}"
            )

        import importlib.util

        cgraph_spec = importlib.util.find_spec("ray.experimental.compiled_dag_ref")
        if cgraph_spec is None:
            raise ValueError(
                "Ray Compiled Graph is not installed. "
                "Run `pip install ray[cgraph]` to install it."
            )

        cupy_spec = importlib.util.find_spec("cupy")
        if cupy_spec is None and envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE == "nccl":
            raise ValueError(
                "cupy is not installed but required since "
                "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE is set to 'nccl'. "
                "Run `pip install ray[cgraph]` and check cupy installation."
            )

    def _compiled_ray_dag(self, enable_asyncio: bool):
        assert self.parallel_config.use_ray
        self._check_ray_cgraph_installation()
        # Enlarge the default value of "RAY_CGRAPH_get_timeout" to 300 seconds
        # (it is 10 seconds by default). This is a Ray environment variable to
        # control the timeout of getting result from a compiled graph execution,
        # i.e., the distributed execution that includes model forward runs and
        # intermediate tensor communications, in the case of vllm.
        # Note: we should set this env var before importing
        # ray.dag, otherwise it will not take effect.
        os.environ.setdefault("RAY_CGRAPH_get_timeout", "300")  # noqa: SIM112
        from ray.dag import InputNode, MultiOutputNode

        logger.info(
            "RAY_CGRAPH_get_timeout is set to %s",
            os.environ["RAY_CGRAPH_get_timeout"],  # noqa: SIM112
        )
        logger.info(
            "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE = %s",
            envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE,
        )
        logger.info(
            "VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM = %s",
            envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM,
        )

        channel_type = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
        if channel_type not in ("auto", "nccl", "shm"):
            raise ValueError(
                "Invalid value for VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: "
                f"{channel_type}. Valid values are: 'auto', 'nccl', or 'shm'."
            )

        with InputNode() as input_data:
            # Example DAG: PP=2, TP=4
            #
            # SchedulerOutput -> 0 -> (SchedulerOutput, IntermediateTensors) -> 4 -> ModelRunnerOutput   # noqa: E501
            # SchedulerOutput -> 1 -> (SchedulerOutput, IntermediateTensors) -> 5 -> ModelRunnerOutput   # noqa: E501
            # SchedulerOutput -> 2 -> (SchedulerOutput, IntermediateTensors) -> 6 -> ModelRunnerOutput   # noqa: E501
            # SchedulerOutput -> 3 -> (SchedulerOutput, IntermediateTensors) -> 7 -> ModelRunnerOutput   # noqa: E501

            # All workers in the first TP group will take in the
            # ExecuteModelRequest as input.
            outputs = [input_data for _ in self.pp_tp_workers[0]]
            for pp_rank, tp_group in enumerate(self.pp_tp_workers):
                # Each PP worker takes in the output of the previous PP worker,
                # and the TP group executes in SPMD fashion.
                outputs = [
                    worker.execute_model_ray.bind(outputs[i])  # type: ignore[attr-defined]
                    for i, worker in enumerate(tp_group)
                ]

                last_pp_rank = len(self.pp_tp_workers) - 1
                if (
                    pp_rank < last_pp_rank
                    and envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE != "shm"
                ):
                    # Specify how intermediate tensors should be passed
                    # between pp stages, no need to specify for the last
                    # pp stage or when using shared memory (the default).
                    transport = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
                    outputs = [
                        output.with_tensor_transport(transport=transport)
                        for output in outputs
                    ]

            forward_dag = MultiOutputNode(outputs)

        if envs.VLLM_USE_RAY_WRAPPED_PP_COMM:
            from ray.experimental.channel.accelerator_context import (
                register_accelerator_context,
            )

            from vllm.distributed.device_communicators.ray_communicator import (
                RayPPCommunicator,
            )

            register_accelerator_context(
                torch_module_name="cuda", communicator_cls=RayPPCommunicator
            )
            logger.info(
                "Using RayPPCommunicator "
                "(which wraps vLLM _PP GroupCoordinator) "
                "for Ray Compiled Graph communication."
            )
        else:
            logger.info(
                "Using Ray's NCCL communicator for Ray Compiled Graph communication."
            )

        return forward_dag.experimental_compile(
            enable_asyncio=enable_asyncio,
            _overlap_gpu_communication=envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM,
        )

    def __del__(self):
        self.shutdown()

    def check_health(self) -> None:
        # Assume that the Ray workers are healthy.
        # TODO: check the health of the Ray workers
        return
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/executor/__init__.py
vllm/v1/executor/__init__.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from .abstract import Executor from .uniproc_executor import UniProcExecutor __all__ = ["Executor", "UniProcExecutor"]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/executor/uniproc_executor.py
vllm/v1/executor/uniproc_executor.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
from collections.abc import Callable
from concurrent.futures import Future, ThreadPoolExecutor
from functools import cached_property
from multiprocessing import Lock
from typing import Any

import torch
import torch.distributed as dist

import vllm.envs as envs
from vllm.logger import init_logger
from vllm.utils.network_utils import get_distributed_init_method, get_ip, get_open_port
from vllm.v1.core.sched.output import GrammarOutput, SchedulerOutput
from vllm.v1.engine import ReconfigureDistributedRequest, ReconfigureRankType
from vllm.v1.executor.abstract import Executor
from vllm.v1.outputs import AsyncModelRunnerOutput, DraftTokenIds, ModelRunnerOutput
from vllm.v1.serial_utils import run_method
from vllm.v1.worker.worker_base import WorkerWrapperBase

logger = init_logger(__name__)


class UniProcExecutor(Executor):
    # Single in-process worker: every "collective" RPC degenerates to a
    # direct method call on the one driver worker.

    def _init_executor(self) -> None:
        """Initialize the worker and load the model."""
        self.driver_worker = WorkerWrapperBase(vllm_config=self.vllm_config, rpc_rank=0)
        distributed_init_method, rank, local_rank = self._distributed_args()
        kwargs = dict(
            vllm_config=self.vllm_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
            is_driver_worker=True,
            shared_worker_lock=Lock(),
        )
        # One background thread used to resolve async model-runner outputs
        # when async scheduling allows more than one in-flight batch.
        self.async_output_thread: ThreadPoolExecutor | None = None
        if self.max_concurrent_batches > 1:
            self.async_output_thread = ThreadPoolExecutor(
                max_workers=1, thread_name_prefix="WorkerAsyncOutput"
            )

        self.driver_worker.init_worker(all_kwargs=[kwargs])
        self.driver_worker.init_device()
        self.driver_worker.load_model()

    def _distributed_args(self) -> tuple[str, int, int]:
        """Return (distributed_init_method, rank, local_rank)."""
        distributed_init_method = get_distributed_init_method(get_ip(), get_open_port())
        # set local rank as the device index if specified
        # (e.g. "cuda:1" -> local_rank 1; bare "cuda" -> 0)
        device_info = self.vllm_config.device_config.device.__str__().split(":")
        local_rank = int(device_info[1]) if len(device_info) > 1 else 0
        return distributed_init_method, 0, local_rank

    @cached_property
    def max_concurrent_batches(self) -> int:
        # Async scheduling pipelines two batches; otherwise one at a time.
        return 2 if self.scheduler_config.async_scheduling else 1

    def collective_rpc(  # type: ignore[override]
        self,
        method: str | Callable,
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict | None = None,
        non_block: bool = False,
        single_value: bool = False,
    ) -> Any:
        # single_value=True returns the bare result instead of a 1-element
        # list (there is only ever one worker here).
        if kwargs is None:
            kwargs = {}

        if not non_block:
            result = run_method(self.driver_worker, method, args, kwargs)
            return result if single_value else [result]

        try:
            result = run_method(self.driver_worker, method, args, kwargs)
            if isinstance(result, AsyncModelRunnerOutput):
                # Resolve the async output off-thread when possible so the
                # caller gets a genuinely non-blocking Future.
                if (async_thread := self.async_output_thread) is not None:
                    if single_value:
                        return async_thread.submit(result.get_output)

                    def get_output_list() -> list[Any]:
                        return [result.get_output()]

                    return async_thread.submit(get_output_list)
                # No async thread available: resolve synchronously.
                result = result.get_output()

            # Wrap the (already available) result in a completed Future.
            future = Future[Any]()
            future.set_result(result if single_value else [result])
        except Exception as e:
            # Surface the failure through the Future, matching the
            # non-blocking contract.
            future = Future[Any]()
            future.set_exception(e)
        return future

    def execute_model(  # type: ignore[override]
        self, scheduler_output: SchedulerOutput, non_block: bool = False
    ) -> ModelRunnerOutput | None | Future[ModelRunnerOutput | None]:
        return self.collective_rpc(
            "execute_model",
            args=(scheduler_output,),
            non_block=non_block,
            single_value=True,
        )

    def sample_tokens(  # type: ignore[override]
        self, grammar_output: GrammarOutput | None, non_block: bool = False
    ) -> ModelRunnerOutput | None | Future[ModelRunnerOutput | None]:
        return self.collective_rpc(
            "sample_tokens",
            args=(grammar_output,),
            non_block=non_block,
            single_value=True,
        )

    def take_draft_token_ids(self) -> DraftTokenIds | None:
        return self.collective_rpc("take_draft_token_ids", single_value=True)

    def check_health(self) -> None:
        # UniProcExecutor will always be healthy as long as
        # it's running.
        return

    def reinitialize_distributed(
        self, reconfig_request: ReconfigureDistributedRequest
    ) -> None:
        self.driver_worker.reinitialize_distributed(reconfig_request)
        if (
            reconfig_request.new_data_parallel_rank
            == ReconfigureRankType.SHUTDOWN_CURRENT_RANK
        ):
            self.shutdown()

    def shutdown(self) -> None:
        if worker := self.driver_worker:
            worker.shutdown()


class ExecutorWithExternalLauncher(UniProcExecutor):
    """An executor that uses external launchers to launch engines,
    specially designed for torchrun-compatible launchers, for
    offline inference with tensor parallelism.

    see https://github.com/vllm-project/vllm/issues/11400 for
    the motivation, and examples/offline_inference/torchrun_example.py
    for the usage example.

    The key idea: although it is tensor-parallel inference, we only
    create one worker per executor, users will launch multiple
    engines with torchrun-compatible launchers, and all these engines
    work together to process the same prompts. When scheduling is
    deterministic, all the engines will generate the same outputs,
    and they don't need to synchronize the states with each other.
    """

    def _init_executor(self) -> None:
        """Initialize the worker and load the model."""
        assert not envs.VLLM_ENABLE_V1_MULTIPROCESSING, (
            "To get deterministic execution, "
            "please set VLLM_ENABLE_V1_MULTIPROCESSING=0"
        )
        super()._init_executor()

    def _distributed_args(self) -> tuple[str, int, int]:
        # engines are launched in torchrun-compatible launchers
        # so we can use the env:// method.
        # required env vars:
        # - RANK
        # - LOCAL_RANK
        # - MASTER_ADDR
        # - MASTER_PORT
        distributed_init_method = "env://"
        rank = int(os.environ["RANK"])
        local_rank = int(os.environ["LOCAL_RANK"])
        return distributed_init_method, rank, local_rank

    def determine_available_memory(self) -> list[int]:  # in bytes
        # we need to get the min across all ranks.
        memory = super().determine_available_memory()
        from vllm.distributed.parallel_state import get_world_group

        cpu_group = get_world_group().cpu_group
        memory_tensor = torch.tensor([memory], device="cpu", dtype=torch.int64)
        dist.all_reduce(memory_tensor, group=cpu_group, op=dist.ReduceOp.MIN)
        return [memory_tensor.item()]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/mediums.py
vllm/v1/kv_offload/mediums.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC

import numpy as np

from vllm.v1.kv_offload.abstract import LoadStoreSpec


class BlockIDsLoadStoreSpec(LoadStoreSpec, ABC):
    """Base spec that addresses KV blocks by their integer block numbers."""

    def __init__(self, block_ids: list[int]):
        # Store the ids as a compact int64 array for cheap downstream use.
        self.block_ids = np.array(block_ids, dtype=np.int64)

    def __repr__(self) -> str:
        return f"{self.block_ids!r}"


class GPULoadStoreSpec(BlockIDsLoadStoreSpec):
    """Addresses KV blocks that live in GPU memory."""

    @staticmethod
    def medium() -> str:
        return "GPU"


class CPULoadStoreSpec(BlockIDsLoadStoreSpec):
    """Addresses KV blocks that live in CPU memory."""

    @staticmethod
    def medium() -> str:
        return "CPU"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/lru_manager.py
vllm/v1/kv_offload/lru_manager.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections import OrderedDict
from collections.abc import Iterable

from vllm.v1.core.kv_cache_utils import BlockHash
from vllm.v1.kv_offload.abstract import (
    LoadStoreSpec,
    OffloadingEvent,
    OffloadingManager,
    PrepareStoreOutput,
)
from vllm.v1.kv_offload.backend import Backend, BlockStatus


class LRUOffloadingManager(OffloadingManager):
    """
    An OffloadingManager with a pluggable backend, which evicts blocks by LRU.
    """

    def __init__(self, backend: Backend, enable_events: bool = False):
        self.backend: Backend = backend
        # block_hash -> BlockStatus; insertion order doubles as LRU order
        # (least-recently-used block first).
        self.blocks: OrderedDict[BlockHash, BlockStatus] = OrderedDict()
        # Event buffer drained by take_events(); None when events disabled.
        self.events: list[OffloadingEvent] | None = [] if enable_events else None

    def lookup(self, block_hashes: Iterable[BlockHash]) -> int:
        """Return the length of the longest offloaded-and-ready prefix."""
        hit_count = 0
        for block_hash in block_hashes:
            block = self.blocks.get(block_hash)
            if block is None or not block.is_ready:
                break
            hit_count += 1
        return hit_count

    def prepare_load(self, block_hashes: Iterable[BlockHash]) -> LoadStoreSpec:
        """Pin the given (ready) blocks for reading and return a load spec."""
        # Bug fix: materialize once. `block_hashes` may be a one-shot
        # iterator, and it is both iterated here and forwarded to the
        # backend below; previously a generator input would reach the
        # backend already exhausted.
        block_hashes = list(block_hashes)
        blocks = []
        for block_hash in block_hashes:
            block = self.blocks[block_hash]
            assert block.is_ready
            # Pin: protect from eviction until complete_load().
            block.ref_cnt += 1
            blocks.append(block)
        return self.backend.get_load_store_spec(block_hashes, blocks)

    def touch(self, block_hashes: Iterable[BlockHash]):
        """Mark blocks as recently used (move them to the MRU end)."""
        # Iterate in reverse so the first requested block ends up MRU.
        for block_hash in reversed(list(block_hashes)):
            if self.blocks.get(block_hash):
                self.blocks.move_to_end(block_hash)

    def complete_load(self, block_hashes: Iterable[BlockHash]):
        """Unpin blocks previously pinned by prepare_load()."""
        for block_hash in block_hashes:
            block = self.blocks[block_hash]
            assert block.ref_cnt > 0
            block.ref_cnt -= 1

    def prepare_store(
        self, block_hashes: Iterable[BlockHash]
    ) -> PrepareStoreOutput | None:
        """
        Allocate space for the not-yet-stored blocks, evicting unreferenced
        blocks in LRU order if needed.

        Returns:
            A PrepareStoreOutput, or None if not enough blocks could be
            evicted to make room.
        """
        # filter out blocks that are already stored
        block_hashes_to_store = [
            block_hash for block_hash in block_hashes if block_hash not in self.blocks
        ]

        num_blocks_to_evict = (
            len(block_hashes_to_store) - self.backend.get_num_free_blocks()
        )

        # build list of blocks to evict
        to_evict = []
        if num_blocks_to_evict > 0:
            for block_hash, block in self.blocks.items():
                # Only unreferenced blocks are evictable.
                if block.ref_cnt == 0:
                    to_evict.append(block_hash)
                    num_blocks_to_evict -= 1
                    if num_blocks_to_evict == 0:
                        break
            else:
                # we could not evict enough blocks
                return None

        # evict blocks
        for block_hash in to_evict:
            self.backend.free(self.blocks.pop(block_hash))

        if to_evict and self.events is not None:
            self.events.append(
                OffloadingEvent(
                    block_hashes=to_evict,
                    block_size=self.backend.block_size,
                    medium=self.backend.medium,
                    removed=True,
                )
            )

        blocks = self.backend.allocate_blocks(block_hashes_to_store)
        assert len(blocks) == len(block_hashes_to_store)

        # Newly-inserted blocks start not-ready; complete_store() flips them.
        for block_hash, block in zip(block_hashes_to_store, blocks):
            self.blocks[block_hash] = block

        # build store specs for allocated blocks
        store_spec = self.backend.get_load_store_spec(block_hashes_to_store, blocks)

        return PrepareStoreOutput(
            block_hashes_to_store=block_hashes_to_store,
            store_spec=store_spec,
            block_hashes_evicted=to_evict,
        )

    def complete_store(self, block_hashes: Iterable[BlockHash], success: bool = True):
        """
        Finish a pending store. On success, pending blocks become ready
        (ref_cnt reset to 0); on failure, pending blocks are freed and
        dropped while already-ready blocks are left untouched.
        """
        stored_block_hashes: list[BlockHash] = []
        if success:
            for block_hash in block_hashes:
                block = self.blocks[block_hash]
                if not block.is_ready:
                    block.ref_cnt = 0
                    stored_block_hashes.append(block_hash)
        else:
            for block_hash in block_hashes:
                block = self.blocks[block_hash]
                if not block.is_ready:
                    self.backend.free(block)
                    del self.blocks[block_hash]

        if stored_block_hashes and self.events is not None:
            self.events.append(
                OffloadingEvent(
                    block_hashes=stored_block_hashes,
                    block_size=self.backend.block_size,
                    medium=self.backend.medium,
                    removed=False,
                )
            )

    def take_events(self) -> Iterable[OffloadingEvent]:
        """Drain and yield events collected since the last call."""
        if self.events is not None:
            yield from self.events
            self.events.clear()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/spec.py
vllm/v1/kv_offload/spec.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Iterator
from typing import TYPE_CHECKING

import torch

from vllm.attention.backends.abstract import AttentionBackend
from vllm.logger import init_logger
from vllm.v1.kv_offload.abstract import LoadStoreSpec, OffloadingManager
from vllm.v1.kv_offload.worker.worker import OffloadingHandler

if TYPE_CHECKING:
    from vllm.config import VllmConfig

logger = init_logger(__name__)


class OffloadingSpec(ABC):
    """Spec for an offloading connector"""

    def __init__(self, vllm_config: "VllmConfig"):
        logger.warning(
            "Initializing OffloadingSpec. This API is experimental and "
            "subject to change in the future as we iterate the design."
        )
        self.vllm_config = vllm_config

        transfer_cfg = vllm_config.kv_transfer_config
        assert transfer_cfg is not None
        self.extra_config = transfer_cfg.kv_connector_extra_config

        # The offloaded block size defaults to the GPU block size and must be
        # a whole multiple of it.
        self.gpu_block_size = vllm_config.cache_config.block_size
        self.offloaded_block_size = int(
            self.extra_config.get("block_size", self.gpu_block_size)
        )
        assert self.offloaded_block_size % self.gpu_block_size == 0

    @abstractmethod
    def get_manager(self) -> OffloadingManager:
        """
        Return the OffloadingManager used by the scheduler-side offloading
        connector to track offloaded blocks and manage evictions.
        """
        ...

    @abstractmethod
    def get_handlers(
        self,
        kv_caches: dict[str, torch.Tensor],
        attn_backends: dict[str, type[AttentionBackend]],
    ) -> Iterator[tuple[type[LoadStoreSpec], type[LoadStoreSpec], OffloadingHandler]]:
        """
        Yield offloading handlers together with their src and dst spec types.

        Args:
            kv_caches: A dictionary of layer_name -> gpu_kv_cache tensor.
            attn_backends: A dictionary of layer_name -> AttentionBackend.

        Yields:
            Tuples of (src_type, dst_type, offloading_handler).
        """
        ...
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/abstract.py
vllm/v1/kv_offload/abstract.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Scheduler-side tracking of offloaded KV data in vLLM v1.

An OffloadingManager runs in the scheduler and tracks which blocks are
offloaded and where. It exposes the following primitives:

lookup() - length of the maximal prefix of blocks, starting from the first
    one, that are all offloaded.
prepare_load() - pin blocks for reading (protecting them from eviction) and
    return a LoadStoreSpec describing how to perform the load.
touch() - mark blocks as recently used (e.g. LRU bookkeeping). Kept separate
    from prepare_load so recency can be recorded even for blocks that need no
    cache read, such as blocks served by the GPU prefix cache.
complete_load() - unpin blocks pinned by prepare_load, re-allowing eviction.
prepare_store() - reserve space for blocks about to be written; returns a
    store spec plus the list of blocks evicted to make room.
complete_store() - mark a pending store as finished; the blocks become
    loadable afterwards.
"""
from abc import ABC, abstractmethod
from collections.abc import Iterable
from dataclasses import dataclass

from vllm.v1.core.kv_cache_utils import BlockHash


class LoadStoreSpec(ABC):
    """
    Abstract metadata that encapsulates information allowing a worker
    to load, and optionally also to store, blocks of KV data.
    """

    @staticmethod
    @abstractmethod
    def medium() -> str:
        """
        Returns a string representation of the medium type
        this store/load targets.
        """
        ...


@dataclass
class PrepareStoreOutput:
    # Blocks that actually need writing (were not already offloaded).
    block_hashes_to_store: list[BlockHash]
    # Where/how the worker should write them.
    store_spec: LoadStoreSpec
    # Blocks evicted to make room for this store.
    block_hashes_evicted: list[BlockHash]


@dataclass
class OffloadingEvent:
    block_hashes: list[BlockHash]
    block_size: int
    medium: str
    # True if blocks are removed, False if stored
    removed: bool


class OffloadingManager(ABC):
    @abstractmethod
    def lookup(self, block_hashes: Iterable[BlockHash]) -> int:
        """
        Find how many blocks, starting from the first one, are all offloaded.

        Args:
            block_hashes: the hashes identifying the blocks to lookup.

        Returns:
            The maximal number of leading blocks that are currently offloaded.
        """
        ...

    @abstractmethod
    def prepare_load(self, block_hashes: Iterable[BlockHash]) -> LoadStoreSpec:
        """
        Pin the given blocks for reading; they remain protected from eviction
        until complete_load is called. Assumes all given blocks are offloaded.

        Args:
            block_hashes: the hashes identifying the blocks.

        Returns:
            A LoadStoreSpec a worker can use to locate and load the actual
            offloaded KV data.
        """
        ...

    def touch(self, block_hashes: Iterable[BlockHash]):
        """
        Mark the given blocks as recently used. In practice this could mean
        moving them to the end of an LRU list.

        Args:
            block_hashes: the hashes identifying the blocks.
        """
        return

    def complete_load(self, block_hashes: Iterable[BlockHash]):
        """
        Unpin blocks that were previously prepared for loading.

        Args:
            block_hashes: the hashes identifying the blocks.
        """
        return

    @abstractmethod
    def prepare_store(
        self, block_hashes: Iterable[BlockHash]
    ) -> PrepareStoreOutput | None:
        """
        Reserve space for the given blocks; they remain protected from
        eviction until complete_store is called.

        Args:
            block_hashes: the hashes identifying the blocks.

        Returns:
            A PrepareStoreOutput listing which blocks need storing, where to
            store them (LoadStoreSpec), and which blocks were evicted to make
            room. None if the blocks cannot be stored.
        """
        ...

    def complete_store(self, block_hashes: Iterable[BlockHash], success: bool = True):
        """
        Mark blocks that were previously prepared for storing as stored;
        they become loadable. If success is False, blocks that never became
        ready are removed instead.

        Args:
            block_hashes: the hashes identifying the blocks.
            success: whether the blocks were stored successfully.
        """
        return

    def take_events(self) -> Iterable[OffloadingEvent]:
        """
        Drain the offloading events collected since the last call.

        Yields:
            New OffloadingEvents.
        """
        return ()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/cpu.py
vllm/v1/kv_offload/cpu.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterator

import torch

from vllm.attention.backends.abstract import AttentionBackend
from vllm.config import VllmConfig
from vllm.platforms import current_platform
from vllm.v1.kv_offload.abstract import LoadStoreSpec, OffloadingManager
from vllm.v1.kv_offload.arc_manager import ARCOffloadingManager
from vllm.v1.kv_offload.backends.cpu import CPUBackend
from vllm.v1.kv_offload.lru_manager import LRUOffloadingManager
from vllm.v1.kv_offload.mediums import CPULoadStoreSpec, GPULoadStoreSpec
from vllm.v1.kv_offload.spec import OffloadingSpec
from vllm.v1.kv_offload.worker.cpu_gpu import CpuGpuOffloadingHandlers
from vllm.v1.kv_offload.worker.worker import OffloadingHandler


class CPUOffloadingSpec(OffloadingSpec):
    """Offloading spec that spills KV blocks from GPU to CPU memory."""

    def __init__(self, vllm_config: VllmConfig):
        super().__init__(vllm_config)

        num_cpu_blocks = self.extra_config.get("num_cpu_blocks")
        if not num_cpu_blocks:
            raise Exception(
                "num_cpu_blocks must be specified in kv_connector_extra_config"
            )
        self.num_cpu_blocks: int = num_cpu_blocks

        # Created lazily on the scheduler side.
        self._manager: OffloadingManager | None = None
        # Created lazily on the worker side.
        self._handlers: CpuGpuOffloadingHandlers | None = None

        self.eviction_policy: str = self.extra_config.get("eviction_policy", "lru")

    def get_manager(self) -> OffloadingManager:
        """Build (once) and return the scheduler-side offloading manager."""
        if not self._manager:
            kv_events_config = self.vllm_config.kv_events_config
            enable_events = (
                kv_events_config is not None and kv_events_config.enable_kv_cache_events
            )
            backend = CPUBackend(
                block_size=self.offloaded_block_size, num_blocks=self.num_cpu_blocks
            )
            # Dispatch on the configured eviction policy.
            manager_classes = {
                "lru": LRUOffloadingManager,
                "arc": ARCOffloadingManager,
            }
            manager_cls = manager_classes.get(self.eviction_policy)
            if manager_cls is None:
                raise ValueError(
                    f"Unknown eviction policy: {self.eviction_policy}. "
                    f"Supported policies: lru, arc"
                )
            self._manager = manager_cls(backend=backend, enable_events=enable_events)
        return self._manager

    def get_handlers(
        self,
        kv_caches: dict[str, torch.Tensor],
        attn_backends: dict[str, type[AttentionBackend]],
    ) -> Iterator[tuple[type[LoadStoreSpec], type[LoadStoreSpec], OffloadingHandler]]:
        """Build (once) the CPU<->GPU handlers and yield both directions."""
        if not self._handlers:
            if not current_platform.is_cuda_alike():
                raise Exception(
                    "CPU Offloading is currently only supported on CUDA-alike GPUs"
                )
            self._handlers = CpuGpuOffloadingHandlers(
                attn_backends=attn_backends,
                gpu_block_size=self.gpu_block_size,
                cpu_block_size=self.offloaded_block_size,
                num_cpu_blocks=self.num_cpu_blocks,
                gpu_caches=kv_caches,
            )

        assert self._handlers is not None
        # Offload direction first, then onload direction.
        yield GPULoadStoreSpec, CPULoadStoreSpec, self._handlers.gpu_to_cpu_handler
        yield CPULoadStoreSpec, GPULoadStoreSpec, self._handlers.cpu_to_gpu_handler
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/arc_manager.py
vllm/v1/kv_offload/arc_manager.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections import OrderedDict
from collections.abc import Iterable

from vllm.v1.core.kv_cache_utils import BlockHash
from vllm.v1.kv_offload.abstract import (
    LoadStoreSpec,
    OffloadingEvent,
    OffloadingManager,
    PrepareStoreOutput,
)
from vllm.v1.kv_offload.backend import Backend, BlockStatus


class ARCOffloadingManager(OffloadingManager):
    """
    An OffloadingManager implementing the ARC (Adaptive Replacement Cache)
    eviction policy with a pluggable backend.

    Data Structures:
        T1: Recent cache containing blocks accessed once.
        T2: Frequent cache containing blocks accessed multiple times.
        B1/B2: Ghost lists tracking recently evicted blocks from T1/T2.
        target_t1_size: Adaptive target size for the T1 partition.

    Algorithm Flow:
    1. Cache lookup (lookup): Searches T1 and T2 for block hashes and counts
       consecutive hits until a miss or non-ready block is encountered.
    2. Cache touch (touch) - Adaptive Learning:
       For each block_hash (in reverse order):
       - If in T1: Move to T2 (promotion from recent to frequent).
       - If in T2: Move to MRU position (end of queue).
       - If in B1 ghost list: Increase target_t1_size.
       - If in B2 ghost list: Decrease target_t1_size.
    3. Block eviction (prepare_store) - Adaptive Replacement:
       Determines eviction source based on adaptive target:
       - If T1 size > target_t1_size: Evict from T1, add to B1.
       - Otherwise: Evict from T2, add to B2.
       Finally, bound each ghost list size.
    4. Block insertion (prepare_store): New blocks are always inserted into
       T1 and removed from B1/B2 if present. Blocks may later be promoted to
       T2 during touch operations.

    Adaptive Behavior:
        The algorithm self-tunes the recency vs. frequency trade-off:
        - B1 hit: Recent access patterns matter more -> increase T1.
        - B2 hit: Frequent access patterns matter more -> decrease T1.
    """

    def __init__(self, backend: Backend, enable_events: bool = False):
        self.backend: Backend = backend
        # Adaptive target size (in blocks) for the T1 (recency) partition.
        self.target_t1_size: float = 0.0
        self.t1: OrderedDict[BlockHash, BlockStatus] = OrderedDict()
        self.t2: OrderedDict[BlockHash, BlockStatus] = OrderedDict()
        # block_hash -> None (only care about presence)
        self.b1: OrderedDict[BlockHash, None] = OrderedDict()
        self.b2: OrderedDict[BlockHash, None] = OrderedDict()
        # Event buffer drained by take_events(); None when events disabled.
        self.events: list[OffloadingEvent] | None = [] if enable_events else None
        # Total capacity, used to bound the ghost lists and target_t1_size.
        self.cache_capacity: int = self.backend.get_num_free_blocks()

    def lookup(self, block_hashes: Iterable[BlockHash]) -> int:
        """Return the length of the longest cached-and-ready prefix."""
        hit_count = 0
        for block_hash in block_hashes:
            block = self.t1.get(block_hash) or self.t2.get(block_hash)
            if block is None or not block.is_ready:
                break
            hit_count += 1
        return hit_count

    def prepare_load(self, block_hashes: Iterable[BlockHash]) -> LoadStoreSpec:
        """Pin the given (ready) blocks for reading and return a load spec."""
        # Bug fix: materialize once. `block_hashes` may be a one-shot
        # iterator, and it is both iterated here and forwarded to the
        # backend below; previously a generator input would reach the
        # backend already exhausted.
        block_hashes = list(block_hashes)
        blocks = []
        for block_hash in block_hashes:
            block = self.t1.get(block_hash) or self.t2.get(block_hash)
            assert block is not None, f"Block {block_hash!r} not found in cache"
            assert block.is_ready, f"Block {block_hash!r} is not ready for reading"
            block.ref_cnt += 1
            blocks.append(block)
        return self.backend.get_load_store_spec(block_hashes, blocks)

    def touch(self, block_hashes: Iterable[BlockHash]):
        """Record accesses, promoting T1->T2 and adapting target_t1_size."""
        # Iterate in reverse so the first requested block ends up MRU.
        for block_hash in reversed(list(block_hashes)):
            if block_hash in self.t1:
                block = self.t1[block_hash]
                if not block.is_ready:
                    # block was just prepared to be stored, not really touched
                    # twice; keep it in T1 but refresh its recency.
                    # (Bug fix: the block was previously popped from T1 before
                    # move_to_end, which raised KeyError on this path.)
                    self.t1.move_to_end(block_hash)
                else:
                    # Second access: promote from recent (T1) to frequent (T2).
                    del self.t1[block_hash]
                    self.t2[block_hash] = block
            elif block_hash in self.t2:
                self.t2.move_to_end(block_hash)
            elif block_hash in self.b1:
                # Ghost hit on B1: recency matters more -> grow T1 target.
                delta = max(1, len(self.b2) / len(self.b1))
                self.target_t1_size = min(
                    self.target_t1_size + delta, self.cache_capacity
                )
                # move to MRU position (end) to keep it fresh in the ghost list
                self.b1.move_to_end(block_hash)
            elif block_hash in self.b2:
                # Ghost hit on B2: frequency matters more -> shrink T1 target.
                delta = max(1, len(self.b1) / len(self.b2))
                self.target_t1_size = max(self.target_t1_size - delta, 0)
                # move to MRU position (end) to keep it fresh in the ghost list
                self.b2.move_to_end(block_hash)

    def complete_load(self, block_hashes: Iterable[BlockHash]):
        """Unpin blocks previously pinned by prepare_load()."""
        for block_hash in block_hashes:
            block = self.t1.get(block_hash) or self.t2.get(block_hash)
            assert block is not None, f"Block {block_hash!r} not found"
            assert block.ref_cnt > 0, f"Block {block_hash!r} ref_cnt is already 0"
            block.ref_cnt -= 1

    def prepare_store(
        self, block_hashes: Iterable[BlockHash]
    ) -> PrepareStoreOutput | None:
        """
        Allocate space for not-yet-cached blocks, evicting per ARC policy.

        Returns:
            A PrepareStoreOutput, or None if not enough unreferenced blocks
            could be evicted to make room.
        """
        block_hashes_to_store = []
        for block_hash in block_hashes:
            if block_hash not in self.t1 and block_hash not in self.t2:
                block_hashes_to_store.append(block_hash)

        if not block_hashes_to_store:
            return PrepareStoreOutput(
                block_hashes_to_store=[],
                store_spec=self.backend.get_load_store_spec([], []),
                block_hashes_evicted=[],
            )

        num_blocks_to_evict = (
            len(block_hashes_to_store) - self.backend.get_num_free_blocks()
        )

        to_evict = []
        while num_blocks_to_evict > 0:
            block_to_evict = None
            if len(self.t1) >= int(self.target_t1_size):
                # try to evict the least recently used (oldest) block from T1
                for block_hash, block in self.t1.items():
                    if block.ref_cnt == 0:
                        block_to_evict = (block_hash, block)
                        eviction_t = self.t1
                        eviction_b = self.b1
                        break

            if not block_to_evict:
                # try to evict the least recently used (oldest) block from T2
                for block_hash, block in self.t2.items():
                    if block.ref_cnt == 0:
                        block_to_evict = (block_hash, block)
                        eviction_t = self.t2
                        eviction_b = self.b2
                        break
                else:
                    # cannot evict enough blocks, cache is full of in-use items
                    return None

            block_hash, block = block_to_evict
            # Move the evicted hash to the corresponding ghost list.
            del eviction_t[block_hash]
            eviction_b[block_hash] = None
            to_evict.append(block_hash)
            self.backend.free(block)
            num_blocks_to_evict -= 1

        # Bound each ghost list to the cache capacity (drop oldest entries).
        for b in (self.b1, self.b2):
            for _ in range(len(b) - self.cache_capacity):
                b.popitem(last=False)

        if to_evict and self.events is not None:
            self.events.append(
                OffloadingEvent(
                    block_hashes=to_evict,
                    block_size=self.backend.block_size,
                    medium=self.backend.medium,
                    removed=True,
                )
            )

        blocks = self.backend.allocate_blocks(block_hashes_to_store)
        assert len(blocks) == len(block_hashes_to_store), (
            "Backend did not allocate the expected number of blocks"
        )

        # New blocks always enter T1 and leave the ghost lists.
        for block_hash, block in zip(block_hashes_to_store, blocks):
            self.t1[block_hash] = block
            self.b1.pop(block_hash, None)
            self.b2.pop(block_hash, None)

        store_spec = self.backend.get_load_store_spec(block_hashes_to_store, blocks)

        return PrepareStoreOutput(
            block_hashes_to_store=block_hashes_to_store,
            store_spec=store_spec,
            block_hashes_evicted=to_evict,
        )

    def complete_store(self, block_hashes: Iterable[BlockHash], success: bool = True):
        """
        Finish a pending store. On success, pending blocks become ready; on
        failure, pending blocks are freed and dropped while already-ready
        blocks are left untouched.
        """
        stored_block_hashes: list[BlockHash] = []
        if success:
            for block_hash in block_hashes:
                block = self.t1.get(block_hash) or self.t2.get(block_hash)
                if block is not None and not block.is_ready:
                    # Resetting ref_cnt marks the block ready/unreferenced,
                    # mirroring LRUOffloadingManager.complete_store.
                    block.ref_cnt = 0
                    stored_block_hashes.append(block_hash)
        else:
            for block_hash in block_hashes:
                block = self.t1.get(block_hash) or self.t2.get(block_hash)
                # Bug fix: only drop blocks whose store was still pending.
                # Previously already-ready blocks were popped from the cache
                # without being freed, leaking backend blocks and diverging
                # from the LRU manager's behavior.
                if block is not None and not block.is_ready:
                    self.t1.pop(block_hash, None)
                    self.t2.pop(block_hash, None)
                    self.backend.free(block)

        if stored_block_hashes and self.events is not None:
            self.events.append(
                OffloadingEvent(
                    block_hashes=stored_block_hashes,
                    block_size=self.backend.block_size,
                    medium=self.backend.medium,
                    removed=False,
                )
            )

    def take_events(self) -> Iterable[OffloadingEvent]:
        """Drain and yield events collected since the last call."""
        if self.events is not None:
            yield from self.events
            self.events.clear()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/factory.py
vllm/v1/kv_offload/factory.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import importlib
from collections.abc import Callable
from typing import TYPE_CHECKING

from vllm.logger import init_logger
from vllm.v1.kv_offload.spec import OffloadingSpec

if TYPE_CHECKING:
    from vllm.config import VllmConfig

logger = init_logger(__name__)


class OffloadingSpecFactory:
    """Registry of OffloadingSpec classes, resolved lazily by name."""

    # spec name -> zero-arg callable resolving the spec class on demand
    _registry: dict[str, Callable[[], type[OffloadingSpec]]] = {}

    @classmethod
    def register_spec(cls, name: str, module_path: str, class_name: str) -> None:
        """Register a spec with a lazy-loading module and class name."""
        if name in cls._registry:
            raise ValueError(f"Connector '{name}' is already registered.")

        def _resolve() -> type[OffloadingSpec]:
            # import is deferred until the spec is actually requested
            return getattr(importlib.import_module(module_path), class_name)

        cls._registry[name] = _resolve

    @classmethod
    def create_spec(
        cls,
        config: "VllmConfig",
    ) -> OffloadingSpec:
        """Instantiate the spec selected by the KV-transfer extra config."""
        kv_transfer_config = config.kv_transfer_config
        assert kv_transfer_config is not None
        extra_config = kv_transfer_config.kv_connector_extra_config
        spec_name = extra_config.get("spec_name", "CPUOffloadingSpec")

        resolver = cls._registry.get(spec_name)
        if resolver is not None:
            spec_cls = resolver()
        else:
            # fall back to a user-supplied module path for unregistered specs
            spec_module_path = extra_config.get("spec_module_path")
            if spec_module_path is None:
                raise ValueError(f"Unsupported spec type: {spec_name}")
            spec_cls = getattr(
                importlib.import_module(spec_module_path), spec_name
            )

        assert issubclass(spec_cls, OffloadingSpec)
        logger.info("Creating offloading spec with name: %s", spec_name)
        return spec_cls(config)


# Register various specs here.
OffloadingSpecFactory.register_spec(
    "CPUOffloadingSpec", "vllm.v1.kv_offload.cpu", "CPUOffloadingSpec"
)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/__init__.py
vllm/v1/kv_offload/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/backend.py
vllm/v1/kv_offload/backend.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import ctypes
from abc import ABC, abstractmethod
from collections.abc import Iterable

from vllm.v1.core.kv_cache_utils import BlockHash
from vllm.v1.kv_offload.abstract import LoadStoreSpec


class BlockStatus(ctypes.Structure):
    """
    Offloading status for a single block of KV data.

    Holds the following information:
    ref_cnt - the current number of transfers using this block
        as a source.
        A value of -1 indicates the block is not yet ready to be read.

    Backend subclasses may extend ``_fields_`` with additional,
    backend-specific information (e.g. a block ID) describing how to
    actually read/write the block.
    """

    _fields_ = [("ref_cnt", ctypes.c_int32)]

    def __init__(self):
        super().__init__()
        # initialize block as "not ready" (ref_cnt = -1)
        self.ref_cnt = -1

    @property
    def is_ready(self) -> bool:
        """
        Returns whether the block is ready to be read.
        """
        return self.ref_cnt >= 0


class Backend(ABC):
    """
    An abstract class for allocating and returning specs for writing
    KV blocks to some backend.
    """

    def __init__(self, block_size: int, medium: str):
        self.block_size = block_size
        self.medium = medium

    @abstractmethod
    def get_num_free_blocks(self) -> int:
        """
        Returns the current number of blocks that can be allocated.
        """
        pass

    @abstractmethod
    def allocate_blocks(self, block_hashes: list[BlockHash]) -> list[BlockStatus]:
        """
        Allocate space for writing blocks.
        This method assumes there is enough space for allocation.
        It is unsafe to use without checking get_num_free_blocks beforehand.

        Args:
            block_hashes: the hashes identifying the blocks to be written.

        Returns:
            A list of BlockStatus for the allocated blocks.
            The ref_cnt of each returned item will be -1,
            meaning the block is not yet ready to be read.
        """
        pass

    @abstractmethod
    def free(self, block: BlockStatus) -> None:
        """
        Free a previously allocated block.
        You should only call this function with blocks returned by
        allocate_blocks, and only once per each block.

        Args:
            block: The block to be freed.
        """
        pass

    def get_load_store_spec(
        self, block_hashes: Iterable[BlockHash], blocks: Iterable[BlockStatus]
    ) -> LoadStoreSpec:
        """
        Get backend-specific information on how to read/write blocks.

        Args:
            block_hashes: the list of block hashes identifying the blocks.
            blocks: the list of blocks.

        Returns:
            A LoadStoreSpec that can be used by a worker to read/write
            the blocks.
        """
        raise NotImplementedError
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/worker/worker.py
vllm/v1/kv_offload/worker/worker.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod

from vllm.logger import init_logger
from vllm.v1.kv_offload.abstract import LoadStoreSpec

# a single transfer spec (src_blocks_spec, dst_blocks_spec)
TransferSpec = tuple[LoadStoreSpec, LoadStoreSpec]
# transfers are forwarded to workers by (src_medium, dst_medium)
TransferType = tuple[str, str]
# transfer result (job_id, success)
TransferResult = tuple[int, bool]

logger = init_logger(__name__)


class OffloadingHandler(ABC):
    """Worker-side executor of asynchronous KV data transfers.

    Implementations kick off async KV data transfer requests and allow
    collecting back completion statuses:

    transfer_async() - kicks off a new transfer job
    get_finished() - returns a list of newly finished job IDs.
    """

    @abstractmethod
    def transfer_async(self, job_id: int, spec: TransferSpec) -> bool:
        """Initiate an asynchronous transfer of KV data.

        Args:
            job_id: a unique ID that will be used when notifying back
                on transfer completion.
            spec: the (src, dst) spec of the KV data transfer.

        Returns:
            True if transfer was submitted successfully.
        """

    @abstractmethod
    def get_finished(self) -> list[TransferResult]:
        """Return (job_id, success) pairs for transfers finished since
        the last call."""


class OffloadingWorker:
    """Dispatches async KV transfers to registered OffloadingHandlers.

    Runs in the worker. Each handler serves exactly one
    (src_medium, dst_medium) transfer type; transfer_async() routes a
    job to the matching handler and get_finished() aggregates
    completions across all handlers.
    """

    def __init__(self):
        self.handlers: set[OffloadingHandler] = set()
        self.transfer_type_to_handler: dict[TransferType, OffloadingHandler] = {}

    def register_handler(
        self,
        src_cls: type[LoadStoreSpec],
        dst_cls: type[LoadStoreSpec],
        handler: OffloadingHandler,
    ) -> None:
        """Register `handler` for transfers from src_cls to dst_cls media.

        Args:
            src_cls: the source type of transfers handled by this handler.
            dst_cls: the destination type of transfers handled by this
                handler.
            handler: the handler that will handle transfers.
        """
        key = (src_cls.medium(), dst_cls.medium())
        # exactly one handler per transfer type
        assert key not in self.transfer_type_to_handler
        self.handlers.add(handler)
        self.transfer_type_to_handler[key] = handler

    def transfer_async(self, job_id: int, spec: TransferSpec) -> bool:
        """Route the (src, dst) transfer to its handler and submit it.

        Args:
            job_id: a unique ID that will be used when notifying back
                on transfer completion.
            spec: the (src, dst) spec of the KV data transfer.

        Returns:
            True if transfer was submitted successfully.
        """
        src, dst = spec
        transfer_type = (src.medium(), dst.medium())
        handler = self.transfer_type_to_handler.get(transfer_type)
        assert handler is not None

        try:
            success = handler.transfer_async(job_id, spec)
        except Exception as e:
            logger.warning(
                "Exception in %r transfer %d: %r",
                transfer_type,
                job_id,
                e,
                exc_info=True,
            )
            return False

        if success:
            logger.debug("Submitted %r transfer %d: %r", transfer_type, job_id, spec)
        else:
            logger.warning("Failed to submit %r transfer %d", transfer_type, job_id)
        return success

    def get_finished(self) -> list[TransferResult]:
        """Collect (job_id, success) results from every handler."""
        return [
            result
            for handler in self.handlers
            for result in handler.get_finished()
        ]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/worker/__init__.py
vllm/v1/kv_offload/worker/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/worker/cpu_gpu.py
vllm/v1/kv_offload/worker/cpu_gpu.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections import deque

import numpy as np
import torch

from vllm import _custom_ops as ops
from vllm.attention.backends.abstract import AttentionBackend
from vllm.logger import init_logger
from vllm.utils.platform_utils import is_pin_memory_available
from vllm.v1.kv_offload.mediums import BlockIDsLoadStoreSpec
from vllm.v1.kv_offload.worker.worker import (
    OffloadingHandler,
    TransferResult,
    TransferSpec,
)

logger = init_logger(__name__)


def expand_block_ids(
    block_ids: np.ndarray,
    block_size_factor: int,
    output: np.ndarray,
    skip_count: int = 0,
):
    """
    Convert a list of block IDs to a list of matching block ids,
    assuming each block is composed of actual block_size_factor blocks.
    Outputs to output tensor.
    The first skip_count blocks will be skipped.
    Note that skip_count must be less than block_size_factor.

    For example, if block_ids = [0, 1, 3] and block_size_factor = 4,
    then it yields [0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15]
    since 0 maps to [0, 1, 2, 3]
    1 maps to [4, 5, 6, 7]
    and 3 maps to [12, 13, 14, 15]
    """
    assert skip_count < block_size_factor
    # sub-block offsets: the first block honors skip_count, the rest are full
    first_range = np.arange(skip_count, block_size_factor)
    full_range = np.arange(0, block_size_factor)

    output_idx = 0
    for i, block_id in enumerate(block_ids):
        base_block_id = block_id * block_size_factor
        indices = first_range if i == 0 else full_range
        output_end_idx = output_idx + len(indices)
        output[output_idx:output_end_idx] = base_block_id + indices
        output_idx = output_end_idx


class SingleDirectionOffloadingHandler(OffloadingHandler):
    """
    SingleDirectionOffloadingHandler handles transfers for a single
    direction, either CPU->GPU or GPU->CPU.
    Transfers are guaranteed to be executed in order of their submission.
    Each transfer uses a unique CUDA stream, and its stream will start
    executing only after the streams of previous transfers have finished.
    """

    def __init__(
        self,
        src_tensors: list[torch.Tensor],
        dst_tensors: list[torch.Tensor],
        kv_dim_before_num_blocks: list[bool],
        src_block_size_factor: int,
        dst_block_size_factor: int,
        priority: int,
    ):
        """
        Initialize a SingleDirectionOffloadingHandler.

        Args:
            src_tensors: list of KV cache tensors to copy from.
            dst_tensors: list of KV cache tensors to copy to.
                Order should match src_tensors.
            kv_dim_before_num_blocks: list of bools, indicating whether
                the respective KV cache tensor has a KV dimension before
                its num_blocks dimension. e.g. (2, num_blocks, ...)
            src_block_size_factor: The number of kernel blocks per
                KV block in a source tensor.
            dst_block_size_factor: The number of kernel blocks per
                KV block in a destination tensor.
            priority: The priority of the backing CUDA streams.
                Lower numbers indicate higher priority.
        """
        assert len(src_tensors) == len(dst_tensors) == len(kv_dim_before_num_blocks)
        self.src_tensors: list[torch.Tensor] = src_tensors
        self.dst_tensors: list[torch.Tensor] = dst_tensors
        self.kv_dim_before_num_blocks: list[bool] = kv_dim_before_num_blocks
        self.src_block_size_factor: int = src_block_size_factor
        self.dst_block_size_factor: int = dst_block_size_factor
        self.priority = priority
        # queue of transfers (job_id, stream, event)
        self._transfers: deque[tuple[int, torch.cuda.Stream, torch.Event]] = deque()
        # list of CUDA streams available for re-use
        self._stream_pool: list[torch.cuda.Stream] = []
        # list of CUDA events available for re-use
        self._event_pool: list[torch.Event] = []

    def transfer_async(self, job_id: int, transfer_spec: TransferSpec) -> bool:
        """Submit one src->dst block copy on a pooled CUDA stream.

        Builds a (src_sub_block, dst_sub_block) index mapping, chains the
        new stream after the previous transfer's event (FIFO ordering),
        and records an event used by get_finished() to poll completion.
        """
        src_spec, dst_spec = transfer_spec
        assert isinstance(src_spec, BlockIDsLoadStoreSpec)
        assert isinstance(dst_spec, BlockIDsLoadStoreSpec)
        src_blocks = src_spec.block_ids
        dst_blocks = dst_spec.block_ids
        assert src_blocks.ndim == 1
        assert dst_blocks.ndim == 1
        # totals in kernel ("sub") blocks on each side
        src_sub_block_count = src_blocks.size * self.src_block_size_factor
        dst_sub_block_count = dst_blocks.size * self.dst_block_size_factor
        # leading src sub-blocks not covered by dst; the assert below
        # enforces that the remaining counts line up exactly
        src_sub_blocks_to_skip = -dst_blocks.size % self.src_block_size_factor
        assert dst_sub_block_count == src_sub_block_count - src_sub_blocks_to_skip

        # column 0: src sub-block IDs, column 1: dst sub-block IDs
        src_to_dst = np.empty((dst_sub_block_count, 2), dtype=np.int64)
        expand_block_ids(
            src_blocks,
            self.src_block_size_factor,
            src_to_dst[:, 0],
            skip_count=src_sub_blocks_to_skip,
        )
        expand_block_ids(dst_blocks, self.dst_block_size_factor, src_to_dst[:, 1])
        src_to_dst_tensor = torch.from_numpy(src_to_dst)

        # reuse a pooled stream/event when available
        stream = (
            self._stream_pool.pop()
            if self._stream_pool
            else torch.cuda.Stream(priority=self.priority)
        )
        event = self._event_pool.pop() if self._event_pool else torch.Event()

        if self._transfers:
            _, _, last_event = self._transfers[-1]
            # assure job will start only after the previous one completes
            stream.wait_event(last_event)

        with torch.cuda.stream(stream):
            for src_tensor, dst_tensor, kv_dim in zip(
                self.src_tensors, self.dst_tensors, self.kv_dim_before_num_blocks
            ):
                if kv_dim:
                    # (2, num_blocks, ...) layout: copy K and V separately
                    src_key_cache, src_value_cache = src_tensor
                    dst_key_cache, dst_value_cache = dst_tensor
                    ops.swap_blocks(src_key_cache, dst_key_cache, src_to_dst_tensor)
                    ops.swap_blocks(src_value_cache, dst_value_cache, src_to_dst_tensor)
                else:
                    ops.swap_blocks(src_tensor, dst_tensor, src_to_dst_tensor)
            event.record(stream)

        self._transfers.append((job_id, stream, event))

        # success
        return True

    def get_finished(self) -> list[TransferResult]:
        """Pop completed transfers (in FIFO order) and recycle their
        streams/events back into the pools."""
        results: list[TransferResult] = []
        # event.query() is True once the recorded event has completed
        while self._transfers and self._transfers[0][2].query():
            job_id, stream, event = self._transfers.popleft()
            results.append((job_id, True))
            self._stream_pool.append(stream)
            self._event_pool.append(event)
        return results


class CpuGpuOffloadingHandlers:
    """Builds paired GPU->CPU and CPU->GPU handlers, allocating the CPU
    KV cache tensors to mirror the given GPU caches."""

    def __init__(
        self,
        gpu_block_size: int,
        cpu_block_size: int,
        num_cpu_blocks: int,
        gpu_caches: dict[str, torch.Tensor],
        attn_backends: dict[str, type[AttentionBackend]],
    ):
        assert gpu_caches
        # CPU blocks must be a whole multiple of GPU blocks
        assert cpu_block_size % gpu_block_size == 0
        block_size_factor = cpu_block_size // gpu_block_size
        pin_memory = is_pin_memory_available()

        # allocate cpu tensors
        logger.info("Allocating %d CPU tensors...", len(gpu_caches))
        gpu_tensors: list[torch.Tensor] = []
        cpu_tensors: list[torch.Tensor] = []
        kv_dim_before_num_blocks: list[bool] = []
        kernel_block_size: int | None = None
        for layer_name, gpu_tensor in gpu_caches.items():
            gpu_tensors.append(gpu_tensor)
            gpu_shape = gpu_tensor.shape
            attn_backend = attn_backends[layer_name]
            # probe the backend with sentinel values (num_blocks=1234,
            # block_size=16) to locate those dimensions in the real shape
            test_shape = attn_backend.get_kv_cache_shape(
                num_blocks=1234, block_size=16, num_kv_heads=8, head_size=256
            )
            has_layers_dim = False
            if len(gpu_shape) != len(test_shape):
                # cross-layers tensor
                # shape is (num_blocks, ...)
                assert len(gpu_shape) == len(test_shape) + 1
                num_blocks_idx = 0
                has_layers_dim = True
                kv_dim_before_num_blocks.append(False)
                # prepend a dummy num_layers=80 to test_shape
                test_shape = (80,) + test_shape
            elif test_shape[0] == 1234:
                # shape is (num_blocks, ...)
                num_blocks_idx = 0
                kv_dim_before_num_blocks.append(False)
            else:
                # shape should be (2, num_blocks, ...)
                assert test_shape[0] == 2
                assert test_shape[1] == 1234
                assert gpu_shape[0] == 2
                num_blocks_idx = 1
                kv_dim_before_num_blocks.append(True)

            try:
                kv_cache_stride_order = attn_backend.get_kv_cache_stride_order(
                    include_num_layers_dimension=has_layers_dim
                )
                assert len(kv_cache_stride_order) == len(gpu_shape)
            except (AttributeError, NotImplementedError):
                # backend has no custom stride order; use identity
                kv_cache_stride_order = tuple(range(len(gpu_shape)))

            # permute test_shape according to stride_order
            test_shape = tuple(test_shape[i] for i in kv_cache_stride_order)

            # find block_size (16) dimension index
            block_size_idx = test_shape.index(16)
            if kernel_block_size is not None:
                # all layers must agree on the kernel block size
                assert kernel_block_size == gpu_shape[block_size_idx]
            else:
                kernel_block_size = gpu_shape[block_size_idx]

            assert gpu_block_size % kernel_block_size == 0

            # CPU tensor mirrors the GPU layout, with num_blocks scaled
            # to num_cpu_blocks (in CPU-block units)
            cpu_shape = list(gpu_shape)
            cpu_shape[num_blocks_idx] = num_cpu_blocks * block_size_factor

            logger.debug("Allocating CPU tensor of shape %r", cpu_shape)

            cpu_tensors.append(
                torch.zeros(
                    cpu_shape,
                    dtype=gpu_tensor.dtype,
                    device="cpu",
                    pin_memory=pin_memory,
                )
            )

        assert kernel_block_size is not None
        gpu_block_size_factor = gpu_block_size // kernel_block_size
        cpu_block_size_factor = cpu_block_size // kernel_block_size

        # TODO (orozery): adapt swap_blocks to support gpu_block_size_factor
        assert gpu_block_size_factor == 1

        # priority=1 (lower) for offload, priority=-1 (higher) for onload;
        # per the handler docstring, lower numbers mean higher priority
        self.gpu_to_cpu_handler = SingleDirectionOffloadingHandler(
            src_tensors=gpu_tensors,
            dst_tensors=cpu_tensors,
            kv_dim_before_num_blocks=kv_dim_before_num_blocks,
            src_block_size_factor=gpu_block_size_factor,
            dst_block_size_factor=cpu_block_size_factor,
            priority=1,
        )
        self.cpu_to_gpu_handler = SingleDirectionOffloadingHandler(
            src_tensors=cpu_tensors,
            dst_tensors=gpu_tensors,
            kv_dim_before_num_blocks=kv_dim_before_num_blocks,
            src_block_size_factor=cpu_block_size_factor,
            dst_block_size_factor=gpu_block_size_factor,
            priority=-1,
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/backends/cpu.py
vllm/v1/kv_offload/backends/cpu.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import ctypes
from collections.abc import Iterable

from vllm.v1.core.kv_cache_utils import BlockHash
from vllm.v1.kv_offload.abstract import LoadStoreSpec
from vllm.v1.kv_offload.backend import Backend, BlockStatus
from vllm.v1.kv_offload.mediums import CPULoadStoreSpec


class CPUBlockStatus(BlockStatus):
    """Block status for CPU-offloaded blocks, extended with a block ID."""

    _fields_ = BlockStatus._fields_ + [("block_id", ctypes.c_int64)]  # type: ignore

    def __init__(self, block_id: int):
        super().__init__()
        self.block_id = block_id


class CPUBackend(Backend):
    """A Backend storing KV blocks in a fixed pool of CPU blocks.

    Block IDs are handed out sequentially until the pool's high-water
    mark is reached; IDs released via free() are recycled from a free
    list.
    """

    def __init__(self, block_size: int, num_blocks: int):
        super().__init__(block_size=block_size, medium=CPULoadStoreSpec.medium())
        self.num_blocks: int = num_blocks
        # count of IDs ever handed out (high-water mark)
        self.num_allocated_blocks: int = 0
        # IDs returned by free(), available for reuse
        self.allocated_blocks_free_list: list[int] = []

    def get_num_free_blocks(self):
        """Number of blocks that can currently be allocated."""
        never_used = self.num_blocks - self.num_allocated_blocks
        return len(self.allocated_blocks_free_list) + never_used

    def allocate_blocks(self, block_hashes: list[BlockHash]) -> list[BlockStatus]:
        """Allocate one CPU block per hash (caller ensures capacity)."""
        requested = len(block_hashes)
        fresh_count = min(requested, self.num_blocks - self.num_allocated_blocks)
        reused_count = requested - fresh_count
        assert len(self.allocated_blocks_free_list) >= reused_count

        # hand out never-used IDs first
        first_fresh = self.num_allocated_blocks
        statuses: list[BlockStatus] = [
            CPUBlockStatus(block_id)
            for block_id in range(first_fresh, first_fresh + fresh_count)
        ]
        self.num_allocated_blocks += fresh_count

        # then recycle IDs from the free list
        for _ in range(reused_count):
            statuses.append(CPUBlockStatus(self.allocated_blocks_free_list.pop()))
        return statuses

    def free(self, block: BlockStatus):
        """Return a block's ID to the free list for reuse."""
        assert isinstance(block, CPUBlockStatus)
        self.allocated_blocks_free_list.append(block.block_id)

    def get_load_store_spec(
        self, block_hashes: Iterable[BlockHash], blocks: Iterable[BlockStatus]
    ) -> LoadStoreSpec:
        """Build a CPU load/store spec from the blocks' IDs."""
        return CPULoadStoreSpec([blk.block_id for blk in blocks])
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/kv_offload/backends/__init__.py
vllm/v1/kv_offload/backends/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/block_pool.py
vllm/v1/core/block_pool.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections.abc import Iterable, Sequence from typing import Any from vllm.distributed.kv_events import ( MEDIUM_GPU, AllBlocksCleared, BlockRemoved, BlockStored, KVCacheEvent, ) from vllm.logger import init_logger from vllm.v1.core.kv_cache_metrics import KVCacheMetricsCollector from vllm.v1.core.kv_cache_utils import ( BlockHash, BlockHashList, BlockHashListWithBlockSize, BlockHashWithGroupId, ExternalBlockHash, FreeKVCacheBlockQueue, KVCacheBlock, get_block_hash, make_block_hash_with_group_id, maybe_convert_block_hash, ) from vllm.v1.request import Request logger = init_logger(__name__) class BlockHashToBlockMap: """ Cache of blocks that are used for prefix caching. It caches blocks from hash directly to a block or multiple blocks (i.e. {block_hash: KVCacheBlocks}) - Mostly block_hash maps to a single KVCacheBlock, and KVCacheBlocks would simply be a KVCacheBlock. - Otherwise, KVCacheBlocks is a dict from {block_id: KVCacheBlock} A cached block is a full block with a block hash that can be used for prefix caching. The cached block may be used by running requests or in the free_block_queue that could potentially be evicted. NOTE #1: We currently don't de-duplicate the blocks in the cache, meaning that if a block becomes full and is cached, we don't check if there is already an identical block in the cache. This is because we want to make sure the allocated block IDs won't change so that block tables are append-only. NOTE #2: The union type is introduced in order to reduce GC costs from the inner dict. """ def __init__(self): self._cache: dict[ BlockHashWithGroupId, KVCacheBlock | dict[int, KVCacheBlock] ] = {} def get_one_block(self, key: BlockHashWithGroupId) -> KVCacheBlock | None: """ Gets any block with the given block hash key. 
""" blocks = self._cache.get(key) if blocks is not None: if isinstance(blocks, KVCacheBlock): return blocks if isinstance(blocks, dict): return next(iter(blocks.values())) self._unexpected_blocks_type(blocks) return None def insert(self, key: BlockHashWithGroupId, block: KVCacheBlock) -> None: """ Inserts the KVCacheBlock to the cache """ blocks = self._cache.get(key) if blocks is None: # When key is not found, attach a single block to the key self._cache[key] = block elif isinstance(blocks, KVCacheBlock): # If there's a block with the same key, merge the original block # and the new block into a dict self._cache[key] = {blocks.block_id: blocks, block.block_id: block} elif isinstance(blocks, dict): # If it's already a dict, simply insert the block blocks[block.block_id] = block else: self._unexpected_blocks_type(blocks) def pop(self, key: BlockHashWithGroupId, block_id: int) -> KVCacheBlock | None: """ Checks if block_hash exists and pop block_id from the cache """ blocks = self._cache.pop(key, None) if blocks is None: # block_hash not found in the cache return None # TODO(Jialin): If key is found, block_id should always present # in blocks. We currently keep the original behaviour for safety. # # Will add block_id == blocks.block_id assertion and # use del blocks[block_id] instead as followup. if isinstance(blocks, KVCacheBlock): if blocks.block_id == block_id: return blocks # If the single block ID doesn't match, we should put the # block back (it should happen rarely) self._cache[key] = blocks return None if isinstance(blocks, dict): # Try to pop block_id from the block dict, and if dict still # contain blocks, put back to the cache. 
block = blocks.pop(block_id, None) if len(blocks) > 0: self._cache[key] = blocks return block self._unexpected_blocks_type(blocks) return None def __len__(self) -> int: return len(self._cache) def _unexpected_blocks_type(self, blocks: Any) -> None: raise AssertionError(f"Invalid KV cache block type {type(blocks)}") class BlockPool: """BlockPool that manages KVCacheBlocks. It provides methods to allocate, free and cache the kv cache blocks. The free_block_queue stores the free blocks in eviction order to enable allocation, free, and cache eviction. The cached_block_hash_to_block maps between block hash and cached block to support finding cached blocks by their block hash. Args: num_gpu_blocks: The number of blocks in the pool. enable_caching: Whether to enable prefix caching. hash_block_size: The block size of which the block hashes are computed. The actual block size usually equals hash_block_size, but in cases where different KV cache groups have different block sizes, the actual block size can be a multiple of hash_block_size. enable_kv_cache_events: Whether to enable kv cache events. metrics_collector: Optional metrics collector for tracking block residency. """ def __init__( self, num_gpu_blocks: int, enable_caching: bool, hash_block_size: int, enable_kv_cache_events: bool = False, metrics_collector: KVCacheMetricsCollector | None = None, ): assert isinstance(num_gpu_blocks, int) and num_gpu_blocks > 0 self.num_gpu_blocks = num_gpu_blocks self.enable_caching = enable_caching self.hash_block_size = hash_block_size # All kv-cache blocks. self.blocks: list[KVCacheBlock] = [ KVCacheBlock(idx) for idx in range(num_gpu_blocks) ] # Free block queue that constructs and manipulates a doubly linked # list of free blocks (including eviction candidates when caching is # enabled). 
self.free_block_queue = FreeKVCacheBlockQueue(self.blocks) # Cache for block lookup self.cached_block_hash_to_block: BlockHashToBlockMap = BlockHashToBlockMap() # To represent a placeholder block with block_id=0. # The ref_cnt of null_block is not maintained, needs special care to # avoid freeing it. self.null_block = self.free_block_queue.popleft() self.null_block.is_null = True self.enable_kv_cache_events = enable_kv_cache_events self.kv_event_queue: list[KVCacheEvent] = [] self.metrics_collector = metrics_collector def get_cached_block( self, block_hash: BlockHash, kv_cache_group_ids: list[int] ) -> list[KVCacheBlock] | None: """Get the cached block by the block hash for each group in `kv_cache_group_ids`, or None if cache miss for any group. If there are duplicated blocks, we return the first block in the cache. Args: block_hash: The hash value of the block. kv_cache_group_ids: The ids of the KV cache groups. Returns: The cached blocks if exists, or None. """ cached_blocks = [] for group_id in kv_cache_group_ids: block_hash_with_group_id = make_block_hash_with_group_id( block_hash, group_id ) block = self.cached_block_hash_to_block.get_one_block( block_hash_with_group_id ) if not block: return None cached_blocks.append(block) return cached_blocks def cache_full_blocks( self, request: Request, blocks: list[KVCacheBlock], num_cached_blocks: int, num_full_blocks: int, block_size: int, kv_cache_group_id: int, ) -> None: """Cache a list of full blocks for prefix caching. This function takes a list of blocks that will have their block hash metadata to be updated and cached. Given a request, it updates the metadata for each block and caching it in the `cached_block_hash_to_block`. The block hashes values are computed by the Request object immediately when it is created and when new tokens are appended. Args: request: The request to cache the blocks. blocks: All blocks in the request. num_cached_blocks: The number of blocks that are already cached. 
num_full_blocks: The number of blocks that are full and should be cached after this function. block_size: Number of tokens in each block. kv_cache_group_id: The id of the KV cache group. """ if num_cached_blocks >= num_full_blocks: return new_full_blocks = blocks[num_cached_blocks:num_full_blocks] assert len(request.block_hashes) >= num_full_blocks if block_size == self.hash_block_size: # Common case. block_hashes: BlockHashList = request.block_hashes else: # block_size is a multiple of hash_block_size. This happens when # different KV cache groups have different block sizes. assert block_size % self.hash_block_size == 0 # Recalculate block_hashes at the granularity of block_size, using # the original block_hashes (at the granularity of hash_block_size). block_hashes = BlockHashListWithBlockSize( request.block_hashes, self.hash_block_size, block_size ) new_block_hashes = block_hashes[num_cached_blocks:] new_hashes: list[ExternalBlockHash] | None = ( [] if self.enable_kv_cache_events else None ) for i, blk in enumerate(new_full_blocks): # Some blocks may be null blocks when enabling sparse attention like # sliding window attention. We skip null blocks here. if blk.is_null: continue assert blk.block_hash is None block_hash = new_block_hashes[i] # Update and added the full block to the cache. 
block_hash_with_group_id = make_block_hash_with_group_id( block_hash, kv_cache_group_id ) blk.block_hash = block_hash_with_group_id self.cached_block_hash_to_block.insert(block_hash_with_group_id, blk) if new_hashes is not None: new_hashes.append(maybe_convert_block_hash(block_hash)) if self.enable_kv_cache_events: if num_cached_blocks == 0: parent_block_hash: ExternalBlockHash | None = None else: parent_block_hash = maybe_convert_block_hash( block_hashes[num_cached_blocks - 1] ) self.kv_event_queue.append( BlockStored( block_hashes=new_hashes, parent_block_hash=parent_block_hash, token_ids=request.all_token_ids[ num_cached_blocks * block_size : num_full_blocks * block_size ], block_size=block_size, lora_id=request.lora_request.adapter_id if request.lora_request else None, medium=MEDIUM_GPU, lora_name=request.lora_request.name if request.lora_request else None, ) ) def get_new_blocks(self, num_blocks: int) -> list[KVCacheBlock]: """Get new blocks from the free block pool. Note that we do not check block cache in this function. Args: num_blocks: The number of blocks to allocate. Returns: A list of new block. """ if num_blocks > self.get_num_free_blocks(): raise ValueError(f"Cannot get {num_blocks} free blocks from the pool") ret: list[KVCacheBlock] = self.free_block_queue.popleft_n(num_blocks) # In order to only iterate the list once, we duplicated code a bit if self.enable_caching: for block in ret: self._maybe_evict_cached_block(block) assert block.ref_cnt == 0 block.ref_cnt += 1 if self.metrics_collector: self.metrics_collector.on_block_allocated(block) else: for block in ret: assert block.ref_cnt == 0 block.ref_cnt += 1 if self.metrics_collector: self.metrics_collector.on_block_allocated(block) return ret def _maybe_evict_cached_block(self, block: KVCacheBlock) -> bool: """ If a block is cached in `cached_block_hash_to_block`, we reset its hash metadata and evict it from the cache. Args: block: The block to evict. 
Returns: True if the block is evicted, False otherwise. """ # Clean up metrics tracking first to prevent leaks if self.metrics_collector: self.metrics_collector.on_block_evicted(block) block_hash = block.block_hash if block_hash is None: # The block doesn't have hash, eviction is not needed return False if self.cached_block_hash_to_block.pop(block_hash, block.block_id) is None: # block not found in cached_block_hash_to_block, # eviction is not needed return False block.reset_hash() if self.enable_kv_cache_events: # FIXME (Chen): Not sure whether we should return `hash_value` # or `(hash_value, group_id)` here. But it's fine now because # we disable hybrid kv cache manager when kv cache event is # enabled, so there is only one group. self.kv_event_queue.append( BlockRemoved( block_hashes=[maybe_convert_block_hash(get_block_hash(block_hash))], medium=MEDIUM_GPU, ) ) return True def touch(self, blocks: Sequence[KVCacheBlock]) -> None: """Touch a block increases its reference count by 1, and may remove the block from the free queue. This is used when a block is hit by another request with the same prefix. Args: blocks: A list of blocks to touch. """ for block in blocks: # ref_cnt=0 means this block is in the free list (i.e. eviction # candidate), so remove it. if block.ref_cnt == 0 and not block.is_null: self.free_block_queue.remove(block) block.ref_cnt += 1 if self.metrics_collector: self.metrics_collector.on_block_accessed(block) def free_blocks(self, ordered_blocks: Iterable[KVCacheBlock]) -> None: """Free a list of blocks. The blocks should be ordered by their eviction priority, where the first block will be evicted first. Args: ordered_blocks: A list of blocks to free ordered by their eviction priority. """ # Materialize the iterable to allow multiple passes. 
blocks_list = list(ordered_blocks) for block in blocks_list: block.ref_cnt -= 1 self.free_block_queue.append_n( [block for block in blocks_list if block.ref_cnt == 0 and not block.is_null] ) def evict_blocks(self, block_ids: set[int]) -> None: """evict blocks from the prefix cache by their block IDs. only evicts blocks that are currently cached (have a hash). blocks with ref_cnt > 0 are not freed from the block pool, only evicted from the prefix cache hash table. Args: block_ids: Set of block IDs to evict from cache. """ for block_id in block_ids: assert block_id < len(self.blocks), ( f"Invalid block_id {block_id} >= {len(self.blocks)}. " f"This indicates a bug in the KV connector - workers should " f"only report block IDs that were allocated by the scheduler." ) block = self.blocks[block_id] self._maybe_evict_cached_block(block) def reset_prefix_cache(self) -> bool: """Reset prefix cache. This function may be used in RLHF flows to invalid prefix caching after the weights are updated, or used for resetting prefix caching status for benchmarking. Returns: bool: True if the prefix cache is successfully reset, False otherwise. """ num_used_blocks = self.num_gpu_blocks - self.get_num_free_blocks() if num_used_blocks != 1: # The null block is always marked as used logger.warning( "Failed to reset prefix cache because some " "blocks (%d) are not freed yet", num_used_blocks - 1, ) return False # Remove all hashes so that no new blocks will hit. self.cached_block_hash_to_block = BlockHashToBlockMap() # Remove all hashes from all blocks. for block in self.blocks: block.reset_hash() if self.metrics_collector: self.metrics_collector.reset() logger.info("Successfully reset prefix cache") if self.enable_kv_cache_events: self.kv_event_queue.append(AllBlocksCleared()) return True def get_num_free_blocks(self) -> int: """Get the number of free blocks in the pool. Returns: The number of free blocks. 
""" return self.free_block_queue.num_free_blocks def get_usage(self) -> float: """Get the KV cache usage. Returns: The KV cache usage (between 0.0 and 1.0). """ # Subtract 1 to account for null block. total_gpu_blocks = self.num_gpu_blocks - 1 if not total_gpu_blocks: return 0 return 1.0 - (self.get_num_free_blocks() / total_gpu_blocks) def take_events(self) -> list[KVCacheEvent]: """Atomically takes all events and clears the queue. Returns: A list of KV cache events. """ if not self.enable_kv_cache_events: return [] events = self.kv_event_queue self.kv_event_queue = [] return events
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/kv_cache_metrics.py
vllm/v1/core/kv_cache_metrics.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """KV cache metrics tracking.""" import random import time from collections import deque from typing import TYPE_CHECKING if TYPE_CHECKING: from vllm.v1.core.kv_cache_utils import KVCacheBlock from vllm.v1.metrics.stats import KVCacheEvictionEvent class BlockMetricsState: """Tracks lifecycle metrics for a single KV cache block.""" def __init__(self): now_ns = time.monotonic_ns() self.birth_time_ns = now_ns self.last_access_ns = now_ns # Bounded to prevent unbounded growth if a block is accessed many times. self.access_history: deque[int] = deque(maxlen=4) def record_access(self) -> None: now_ns = time.monotonic_ns() self.last_access_ns = now_ns self.access_history.append(now_ns) def get_lifetime_seconds(self) -> float: now_ns = time.monotonic_ns() return (now_ns - self.birth_time_ns) / 1e9 def get_idle_time_seconds(self) -> float: now_ns = time.monotonic_ns() return (now_ns - self.last_access_ns) / 1e9 def get_reuse_gaps_seconds(self) -> list[float]: if len(self.access_history) < 2: return [] history = list(self.access_history) return [(history[i] - history[i - 1]) / 1e9 for i in range(1, len(history))] class KVCacheMetricsCollector: """Collects KV cache residency metrics with sampling.""" def __init__(self, sample_rate: float = 0.01): assert 0 < sample_rate <= 1.0, ( f"sample_rate must be in (0, 1.0], got {sample_rate}" ) self.sample_rate = sample_rate self.block_metrics: dict[int, BlockMetricsState] = {} self._eviction_events: list[KVCacheEvictionEvent] = [] def should_sample_block(self) -> bool: return random.random() < self.sample_rate def on_block_allocated(self, block: "KVCacheBlock") -> None: if self.should_sample_block(): self.block_metrics[block.block_id] = BlockMetricsState() def on_block_accessed(self, block: "KVCacheBlock") -> None: metrics = self.block_metrics.get(block.block_id) if metrics: metrics.record_access() def on_block_evicted(self, 
block: "KVCacheBlock") -> None: metrics = self.block_metrics.pop(block.block_id, None) if not metrics: return lifetime = metrics.get_lifetime_seconds() idle_time = metrics.get_idle_time_seconds() reuse_gaps = tuple(metrics.get_reuse_gaps_seconds()) self._eviction_events.append( KVCacheEvictionEvent( lifetime_seconds=lifetime, idle_seconds=idle_time, reuse_gaps_seconds=reuse_gaps, ) ) def reset(self) -> None: """Clear all state on cache reset.""" self.block_metrics.clear() self._eviction_events.clear() def drain_events(self) -> list[KVCacheEvictionEvent]: events = self._eviction_events self._eviction_events = [] return events
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/single_type_kv_cache_manager.py
vllm/v1/core/single_type_kv_cache_manager.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import itertools from abc import ABC, abstractmethod from collections import defaultdict from collections.abc import Sequence from vllm.utils.math_utils import cdiv from vllm.v1.core.block_pool import BlockPool from vllm.v1.core.kv_cache_utils import BlockHashList, KVCacheBlock from vllm.v1.kv_cache_interface import ( ChunkedLocalAttentionSpec, CrossAttentionSpec, FullAttentionSpec, KVCacheSpec, MambaSpec, MLAAttentionSpec, SinkFullAttentionSpec, SlidingWindowSpec, ) from vllm.v1.request import Request class SingleTypeKVCacheManager(ABC): """ An abstract base class for a manager that handle the kv cache management logic of one specific type of attention layer. """ def __init__( self, kv_cache_spec: KVCacheSpec, block_pool: BlockPool, enable_caching: bool, kv_cache_group_id: int, dcp_world_size: int = 1, pcp_world_size: int = 1, ) -> None: """ Initializes the SingleTypeKVCacheManager. Args: kv_cache_spec: The kv_cache_spec for this manager. block_pool: The block pool. kv_cache_group_id: The id of the kv cache group of this manager. """ self.block_size = kv_cache_spec.block_size self.dcp_world_size = dcp_world_size self.pcp_world_size = pcp_world_size if dcp_world_size * pcp_world_size > 1: self.block_size *= dcp_world_size * pcp_world_size self.kv_cache_spec = kv_cache_spec self.block_pool = block_pool self.enable_caching = enable_caching # Mapping from request ID to blocks to track the blocks allocated # for each request, so that we can free the blocks when the request # is finished. self.req_to_blocks: defaultdict[str, list[KVCacheBlock]] = defaultdict(list) # {req_id: The number of cached blocks for this given request} # This is used to track the number of cached blocks for each request. # This is only used to track the RUNNING requests, we do not track the # data for preempted ones. 
self.num_cached_block: dict[str, int] = {} self.kv_cache_group_id = kv_cache_group_id self._null_block = block_pool.null_block def get_num_blocks_to_allocate( self, request_id: str, num_tokens: int, new_computed_blocks: Sequence[KVCacheBlock], total_computed_tokens: int, ) -> int: """ Get the number of blocks needed to be allocated for the request. Args: request_id: The request ID. num_tokens: The total number of tokens that need a slot (including tokens that are already allocated). new_computed_blocks: The new computed blocks just hitting the prefix caching. total_computed_tokens: Include both local and external computed tokens. Returns: The number of blocks to allocate. """ num_required_blocks = cdiv(num_tokens, self.block_size) num_req_blocks = len(self.req_to_blocks.get(request_id, ())) if request_id in self.num_cached_block: # Fast-path: a running request won't have any new prefix-cache hits. assert len(new_computed_blocks) == 0 # NOTE: With speculative decoding, request's blocks may be allocated # for draft tokens which are later rejected. In this case, # num_required_blocks may be smaller than num_req_blocks. return max(num_required_blocks - num_req_blocks, 0) num_skipped_tokens = self.get_num_skipped_tokens(total_computed_tokens) num_local_computed_blocks = len(new_computed_blocks) + num_req_blocks # Number of whole blocks that are skipped by the attention window. # If nothing is skipped, this is 0. num_skipped_blocks = num_skipped_tokens // self.block_size # We need blocks for the non-skipped suffix. If there are still # local-computed blocks inside the window, they contribute to the # required capacity; otherwise, skipped blocks dominate. num_new_blocks = max( num_required_blocks - max(num_skipped_blocks, num_local_computed_blocks), 0, ) # Among the `new_computed_blocks`, the first `num_skipped_blocks` worth # of blocks are skipped; `num_req_blocks` of those may already be in # `req_to_blocks`, so only skip the remainder from `new_computed_blocks`. 
num_skipped_new_computed_blocks = max(0, num_skipped_blocks - num_req_blocks) # If a computed block is an eviction candidate (in the free queue and # ref_cnt == 0), it will be removed from the free queue when touched by # the allocated request, so we must count it in the free-capacity check. num_evictable_blocks = sum( blk.ref_cnt == 0 and not blk.is_null for blk in new_computed_blocks[num_skipped_new_computed_blocks:] ) return num_new_blocks + num_evictable_blocks def allocate_new_computed_blocks( self, request_id: str, new_computed_blocks: Sequence[KVCacheBlock], num_local_computed_tokens: int, num_external_computed_tokens: int, ) -> None: """ Add the new computed blocks to the request. This involves three steps: 1. Touch the computed blocks to make sure they won't be evicted. 1.5. (Optional) For sliding window, skip blocks are padded with null blocks. 2. Add the remaining computed blocks. 3. (Optional) For KV connectors, allocate new blocks for external computed tokens (if any). Args: request_id: The request ID. new_computed_blocks: The new computed blocks just hitting the prefix cache. num_local_computed_tokens: The number of local computed tokens. num_external_computed_tokens: The number of external computed tokens. """ if request_id in self.num_cached_block: # Fast-path: a running request won't have any new prefix-cache hits. # It should not have any new computed blocks. assert len(new_computed_blocks) == 0 return # A new request. req_blocks = self.req_to_blocks[request_id] assert len(req_blocks) == 0 num_total_computed_tokens = ( num_local_computed_tokens + num_external_computed_tokens ) num_skipped_tokens = self.get_num_skipped_tokens(num_total_computed_tokens) num_skipped_blocks = num_skipped_tokens // self.block_size if num_skipped_blocks > 0: # It is possible that all new computed blocks are skipped when # num_skipped_blocks > len(new_computed_blocks). 
new_computed_blocks = new_computed_blocks[num_skipped_blocks:] # Some external computed tokens may be skipped too. num_external_computed_tokens = min( num_total_computed_tokens - num_skipped_tokens, num_external_computed_tokens, ) # Touch the computed blocks to make sure they won't be evicted. if self.enable_caching: self.block_pool.touch(new_computed_blocks) else: assert not any(new_computed_blocks), ( "Computed blocks should be empty when prefix caching is disabled" ) # Skip blocks are padded with null blocks. req_blocks.extend([self._null_block] * num_skipped_blocks) # Add the remaining computed blocks. req_blocks.extend(new_computed_blocks) # All cached hits (including skipped nulls) are already cached; mark # them so cache_blocks() will not try to re-cache blocks that already # have a block_hash set. self.num_cached_block[request_id] = len(req_blocks) if num_external_computed_tokens > 0: # Allocate new blocks for external computed tokens. allocated_blocks = self.block_pool.get_new_blocks( cdiv(num_total_computed_tokens, self.block_size) - len(req_blocks) ) req_blocks.extend(allocated_blocks) def allocate_new_blocks( self, request_id: str, num_tokens: int ) -> list[KVCacheBlock]: """ Allocate new blocks for the request to give it at least `num_tokens` token slots. Args: request_id: The request ID. num_tokens: The total number of tokens that need a slot (including tokens that are already allocated). Returns: The new allocated blocks. """ req_blocks = self.req_to_blocks[request_id] num_required_blocks = cdiv(num_tokens, self.block_size) num_new_blocks = num_required_blocks - len(req_blocks) if num_new_blocks <= 0: return [] else: new_blocks = self.block_pool.get_new_blocks(num_new_blocks) req_blocks.extend(new_blocks) return new_blocks def cache_blocks(self, request: Request, num_tokens: int) -> None: """ Cache the blocks for the request. Args: request: The request. 
num_tokens: The total number of tokens that need to be cached (including tokens that are already cached). """ num_cached_blocks = self.num_cached_block.get(request.request_id, 0) num_full_blocks = num_tokens // self.block_size if num_cached_blocks >= num_full_blocks: return self.block_pool.cache_full_blocks( request=request, blocks=self.req_to_blocks[request.request_id], num_cached_blocks=num_cached_blocks, num_full_blocks=num_full_blocks, block_size=self.block_size, kv_cache_group_id=self.kv_cache_group_id, ) self.num_cached_block[request.request_id] = num_full_blocks def free(self, request_id: str) -> None: """ Free the blocks for the request. Args: request_id: The request ID. """ # Default to [] in case a request is freed (aborted) before alloc. req_blocks = self.req_to_blocks.pop(request_id, []) # Free blocks in reverse order so that the tail blocks are # freed first. ordered_blocks = reversed(req_blocks) self.block_pool.free_blocks(ordered_blocks) self.num_cached_block.pop(request_id, None) @abstractmethod def get_num_common_prefix_blocks(self, running_request_id: str) -> int: """ Get the number of common prefix blocks for all requests with allocated KV cache. Args: running_request_id: The request ID. Returns: The number of common prefix blocks for all requests with allocated KV cache. """ raise NotImplementedError @classmethod @abstractmethod def find_longest_cache_hit( cls, block_hashes: BlockHashList, max_length: int, kv_cache_group_ids: list[int], block_pool: BlockPool, kv_cache_spec: KVCacheSpec, use_eagle: bool, alignment_tokens: int, dcp_world_size: int = 1, pcp_world_size: int = 1, ) -> tuple[list[KVCacheBlock], ...]: """ Get the longest cache hit prefix of the blocks that is not longer than `max_length`. The prefix should be a common prefix hit for all the kv cache groups in `kv_cache_group_ids`. If no cache hit is found, return an empty list. 
If eagle is enabled, drop the last matched block to force recompute the last block to get the required hidden states for eagle drafting head. Need to be customized for each attention type. Args: block_hashes: The block hashes of the request. max_length: The maximum length of the cache hit prefix. kv_cache_group_ids: The ids of the kv cache groups. block_pool: The block pool. kv_cache_spec: The kv cache spec. use_eagle: Whether to use eagle. alignment_tokens: The returned cache hit length (in tokens) should be a multiple of this value (in tokens). By default, it should be set to the block_size. dcp_world_size: The world size of decode context parallelism. pcp_world_size: The world size of prefill context parallelism. Returns: A list of cached blocks with skipped blocks replaced by null block for each kv cache group in `kv_cache_group_ids`. Return a list of length `len(kv_cache_group_ids)`, where the i-th element is a list of cached blocks for the i-th kv cache group in `kv_cache_group_ids`. For example, sliding window manager should return a list like ([NULL, NULL, KVCacheBlock(7), KVCacheBlock(8)]) for block size 4 and sliding window 8 and len(kv_cache_group_ids) = 1. """ raise NotImplementedError def remove_skipped_blocks( self, request_id: str, total_computed_tokens: int ) -> None: """ Remove and free the blocks that are no longer needed for attention computation. The removed blocks should be replaced by null_block. This function depends on `get_num_skipped_tokens`, which need to be implemented differently for each attention type. Args: request_id: The request ID. total_computed_tokens: The total number of computed tokens, including local computed tokens and external computed tokens. """ # Remove the blocks that will be skipped during attention computation. num_skipped_tokens = self.get_num_skipped_tokens(total_computed_tokens) if num_skipped_tokens <= 0: # This indicates that ALL tokens are inside attention window. 
# Thus we do not need to free any blocks outside attention window. # A typical case is full attention that we never free any token # before the request is finished. return blocks = self.req_to_blocks[request_id] num_skipped_blocks = num_skipped_tokens // self.block_size # `num_skipped_tokens` may include tokens that haven't been allocated yet # (e.g., when the attention window moves into the external computed tokens # range), so we must cap to the number of blocks that currently exist for # this request. num_skipped_blocks = min(num_skipped_blocks, len(blocks)) removed_blocks: list[KVCacheBlock] = [] # Because the block starts from index 0, the num_skipped_block-th block # corresponds to index num_skipped_blocks - 1. for i in range(num_skipped_blocks - 1, -1, -1): if blocks[i] == self._null_block: # If the block is already a null block, the blocks before it # should also have been set to null blocks by the previous calls # to this function. break removed_blocks.append(blocks[i]) blocks[i] = self._null_block self.block_pool.free_blocks(removed_blocks) def get_num_skipped_tokens(self, num_computed_tokens: int) -> int: """ Get the number of tokens that will be skipped for attention computation. Args: num_computed_tokens: The number of tokens that have been computed. Returns: The number of tokens that will be skipped for attention computation. """ # The default behavior is to not skip any tokens. 
return 0 class FullAttentionManager(SingleTypeKVCacheManager): @classmethod def find_longest_cache_hit( cls, block_hashes: BlockHashList, max_length: int, kv_cache_group_ids: list[int], block_pool: BlockPool, kv_cache_spec: KVCacheSpec, use_eagle: bool, alignment_tokens: int, dcp_world_size: int = 1, pcp_world_size: int = 1, ) -> tuple[list[KVCacheBlock], ...]: assert isinstance( kv_cache_spec, FullAttentionSpec | ChunkedLocalAttentionSpec ), ( "FullAttentionManager can only be used for full attention " "and chunked local attention groups" ) computed_blocks: tuple[list[KVCacheBlock], ...] = tuple( [] for _ in range(len(kv_cache_group_ids)) ) block_size = kv_cache_spec.block_size if dcp_world_size * pcp_world_size > 1: block_size *= dcp_world_size * pcp_world_size max_num_blocks = max_length // block_size for block_hash in itertools.islice(block_hashes, max_num_blocks): # block_hashes is a chain of block hashes. If a block hash is not # in the cached_block_hash_to_id, the following block hashes are # not computed yet for sure. if cached_block := block_pool.get_cached_block( block_hash, kv_cache_group_ids ): for computed, cached in zip(computed_blocks, cached_block): computed.append(cached) else: break if use_eagle and computed_blocks[0]: # Need to drop the last matched block if eagle is enabled. for computed in computed_blocks: computed.pop() while ( block_size != alignment_tokens # Faster for common case. 
and len(computed_blocks[0]) * block_size % alignment_tokens != 0 ): for computed in computed_blocks: computed.pop() return computed_blocks def get_num_common_prefix_blocks(self, running_request_id: str) -> int: blocks = self.req_to_blocks[running_request_id] num_common_blocks = 0 for block in blocks: if block.ref_cnt == len(self.req_to_blocks): num_common_blocks += 1 else: break return num_common_blocks class SlidingWindowManager(SingleTypeKVCacheManager): def __init__( self, kv_cache_spec: SlidingWindowSpec, block_pool: BlockPool, **kwargs ) -> None: super().__init__(kv_cache_spec, block_pool, **kwargs) self.sliding_window = kv_cache_spec.sliding_window self._null_block = block_pool.null_block @classmethod def find_longest_cache_hit( cls, block_hashes: BlockHashList, max_length: int, kv_cache_group_ids: list[int], block_pool: BlockPool, kv_cache_spec: KVCacheSpec, use_eagle: bool, alignment_tokens: int, dcp_world_size: int = 1, pcp_world_size: int = 1, ) -> tuple[list[KVCacheBlock], ...]: assert isinstance(kv_cache_spec, SlidingWindowSpec), ( "SlidingWindowManager can only be used for sliding window groups" ) assert dcp_world_size == 1, "DCP not support sliding window attn now." assert pcp_world_size == 1, "PCP not support sliding window attn now." # The number of contiguous blocks needed for prefix cache hit. # -1 since the input token itself is also included in the window sliding_window_contiguous_blocks = cdiv( kv_cache_spec.sliding_window - 1, kv_cache_spec.block_size ) if use_eagle: # Need to drop the last matched block if eagle is enabled. For # sliding window layer, we achieve this by increasing the number of # contiguous blocks needed for prefix cache hit by one and dropping # the last matched block. 
sliding_window_contiguous_blocks += 1 # TODO: reduce i by sliding_window_contiguous_blocks when cache miss, to # optimize the time complexity from O(max_num_blocks) to # O(max_num_blocks / sliding_window_contiguous_blocks + # sliding_window_contiguous_blocks), # which is good for low cache hit rate scenarios. max_num_blocks = max_length // kv_cache_spec.block_size computed_blocks = tuple( [block_pool.null_block] * max_num_blocks for _ in range(len(kv_cache_group_ids)) ) block_size = kv_cache_spec.block_size num_contiguous_blocks = 0 match_found = False # Search from right to left and early stop when a match is found. for i in range(max_num_blocks - 1, -1, -1): if cached_block := block_pool.get_cached_block( block_hashes[i], kv_cache_group_ids ): # Skip prefix matching check if the block is not aligned with # `alignment_tokens`. if ( num_contiguous_blocks == 0 and block_size != alignment_tokens # Faster for common case. and (i + 1) * block_size % alignment_tokens != 0 ): continue # Add the cached block to the computed blocks. for computed, cached in zip(computed_blocks, cached_block): computed[i] = cached num_contiguous_blocks += 1 if num_contiguous_blocks >= sliding_window_contiguous_blocks: # Trim the trailing blocks. # E.g., [NULL, NULL, 8, 3, NULL, 9] -> [NULL, NULL, 8, 3] # when sliding_window_contiguous_blocks=2. for computed in computed_blocks: del computed[i + num_contiguous_blocks :] match_found = True break else: num_contiguous_blocks = 0 if not match_found: # The first `num_contiguous_blocks` is a cache hit even if # `num_contiguous_blocks < sliding_window_contiguous_blocks`. for computed in computed_blocks: del computed[num_contiguous_blocks:] while ( block_size != alignment_tokens # Faster for common case. 
and len(computed_blocks[0]) * block_size % alignment_tokens != 0 ): for computed in computed_blocks: computed.pop() if use_eagle and computed_blocks[0]: assert kv_cache_spec.block_size == alignment_tokens, ( "aligned_length is not compatible with eagle now" ) for computed in computed_blocks: computed.pop() return computed_blocks def get_num_skipped_tokens(self, num_computed_tokens: int) -> int: """ Get the number of tokens that will be skipped for attention computation. For sliding window, this corresponds to the tokens that are prior to the current sliding window. Example: sliding_window=4, num_computed_tokens=7 Tokens: [ 0 1 2 3 4 5 6 7 ] | ---- computed -----| ^ next token to be computed |-----------| sliding window for next token |--skipped---| The current window contains tokens 4~7. Tokens 0~3 will be skipped for attention computation since they are outside the sliding window. Thus, get_num_skipped_tokens(7) == 4. Args: num_computed_tokens: The number of tokens that have been computed. Returns: The number of tokens that will be skipped for attention computation. """ return max(0, num_computed_tokens - self.sliding_window + 1) def get_num_common_prefix_blocks(self, running_request_id: str) -> int: """ NOTE(Chen): The prefix blocks are null blocks for sliding window layers. So it's not correct to count ref_cnt like FullAttentionManager. Return 0 here for correctness. Need to support cascade attention + sliding window in the future. 
""" return 0 class ChunkedLocalAttentionManager(SingleTypeKVCacheManager): def __init__( self, kv_cache_spec: ChunkedLocalAttentionSpec, block_pool: BlockPool, **kwargs ) -> None: super().__init__(kv_cache_spec, block_pool, **kwargs) self.attention_chunk_size = kv_cache_spec.attention_chunk_size self._null_block = block_pool.null_block @classmethod def find_longest_cache_hit( cls, block_hashes: BlockHashList, max_length: int, kv_cache_group_ids: list[int], block_pool: BlockPool, kv_cache_spec: KVCacheSpec, use_eagle: bool, alignment_tokens: int, dcp_world_size: int = 1, pcp_world_size: int = 1, ) -> tuple[list[KVCacheBlock], ...]: """ For chunked local attention, we need to find the longest cache hit prefix of the blocks that is not longer than `max_length`. The prefix should be a common prefix hit for all the kv cache groups in `kv_cache_group_ids`. If no cache hit is found, return an empty list. note we mark as computed if the whole block is outside of the local window, and set the block as null. Examples: 1. Attention chunk size of 8, block size of 4, max length of 15 for next token at 15th (zero-indexed), 8th - 14th tokens are in the window(needs lookup), 0th - 7th are not in the window, so they are already marked as computed. We check the complete block3 (8th - 11th tokens), Assume block 3 is hit, we will return [null, null, block 3], otherwise, we return [null, null] 2. Attention chunk size of 8, block size of 4, max length of 16 for next token at 16th (zero-indexed), 0th - 15th tokens are not in the window, so they are already marked as computed. we return 4 blocks[null, null, null, null] Args: block_hashes: The block hashes of the request. max_length: The maximum length of the cache hit prefix. kv_cache_group_ids: The ids of the kv cache groups. block_pool: The block pool. kv_cache_spec: The kv cache spec. use_eagle: Whether to use eagle. dcp_world_size: The world size of decode context parallelism. 
pcp_world_size: The world size of prefill context parallelism. alignment_tokens: The returned cache hit length (in tokens) should be a multiple of this value (in tokens). Returns: A list of cached blocks """ assert isinstance(kv_cache_spec, ChunkedLocalAttentionSpec), ( "ChunkedLocalAttentionManager can only be used for " + "chunked local attention groups" ) assert use_eagle is False, ( "Hybrid KV cache is not supported for " + "eagle + chunked local attention." ) assert dcp_world_size == 1, "DCP not support chunked local attn now." assert pcp_world_size == 1, "PCP not support chunked local attn now." assert kv_cache_spec.block_size == alignment_tokens, ( "KV cache groups with different block sizes are not compatible with " "chunked local attention now" ) max_num_blocks = max_length // kv_cache_spec.block_size if max_length > 0: local_attention_start_idx = ( max_length // kv_cache_spec.attention_chunk_size * kv_cache_spec.attention_chunk_size ) else: local_attention_start_idx = 0 # we marked blocks out of window as computed # with null blocks, and blocks inside window based on cache lookup # result [null] [null] ... [null] [hit block 1 (1st block contain # last window)] [hit block 2] ... [hit block x] local_attention_start_block_idx = ( local_attention_start_idx // kv_cache_spec.block_size ) computed_blocks: tuple[list[KVCacheBlock], ...] = tuple( [block_pool.null_block] * local_attention_start_block_idx for _ in range(len(kv_cache_group_ids)) ) for i in range(local_attention_start_block_idx, max_num_blocks): block_hash = block_hashes[i] if cached_block := block_pool.get_cached_block( block_hash, kv_cache_group_ids ): for computed, cached in zip(computed_blocks, cached_block): computed.append(cached) else: break return computed_blocks def get_num_skipped_tokens(self, num_computed_tokens: int) -> int: """ Get the number of tokens that will be skipped for attention computation. 
For chunked local attention, this corresponds to the tokens that are on the left side of the current chunk. Example 1: chunk size = 8, num_computed_tokens = 13 Tokens: [ 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15 ] ... | ----- computed ---------------| ^^ next token to be computed |----------------| <-- attention window for next token |--- skipped -----| Output: get_num_skipped_tokens(13) == 8 Example 2: chunk size = 8, num_computed_tokens = 8 Tokens: [ 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15 ] ... | --- computed ---| ^ next token to be computed |--| <-- attention window for next token | --- skipped ----| Output: get_num_skipped_tokens(8) == 8 Example 3: chunk size = 8, num_computed_tokens = 7 Tokens: [ 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15 ] ... |---computed---| ^ next token to be computed |-----------------| <-- attention window for next token no token should be skipped. Output: get_num_skipped_tokens(7) == 0 Args: num_computed_tokens: The number of tokens that have been computed. Returns: The number of tokens that will be skipped for attention computation. """ num_skipped_tokens = ( num_computed_tokens // self.attention_chunk_size ) * self.attention_chunk_size return num_skipped_tokens def get_num_common_prefix_blocks(self, running_request_id: str) -> int: """ cascade attention is not supported by chunked local attention. """ return 0 class MambaManager(SingleTypeKVCacheManager): @classmethod def find_longest_cache_hit( cls, block_hashes: BlockHashList, max_length: int, kv_cache_group_ids: list[int], block_pool: BlockPool, kv_cache_spec: KVCacheSpec, use_eagle: bool, alignment_tokens: int, dcp_world_size: int = 1, pcp_world_size: int = 1, ) -> tuple[list[KVCacheBlock], ...]: assert isinstance(kv_cache_spec, MambaSpec), ( "MambaManager can only be used for mamba groups" ) assert dcp_world_size == 1, "DCP not support mamba now." assert pcp_world_size == 1, "PCP not support mamba now." computed_blocks: tuple[list[KVCacheBlock], ...] 
= tuple( [] for _ in range(len(kv_cache_group_ids)) ) block_size = kv_cache_spec.block_size max_num_blocks = max_length // block_size # Search from right to left and early stop when a match is found. for i in range(max_num_blocks - 1, -1, -1): if cached_block := block_pool.get_cached_block( block_hashes[i], kv_cache_group_ids ): # When enable Mamba prefix caching, `block_size` will be aligned # across full attention layers and Mamba layers to ensure the # prefix hit length aligned at block if ( block_size != alignment_tokens # Faster for common case. and (i + 1) * block_size % alignment_tokens != 0 ): continue
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/encoder_cache_manager.py
vllm/v1/core/encoder_cache_manager.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections import OrderedDict from collections.abc import Mapping from typing import TYPE_CHECKING from vllm.logger import init_logger from vllm.multimodal import MultiModalRegistry from vllm.v1.request import Request if TYPE_CHECKING: from vllm.config import ModelConfig, SchedulerConfig logger = init_logger(__name__) class EncoderCacheManager: """Manages caching of encoder outputs for multimodal models in vLLM V1. The EncoderCacheManager handles the lifecycle of multimodal encoder outputs (such as vision embeddings from images) during request processing. It provides memory-aware caching to avoid recomputing encoder outputs when the same multimodal inputs appear in different stages of request processing. This manager is particularly important for: - Vision-language models (e.g., LLaVA) where image encoder outputs are cached - Any multimodal model where encoder computation is expensive and cacheable The cache operates at the granularity of individual multimodal input items within requests, allowing for fine-grained memory management and enabling chunked processing of multimodal inputs. Cache is enabled to share embeddings of same multimodal data item (identified by their hash value) between different requests, and eviction takes place at allocation time when there's no free space for new embeddings. Oldest cached embeddings with no request referenced will be first evicted. NOTE: The EncoderCacheManager operates on the level of multimodal embeddings instead of encoder tokens (i.e. all tokens that represent the multimodal data in the input sequence). This means all break/text tokens in-between multimodal embeddings are not considered with respect to the cache size and the number of free slots. Args: cache_size: Limit the size of the cache, measured by the number of encoder embeddings from the input sequence. 
Attributes: cache_size: Total cache capacity in encoder embeddings. num_free_slots: Current available cache capacity in encoder embeddings. num_freeable_slots: Capacity that can be immediately reclaimed by evicting entries with zero references (in encoder embeddings). cached: Mapping from mm_hash to a set of request IDs that currently reference the cached entry. If the set is empty, the entry exists but is not referenced by any request and is eligible for reclamation. freeable: List of tuples (mm_hash, num_encoder_embeds) representing entries whose no current running request is needed and that can be freed to make space when needed. freed: List of mm_hash strings that were actually evicted since the last call to get_freed_mm_hashes(). This list is cleared on return. """ def __init__(self, cache_size: int): self.cache_size = cache_size self.num_free_slots = cache_size self.num_freeable_slots = cache_size # mm_hash of mm_data => ids of requests that reference the mm_data self.cached: dict[str, set[str]] = {} # mm_hash of mm_data => num_encoder_embeds of the mm_data self.freeable: OrderedDict[str, int] = OrderedDict() self.freed: list[str] = [] def check_and_update_cache(self, request: Request, input_id: int) -> bool: """Check if encoder output for a specific multimodal input is cached. If the encoder output is cached, update `cached` to add the request id to the set of request ids that reference the cached encoder output. If the encoder output was previously not referenced by any request, update `freeable` and `num_freeable_slots` accordingly. 
Args: request: The request containing the multimodal input input_id: Index of the multimodal input within the request Returns: True if the encoder output for this input is already cached """ mm_hash = request.mm_features[input_id].identifier # Not cached at all if mm_hash not in self.cached: return False # Cached but currently not referenced by any request if not self.cached[mm_hash]: num_encoder_embeds = self.freeable.pop(mm_hash) self.num_freeable_slots -= num_encoder_embeds self.cached[mm_hash].add(request.request_id) return True def can_allocate( self, request: Request, input_id: int, encoder_compute_budget: int, num_embeds_to_schedule: int, ) -> bool: """Check if there's sufficient cache space for a multimodal input. If there is, return True and update EncoderCacheManager state. If there is not enough free space in `num_free_slots` but there is enough reclaimable space in `num_freeable_slots`, entries will be evicted from `freeable` (their mm_hash appended to `freed`) until enough space is available, and then this method returns True. Older entries are evicted first. Returns False only if the requested number of tokens exceeds both the free and reclaimable capacities combined. Args: request: The request containing the multimodal input. input_id: Index of the multimodal input within the request. encoder_compute_budget: Number of encoder embeddings allowed to be computed when this method is invoked. num_embeds_to_schedule: Number of encoder embeddings already scheduled to be allocated with cache space when this method is invoked. Returns: True if there's enough capacity to hold the encoder output for this input (possibly after reclaiming `freeable` entries); otherwise False. Note: This method does not allocate physical memory for the encoder output but only the state of EncoderCacheManager. 
""" num_embeds = request.get_num_encoder_embeds(input_id) # Not enough compute budget if num_embeds > encoder_compute_budget: return False num_embeds += num_embeds_to_schedule # Enough free slots if num_embeds <= self.num_free_slots: return True # Not enough reclaimable slots if num_embeds > self.num_freeable_slots: return False # Not enough free slots but enough reclaimable slots # NOTE: Eviction takes place here, but physical memory is not freed # until model runner is notified by the scheduler output. while num_embeds > self.num_free_slots: mm_hash, num_free_embeds = self.freeable.popitem(last=False) del self.cached[mm_hash] self.freed.append(mm_hash) self.num_free_slots += num_free_embeds return True def allocate(self, request: Request, input_id: int) -> None: """Allocate cache space for a multimodal input's encoder output. This reserves cache space for storing the encoder output of the specified multimodal input. The actual encoder output storage happens in the model runner; this method updates the manager's bookkeeping. Note: This method assumes can_allocate() returned True for the same input. """ mm_hash = request.mm_features[input_id].identifier request_id = request.request_id if mm_hash not in self.cached: self.cached[mm_hash] = set() num_encoder_embeds = request.get_num_encoder_embeds(input_id) # NOTE: Encoder cache should always have enough space for encoder inputs # that are scheduled since eviction takes place at can_allocate(). assert self.num_free_slots >= num_encoder_embeds assert self.num_freeable_slots >= num_encoder_embeds self.cached[mm_hash].add(request_id) self.num_free_slots -= num_encoder_embeds self.num_freeable_slots -= num_encoder_embeds def get_cached_input_ids(self, request: Request) -> set[int]: """Get all cached multimodal input IDs for a request. Returns the set of input IDs whose `mm_hash` exists in the cache map. 
This includes entries that are currently unreferenced (and thus present in `freeable`); for such entries, freeing for this request will be a no-op. """ return { input_id for input_id in range(len(request.mm_features)) if request.mm_features[input_id].identifier in self.cached } def free_encoder_input(self, request: Request, input_id: int) -> None: """Free the request's reference to the encoder input (`mm_data`) When the reference set for the corresponding `mm_hash` becomes empty, the entry is appended to `freeable` and `num_freeable_slots` is increased by the number of encoder embeddings for that input. The entry is NOT physically freed until capacity is needed (e.g., by `can_allocate`). """ req_id = request.request_id mm_hash = request.mm_features[input_id].identifier # The mm_hash not in cache or the req_id set is empty if not self.cached.get(mm_hash, None): return self.cached[mm_hash].discard(req_id) if not self.cached[mm_hash]: num_encoder_embeds = request.get_num_encoder_embeds(input_id) self.freeable[mm_hash] = num_encoder_embeds self.num_freeable_slots += num_encoder_embeds def free(self, request: Request) -> None: """Free all encoder input cache reference held by *request*. For each cached input ID, `free_encoder_input` is invoked. The data stays in memory until eviction is triggered by a future attempt allocation called by 'can_allocate'. Typically called when a request is finished, cancelled, or aborted. """ input_ids = self.get_cached_input_ids(request).copy() for input_id in input_ids: self.free_encoder_input(request, input_id) def get_freed_mm_hashes(self) -> list[str]: """Get and clear the list of recently freed encoder cache entries. Returns: List of mm_hash strings that were actually evicted since the last call to be used by the scheduler to notify workers about which encoder outputs can be removed from their caches. The internal list is cleared after this call. 
""" freed = self.freed self.freed = [] return freed def compute_encoder_budget( model_config: "ModelConfig", scheduler_config: "SchedulerConfig", mm_registry: MultiModalRegistry, ) -> tuple[int, int]: """Compute the encoder cache budget based on the model and scheduler configurations. Returns: - Compute budget for encoder execution, measured in number of tokens from the input sequence. - Space budget for encoder cache size, measured in number of tokens from the input sequence. """ if mm_registry.supports_multimodal_inputs(model_config): max_tokens_by_modality = mm_registry.get_max_tokens_per_item_by_modality( model_config ) return compute_mm_encoder_budget( scheduler_config, max_tokens_by_modality, ) return compute_text_encoder_budget(scheduler_config) def compute_text_encoder_budget(scheduler_config: "SchedulerConfig") -> tuple[int, int]: """Compute the encoder cache budget based on the model and scheduler configurations for a text-only model. Args: scheduler_config: Scheduler configuration. Returns: - Compute budget for encoder execution, in unit of number of tokens in the input sequence. - Space budget for encoder cache size, in unit of number of tokens in the input sequence. """ # Currently text-only encoder-decoder models are not supported return 0, 0 def compute_mm_encoder_budget( scheduler_config: "SchedulerConfig", max_tokens_by_modality: Mapping[str, int], ) -> tuple[int, int]: """Compute the encoder cache budget based on the model and scheduler configurations for a multimodal model. Args: scheduler_config: Scheduler configuration. max_tokens_by_modality: The maximum number of tokens for each non-text modality. Returns: - Compute budget for encoder execution, measured in number of tokens from the input sequence. - Space budget for encoder cache size, measured in number of tokens from the input sequence. 
""" if not max_tokens_by_modality: logger.warning( "All non-text modalities supported by the model have been " "explicitly disabled via limit_mm_per_prompt. Encoder cache will " "not be initialized." ) return 0, 0 max_tokens_per_mm_item = max(max_tokens_by_modality.values()) if ( scheduler_config.disable_chunked_mm_input and max_tokens_per_mm_item > scheduler_config.max_num_batched_tokens ): raise ValueError( "Chunked MM input disabled but max_tokens_per_mm_item " f"({max_tokens_per_mm_item}) is larger than max_num_batched_tokens" f" ({scheduler_config.max_num_batched_tokens}). Please increase " "max_num_batched_tokens." ) encoder_compute_budget = max( scheduler_config.max_num_encoder_input_tokens, max_tokens_per_mm_item ) encoder_cache_size = max( scheduler_config.encoder_cache_size, max_tokens_per_mm_item ) return encoder_compute_budget, encoder_cache_size # NOTE (NickLucche): Temporary implementation for encoder-decoder models that only # use the manager for scheduling purposes. Encoder-decoder models will eventually # utilize the cache and this class will fold into EncoderCacheManager, as # differences with MM models shrink. 
class EncoderDecoderCacheManager(EncoderCacheManager): def __init__(self, cache_size: int): self.cache_size = cache_size self.num_free_slots = cache_size self.freed: list[str] = [] def check_and_update_cache(self, request: Request, input_id: int) -> bool: return False def can_allocate( self, request: Request, input_id: int, encoder_compute_budget: int, num_embeds_to_schedule: int, ) -> bool: num_encoder_embeds = request.get_num_encoder_embeds(input_id) # Not enough compute budget if num_encoder_embeds > encoder_compute_budget: return False num_encoder_embeds += num_embeds_to_schedule # Enough free slots return num_encoder_embeds <= self.num_free_slots def allocate(self, request: Request, input_id: int) -> None: num_encoder_embeds = request.get_num_encoder_embeds(input_id) self.num_free_slots -= num_encoder_embeds mm_hash = request.mm_features[input_id].identifier self.freed.append(mm_hash) def free(self, request: Request) -> None: for input_id in range(len(request.mm_features)): self.free_encoder_input(request, input_id) def get_cached_input_ids(self, request: Request) -> set[int]: return set(range(len(request.mm_features))) def get_freed_mm_hashes(self) -> list[str]: freed = self.freed self.freed = [] return freed def free_encoder_input(self, request: Request, input_id: int) -> None: num_encoder_embeds = request.get_num_encoder_embeds(input_id) self.num_free_slots += num_encoder_embeds
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/kv_cache_coordinator.py
vllm/v1/core/kv_cache_coordinator.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from abc import ABC, abstractmethod from collections.abc import Sequence from math import lcm from vllm.v1.core.block_pool import BlockPool from vllm.v1.core.kv_cache_metrics import KVCacheMetricsCollector from vllm.v1.core.kv_cache_utils import ( BlockHash, BlockHashList, BlockHashListWithBlockSize, KVCacheBlock, ) from vllm.v1.core.single_type_kv_cache_manager import ( CrossAttentionManager, FullAttentionManager, get_manager_for_kv_cache_spec, ) from vllm.v1.kv_cache_interface import ( FullAttentionSpec, KVCacheConfig, KVCacheSpec, ) from vllm.v1.request import Request class KVCacheCoordinator(ABC): """ Coordinate the KV cache of different KV cache groups. """ def __init__( self, kv_cache_config: KVCacheConfig, max_model_len: int, use_eagle: bool, enable_caching: bool, enable_kv_cache_events: bool, dcp_world_size: int, pcp_world_size: int, hash_block_size: int, metrics_collector: KVCacheMetricsCollector | None = None, ): self.kv_cache_config = kv_cache_config self.max_model_len = max_model_len self.enable_caching = enable_caching self.block_pool = BlockPool( kv_cache_config.num_blocks, enable_caching, hash_block_size, enable_kv_cache_events, metrics_collector, ) # Needs special handling for find_longest_cache_hit if eagle is enabled self.use_eagle = use_eagle self.single_type_managers = tuple( get_manager_for_kv_cache_spec( kv_cache_spec=kv_cache_group.kv_cache_spec, block_pool=self.block_pool, enable_caching=enable_caching, kv_cache_group_id=i, dcp_world_size=dcp_world_size, pcp_world_size=pcp_world_size, ) for i, kv_cache_group in enumerate(self.kv_cache_config.kv_cache_groups) ) def get_num_blocks_to_allocate( self, request_id: str, num_tokens: int, new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], num_encoder_tokens: int, total_computed_tokens: int, ) -> int: """ Get the number of blocks needed to be allocated for the request. 
Args: request_id: The request ID. num_tokens: The total number of tokens that need a slot (including tokens that are already allocated). new_computed_blocks: The new computed blocks just hitting the prefix caching. num_encoder_tokens: The number of encoder tokens for allocating blocks for cross-attention. total_computed_tokens: Include both local and external tokens. Returns: The number of blocks to allocate. """ num_blocks_to_allocate = 0 for i, manager in enumerate(self.single_type_managers): if isinstance(manager, CrossAttentionManager): # For cross-attention, we issue a single static allocation # of blocks based on the number of encoder input tokens. num_blocks_to_allocate += manager.get_num_blocks_to_allocate( request_id, num_encoder_tokens, [], 0 ) else: num_blocks_to_allocate += manager.get_num_blocks_to_allocate( request_id, num_tokens, new_computed_blocks[i], total_computed_tokens, ) return num_blocks_to_allocate def allocate_new_computed_blocks( self, request_id: str, new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], num_local_computed_tokens: int, num_external_computed_tokens: int, ) -> None: """ Add the new computed blocks to the request. Optionally allocate new blocks for external computed tokens (if any). Args: request_id: The request ID. new_computed_blocks: The new computed blocks just hitting the prefix cache. num_local_computed_tokens: The number of local computed tokens. num_external_computed_tokens: The number of external computed tokens. """ for i, manager in enumerate(self.single_type_managers): manager.allocate_new_computed_blocks( request_id, new_computed_blocks[i], num_local_computed_tokens, num_external_computed_tokens, ) def allocate_new_blocks( self, request_id: str, num_tokens: int, num_encoder_tokens: int = 0, ) -> tuple[list[KVCacheBlock], ...]: """ Allocate new blocks for the request to give it at least `num_tokens` token slots. Args: request_id: The request ID. 
num_tokens: The total number of tokens that need a slot (including tokens that are already allocated). num_encoder_tokens: The number of encoder tokens for allocating blocks for cross-attention. Returns: The new allocated blocks. """ return tuple( manager.allocate_new_blocks( request_id, num_encoder_tokens if isinstance(manager, CrossAttentionManager) else num_tokens, ) for manager in self.single_type_managers ) def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: """ Cache the blocks for the request. Args: request: The request. num_computed_tokens: The total number of tokens that need to be cached (including tokens that are already cached). """ for manager in self.single_type_managers: manager.cache_blocks(request, num_computed_tokens) def free(self, request_id: str) -> None: """ Free the blocks for the request. Args: request_id: The request ID. """ for manager in self.single_type_managers: manager.free(request_id) def get_num_common_prefix_blocks(self, running_request_id: str) -> list[int]: """ Get the number of common prefix blocks for all requests with allocated KV cache for each kv cache group. Args: running_request_id: The request ID of any running request, used to identify the common prefix blocks. Returns: list[int]: The number of common prefix blocks for each kv cache group. """ return [ manager.get_num_common_prefix_blocks(running_request_id) for manager in self.single_type_managers ] def remove_skipped_blocks( self, request_id: str, total_computed_tokens: int ) -> None: """ Remove the blocks that are no longer needed from `blocks` and replace the removed blocks with null_block. Args: request_id: The request ID. total_computed_tokens: The total number of computed tokens, including local computed tokens and external computed tokens. 
""" for manager in self.single_type_managers: manager.remove_skipped_blocks(request_id, total_computed_tokens) def get_blocks(self, request_id: str) -> tuple[list[KVCacheBlock], ...]: """ Get the blocks for the request. """ return tuple( manager.req_to_blocks.get(request_id) or [] for manager in self.single_type_managers ) @abstractmethod def find_longest_cache_hit( self, block_hashes: list[BlockHash], max_cache_hit_length: int, ) -> tuple[tuple[list[KVCacheBlock], ...], int]: pass class KVCacheCoordinatorNoPrefixCache(KVCacheCoordinator): """ KV cache coordinator to use if prefix caching is disabled or unsupported. In contrast to UnitaryKVCacheCoordinator and HybridKVCacheCoordinator, supports arbitrary numbers of KV cache groups (including 0 groups). Does not implement any features related to prefix caching. """ def __init__( self, kv_cache_config: KVCacheConfig, max_model_len: int, use_eagle: bool, enable_kv_cache_events: bool, dcp_world_size: int, pcp_world_size: int, hash_block_size: int, metrics_collector: KVCacheMetricsCollector | None = None, ): super().__init__( kv_cache_config, max_model_len, use_eagle, False, enable_kv_cache_events, dcp_world_size=dcp_world_size, pcp_world_size=pcp_world_size, hash_block_size=hash_block_size, metrics_collector=metrics_collector, ) self.num_single_type_manager = len(self.single_type_managers) def get_num_common_prefix_blocks(self, running_request_id: str) -> list[int]: return [0] * self.num_single_type_manager def find_longest_cache_hit( self, block_hashes: list[BlockHash], max_cache_hit_length: int, ) -> tuple[tuple[list[KVCacheBlock], ...], int]: blocks: tuple[list[KVCacheBlock], ...] = tuple( [] for _ in range(self.num_single_type_manager) ) return blocks, 0 class UnitaryKVCacheCoordinator(KVCacheCoordinator): """ KV cache coordinator for models with only one KV cache group. 
This is the case for models with only one KV cache type, e.g., all attention layers use full attention or all attention layers use sliding window attention. """ def __init__( self, kv_cache_config: KVCacheConfig, max_model_len: int, use_eagle: bool, enable_caching: bool, enable_kv_cache_events: bool, dcp_world_size: int, pcp_world_size: int, hash_block_size: int, metrics_collector: KVCacheMetricsCollector | None = None, ): super().__init__( kv_cache_config, max_model_len, use_eagle, enable_caching, enable_kv_cache_events, dcp_world_size=dcp_world_size, pcp_world_size=pcp_world_size, hash_block_size=hash_block_size, metrics_collector=metrics_collector, ) self.kv_cache_spec = self.kv_cache_config.kv_cache_groups[0].kv_cache_spec self.block_size = self.kv_cache_spec.block_size self.dcp_world_size = dcp_world_size self.pcp_world_size = pcp_world_size if dcp_world_size > 1: self.block_size *= dcp_world_size if pcp_world_size > 1: self.block_size *= pcp_world_size # For models using only Mamba, block_size is set to max_model_len when # prefix caching is disabled, and hash_block_size validation is skipped. 
assert not enable_caching or (hash_block_size == self.block_size), ( "UnitaryKVCacheCoordinator assumes hash_block_size == block_size" ) assert len(self.kv_cache_config.kv_cache_groups) == 1, ( "UnitaryKVCacheCoordinator assumes only one kv cache group" ) def find_longest_cache_hit( self, block_hashes: list[BlockHash], max_cache_hit_length: int, ) -> tuple[tuple[list[KVCacheBlock], ...], int]: hit_blocks = self.single_type_managers[0].find_longest_cache_hit( block_hashes=block_hashes, max_length=max_cache_hit_length, kv_cache_group_ids=[0], block_pool=self.block_pool, kv_cache_spec=self.kv_cache_spec, use_eagle=self.use_eagle, alignment_tokens=self.block_size, dcp_world_size=self.dcp_world_size, pcp_world_size=self.pcp_world_size, ) return hit_blocks, len(hit_blocks[0]) * self.block_size class HybridKVCacheCoordinator(KVCacheCoordinator): """ KV cache coordinator for hybrid models with multiple KV cache types, and thus multiple kv cache groups. To simplify `find_longest_cache_hit`, it only supports the combination of two types of KV cache groups, and one of them must be full attention. May extend to more general cases in the future. """ def __init__( self, kv_cache_config: KVCacheConfig, max_model_len: int, use_eagle: bool, enable_caching: bool, enable_kv_cache_events: bool, dcp_world_size: int, pcp_world_size: int, hash_block_size: int, metrics_collector: KVCacheMetricsCollector | None = None, ): super().__init__( kv_cache_config, max_model_len, use_eagle, enable_caching, enable_kv_cache_events, dcp_world_size=dcp_world_size, pcp_world_size=pcp_world_size, hash_block_size=hash_block_size, metrics_collector=metrics_collector, ) # hash_block_size: the block size used to compute block hashes. # The actual block size usually equals hash_block_size, but in cases where # different KV cache groups have different block sizes, the actual block size # can be a multiple of hash_block_size. 
self.hash_block_size = hash_block_size assert all( g.kv_cache_spec.block_size % hash_block_size == 0 for g in kv_cache_config.kv_cache_groups ), "block_size must be divisible by hash_block_size" assert dcp_world_size == 1, "DCP not support hybrid attn now." assert pcp_world_size == 1, "PCP not support hybrid attn now." self.verify_and_split_kv_cache_groups() def verify_and_split_kv_cache_groups(self) -> None: """ Verifies that the model has exactly two types of KV cache groups, and one of them is full attention. Then, split the kv cache groups into full attention groups and other groups. """ full_attention_spec: FullAttentionSpec | None = None other_spec: KVCacheSpec | None = None self.full_attention_group_ids: list[int] = [] self.other_group_ids: list[int] = [] for i, g in enumerate(self.kv_cache_config.kv_cache_groups): if isinstance(g.kv_cache_spec, FullAttentionSpec): if full_attention_spec is None: full_attention_spec = g.kv_cache_spec else: assert full_attention_spec == g.kv_cache_spec, ( "HybridKVCacheCoordinator assumes exactly one type of " "full attention groups now." ) self.full_attention_group_ids.append(i) else: if other_spec is None: other_spec = g.kv_cache_spec else: assert other_spec == g.kv_cache_spec, ( "HybridKVCacheCoordinator assumes " "exactly one other type of groups now." ) self.other_group_ids.append(i) assert full_attention_spec is not None, ( "HybridKVCacheCoordinator assumes exactly one type of full " "attention groups now." ) assert other_spec is not None, ( "HybridKVCacheCoordinator assumes exactly one type of other groups now." ) self.full_attention_manager_cls = FullAttentionManager self.other_attention_cls = self.single_type_managers[ self.other_group_ids[0] ].__class__ self.full_attention_spec = full_attention_spec self.other_spec = other_spec self.full_attention_block_size = self.full_attention_spec.block_size self.other_block_size = self.other_spec.block_size # The LCM of the block sizes of full attention and other attention. 
# The cache hit length must be a multiple of the LCM of the block sizes # to make sure the cache hit length is a multiple of the block size of # each attention type. Requiring this because we don't support partial # block cache hit yet. self.lcm_block_size = lcm(self.full_attention_block_size, self.other_block_size) if max(self.full_attention_group_ids) < min(self.other_group_ids): self.full_attn_first = True elif max(self.other_group_ids) < min(self.full_attention_group_ids): self.full_attn_first = False else: raise ValueError( "HybridKVCacheCoordinator assumes the full " "attention group ids and other attention group ids " "do not interleave, either full attention group ids " "are before other attention group ids or vice versa." "This is for simplifying merging hit_blocks_full_attn and " "hit_blocks_other_attn to hit_blocks." ) def find_longest_cache_hit( self, block_hashes: list[BlockHash], max_cache_hit_length: int, ) -> tuple[tuple[list[KVCacheBlock], ...], int]: """ Find the longest cache hit for the request. Args: block_hashes: The block hashes of the request. max_cache_hit_length: The maximum length of the cache hit. Returns: A tuple containing: - A list of the cache hit blocks for each single type manager. - The number of tokens of the longest cache hit. """ # First, find the longest cache hit for full attention. if self.full_attention_spec.block_size == self.hash_block_size: # Common case. full_attention_block_hashes: BlockHashList = block_hashes else: # block_size is a multiple of hash_block_size. This happens when different # KV cache groups have different block sizes. In this case, we need to # recalculate block_hashes at the granularity of block_size, using the # original block_hashes (at the granularity of hash_block_size). 
full_attention_block_hashes = BlockHashListWithBlockSize( block_hashes, self.hash_block_size, self.full_attention_spec.block_size ) hit_blocks_full_attn = self.full_attention_manager_cls.find_longest_cache_hit( block_hashes=full_attention_block_hashes, max_length=max_cache_hit_length, kv_cache_group_ids=self.full_attention_group_ids, block_pool=self.block_pool, kv_cache_spec=self.full_attention_spec, use_eagle=self.use_eagle, alignment_tokens=self.lcm_block_size, ) hit_length = len(hit_blocks_full_attn[0]) * self.full_attention_block_size # Next, find the cache hit for the other attention WITHIN # the cache hit of full attention. if self.other_spec.block_size == self.hash_block_size: # Common case. other_block_hashes: BlockHashList = block_hashes else: # Similar to the full attention case, here we need to recalculate # block_hashes at the granularity of block_size, using the original # block_hashes (at the granularity of hash_block_size). other_block_hashes = BlockHashListWithBlockSize( block_hashes, self.hash_block_size, self.other_spec.block_size ) hit_blocks_other_attn = self.other_attention_cls.find_longest_cache_hit( block_hashes=other_block_hashes, max_length=hit_length, kv_cache_group_ids=self.other_group_ids, block_pool=self.block_pool, kv_cache_spec=self.other_spec, use_eagle=self.use_eagle, alignment_tokens=self.lcm_block_size, ) hit_length = len(hit_blocks_other_attn[0]) * self.other_block_size # NOTE: the prefix cache hit length must be a multiple of block_size as # we don't support partial block cache hit yet. The cache hit length # of other attention is ensured to be a multiple of the block size of # full attention layers in current implementation, because hit_length is # a multiple of other attention's block size, and other attention's # block size is a multiple of full attention's block size (verified in # `verify_and_split_kv_cache_groups`). 
assert hit_length % self.full_attention_block_size == 0 # Truncate the full attention cache hit to the length of the # cache hit of the other attention. for group_hit_blocks in hit_blocks_full_attn: del group_hit_blocks[hit_length // self.full_attention_block_size :] # Merge the hit blocks of full attention and other attention. if self.full_attn_first: hit_blocks = hit_blocks_full_attn + hit_blocks_other_attn else: hit_blocks = hit_blocks_other_attn + hit_blocks_full_attn return hit_blocks, hit_length def get_kv_cache_coordinator( kv_cache_config: KVCacheConfig, max_model_len: int, use_eagle: bool, enable_caching: bool, enable_kv_cache_events: bool, dcp_world_size: int, pcp_world_size: int, hash_block_size: int, metrics_collector: KVCacheMetricsCollector | None = None, ) -> KVCacheCoordinator: if not enable_caching: return KVCacheCoordinatorNoPrefixCache( kv_cache_config, max_model_len, use_eagle, enable_kv_cache_events, dcp_world_size=dcp_world_size, pcp_world_size=pcp_world_size, hash_block_size=hash_block_size, metrics_collector=metrics_collector, ) if len(kv_cache_config.kv_cache_groups) == 1: return UnitaryKVCacheCoordinator( kv_cache_config, max_model_len, use_eagle, enable_caching, enable_kv_cache_events, dcp_world_size=dcp_world_size, pcp_world_size=pcp_world_size, hash_block_size=hash_block_size, metrics_collector=metrics_collector, ) return HybridKVCacheCoordinator( kv_cache_config, max_model_len, use_eagle, enable_caching, enable_kv_cache_events, dcp_world_size=dcp_world_size, pcp_world_size=pcp_world_size, hash_block_size=hash_block_size, metrics_collector=metrics_collector, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/kv_cache_utils.py
vllm/v1/core/kv_cache_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """KV-Cache Utilities.""" import copy import os from collections import defaultdict from collections.abc import Callable, Iterable, Iterator, Sequence from dataclasses import dataclass, replace from typing import Any, NewType, TypeAlias, overload from vllm import envs from vllm.config import VllmConfig from vllm.logger import init_logger from vllm.utils.hashing import sha256_cbor, xxhash_cbor from vllm.utils.math_utils import cdiv from vllm.utils.mem_constants import GiB_bytes from vllm.v1.kv_cache_interface import ( ChunkedLocalAttentionSpec, FullAttentionSpec, KVCacheConfig, KVCacheGroupSpec, KVCacheSpec, KVCacheTensor, SlidingWindowSpec, UniformTypeKVCacheSpecs, ) from vllm.v1.request import Request from vllm.v1.utils import tensor_data # BlockHash represents the hash of a single KV-cache block used for # prefix caching. Treating it as a distinct type from `bytes` helps # catch accidental misuse when passing around raw byte strings. BlockHash = NewType("BlockHash", bytes) # `BlockHashWithGroupId` combines a `BlockHash` with its KV cache group ID. # It is represented as raw bytes for compactness and efficiency. The helper # functions below pack/unpack the `BlockHash` and group id into/from the key. BlockHashWithGroupId = NewType("BlockHashWithGroupId", bytes) # ExternalBlockHash is used for reproducible prefix-cache block hashing. # It's a union of `bytes` and `int` to keep backward compatibility # after we default block hashing to use sha256 bytes. ExternalBlockHash: TypeAlias = bytes | int def make_block_hash_with_group_id( block_hash: BlockHash, group_id: int ) -> BlockHashWithGroupId: """Pack a `BlockHash` and group id into a `BlockHashWithGroupId`. The group id is encoded using 4 bytes in big-endian order and appended to the block hash bytes. This representation avoids creating tuples while still allowing us to recover both components when needed. 
""" return BlockHashWithGroupId(block_hash + group_id.to_bytes(4, "big", signed=False)) def get_block_hash(key: BlockHashWithGroupId) -> BlockHash: """Extract the `BlockHash` from a `BlockHashWithGroupId`.""" return BlockHash(key[:-4]) def get_group_id(key: BlockHashWithGroupId) -> int: """Extract the group id from a `BlockHashWithGroupId`.""" return int.from_bytes(key[-4:], "big", signed=False) def maybe_convert_block_hash(hash_bytes: BlockHash) -> ExternalBlockHash: if not envs.VLLM_KV_EVENTS_USE_INT_BLOCK_HASHES: return hash_bytes return int.from_bytes(hash_bytes, byteorder="big") & ((1 << 64) - 1) logger = init_logger(__name__) # The hash seed for the first block of any prefix block sequence. # # We use a random value to avoid hash collisions or PYTHONHASHSEED environment # variable if set such that processes can share the seed if needed. This aligns # with the behavior of Python's hash() function, which also uses a random seed # if PYTHONHASHSEED is not set. # # The function `init_none_hash` initializes this variable globally. NONE_HASH: BlockHash _CBOR_HASH_FUNCTIONS = frozenset({sha256_cbor, xxhash_cbor}) def init_none_hash(hash_fn: Callable[[Any], bytes]): global NONE_HASH hash_seed = os.getenv("PYTHONHASHSEED") if hash_seed is None and hash_fn in _CBOR_HASH_FUNCTIONS: logger.warning( "PYTHONHASHSEED is not set. This will lead to non-reproducible " "block-hashes when using CBOR-based hash functions such as " "sha256_cbor or xxhash_cbor. Consider setting PYTHONHASHSEED to a " "fixed value for reproducibility." ) if hash_seed is None: NONE_HASH = BlockHash(os.urandom(32)) else: NONE_HASH = BlockHash(hash_fn(hash_seed)) @dataclass class KVCacheBlock: """KV-cache block metadata.""" # Block ID, ranging from 0 to num_gpu_blocks - 1. block_id: int # Reference count. ref_cnt: int = 0 # The hash key (block hash + group id) of the block, only available # when the block is full and cached. 
_block_hash: BlockHashWithGroupId | None = None # Used to construct a doubly linked list for free blocks. # These two attributes should only be manipulated by FreeKVCacheBlockQueue. prev_free_block: "KVCacheBlock | None" = None next_free_block: "KVCacheBlock | None" = None # Whether the block is a null block that should never be cached. is_null: bool = False @property def block_hash(self) -> BlockHashWithGroupId | None: return self._block_hash @block_hash.setter def block_hash(self, block_hash: BlockHashWithGroupId): assert self.block_hash is None, ( "The block already has a hash. This should not happen." ) self._block_hash = block_hash def reset_hash(self): """Reset the block hash when the block is evicted.""" self._block_hash = None def __repr__(self) -> str: # Use block_id instead of KVCacheBlock object to avoid calling __repr__ # on KVCacheBlock object recursively. prev_block_id = self.prev_free_block.block_id if self.prev_free_block else None next_block_id = self.next_free_block.block_id if self.next_free_block else None return ( f"KVCacheBlock(block_id={self.block_id}, " f"ref_cnt={self.ref_cnt}, " f"_block_hash={self._block_hash!r}, " f"prev_free_block={prev_block_id}, " f"next_free_block={next_block_id})" ) class FreeKVCacheBlockQueue: """This class organizes a list of KVCacheBlock objects to a doubly linked list of free blocks. We implement this class instead of using Python builtin deque to support removing a block in the middle of the queue in O(1) time. To close the performance gap to the builtin deque which is implemented in C++, this class does not allocate any Python objects when manipulating the linked list. Instead, this class manipulates the prev_free_block and next_free_block attributes of the given blocks. The queue is ordered by block ID in the beginning. When a block is allocated and then freed, it will be appended back with the eviction order: 1. The least recent used block is at the front (LRU). 2. 
If two blocks have the same last accessed time (allocated by the same sequence), the one with more hash tokens (the tail of a block chain) is at the front. Note that we maintain this order by reversing the block order when free blocks of a request. This operation is outside of this class. Args: blocks: A list of KVCacheBlock objects. """ def __init__(self, blocks: list[KVCacheBlock]) -> None: self.num_free_blocks = len(blocks) # Initialize doubly links of consecutive blocks for i in range(self.num_free_blocks): if i > 0: blocks[i].prev_free_block = blocks[i - 1] if i < self.num_free_blocks - 1: blocks[i].next_free_block = blocks[i + 1] # Create a fake head and a tail block for the doubly linked list to # reduce branching in the code # # The implementation guaranteed that the fake head and tail # are NEVER got popped, so we could safely assume each real blocks # in the queue has prev and next blocks. self.fake_free_list_head = KVCacheBlock(block_id=-1) self.fake_free_list_tail = KVCacheBlock(block_id=-1) if self.num_free_blocks > 0: # Connect fake_head and fake_tail to the first and last block # respectively. self.fake_free_list_head.next_free_block = blocks[0] blocks[0].prev_free_block = self.fake_free_list_head self.fake_free_list_tail.prev_free_block = blocks[-1] blocks[-1].next_free_block = self.fake_free_list_tail else: # For empty list, simply connect the fake head and tail. self.fake_free_list_head.next_free_block = self.fake_free_list_tail self.fake_free_list_tail.prev_free_block = self.fake_free_list_head def popleft(self) -> KVCacheBlock: """Pop the first free block and reduce num_free_blocks by 1. Returns: The first free block. """ if ( self.fake_free_list_head.next_free_block is self.fake_free_list_tail or self.fake_free_list_head.next_free_block is None ): assert self.num_free_blocks == 0, ( f"num_free_blocks ({self.num_free_blocks}) is out of sync " "with the free list." 
) raise ValueError("No free blocks available") first_block: KVCacheBlock = self.fake_free_list_head.next_free_block if first_block.next_free_block is None: # This should not happen if the block is from the free list. # It indicates a bug in the caller's logic. raise RuntimeError( "Invalid block found in popleft() " "which doesn't have a valid next_free_block" ) # Connect fake_head and the next block of first_block (i.e. second block # or fake tail). self.fake_free_list_head.next_free_block = first_block.next_free_block first_block.next_free_block.prev_free_block = self.fake_free_list_head # Remove the block from the linked list. first_block.prev_free_block = first_block.next_free_block = None self.num_free_blocks -= 1 return first_block def popleft_n(self, n: int) -> list[KVCacheBlock]: """Pop the first n free blocks and reduce num_free_blocks by n. Args: n: The number of blocks to pop. Returns: A list of n free blocks. """ if n == 0: return [] assert self.num_free_blocks >= n self.num_free_blocks -= n curr_block = self.fake_free_list_head.next_free_block # Pop n blocks from the head of the list ret = [] for _ in range(n): assert curr_block is not None ret.append(curr_block) last_block = curr_block curr_block = curr_block.next_free_block # Reset prev_free_block and next_free_block of all popped blocks last_block.prev_free_block = None last_block.next_free_block = None if curr_block is not None: # The queue is not empty, connect the fake head to # the new first block. self.fake_free_list_head.next_free_block = curr_block curr_block.prev_free_block = self.fake_free_list_head return ret def remove(self, block: KVCacheBlock) -> None: """Remove a block in the free list and reduce num_free_blocks by 1. Args: block: The block to remove. """ if block.prev_free_block is None or block.next_free_block is None: # This should not happen if the block is from the free list. # It indicates a bug in the caller's logic. 
raise RuntimeError(f"remove() called on an invalid block: {block}") # Link the previous block to the next block. block.prev_free_block.next_free_block = block.next_free_block # Link the next block to the previous block. block.next_free_block.prev_free_block = block.prev_free_block # Remove the block from the linked list. block.prev_free_block = block.next_free_block = None self.num_free_blocks -= 1 def append(self, block: KVCacheBlock) -> None: """Put a block back into the free list and increase num_free_blocks by 1. Args: block: The block to append. """ if self.fake_free_list_tail.prev_free_block is None: raise RuntimeError( "prev_free_block of fake_free_list_tail should always exist" ) last_block: KVCacheBlock = self.fake_free_list_tail.prev_free_block # Connect the new block after the last block. last_block.next_free_block = block block.prev_free_block = last_block # Connect the fake tail after the new block. block.next_free_block = self.fake_free_list_tail self.fake_free_list_tail.prev_free_block = block self.num_free_blocks += 1 def append_n(self, blocks: list[KVCacheBlock]) -> None: """Put a list of blocks back into the free list Args: blocks: The blocks to append. """ if len(blocks) == 0: return last_block = self.fake_free_list_tail.prev_free_block assert last_block is not None, ( "prev_free_block of fake_free_list_tail should always exist" ) # Add inter-connections between consecutive blocks for block in blocks: block.prev_free_block = last_block last_block.next_free_block = block last_block = block # Connect the last block of <blocks> to the fake tail last_block.next_free_block = self.fake_free_list_tail self.fake_free_list_tail.prev_free_block = last_block self.num_free_blocks += len(blocks) def get_all_free_blocks(self) -> list[KVCacheBlock]: """Get all free blocks in the free list. Mainly used for testing. Returns: A list of free blocks. 
""" ret = [] if self.fake_free_list_head.next_free_block is None: raise RuntimeError( "next_free_block of fake_free_list_head should always exist" ) # Start from the first block curr_block: KVCacheBlock = self.fake_free_list_head.next_free_block # As long as next_free_block is available, we haven't reached to # the fake tail yet. while curr_block.next_free_block is not None: ret.append(curr_block) curr_block = curr_block.next_free_block return ret def need_extra_keys(request: Request) -> bool: """Check whether the blocks allocated to this request need extra hash keys. Args: request (Request): The request. Returns: bool: Whether blocks allocated to this request need extra hash keys. """ # Multimodal requests need to include the MM hash. # LoRA requests need to include the LoRA name. # Request with provided cache salt need to include the salt. return ( bool(request.mm_features) or (request.lora_request is not None) or (request.cache_salt is not None) ) def _gen_mm_extra_hash_keys( request: Request, start_token_idx: int, end_token_idx: int, start_mm_idx: int ) -> tuple[list[Any], int]: """Generate extra keys related to MultiModal request for block hash computation. For multi-modal inputs, the extra keys are (mm_hash, start_offset) that indicate a mm input contained in the block and its starting offset in the block tokens. Args: request: The request object. start_token_idx: The start token index of the block. end_token_idx: The end token index of the block. start_mm_idx: The start multi-modal index of the block. Returns: A tuple of extra keys and the next multi-modal index. """ extra_keys: list[Any] = [] mm_features = request.mm_features if not mm_features: return extra_keys, start_mm_idx # Note that we assume mm_features are sorted by mm_position.offset. # We do not need to check all mm inputs if the start token index is out of # range. This usually happens in the late prefill phase and decoding phase. 
last_pos = mm_features[-1].mm_position if last_pos.offset + last_pos.length < start_token_idx: return extra_keys, start_mm_idx # Support start_mm_idx == -1 to indicate the last mm input. if start_mm_idx < 0: assert -start_mm_idx <= len(mm_features) start_mm_idx = len(mm_features) + start_mm_idx curr_mm_idx = start_mm_idx while mm_features and curr_mm_idx < len(mm_features): mm_feature = mm_features[curr_mm_idx] assert mm_feature.identifier is not None offset = mm_feature.mm_position.offset length = mm_feature.mm_position.length if end_token_idx > offset: if start_token_idx > offset + length: # This block has passed the current mm input. curr_mm_idx += 1 continue # The block contains the current mm input. extra_keys.append(mm_feature.identifier) if end_token_idx >= offset + length: # If this block contains the end of the current mm input, # move to the next mm input as this block may also contain # the next mm input. curr_mm_idx += 1 else: # Otherwise this block is done with mm inputs. break else: # This block has not reached the current mm input. break return extra_keys, curr_mm_idx def _gen_lora_extra_hash_keys(request: Request) -> list[str]: """Generate extra keys related to LoRA for block hash computation. Args: request: The request object. Returns: Return LoRA name of the request if it is a LoRA request. Return empty list otherwise. """ if not request.lora_request: return [] return [request.lora_request.lora_name] def _gen_prompt_embeds_extra_hash_keys( request: Request, start_token_idx: int, end_token_idx: int ) -> list[bytes]: """Generate extra keys related to prompt embeds for block hash computation. Args: request: The request object. start_token_idx: The start token index of the block. end_token_idx: The end token index of the block. Returns: Return prompt embeddings data of the request if it has prompt embeds. Return empty list otherwise. 
""" if request.prompt_embeds is None: return [] block_prompt_embeds = request.prompt_embeds[start_token_idx:end_token_idx] embeds_bytes = tensor_data(block_prompt_embeds).tobytes() return [embeds_bytes] def generate_block_hash_extra_keys( request: Request, start_token_idx: int, end_token_idx: int, start_mm_idx: int ) -> tuple[tuple[Any, ...] | None, int]: """Generate extra keys for the block hash. The extra keys can come from the multi-modal inputs, request specific metadata (e.g., LoRA names), and data from prompt embeddings. Args: request: The request object. start_token_idx: The start token index of the block. end_token_idx: The end token index of the block. start_mm_idx: The start multi-modal index of the block. Returns: A tuple of extra keys and the next multi-modal index. """ mm_extra_keys: list[Any] mm_extra_keys, new_start_mm_idx = _gen_mm_extra_hash_keys( request, start_token_idx, end_token_idx, start_mm_idx ) lora_extra_keys: list[str] = _gen_lora_extra_hash_keys(request) cache_salt_keys: list[str] = ( [request.cache_salt] if (start_token_idx == 0 and request.cache_salt) else [] ) prompt_embeds_keys = _gen_prompt_embeds_extra_hash_keys( request, start_token_idx, end_token_idx ) extra_keys: list[Any] = ( lora_extra_keys + mm_extra_keys + cache_salt_keys + prompt_embeds_keys ) if not extra_keys: return None, new_start_mm_idx return tuple(extra_keys), new_start_mm_idx def hash_block_tokens( hash_function: Callable[[Any], bytes], parent_block_hash: BlockHash | None, curr_block_token_ids: Sequence[int], extra_keys: tuple[Any, ...] | None = None, ) -> BlockHash: """Computes a hash value corresponding to the contents of a block and the contents of the preceding block(s). The hash value is used for prefix caching. We use LRU cache for this function to avoid recomputing hash values for the same block contents. Args: hash_function: The hash function used to compute block hash. parent_block_hash: The hash of the parent block. None if this is the first block. 
curr_block_token_ids: A list of token ids in the current block. The current block is assumed to be full. extra_keys: Extra keys for the block. Returns: The hash value of the block and the token ids in the block. The entire tuple is used as the hash key of the block. """ if not parent_block_hash: parent_block_hash = NONE_HASH curr_block_token_ids_tuple = tuple(curr_block_token_ids) return BlockHash( hash_function((parent_block_hash, curr_block_token_ids_tuple, extra_keys)) ) def get_request_block_hasher( block_size: int, caching_hash_fn: Callable[[Any], bytes], ) -> Callable[[Request], list[BlockHash]]: """ Returns a function which computes the list of un-computed block hashes of a request.""" def request_block_hasher(request: Request) -> list[BlockHash]: start_token_idx = len(request.block_hashes) * block_size num_tokens = request.num_tokens if start_token_idx + block_size > num_tokens: # Early stop when there no new full blocks created. return [] curr_mm_idx = 0 if start_token_idx > 0: # Set curr_mm_idx = -1 to indicate the last mm input. # Note that since we reach to this branch only when the block is # completed with generated tokens, we only need to consider the # last mm input. curr_mm_idx = -1 prev_block_hash_value = ( request.block_hashes[-1] if request.block_hashes else None ) new_block_hashes: list[BlockHash] = [] while True: end_token_idx = start_token_idx + block_size if end_token_idx > num_tokens: # We only hash full blocks break # MM and LoRA requests need extra keys for block-hash computation. 
extra_keys, curr_mm_idx = generate_block_hash_extra_keys( request, start_token_idx, end_token_idx, curr_mm_idx ) # Compute the hash of the current block block_tokens = request.all_token_ids[start_token_idx:end_token_idx] block_hash = hash_block_tokens( caching_hash_fn, prev_block_hash_value, block_tokens, extra_keys ) new_block_hashes.append(block_hash) start_token_idx += block_size prev_block_hash_value = block_hash return new_block_hashes return request_block_hasher def _check_enough_kv_cache_memory( available_memory: int, get_needed_memory: Callable[[], int], max_model_len: int, estimate_max_model_len: Callable[[int], int], ): if available_memory <= 0: raise ValueError( "No available memory for the cache blocks. " "Try increasing `gpu_memory_utilization` when initializing the engine. " "See https://docs.vllm.ai/en/latest/configuration/conserving_memory/ " "for more details." ) needed_memory = get_needed_memory() if needed_memory > available_memory: estimated_max_len = estimate_max_model_len(available_memory) estimated_msg = "" if estimated_max_len > 0: estimated_msg = ( "Based on the available memory, " f"the estimated maximum model length is {estimated_max_len}. " ) raise ValueError( f"To serve at least one request with the models's max seq len " f"({max_model_len}), ({needed_memory / GiB_bytes:.2f} GiB KV " f"cache is needed, which is larger than the available KV cache " f"memory ({available_memory / GiB_bytes:.2f} GiB). {estimated_msg}" f"Try increasing `gpu_memory_utilization` or decreasing `max_model_len` " f"when initializing the engine. " f"See https://docs.vllm.ai/en/latest/configuration/conserving_memory/ " f"for more details." ) def max_memory_usage_bytes( vllm_config: VllmConfig, kv_cache_specs: Iterable[KVCacheSpec] ) -> int: """ Get the maximum memory usage in bytes for the given KV cache specs. 
""" return sum(spec.max_memory_usage_bytes(vllm_config) for spec in kv_cache_specs) def estimate_max_model_len( vllm_config: VllmConfig, kv_cache_spec: dict[str, KVCacheSpec], available_memory: int, ) -> int: """ Estimates the maximum model length that can fit in the available memory using binary search. This function temporarily modifies max_model_len during estimation but restores the original value before returning, ensuring no side effects. Args: vllm_config: The global VllmConfig kv_cache_spec: The kv cache spec of each attention layer in the model available_memory: Memory available for KV cache in bytes. Returns: The estimated maximum model length that can fit in the available memory. """ # Save the original max_model_len to restore after estimation original_max_model_len = vllm_config.model_config.max_model_len # Define a function to check if a given model length fits in memory def fits_in_memory(model_len: int) -> bool: # Temporarily modify the max_model_len for this calculation vllm_config.model_config.max_model_len = model_len # Calculate memory needed for the given model length memory_needed = max_memory_usage_bytes(vllm_config, kv_cache_spec.values()) return memory_needed <= available_memory try: # Binary search for the maximum model length left, right = 1, original_max_model_len # If even the smallest model length doesn't fit, return 0 if not fits_in_memory(left): return 0 # Binary search for the maximum model length that fits result = 1 while left <= right: mid = (left + right) // 2 if fits_in_memory(mid): result = mid left = mid + 1 else: right = mid - 1 return result finally: # Always restore the original max_model_len to avoid side effects vllm_config.model_config.max_model_len = original_max_model_len def check_enough_kv_cache_memory( vllm_config: VllmConfig, kv_cache_spec: dict[str, KVCacheSpec], available_memory: int, ): """ Checks whether `available_memory` is enough for the KV cache to hold at least one request with the model's max_model_len. 
Args: vllm_config: The global VllmConfig kv_cache_spec: The kv cache spec of each attention layer in the model available_memory: Memory available for KV cache in bytes. Raises: ValueError: If there is not enough memory available for the KV cache. """ # No need to check for available memory if the kv_cache_spec is empty if kv_cache_spec: _check_enough_kv_cache_memory( available_memory, lambda: max_memory_usage_bytes(vllm_config, kv_cache_spec.values()), vllm_config.model_config.max_model_len, lambda am: estimate_max_model_len(vllm_config, kv_cache_spec, am), ) def create_kv_cache_group_specs( kv_cache_spec: dict[str, KVCacheSpec], grouped_layer_names: list[list[str]] ) -> list[KVCacheGroupSpec]: """ Create KVCacheGroupSpec object for each kv cache group layer. The layers in the same group should share the same KVCacheSpec. Args: kv_cache_spec: A mapping from each layer name to its corresponding KVCacheSpec. grouped_layer_names: A list of kv cache groups, where each element is a list of layer names that belong to the same group and should share the same KVCacheSpec. Returns: A list of KVCacheGroupSpec objects, one for each group. """ kv_cache_groups = [] for layer_names_one_group in grouped_layer_names: layer_specs = [ kv_cache_spec[layer_name] for layer_name in layer_names_one_group ] merged_layer_spec = layer_specs[0].merge(layer_specs) kv_cache_groups.append( KVCacheGroupSpec(layer_names_one_group, merged_layer_spec) ) return kv_cache_groups def is_kv_cache_spec_uniform(kv_cache_spec: dict[str, KVCacheSpec]) -> bool: """ Whether all layers in the given KVCacheSpec have the same KV cache spec. Note that we regard FullAttentionSpec with and without sliding window as the same type. Args: kv_cache_spec: The kv cache spec of each attention layer in the model Returns: True if all layers have the same type, False otherwise. """ if not kv_cache_spec: # Encoder-only models do not have KV cache, kv_cache_type can be # regarded as uniform. 
        return True

    try:
        # merge() asserts that all specs are compatible; an AssertionError
        # therefore signals a non-uniform spec set.
        kv_cache_spec_values = list(kv_cache_spec.values())
        _ = kv_cache_spec_values[0].merge(kv_cache_spec_values)
    except AssertionError:
        return False

    return True


def get_max_concurrency_for_kv_cache_config(
    vllm_config: VllmConfig, kv_cache_config: KVCacheConfig
) -> float:
    """
    Get the maximum concurrency for the given KV cache configuration.

    Computed as total blocks divided by the per-request worst-case block
    need (a request at max_model_len), so the result may be fractional.
    """
    num_layer_per_group = max(
        len(group.layer_names) for group in kv_cache_config.kv_cache_groups
    )
    max_memory_usage_per_request = num_layer_per_group * max_memory_usage_bytes(
        vllm_config, (group.kv_cache_spec for group in kv_cache_config.kv_cache_groups)
    )
    # NOTE(review): uses group 0's page size as the uniform per-block size —
    # presumably all groups share it; verify against the grouping logic.
    memory_per_block = (
        kv_cache_config.kv_cache_groups[0].kv_cache_spec.page_size_bytes
        * num_layer_per_group
    )
    num_block_per_request = cdiv(max_memory_usage_per_request, memory_per_block)
    max_concurrency = kv_cache_config.num_blocks / num_block_per_request
    return max_concurrency


def may_override_num_blocks(vllm_config: VllmConfig, num_blocks: int) -> int:
    """
    Override the number of kv cache blocks if `num_gpu_blocks_override` is set.
    """
    if vllm_config.cache_config.num_gpu_blocks_override is not None:
        num_gpu_blocks_override = vllm_config.cache_config.num_gpu_blocks_override
        logger.info(
            "Overriding num_gpu_blocks=%d with num_gpu_blocks_override=%d",
            num_blocks,
            num_gpu_blocks_override,
        )
        num_blocks = num_gpu_blocks_override

    return num_blocks


def get_num_blocks(
    vllm_config: VllmConfig, num_layers: int, available_memory: int, page_size: int
) -> int:
    """
    Get the number of kv cache blocks.

    Args:
        vllm_config: The global VllmConfig
        num_layers: The number of layers
        available_memory: Memory available for KV cache in bytes.
        page_size: The page size of the KV cache.

    Returns:
        The number of blocks (non-negative), possibly overridden by
        `num_gpu_blocks_override`.
    """
    num_blocks = int(available_memory // page_size // num_layers)
    # Clamp at zero so insufficient memory yields 0 blocks rather than a
    # negative count.
    num_blocks = max(num_blocks, 0)
    num_blocks = may_override_num_blocks(vllm_config, num_blocks)
    return num_blocks


def get_uniform_page_size(kv_cache_specs: Iterable[KVCacheSpec]) -> int:
    """
    Get the page size of the KV cache.
""" page_sizes = {layer.page_size_bytes for layer in kv_cache_specs} assert len(page_sizes) == 1 return page_sizes.pop() def _get_kv_cache_groups_uniform_spec( kv_cache_specs: dict[str, KVCacheSpec], ) -> list[KVCacheGroupSpec]: """ Generates the KV cache configuration for a model with the same KV cache spec for all layers. Args: kv_cache_specs: The kv cache spec of each attention layer in the model Returns: The generated KVCacheGroupSpecs """ return create_kv_cache_group_specs(kv_cache_specs, [list(kv_cache_specs.keys())]) def _get_kv_cache_groups_uniform_type( spec: UniformTypeKVCacheSpecs, ) -> list[KVCacheGroupSpec]: """ Generates the KV cache configuration for a model with one type of KV cache but different hidden sizes. All layers are merged into one group. Args: spec: The UniformTypeKVCacheSpecs of the model Returns: The generated KVCacheGroupSpecs """ return [KVCacheGroupSpec(list(spec.kv_cache_specs.keys()), spec)] def is_kv_cache_page_size_uniform(kv_cache_spec: dict[str, KVCacheSpec]) -> bool: """
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/__init__.py
vllm/v1/core/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/kv_cache_manager.py
vllm/v1/core/kv_cache_manager.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import itertools from collections.abc import Sequence from dataclasses import dataclass from typing import Literal, overload from vllm.distributed.kv_events import KVCacheEvent from vllm.logger import init_logger from vllm.v1.core.kv_cache_coordinator import get_kv_cache_coordinator from vllm.v1.core.kv_cache_metrics import KVCacheMetricsCollector from vllm.v1.core.kv_cache_utils import KVCacheBlock from vllm.v1.kv_cache_interface import KVCacheConfig from vllm.v1.metrics.stats import PrefixCacheStats from vllm.v1.request import Request logger = init_logger(__name__) @dataclass class KVCacheBlocks: """ The allocation result of KVCacheManager, work as the interface between Scheduler and KVCacheManager, to hide KVCacheManager's internal data structure from the Scheduler. """ blocks: tuple[Sequence[KVCacheBlock], ...] """ `blocks[i][j]` refers to the i-th kv_cache_group and the j-th block of tokens.We don't use block of tokens as the outer dimension because it assumes all kv_cache_groups have the same number of blocks, which is true for now but will be broken if we want to give different block_size to different kv_cache_groups in the future. Each single type KVCacheBlocks could be represented as: - list[KVCacheBlock] for more than one KVCacheBlock - an empty tuple for requests without KVCacheBlock (a precomputed KVCacheBlocks is in KVCacheManager to avoid GC overhead) """ def __add__(self, other: "KVCacheBlocks") -> "KVCacheBlocks": """Adds two KVCacheBlocks instances.""" return KVCacheBlocks( tuple( list(itertools.chain(blk1, blk2)) for blk1, blk2 in zip(self.blocks, other.blocks) ) ) @overload def get_block_ids( self, allow_none: Literal[False] = False, ) -> tuple[list[int], ...]: ... @overload def get_block_ids( self, allow_none: Literal[True] = True, ) -> tuple[list[int], ...] | None: ... 
def get_block_ids( self, allow_none: bool = False, ) -> tuple[list[int], ...] | None: """ Converts the KVCacheBlocks instance to block_ids. Returns: tuple[list[int], ...]: A tuple of lists where: - the outer tuple corresponds to KV cache groups - each inner list contains the block_ids of the blocks in that group """ if allow_none and all(len(group) == 0 for group in self.blocks): return None return tuple([blk.block_id for blk in group] for group in self.blocks) def get_unhashed_block_ids(self) -> list[int]: """Get block_ids of unhashed blocks from KVCacheBlocks instance.""" assert len(self.blocks) == 1, "Only one group is supported" return [block.block_id for block in self.blocks[0] if block.block_hash is None] def new_empty(self) -> "KVCacheBlocks": """ Creates a new KVCacheBlocks instance with no blocks. """ return KVCacheBlocks(tuple(() for _ in range(len(self.blocks)))) class KVCacheManager: def __init__( self, kv_cache_config: KVCacheConfig, max_model_len: int, hash_block_size: int, enable_caching: bool = True, use_eagle: bool = False, log_stats: bool = False, enable_kv_cache_events: bool = False, dcp_world_size: int = 1, pcp_world_size: int = 1, metrics_collector: KVCacheMetricsCollector | None = None, ) -> None: self.max_model_len = max_model_len self.enable_caching = enable_caching self.use_eagle = use_eagle self.log_stats = log_stats self.metrics_collector = metrics_collector # FIXME: make prefix cache stats conditional on log_stats. We still need # this comment because when the log stats is enabled there are still # potential configs we could expose in the future. 
self.prefix_cache_stats = PrefixCacheStats() if log_stats else None self.coordinator = get_kv_cache_coordinator( kv_cache_config=kv_cache_config, max_model_len=self.max_model_len, use_eagle=self.use_eagle, enable_caching=self.enable_caching, enable_kv_cache_events=enable_kv_cache_events, dcp_world_size=dcp_world_size, pcp_world_size=pcp_world_size, hash_block_size=hash_block_size, metrics_collector=self.metrics_collector, ) self.num_kv_cache_groups = len(kv_cache_config.kv_cache_groups) self.block_pool = self.coordinator.block_pool self.kv_cache_config = kv_cache_config # Pre-constructed KVCacheBlocks with no blocks, callers should use this # via create_kv_cache_blocks instead of creating new ones to avoid GC # overhead. # # We use nested tuples to ensure the empty KVCacheBlocks is immutable. self.empty_kv_cache_blocks = KVCacheBlocks( tuple(() for _ in range(self.num_kv_cache_groups)) ) @property def usage(self) -> float: """Get the KV cache usage. Returns: The KV cache usage (between 0.0 and 1.0). """ return self.block_pool.get_usage() def make_prefix_cache_stats(self) -> PrefixCacheStats | None: """Get (and reset) the prefix cache stats. Returns: The current prefix caching stats, or None if logging is disabled. """ if not self.log_stats: return None stats = self.prefix_cache_stats self.prefix_cache_stats = PrefixCacheStats() return stats def get_computed_blocks(self, request: Request) -> tuple[KVCacheBlocks, int]: """Get the computed (cached) blocks for the request. Note that the computed blocks must be full. Args: request: The request to get the computed blocks. Returns: A tuple containing: - A list of blocks that are computed for the request. - The number of computed tokens. """ # We skip finding the prefix cache hit when prefix caching is # disabled or the request is marked as skipping kv cache read # (which happens when the request requires prompt logprobs # or calls a pooling model with all pooling). 
if not self.enable_caching or request.skip_reading_prefix_cache: return self.empty_kv_cache_blocks, 0 # NOTE: When all tokens hit the cache, we must recompute the last token # to obtain logits. Thus, set max_cache_hit_length to prompt_length - 1. # This can trigger recomputation of an entire block, rather than just # the single last token, because allocate_slots() requires # num_computed_tokens to be block-size aligned. Removing this limitation # could slightly improve performance in the future. max_cache_hit_length = request.num_tokens - 1 computed_blocks, num_new_computed_tokens = ( self.coordinator.find_longest_cache_hit( request.block_hashes, max_cache_hit_length ) ) if self.log_stats: assert self.prefix_cache_stats is not None self.prefix_cache_stats.record( num_tokens=request.num_tokens, num_hits=num_new_computed_tokens, preempted=request.num_preemptions > 0, ) return self.create_kv_cache_blocks(computed_blocks), num_new_computed_tokens def allocate_slots( self, request: Request, num_new_tokens: int, num_new_computed_tokens: int = 0, new_computed_blocks: KVCacheBlocks | None = None, num_lookahead_tokens: int = 0, num_external_computed_tokens: int = 0, delay_cache_blocks: bool = False, num_encoder_tokens: int = 0, ) -> KVCacheBlocks | None: """Add slots for a request with new tokens to append. Args: request: The request to allocate slots. num_new_tokens: The number of new tokens to be allocated and computed. num_new_computed_tokens: The number of new computed tokens just hitting the prefix caching, excluding external tokens. new_computed_blocks: The cached blocks for the above new computed tokens, grouped as a tuple by kv cache groups. num_lookahead_tokens: The number of speculative tokens to allocate. This is used by spec decode proposers with kv-cache such as eagle. num_external_computed_tokens: The number of tokens that their KV caches are not cached by vLLM but cached by the connector. delay_cache_blocks: Whether to skip caching the blocks. 
This is used by P/D when allocating blocks used in a KV transfer which will complete in a future step. num_encoder_tokens: The number of encoder tokens to allocate for cross-attention in encoder-decoder models(e.g., Whisper). For decoder-only models, this should be 0. Blocks layout: ``` ---------------------------------------------------------------------- | < comp > | < new_comp > | < ext_comp > | < new > | < lookahead > | ---------------------------------------------------------------------- | < to be computed > | ---------------------------------------------------------------------- | < to be allocated > | ---------------------------------------------------------------------- | < to be cached (roughly, | | details below)> | ---------------------------------------------------------------------- | Prefix-cached tokens from either vLLM | | or connector. Can be safely removed if | | they are outside sliding window. | ---------------------------------------------------------------------- | < cached by vLLM > | not cached by | | vLLM, but | | ref_cnt | ref_cnt not | cached by | | increased| increased yet| connector | ---------------------------------------------------------------------- ``` Abbrivations: ``` comp = request.num_computed_tokens new_comp = num_new_computed_tokens = len(new_computed_blocks) * block_size ext_comp = num_external_computed_tokens, cached by the connector new = num_new_tokens, including unverified draft tokens lookahead = num_lookahead_tokens ``` NOTE: for new tokens which include both verified and unverified draft tokens, we only cache the verified tokens (by capping the number at `request.num_tokens`). The allocation has three stages: - Free unnecessary blocks in `comp` and check if we have sufficient free blocks (return None if not). - Handle prefix tokens (`comp + new_comp + ext_comp`): - Free unnecessary blocks (e.g. 
outside sliding window) - Allocate new blocks for `ext_comp` tokens inside sliding window - Allocate new blocks for tokens to be computed (`new + lookahead`) Returns: A list of new allocated blocks. """ # When loading KV data asynchronously, we may have zero new tokens to # compute while still allocating slots for externally computed tokens. if num_new_tokens == 0 and num_external_computed_tokens == 0: raise ValueError( "num_new_tokens must be greater than 0 when there are no " "external computed tokens" ) if new_computed_blocks is not None: new_computed_block_list = new_computed_blocks.blocks else: new_computed_block_list = self.empty_kv_cache_blocks.blocks # The number of computed tokens is the number of computed tokens plus # the new prefix caching hits num_local_computed_tokens = ( request.num_computed_tokens + num_new_computed_tokens ) total_computed_tokens = min( num_local_computed_tokens + num_external_computed_tokens, self.max_model_len, ) num_tokens_need_slot = min( total_computed_tokens + num_new_tokens + num_lookahead_tokens, self.max_model_len, ) # Free the blocks that are skipped during the attention computation # (e.g., tokens outside the sliding window). # We can do this even if we cannot schedule this request due to # insufficient free blocks. # Should call this function before allocating new blocks to reduce # the number of evicted blocks. 
self.coordinator.remove_skipped_blocks( request.request_id, total_computed_tokens ) num_blocks_to_allocate = self.coordinator.get_num_blocks_to_allocate( request_id=request.request_id, num_tokens=num_tokens_need_slot, new_computed_blocks=new_computed_block_list, num_encoder_tokens=num_encoder_tokens, total_computed_tokens=num_local_computed_tokens + num_external_computed_tokens, ) if num_blocks_to_allocate > self.block_pool.get_num_free_blocks(): # Cannot allocate new blocks return None if ( new_computed_block_list is not self.empty_kv_cache_blocks.blocks or num_external_computed_tokens > 0 ): # Append the new computed blocks to the request blocks until now to # avoid the case where the new blocks cannot be allocated. self.coordinator.allocate_new_computed_blocks( request_id=request.request_id, new_computed_blocks=new_computed_block_list, num_local_computed_tokens=num_local_computed_tokens, num_external_computed_tokens=num_external_computed_tokens, ) new_blocks = self.coordinator.allocate_new_blocks( request.request_id, num_tokens_need_slot, num_encoder_tokens ) # P/D: delay caching blocks if we have to recv from # remote. Update state for locally cached blocks. if not self.enable_caching or delay_cache_blocks: return self.create_kv_cache_blocks(new_blocks) # NOTE(woosuk): We want to commit (cache) up to num_local_computed_tokens # + num_external_computed_tokens + num_new_tokens, but must exclude # "non-committable" tokens (e.g., draft tokens that could be rejected). # Therefore, we cap the number at `request.num_tokens`, ensuring only # "finalized" tokens are cached. num_tokens_to_cache = min( total_computed_tokens + num_new_tokens, request.num_tokens, ) self.coordinator.cache_blocks(request, num_tokens_to_cache) return self.create_kv_cache_blocks(new_blocks) def free(self, request: Request) -> None: """Free the blocks allocated for the request. We free the blocks in reverse order so that the tail blocks are evicted first when caching is enabled. 
Args: request: The request to free the blocks. """ self.coordinator.free(request.request_id) def remove_skipped_blocks( self, request_id: str, total_computed_tokens: int ) -> None: """Remove the blocks that are no longer needed from `blocks` and replace the removed blocks with null_block. Args: request_id: The request ID. total_computed_tokens: The total number of computed tokens, including local computed tokens and external computed tokens. """ self.coordinator.remove_skipped_blocks(request_id, total_computed_tokens) def evict_blocks(self, block_ids: set[int]) -> None: """evict blocks from the prefix cache by their block IDs. Args: block_ids: Set of block IDs to evict from cache. """ self.block_pool.evict_blocks(block_ids) def reset_prefix_cache(self) -> bool: """Reset prefix cache. This function may be used in RLHF flows to invalidate prefix caching after the weights are updated, or used for resetting prefix caching status for benchmarking. Returns: bool: True if the prefix cache is successfully reset, False otherwise. """ if not self.block_pool.reset_prefix_cache(): return False if self.log_stats: assert self.prefix_cache_stats is not None self.prefix_cache_stats.reset = True return True def get_num_common_prefix_blocks(self, running_request_id: str) -> list[int]: """Calculate the number of common prefix blocks for each kv cache group. The function selects a running request and iterates through its blocks. A block is considered a common prefix block if ALL requests with allocated KV cache share it (i.e., ref_cnt equals the number of entries in req_to_blocks). NOTE(woosuk): The number of requests with allocated KV cache is **greater than or equal to** the number of requests scheduled in the current step. This is because having allocated KV cache only indicates that: 1. The request has not yet finished, and 2. The request holds its blocks unfreed. While all scheduled requests must have allocated KV cache, the inverse is not necessarily true. 
There may be requests with allocated KV cache that are not scheduled in the current step. This can result in an edge case where the number of common prefix blocks is 0, even though all scheduled requests share a common prefix. This occurs because there may be unscheduled requests that do not share the common prefix. Currently, this case cannot be easily detected, so the function returns 0 in such cases. Args: running_request_id: The request ID of any running request, used to identify the common prefix blocks. Returns: list[int]: The number of common prefix blocks for each kv cache group. """ return self.coordinator.get_num_common_prefix_blocks(running_request_id) def take_events(self) -> list[KVCacheEvent]: """Take the KV cache events from the block pool. Returns: A list of KV cache events. """ return self.block_pool.take_events() def get_blocks(self, request_id: str) -> KVCacheBlocks: """Get the blocks of a request.""" return self.create_kv_cache_blocks(self.coordinator.get_blocks(request_id)) def get_block_ids(self, request_id: str) -> tuple[list[int], ...]: """Get the block ids of a request.""" return self.get_blocks(request_id).get_block_ids() def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: """Cache the blocks for the request, if enabled. Args: request: The request to cache the blocks. num_computed_tokens: The number of computed tokens, including tokens that are already cached and tokens to be cached. """ if self.enable_caching: self.coordinator.cache_blocks(request, num_computed_tokens) def create_kv_cache_blocks( self, blocks: tuple[list[KVCacheBlock], ...] ) -> KVCacheBlocks: # Only create new KVCacheBlocks for non-empty blocks return KVCacheBlocks(blocks) if any(blocks) else self.empty_kv_cache_blocks
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/sched/async_scheduler.py
vllm/v1/core/sched/async_scheduler.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm.logger import init_logger from vllm.v1.core.sched.output import SchedulerOutput from vllm.v1.core.sched.scheduler import Scheduler from vllm.v1.request import Request, RequestStatus logger = init_logger(__name__) class AsyncScheduler(Scheduler): def _update_after_schedule( self, scheduler_output: SchedulerOutput, ) -> None: super()._update_after_schedule(scheduler_output) pending_structured_output_tokens = False spec_decode_tokens = scheduler_output.scheduled_spec_decode_tokens for req_id in scheduler_output.num_scheduled_tokens: request = self.requests[req_id] pending_structured_output_tokens |= ( request.use_structured_output and request.num_output_placeholders > 0 ) cur_num_spec_tokens = len(spec_decode_tokens.get(req_id, ())) if ( request.num_computed_tokens == request.num_tokens + request.num_output_placeholders + cur_num_spec_tokens ): # The request will generate a new token plus num_spec_tokens # in this scheduling step. request.num_output_placeholders += 1 + cur_num_spec_tokens # Add placeholders for the new tokens in spec_token_ids. # We will update the actual spec token ids in the worker process. request.spec_token_ids = [-1] * self.num_spec_tokens scheduler_output.pending_structured_output_tokens = ( pending_structured_output_tokens ) def _update_request_with_output( self, request: Request, new_token_ids: list[int], ) -> tuple[list[int], bool]: if request.discard_latest_async_tokens: # If the request is force preempted in reset_prefix_cache, we # should discard the latest async token. request.discard_latest_async_tokens = False return [], False status_before_update = request.status new_token_ids, stopped = super()._update_request_with_output( request, new_token_ids ) # Update the number of output placeholders. request.num_output_placeholders -= len(new_token_ids) assert request.num_output_placeholders >= 0 # Cache the new tokens. 
Preempted requests should be skipped. if status_before_update == RequestStatus.RUNNING: self.kv_cache_manager.cache_blocks( request, request.num_computed_tokens - request.num_output_placeholders ) return new_token_ids, stopped
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/sched/output.py
vllm/v1/core/sched/output.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from dataclasses import dataclass from functools import cached_property from typing import TYPE_CHECKING from typing_extensions import deprecated from vllm._bc_linter import bc_linter_include if TYPE_CHECKING: import numpy as np import numpy.typing as npt import torch from vllm.distributed.ec_transfer.ec_connector.base import ECConnectorMetadata from vllm.distributed.kv_transfer.kv_connector.v1.base import KVConnectorMetadata from vllm.lora.request import LoRARequest from vllm.multimodal.inputs import MultiModalFeatureSpec from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams from vllm.v1.request import Request else: ECConnectorMetadata = object KVConnectorMetadata = object LoRARequest = object MultiModalFeatureSpec = object PoolingParams = object SamplingParams = object Request = object @bc_linter_include @dataclass class NewRequestData: req_id: str prompt_token_ids: list[int] | None mm_features: list[MultiModalFeatureSpec] sampling_params: SamplingParams | None pooling_params: PoolingParams | None block_ids: tuple[list[int], ...] num_computed_tokens: int lora_request: LoRARequest | None prompt_embeds: "torch.Tensor | None" = None # Only used for v2 model runner. 
prefill_token_ids: list[int] | None = None @classmethod def from_request( cls, request: Request, block_ids: tuple[list[int], ...], prefill_token_ids: list[int] | None = None, ) -> "NewRequestData": return cls( req_id=request.request_id, prompt_token_ids=request.prompt_token_ids, mm_features=request.mm_features, sampling_params=request.sampling_params, pooling_params=request.pooling_params, block_ids=block_ids, num_computed_tokens=request.num_computed_tokens, lora_request=request.lora_request, prompt_embeds=request.prompt_embeds, prefill_token_ids=prefill_token_ids, ) def __repr__(self) -> str: prompt_embeds_shape = ( self.prompt_embeds.shape if self.prompt_embeds is not None else None ) return ( f"NewRequestData(" f"req_id={self.req_id}," f"prompt_token_ids={self.prompt_token_ids}," f"prefill_token_ids={self.prefill_token_ids}," f"mm_features={self.mm_features}," f"sampling_params={self.sampling_params}," f"block_ids={self.block_ids}," f"num_computed_tokens={self.num_computed_tokens}," f"lora_request={self.lora_request}," f"prompt_embeds_shape={prompt_embeds_shape}" ")" ) # Version of __repr__ with the prompt data obfuscated def anon_repr(self) -> str: prompt_token_ids_len = ( len(self.prompt_token_ids) if self.prompt_token_ids is not None else None ) prompt_embeds_shape = ( self.prompt_embeds.shape if self.prompt_embeds is not None else None ) return ( f"NewRequestData(" f"req_id={self.req_id}," f"prompt_token_ids_len={prompt_token_ids_len}," f"mm_features={self.mm_features}," f"sampling_params={self.sampling_params}," f"block_ids={self.block_ids}," f"num_computed_tokens={self.num_computed_tokens}," f"lora_request={self.lora_request}," f"prompt_embeds_shape={prompt_embeds_shape}" ")" ) @bc_linter_include @dataclass class CachedRequestData: req_ids: list[str] # For request ids not in resumed_req_ids, new_block_ids will be appended to # the request's block IDs. 
For those in the set, new_block_ids will be used as the # request's block IDs instead of appending to the existing block IDs. resumed_req_ids: set[str] # NOTE(woosuk): new_token_ids is only used for pipeline parallelism. # When PP is not used, new_token_ids will be empty. new_token_ids: list[list[int]] # For requests not scheduled in the last step, propagate the token ids to the # connector. Won't contain requests that were scheduled in the prior step. all_token_ids: dict[str, list[int]] new_block_ids: list[tuple[list[int], ...] | None] num_computed_tokens: list[int] num_output_tokens: list[int] @property def num_reqs(self) -> int: return len(self.req_ids) @cached_property @deprecated("This will be removed in v0.14, use `resumed_req_ids` instead.") def resumed_from_preemption(self) -> list[bool]: return [req_id in self.resumed_req_ids for req_id in self.req_ids] @cached_property @deprecated("This will be removed in v0.14, use `all_token_ids` instead.") def resumed_req_token_ids(self) -> list[list[int] | None]: return [ self.all_token_ids[req_id] if req_id in self.resumed_req_ids else None for req_id in self.req_ids ] @classmethod def make_empty(cls) -> "CachedRequestData": return cls( req_ids=[], resumed_req_ids=set(), new_token_ids=[], all_token_ids={}, new_block_ids=[], num_computed_tokens=[], num_output_tokens=[], ) @bc_linter_include @dataclass class SchedulerOutput: # list of the requests that are scheduled for the first time. # We cache the request's data in each worker process, so that we don't # need to re-send it every scheduling step. scheduled_new_reqs: list[NewRequestData] # list of the requests that have been scheduled before. # Since the request's data is already cached in the worker processes, # we only send the diff to minimize the communication cost. scheduled_cached_reqs: CachedRequestData # req_id -> num_scheduled_tokens # Number of tokens scheduled for each request. 
num_scheduled_tokens: dict[str, int] # Total number of tokens scheduled for all requests. # Equal to sum(num_scheduled_tokens.values()) total_num_scheduled_tokens: int # req_id -> spec_token_ids # If a request does not have any spec decode tokens, it will not be # included in the dictionary. scheduled_spec_decode_tokens: dict[str, list[int]] # req_id -> encoder input indices that need processing. # E.g., if a request has [0, 1], it could mean the vision encoder needs # to process that the request's 0-th and 1-th images in the current step. scheduled_encoder_inputs: dict[str, list[int]] # Number of common prefix blocks for all requests in each KV cache group. # This can be used for cascade attention. num_common_prefix_blocks: list[int] # Request IDs that are finished in between the previous and the current # steps. This is used to notify the workers about the finished requests # so that they can free the cached states for those requests. finished_req_ids: set[str] # list of mm_hash strings associated with the encoder outputs to be # freed from the encoder cache. free_encoder_mm_hashes: list[str] # Request IDs that are preempted in this step. # Only used for v2 model runner. preempted_req_ids: set[str] | None = None # Whether the scheduled requests have all the output tokens they # need to perform grammar bitmask computation. pending_structured_output_tokens: bool = False # KV Cache Connector metadata. kv_connector_metadata: KVConnectorMetadata | None = None # EC Cache Connector metadata ec_connector_metadata: ECConnectorMetadata | None = None @classmethod def make_empty(cls) -> "SchedulerOutput": return cls( scheduled_new_reqs=[], scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={}, total_num_scheduled_tokens=0, scheduled_spec_decode_tokens={}, scheduled_encoder_inputs={}, num_common_prefix_blocks=[], finished_req_ids=set(), free_encoder_mm_hashes=[], ) @dataclass class GrammarOutput: # ids of structured output requests. 
structured_output_request_ids: list[str] # Bitmask ordered as structured_output_request_ids. grammar_bitmask: "npt.NDArray[np.int32]"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/sched/request_queue.py
vllm/v1/core/sched/request_queue.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import heapq from abc import ABC, abstractmethod from collections import deque from collections.abc import Iterable, Iterator from enum import Enum from vllm.v1.request import Request class SchedulingPolicy(Enum): """Enum for scheduling policies.""" FCFS = "fcfs" PRIORITY = "priority" class RequestQueue(ABC): """Abstract base class for request queues.""" @abstractmethod def add_request(self, request: Request) -> None: """Add a request to the queue according to the policy.""" pass @abstractmethod def pop_request(self) -> Request: """Pop a request from the queue according to the policy.""" pass @abstractmethod def peek_request(self) -> Request: """Peek at the request at the front of the queue without removing it.""" pass @abstractmethod def prepend_request(self, request: Request) -> None: """Prepend a request to the front of the queue.""" pass @abstractmethod def prepend_requests(self, requests: "RequestQueue") -> None: """Prepend all requests from another queue to the front of this queue.""" pass @abstractmethod def remove_request(self, request: Request) -> None: """Remove a specific request from the queue.""" pass @abstractmethod def remove_requests(self, requests: Iterable[Request]) -> None: """Remove multiple specific requests from the queue.""" pass @abstractmethod def __bool__(self) -> bool: """Check if queue has any requests.""" pass @abstractmethod def __len__(self) -> int: """Get number of requests in queue.""" pass @abstractmethod def __iter__(self) -> Iterator[Request]: """Iterate over the queue according to the policy.""" pass @abstractmethod def __reversed__(self) -> Iterator[Request]: """Iterate over the queue in reverse order.""" pass class FCFSRequestQueue(deque[Request], RequestQueue): """A first-come-first-served queue that supports deque operations.""" def add_request(self, request: Request) -> None: """Add a request to the queue according to 
FCFS policy.""" self.append(request) def pop_request(self) -> Request: """Pop a request from the queue according to FCFS policy.""" return self.popleft() def peek_request(self) -> Request: """Peek at the next request in the queue without removing it.""" if not self: raise IndexError("peek from an empty queue") return self[0] def prepend_request(self, request: Request) -> None: """Prepend a request to the front of the queue.""" self.appendleft(request) def prepend_requests(self, requests: RequestQueue) -> None: """Prepend all requests from another queue to the front of this queue.""" self.extendleft(reversed(requests)) def remove_request(self, request: Request) -> None: """Remove a specific request from the queue.""" self.remove(request) def remove_requests(self, requests: Iterable[Request]) -> None: """Remove multiple specific requests from the queue.""" requests_to_remove = set(requests) filtered_requests = [req for req in self if req not in requests_to_remove] # deque does not support in-place filtering, so we need to clear # and extend self.clear() self.extend(filtered_requests) def __bool__(self) -> bool: """Check if queue has any requests.""" return len(self) > 0 def __len__(self) -> int: """Get number of requests in queue.""" return super().__len__() def __iter__(self) -> Iterator[Request]: """Iterate over the queue according to FCFS policy.""" return super().__iter__() def __reversed__(self) -> Iterator[Request]: """Iterate over the queue in reverse order.""" return super().__reversed__() class PriorityRequestQueue(RequestQueue): """ A priority queue that supports heap operations. Respects the ordering defined in the Request class, where requests with a smaller value of `priority` are processed first. If multiple requests have the same priority, the one with the earlier `arrival_time` is processed first. 
""" def __init__(self) -> None: self._heap: list[Request] = [] def add_request(self, request: Request) -> None: """Add a request to the queue according to priority policy.""" heapq.heappush(self._heap, request) def pop_request(self) -> Request: """Pop a request from the queue according to priority policy.""" if not self._heap: raise IndexError("pop from empty heap") return heapq.heappop(self._heap) def peek_request(self) -> Request: """Peek at the next request in the queue without removing it.""" if not self._heap: raise IndexError("peek from empty heap") return self._heap[0] def prepend_request(self, request: Request) -> None: """Add a request to the queue according to priority policy. Note: In a priority queue, there is no concept of prepending to the front. Requests are ordered by (priority, arrival_time).""" self.add_request(request) def prepend_requests(self, requests: RequestQueue) -> None: """Add all requests from another queue according to priority policy. Note: In a priority queue, there is no concept of prepending to the front. 
Requests are ordered by (priority, arrival_time).""" for request in requests: self.add_request(request) def remove_request(self, request: Request) -> None: """Remove a specific request from the queue.""" self._heap.remove(request) heapq.heapify(self._heap) def remove_requests(self, requests: Iterable[Request]) -> None: """Remove multiple specific requests from the queue.""" requests_to_remove = requests if isinstance(requests, set) else set(requests) self._heap = [r for r in self._heap if r not in requests_to_remove] heapq.heapify(self._heap) def __bool__(self) -> bool: """Check if queue has any requests.""" return bool(self._heap) def __len__(self) -> int: """Get number of requests in queue.""" return len(self._heap) def __iter__(self) -> Iterator[Request]: """Iterate over the queue according to priority policy.""" heap_copy = self._heap[:] while heap_copy: yield heapq.heappop(heap_copy) def __reversed__(self) -> Iterator[Request]: """Iterate over the queue in reverse priority order.""" return reversed(list(self)) def create_request_queue(policy: SchedulingPolicy) -> RequestQueue: """Create request queue based on scheduling policy.""" if policy == SchedulingPolicy.PRIORITY: return PriorityRequestQueue() elif policy == SchedulingPolicy.FCFS: return FCFSRequestQueue() else: raise ValueError(f"Unknown scheduling policy: {policy}")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/sched/interface.py
vllm/v1/core/sched/interface.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Iterable
from typing import TYPE_CHECKING, Optional

from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry

if TYPE_CHECKING:
    from vllm.config import VllmConfig
    from vllm.distributed.kv_transfer.kv_connector.v1 import KVConnectorBase_V1
    from vllm.v1.core.sched.output import GrammarOutput, SchedulerOutput
    from vllm.v1.engine import EngineCoreOutputs
    from vllm.v1.kv_cache_interface import KVCacheConfig
    from vllm.v1.metrics.stats import SchedulerStats
    from vllm.v1.outputs import DraftTokenIds, ModelRunnerOutput
    from vllm.v1.request import Request, RequestStatus
    from vllm.v1.structured_output import StructuredOutputManager


class SchedulerInterface(ABC):
    """Abstract interface that concrete v1 schedulers implement.

    The engine drives this interface in a busy loop: ``schedule()`` selects
    the work for one forward pass of the model, the model runner executes it,
    and ``update_from_output()`` folds the results back into scheduler state
    and produces per-client outputs.
    """

    @abstractmethod
    def __init__(
        self,
        vllm_config: "VllmConfig",
        kv_cache_config: "KVCacheConfig",
        structured_output_manager: "StructuredOutputManager",
        block_size: int,
        mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
        include_finished_set: bool = False,
        log_stats: bool = False,
    ) -> None:
        """Construct the scheduler from engine-level configuration.

        Args:
            vllm_config: Top-level engine configuration.
            kv_cache_config: KV cache layout/configuration for this engine.
            structured_output_manager: Manager for structured-output (grammar)
                requests.
            block_size: KV cache block size, in tokens.
            mm_registry: Multi-modal registry; defaults to the global one.
            include_finished_set: If True, implementations are expected to
                track finished request ids per client (see
                ``update_from_output``).
            log_stats: Whether to record scheduling statistics.
        """
        raise NotImplementedError

    @abstractmethod
    def schedule(self) -> "SchedulerOutput":
        """Schedule the requests to process in this scheduling step.

        The scheduling decision is made at the iteration level. Each
        scheduling step corresponds to a single forward pass of the model.
        Therefore, this method is called repeatedly by a busy loop in the
        engine.

        Essentially, the scheduler produces a dictionary of
        {req_id: num_tokens} that specifies how many tokens to process for
        each request in this scheduling step. For example, num_tokens can be
        as large as the number of prompt tokens for new requests, or it can
        be 1 for the requests that are auto-regressively generating new
        tokens one by one. Otherwise, it can be somewhere in between in case
        of chunked prefills, prefix caching, speculative decoding, etc.

        Additionally, the scheduler also returns useful data about each
        request or the batch as a whole. The model runner will use this
        information in preparing inputs to the model.

        Returns:
            A SchedulerOutput object containing information about the
            scheduled requests.
        """
        raise NotImplementedError

    @abstractmethod
    def get_grammar_bitmask(
        self, scheduler_output: "SchedulerOutput"
    ) -> "GrammarOutput | None":
        """Build the grammar bitmask for the scheduled structured-output
        requests, or return None when there is nothing to constrain.
        """
        raise NotImplementedError

    @abstractmethod
    def update_from_output(
        self,
        scheduler_output: "SchedulerOutput",
        model_runner_output: "ModelRunnerOutput",
    ) -> dict[int, "EngineCoreOutputs"]:
        """Update the scheduler state based on the model runner output.

        This method is called after the model runner has processed the
        scheduled requests. The model runner output includes generated token
        ids, draft token ids for next step, etc. The scheduler uses this
        information to update its states, checks the finished requests, and
        returns the output for each request.

        Returns:
            A dict of client index to EngineCoreOutputs object containing the
            outputs for each request originating from that client.
        """
        raise NotImplementedError

    @abstractmethod
    def update_draft_token_ids(
        self,
        draft_token_ids: "DraftTokenIds",
    ) -> None:
        """Update the draft token ids for the scheduled requests."""
        raise NotImplementedError

    @abstractmethod
    def add_request(self, request: "Request") -> None:
        """Add a new request to the scheduler's internal queue.

        Args:
            request: The new request being added.
        """
        raise NotImplementedError

    @abstractmethod
    def finish_requests(
        self,
        request_ids: str | Iterable[str],
        finished_status: "RequestStatus",
    ) -> None:
        """Finish the requests in the scheduler's internal queue. If the
        request is not in the queue, this method will do nothing.

        This method is called in two cases:
        1. When the request is aborted by the client.
        2. When the frontend process detects a stop string of the request
           after de-tokenizing its generated tokens.

        Args:
            request_ids: A single or a list of request IDs.
            finished_status: The finished status of the given requests.
        """
        raise NotImplementedError

    @abstractmethod
    def get_num_unfinished_requests(self) -> int:
        """Number of unfinished requests in the scheduler's internal queue."""
        raise NotImplementedError

    def has_unfinished_requests(self) -> bool:
        """Returns True if there are unfinished requests in the scheduler's
        internal queue."""
        return self.get_num_unfinished_requests() > 0

    @abstractmethod
    def has_finished_requests(self) -> bool:
        """Returns True if there are finished requests that need to be
        cleared.

        NOTE: This is different from `not self.has_unfinished_requests()`.

        The scheduler maintains an internal list of the requests finished in
        the previous step. This list is returned from the next call to
        schedule(), to be sent to the model runner in the next step to clear
        cached states for these finished requests.

        This method checks if this internal list of finished requests is
        non-empty. This information is useful for DP attention.
        """
        raise NotImplementedError

    def has_requests(self) -> bool:
        """Returns True if there are unfinished requests, or finished
        requests not yet returned in SchedulerOutputs."""
        return self.has_unfinished_requests() or self.has_finished_requests()

    @abstractmethod
    def reset_prefix_cache(
        self, reset_running_requests: bool = False, reset_connector: bool = False
    ) -> bool:
        """Reset the prefix cache for KV cache.

        This is particularly required when the model weights are
        live-updated.

        Args:
            reset_running_requests: If True, all the running requests will be
                preempted and moved to the waiting queue. Otherwise, this
                method will only reset the KV prefix cache when there is no
                running request taking KV cache.
            reset_connector: presumably also resets cached state held by the
                KV connector — confirm against concrete implementations.
        """
        raise NotImplementedError

    @abstractmethod
    def get_request_counts(self) -> tuple[int, int]:
        """Returns (num_running_reqs, num_waiting_reqs)."""
        raise NotImplementedError

    @abstractmethod
    def make_stats(self) -> Optional["SchedulerStats"]:
        """Make a SchedulerStats object for logging.

        The SchedulerStats object is created for every scheduling step.
        """
        raise NotImplementedError

    @abstractmethod
    def shutdown(self) -> None:
        """Shutdown the scheduler."""
        raise NotImplementedError

    def get_kv_connector(self) -> Optional["KVConnectorBase_V1"]:
        """Return the scheduler-side KV connector, if one exists.

        The base implementation has no connector and returns None;
        subclasses with KV transfer support override this.
        """
        return None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/sched/utils.py
vllm/v1/core/sched/utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib

from vllm.v1.request import Request, RequestStatus


def remove_all(lst: list, items_to_remove: set) -> list:
    """Remove every element of ``items_to_remove`` from ``lst``.

    Optimized for the common case of removing exactly one item, which is
    done in place; removing several items builds a fresh list instead.

    Args:
        lst: The list to remove items from.
        items_to_remove: Set of items to remove.

    Returns:
        The modified original list (single-item case) or a new list
        (multi-item case). Callers should always use the returned value.
    """
    if not items_to_remove:
        return lst

    if len(items_to_remove) == 1:
        # Fast path: one item to drop — mutate the list in place.
        (sole_item,) = items_to_remove
        with contextlib.suppress(ValueError):
            lst.remove(sole_item)
        return lst

    # General path: rebuild the list without the removed items.
    return [element for element in lst if element not in items_to_remove]


def check_stop(request: Request, max_model_len: int) -> bool:
    """Check whether ``request`` should stop generating and, if so, mark it.

    Sets ``request.status`` (and ``stop_reason`` for stop-token hits) as a
    side effect when a stop condition fires.

    Args:
        request: A generation (non-pooling) request with at least one
            output token.
        max_model_len: Hard cap on total (prompt + output) token count.

    Returns:
        True if the request is finished, False otherwise.
    """
    assert not request.pooling_params

    params = request.sampling_params
    assert params is not None

    # Never stop before the requested minimum number of output tokens.
    if request.num_output_tokens < params.min_tokens:
        return False

    newest_token = request.output_token_ids[-1]

    # EOS token ends generation unless the caller opted out.
    if newest_token == request.eos_token_id and not params.ignore_eos:
        request.status = RequestStatus.FINISHED_STOPPED
        return True

    # Custom stop tokens: record which token triggered the stop.
    if newest_token in (params.stop_token_ids or ()):
        request.status = RequestStatus.FINISHED_STOPPED
        request.stop_reason = newest_token
        return True

    # Length caps: model context window or the request's own max_tokens.
    hit_model_cap = request.num_tokens >= max_model_len
    hit_request_cap = request.num_output_tokens >= request.max_tokens
    if hit_model_cap or hit_request_cap:
        request.status = RequestStatus.FINISHED_LENGTH_CAPPED
        return True

    return False
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/sched/__init__.py
vllm/v1/core/sched/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/core/sched/scheduler.py
vllm/v1/core/sched/scheduler.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import itertools import time from collections import defaultdict from collections.abc import Iterable from typing import Any from vllm import envs from vllm.compilation.cuda_graph import CUDAGraphStat from vllm.config import VllmConfig from vllm.distributed.ec_transfer.ec_connector.base import ( ECConnectorMetadata, ECConnectorRole, ) from vllm.distributed.ec_transfer.ec_connector.factory import ECConnectorFactory from vllm.distributed.kv_events import EventPublisherFactory, KVEventBatch from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory from vllm.distributed.kv_transfer.kv_connector.v1 import ( KVConnectorBase_V1, KVConnectorRole, SupportsHMA, ) from vllm.distributed.kv_transfer.kv_connector.v1.base import KVConnectorMetadata from vllm.distributed.kv_transfer.kv_connector.v1.metrics import KVConnectorStats from vllm.logger import init_logger from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry from vllm.v1.core.encoder_cache_manager import ( EncoderCacheManager, EncoderDecoderCacheManager, compute_encoder_budget, ) from vllm.v1.core.kv_cache_manager import KVCacheBlocks, KVCacheManager from vllm.v1.core.kv_cache_metrics import KVCacheMetricsCollector from vllm.v1.core.sched.interface import SchedulerInterface from vllm.v1.core.sched.output import ( CachedRequestData, GrammarOutput, NewRequestData, SchedulerOutput, ) from vllm.v1.core.sched.request_queue import SchedulingPolicy, create_request_queue from vllm.v1.core.sched.utils import check_stop, remove_all from vllm.v1.engine import EngineCoreEventType, EngineCoreOutput, EngineCoreOutputs from vllm.v1.kv_cache_interface import KVCacheConfig from vllm.v1.metrics.perf import ModelMetrics, PerfStats from vllm.v1.metrics.stats import ( PrefixCacheStats, SchedulerStats, ) from vllm.v1.outputs import DraftTokenIds, KVConnectorOutput, ModelRunnerOutput from 
vllm.v1.request import Request, RequestStatus from vllm.v1.spec_decode.metrics import SpecDecodingStats from vllm.v1.structured_output import StructuredOutputManager from vllm.v1.utils import record_function_or_nullcontext logger = init_logger(__name__) class Scheduler(SchedulerInterface): def __init__( self, vllm_config: VllmConfig, kv_cache_config: KVCacheConfig, structured_output_manager: StructuredOutputManager, block_size: int, mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY, include_finished_set: bool = False, log_stats: bool = False, ) -> None: self.vllm_config = vllm_config self.scheduler_config = vllm_config.scheduler_config self.cache_config = vllm_config.cache_config self.lora_config = vllm_config.lora_config self.kv_cache_config = kv_cache_config self.kv_events_config = vllm_config.kv_events_config self.parallel_config = vllm_config.parallel_config self.log_stats = log_stats self.observability_config = vllm_config.observability_config self.kv_metrics_collector: KVCacheMetricsCollector | None = None if self.observability_config.kv_cache_metrics: self.kv_metrics_collector = KVCacheMetricsCollector( self.observability_config.kv_cache_metrics_sample, ) self.structured_output_manager = structured_output_manager self.is_encoder_decoder = vllm_config.model_config.is_encoder_decoder # include_finished_set controls whether a separate set of finished # request ids should be included in the EngineCoreOutputs returned # by update_from_outputs(). This is currently used in the multi-engine # case to track request lifetimes efficiently. self.finished_req_ids_dict: dict[int, set[str]] | None = ( defaultdict(set) if include_finished_set else None ) self.prev_step_scheduled_req_ids: set[str] = set() # Scheduling constraints. 
self.max_num_running_reqs = self.scheduler_config.max_num_seqs self.max_num_scheduled_tokens = self.scheduler_config.max_num_batched_tokens self.max_model_len = vllm_config.model_config.max_model_len self.enable_kv_cache_events = ( self.kv_events_config is not None and self.kv_events_config.enable_kv_cache_events ) # Create KVConnector for the Scheduler. Note that each Worker # will have a corresponding KVConnector with Role=WORKER. # KV Connector pushes/pull of remote KVs for P/D and offloading. self.connector = None self.connector_prefix_cache_stats: PrefixCacheStats | None = None self.recompute_kv_load_failures = True if self.vllm_config.kv_transfer_config is not None: assert not self.is_encoder_decoder, ( "Encoder-decoder models are not currently supported with KV connectors" ) self.connector = KVConnectorFactory.create_connector( config=self.vllm_config, role=KVConnectorRole.SCHEDULER, kv_cache_config=self.kv_cache_config, ) if self.log_stats: self.connector_prefix_cache_stats = PrefixCacheStats() kv_load_failure_policy = ( self.vllm_config.kv_transfer_config.kv_load_failure_policy ) self.recompute_kv_load_failures = kv_load_failure_policy == "recompute" self.kv_event_publisher = EventPublisherFactory.create( self.kv_events_config, self.parallel_config.data_parallel_index, ) self.ec_connector = None if self.vllm_config.ec_transfer_config is not None: self.ec_connector = ECConnectorFactory.create_connector( config=self.vllm_config, role=ECConnectorRole.SCHEDULER ) num_gpu_blocks = self.cache_config.num_gpu_blocks assert num_gpu_blocks is not None and num_gpu_blocks > 0 self.block_size = block_size self.dcp_world_size = vllm_config.parallel_config.decode_context_parallel_size self.pcp_world_size = vllm_config.parallel_config.prefill_context_parallel_size # req_id -> Request self.requests: dict[str, Request] = {} # Scheduling policy try: self.policy = SchedulingPolicy(self.scheduler_config.policy) except ValueError as e: raise ValueError( f"Unknown scheduling 
policy: {self.scheduler_config.policy}" ) from e # Priority queues for requests. self.waiting = create_request_queue(self.policy) self.running: list[Request] = [] # The request IDs that are finished in between the previous and the # current steps. This is used to notify the workers about the finished # requests so that they can free the cached states for those requests. # This is flushed at the end of each scheduling step. self.finished_req_ids: set[str] = set() # KV Connector: requests in process of async KV loading or recving self.finished_recving_kv_req_ids: set[str] = set() self.failed_recving_kv_req_ids: set[str] = set() # Encoder-related. # Calculate encoder cache size if applicable # NOTE: For now we use the same budget for both compute and space. # This can be changed when we make encoder cache for embedding caching # across requests. encoder_compute_budget, encoder_cache_size = compute_encoder_budget( model_config=vllm_config.model_config, scheduler_config=vllm_config.scheduler_config, mm_registry=mm_registry, ) # NOTE(woosuk): Here, "encoder" includes the vision encoder (and # projector if needed) for MM models as well as encoder-decoder # transformers. self.max_num_encoder_input_tokens = encoder_compute_budget # NOTE: For the models without encoder (e.g., text-only models), # the encoder cache will not be initialized because cache size is 0 # for these models. self.encoder_cache_manager = ( EncoderDecoderCacheManager(cache_size=encoder_cache_size) if self.is_encoder_decoder else EncoderCacheManager(cache_size=encoder_cache_size) ) # For encoder-decoder models, allocate the maximum number of tokens for Cross # Attn blocks, as for Whisper its input is always padded to the maximum length. # TODO (NickLucche): Generalize to models with variable-length encoder inputs. 
self._num_encoder_max_input_tokens = ( MULTIMODAL_REGISTRY.get_encdec_max_encoder_len(vllm_config.model_config) ) speculative_config = vllm_config.speculative_config self.use_eagle = False self.num_spec_tokens = self.num_lookahead_tokens = 0 if speculative_config: self.num_spec_tokens = speculative_config.num_speculative_tokens if speculative_config.use_eagle(): self.use_eagle = True self.num_lookahead_tokens = self.num_spec_tokens # Create the KV cache manager. self.kv_cache_manager = KVCacheManager( kv_cache_config=kv_cache_config, max_model_len=self.max_model_len, enable_caching=self.cache_config.enable_prefix_caching, use_eagle=self.use_eagle, log_stats=self.log_stats, enable_kv_cache_events=self.enable_kv_cache_events, dcp_world_size=self.dcp_world_size, pcp_world_size=self.pcp_world_size, hash_block_size=self.block_size, metrics_collector=self.kv_metrics_collector, ) self.use_pp = self.parallel_config.pipeline_parallel_size > 1 self.use_v2_model_runner = envs.VLLM_USE_V2_MODEL_RUNNER self.perf_metrics: ModelMetrics | None = None if self.log_stats and vllm_config.observability_config.enable_mfu_metrics: self.perf_metrics = ModelMetrics(vllm_config) def schedule(self) -> SchedulerOutput: # NOTE(woosuk) on the scheduling algorithm: # There's no "decoding phase" nor "prefill phase" in the scheduler. # Each request just has the num_computed_tokens and # num_tokens_with_spec. num_tokens_with_spec = # len(prompt_token_ids) + len(output_token_ids) + len(spec_token_ids). # At each step, the scheduler tries to assign tokens to the requests # so that each request's num_computed_tokens can catch up its # num_tokens_with_spec. This is general enough to cover # chunked prefills, prefix caching, speculative decoding, # and the "jump decoding" optimization in the future. 
scheduled_new_reqs: list[Request] = [] scheduled_resumed_reqs: list[Request] = [] scheduled_running_reqs: list[Request] = [] preempted_reqs: list[Request] = [] req_to_new_blocks: dict[str, KVCacheBlocks] = {} num_scheduled_tokens: dict[str, int] = {} token_budget = self.max_num_scheduled_tokens # Encoder-related. scheduled_encoder_inputs: dict[str, list[int]] = {} encoder_compute_budget = self.max_num_encoder_input_tokens # Spec decode-related. scheduled_spec_decode_tokens: dict[str, list[int]] = {} # For logging. scheduled_timestamp = time.monotonic() # First, schedule the RUNNING requests. req_index = 0 while req_index < len(self.running) and token_budget > 0: request = self.running[req_index] if ( request.num_output_placeholders > 0 # This is (num_computed_tokens + 1) - (num_output_placeholders - 1). # Since output placeholders are also included in the computed tokens # count, we subtract (num_output_placeholders - 1) to remove any draft # tokens, so that we can be sure no further steps are needed even if # they are all rejected. and request.num_computed_tokens + 2 - request.num_output_placeholders >= request.num_prompt_tokens + request.max_tokens ): # Async scheduling: Avoid scheduling an extra step when we are sure that # the previous step has reached request.max_tokens. We don't schedule # partial draft tokens since this prevents uniform decode optimizations. req_index += 1 continue num_new_tokens = ( request.num_tokens_with_spec + request.num_output_placeholders - request.num_computed_tokens ) if 0 < self.scheduler_config.long_prefill_token_threshold < num_new_tokens: num_new_tokens = self.scheduler_config.long_prefill_token_threshold num_new_tokens = min(num_new_tokens, token_budget) # Make sure the input position does not exceed the max model len. # This is necessary when using spec decoding. num_new_tokens = min( num_new_tokens, self.max_model_len - 1 - request.num_computed_tokens ) # Schedule encoder inputs. 
encoder_inputs_to_schedule = None external_load_encoder_input: list[int] = [] new_encoder_compute_budget = encoder_compute_budget if request.has_encoder_inputs: ( encoder_inputs_to_schedule, num_new_tokens, new_encoder_compute_budget, external_load_encoder_input, ) = self._try_schedule_encoder_inputs( request, request.num_computed_tokens, num_new_tokens, encoder_compute_budget, shift_computed_tokens=1 if self.use_eagle else 0, ) if num_new_tokens == 0: # The request cannot be scheduled because one of the following # reasons: # 1. No new tokens to schedule. This may happen when # (1) PP>1 and we have already scheduled all prompt tokens # but they are not finished yet. # (2) Async scheduling and the request has reached to either # its max_total_tokens or max_model_len. # 2. The encoder budget is exhausted. # 3. The encoder cache is exhausted. # NOTE(woosuk): Here, by doing `continue` instead of `break`, # we do not strictly follow the FCFS scheduling policy and # allow the lower-priority requests to be scheduled. req_index += 1 continue # Schedule newly needed KV blocks for the request. with record_function_or_nullcontext("schedule: allocate_slots"): while True: new_blocks = self.kv_cache_manager.allocate_slots( request, num_new_tokens, num_lookahead_tokens=self.num_lookahead_tokens, ) if new_blocks is not None: # The request can be scheduled. break # The request cannot be scheduled. # Preempt the lowest-priority request. 
if self.policy == SchedulingPolicy.PRIORITY: preempted_req = max( self.running, key=lambda r: (r.priority, r.arrival_time), ) self.running.remove(preempted_req) if preempted_req in scheduled_running_reqs: scheduled_running_reqs.remove(preempted_req) token_budget += num_scheduled_tokens[ preempted_req.request_id ] req_to_new_blocks.pop(preempted_req.request_id) num_scheduled_tokens.pop(preempted_req.request_id) scheduled_spec_decode_tokens.pop( preempted_req.request_id, None ) preempted_encoder_inputs = scheduled_encoder_inputs.pop( preempted_req.request_id, None ) if preempted_encoder_inputs: # Restore encoder compute budget if the preempted # request had encoder inputs scheduled in this step. num_embeds_to_restore = sum( preempted_req.get_num_encoder_embeds(i) for i in preempted_encoder_inputs ) encoder_compute_budget += num_embeds_to_restore req_index -= 1 else: preempted_req = self.running.pop() self._preempt_request(preempted_req, scheduled_timestamp) preempted_reqs.append(preempted_req) if preempted_req == request: # No more request to preempt. Cannot schedule this request. break if new_blocks is None: # Cannot schedule this request. break # Schedule the request. scheduled_running_reqs.append(request) req_to_new_blocks[request.request_id] = new_blocks num_scheduled_tokens[request.request_id] = num_new_tokens token_budget -= num_new_tokens req_index += 1 # Speculative decode related. if request.spec_token_ids: num_scheduled_spec_tokens = ( num_new_tokens + request.num_computed_tokens - request.num_tokens - request.num_output_placeholders ) if num_scheduled_spec_tokens > 0: # Trim spec_token_ids list to num_scheduled_spec_tokens. del request.spec_token_ids[num_scheduled_spec_tokens:] scheduled_spec_decode_tokens[request.request_id] = ( request.spec_token_ids ) # New spec tokens will be set in `update_draft_token_ids` before the # next step when applicable. request.spec_token_ids = [] # Encoder-related. 
if encoder_inputs_to_schedule: scheduled_encoder_inputs[request.request_id] = ( encoder_inputs_to_schedule ) # Allocate the encoder cache. for i in encoder_inputs_to_schedule: self.encoder_cache_manager.allocate(request, i) encoder_compute_budget = new_encoder_compute_budget if external_load_encoder_input: for i in external_load_encoder_input: self.encoder_cache_manager.allocate(request, i) if self.ec_connector is not None: self.ec_connector.update_state_after_alloc(request, i) # Record the LoRAs in scheduled_running_reqs scheduled_loras: set[int] = set() if self.lora_config: scheduled_loras = set( req.lora_request.lora_int_id for req in scheduled_running_reqs if req.lora_request and req.lora_request.lora_int_id > 0 ) assert len(scheduled_loras) <= self.lora_config.max_loras # Use a temporary RequestQueue to collect requests that need to be # skipped and put back at the head of the waiting queue later skipped_waiting_requests = create_request_queue(self.policy) # Next, schedule the WAITING requests. if not preempted_reqs: while self.waiting and token_budget > 0: if len(self.running) == self.max_num_running_reqs: break request = self.waiting.peek_request() # KVTransfer: skip request if still waiting for remote kvs. if request.status == RequestStatus.WAITING_FOR_REMOTE_KVS: is_ready = self._update_waiting_for_remote_kv(request) if is_ready: request.status = RequestStatus.WAITING else: logger.debug( "%s is still in WAITING_FOR_REMOTE_KVS state.", request.request_id, ) self.waiting.pop_request() skipped_waiting_requests.prepend_request(request) continue # Skip request if the structured output request is still waiting # for FSM compilation. 
if request.status == RequestStatus.WAITING_FOR_FSM: structured_output_req = request.structured_output_request if structured_output_req and structured_output_req.grammar: request.status = RequestStatus.WAITING else: self.waiting.pop_request() skipped_waiting_requests.prepend_request(request) continue # Check that adding the request still respects the max_loras # constraint. if ( self.lora_config and request.lora_request and ( len(scheduled_loras) == self.lora_config.max_loras and request.lora_request.lora_int_id not in scheduled_loras ) ): # Scheduling would exceed max_loras, skip. self.waiting.pop_request() skipped_waiting_requests.prepend_request(request) continue num_external_computed_tokens = 0 load_kv_async = False # Get already-cached tokens. if request.num_computed_tokens == 0: # Get locally-cached tokens. new_computed_blocks, num_new_local_computed_tokens = ( self.kv_cache_manager.get_computed_blocks(request) ) # Get externally-cached tokens if using a KVConnector. if self.connector is not None: ext_tokens, load_kv_async = ( self.connector.get_num_new_matched_tokens( request, num_new_local_computed_tokens ) ) if ext_tokens is None: # The request cannot be scheduled because # the KVConnector couldn't determine # the number of matched tokens. self.waiting.pop_request() skipped_waiting_requests.prepend_request(request) continue request.num_external_computed_tokens = ext_tokens num_external_computed_tokens = ext_tokens # Total computed tokens (local + external). num_computed_tokens = ( num_new_local_computed_tokens + num_external_computed_tokens ) else: # KVTransfer: WAITING reqs have num_computed_tokens > 0 # after async KV recvs are completed. 
new_computed_blocks = self.kv_cache_manager.empty_kv_cache_blocks num_new_local_computed_tokens = 0 num_computed_tokens = request.num_computed_tokens encoder_inputs_to_schedule = None external_load_encoder_input = [] new_encoder_compute_budget = encoder_compute_budget if load_kv_async: # KVTransfer: loading remote KV, do not allocate for new work. assert num_external_computed_tokens > 0 num_new_tokens = 0 else: # Number of tokens to be scheduled. # We use `request.num_tokens` instead of # `request.num_prompt_tokens` to consider the resumed # requests, which have output tokens. num_new_tokens = request.num_tokens - num_computed_tokens threshold = self.scheduler_config.long_prefill_token_threshold if 0 < threshold < num_new_tokens: num_new_tokens = threshold # chunked prefill has to be enabled explicitly to allow # pooling requests to be chunked if ( not self.scheduler_config.enable_chunked_prefill and num_new_tokens > token_budget ): # If chunked_prefill is disabled, # we can stop the scheduling here. break num_new_tokens = min(num_new_tokens, token_budget) assert num_new_tokens > 0 # Schedule encoder inputs. if request.has_encoder_inputs: ( encoder_inputs_to_schedule, num_new_tokens, new_encoder_compute_budget, external_load_encoder_input, ) = self._try_schedule_encoder_inputs( request, num_computed_tokens, num_new_tokens, encoder_compute_budget, shift_computed_tokens=1 if self.use_eagle else 0, ) if num_new_tokens == 0: # The request cannot be scheduled. break # Handles an edge case when P/D Disaggregation # is used with Spec Decoding where an # extra block gets allocated which # creates a mismatch between the number # of local and remote blocks. 
effective_lookahead_tokens = ( 0 if request.num_computed_tokens == 0 else self.num_lookahead_tokens ) num_encoder_tokens = ( self._num_encoder_max_input_tokens if self.is_encoder_decoder and request.has_encoder_inputs else 0 ) new_blocks = self.kv_cache_manager.allocate_slots( request, num_new_tokens, num_new_computed_tokens=num_new_local_computed_tokens, new_computed_blocks=new_computed_blocks, num_lookahead_tokens=effective_lookahead_tokens, num_external_computed_tokens=num_external_computed_tokens, delay_cache_blocks=load_kv_async, num_encoder_tokens=num_encoder_tokens, ) if new_blocks is None: # The request cannot be scheduled. break # KVTransfer: the connector uses this info to determine # if a load is needed. Note that # This information is used to determine if a load is # needed for this request. if self.connector is not None: self.connector.update_state_after_alloc( request, self.kv_cache_manager.get_blocks(request.request_id), num_external_computed_tokens, ) # Request was already popped from self.waiting # unless it was re-added above due to new_blocks being None. request = self.waiting.pop_request() if load_kv_async: # If loading async, allocate memory and put request # into the WAITING_FOR_REMOTE_KV state. 
skipped_waiting_requests.prepend_request(request) request.status = RequestStatus.WAITING_FOR_REMOTE_KVS continue self._update_connector_prefix_cache_stats(request) self.running.append(request) if self.log_stats: request.record_event( EngineCoreEventType.SCHEDULED, scheduled_timestamp ) if request.status == RequestStatus.WAITING: scheduled_new_reqs.append(request) elif request.status == RequestStatus.PREEMPTED: scheduled_resumed_reqs.append(request) else: raise RuntimeError(f"Invalid request status: {request.status}") if self.lora_config and request.lora_request: scheduled_loras.add(request.lora_request.lora_int_id) req_to_new_blocks[request.request_id] = ( self.kv_cache_manager.get_blocks(request.request_id) ) num_scheduled_tokens[request.request_id] = num_new_tokens token_budget -= num_new_tokens request.status = RequestStatus.RUNNING request.num_computed_tokens = num_computed_tokens # Count the number of prefix cached tokens. if request.num_cached_tokens < 0: request.num_cached_tokens = num_computed_tokens # Encoder-related. if encoder_inputs_to_schedule: scheduled_encoder_inputs[request.request_id] = ( encoder_inputs_to_schedule ) # Allocate the encoder cache. for i in encoder_inputs_to_schedule: self.encoder_cache_manager.allocate(request, i) encoder_compute_budget = new_encoder_compute_budget # Allocate for external load encoder cache if external_load_encoder_input: for i in external_load_encoder_input: self.encoder_cache_manager.allocate(request, i) if self.ec_connector is not None: self.ec_connector.update_state_after_alloc(request, i) # Put back any skipped requests at the head of the waiting queue if skipped_waiting_requests: self.waiting.prepend_requests(skipped_waiting_requests) # Check if the scheduling constraints are satisfied. 
total_num_scheduled_tokens = sum(num_scheduled_tokens.values()) assert total_num_scheduled_tokens <= self.max_num_scheduled_tokens assert token_budget >= 0 assert len(self.running) <= self.max_num_running_reqs # Since some requests in the RUNNING queue may not be scheduled in # this step, the total number of scheduled requests can be smaller than # len(self.running). assert len(scheduled_new_reqs) + len(scheduled_resumed_reqs) + len( scheduled_running_reqs ) <= len(self.running) # Get the longest common prefix among all requests in the running queue. # This can be potentially used for cascade attention. num_common_prefix_blocks = [0] * len(self.kv_cache_config.kv_cache_groups) with record_function_or_nullcontext("schedule: get_num_common_prefix_blocks"): if self.running: any_request = self.running[0] num_common_prefix_blocks = ( self.kv_cache_manager.get_num_common_prefix_blocks( any_request.request_id
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/llm_engine.py
vllm/v1/engine/llm_engine.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import time
from collections.abc import Callable, Mapping
from copy import copy
from typing import Any, cast

import torch.nn as nn
from typing_extensions import TypeVar

import vllm.envs as envs
from vllm.config import ParallelConfig, VllmConfig
from vllm.distributed import stateless_destroy_torch_distributed_process_group
from vllm.distributed.parallel_state import get_dp_group
from vllm.engine.arg_utils import EngineArgs
from vllm.inputs import PromptType
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
from vllm.outputs import PoolingRequestOutput, RequestOutput
from vllm.plugins.io_processors import get_io_processor
from vllm.pooling_params import PoolingParams
from vllm.sampling_params import SamplingParams
from vllm.tasks import SupportedTask
from vllm.tokenizers import TokenizerLike, cached_tokenizer_from_config
from vllm.tracing import init_tracer
from vllm.usage.usage_lib import UsageContext
from vllm.v1.engine import EngineCoreRequest
from vllm.v1.engine.core_client import EngineCoreClient
from vllm.v1.engine.input_processor import InputProcessor
from vllm.v1.engine.output_processor import OutputProcessor
from vllm.v1.engine.parallel_sampling import ParentRequest
from vllm.v1.executor import Executor
from vllm.v1.metrics.loggers import StatLoggerFactory, StatLoggerManager
from vllm.v1.metrics.reader import Metric, get_metrics_snapshot
from vllm.v1.metrics.stats import IterationStats
from vllm.v1.utils import record_function_or_nullcontext
from vllm.v1.worker.worker_base import WorkerBase

logger = init_logger(__name__)

_R = TypeVar("_R", default=Any)


class LLMEngine:
    """Legacy LLMEngine for backwards compatibility."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        executor_class: type[Executor],
        log_stats: bool,
        aggregate_engine_logging: bool = False,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: list[StatLoggerFactory] | None = None,
        mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
        use_cached_outputs: bool = False,
        multiprocess_mode: bool = False,
    ) -> None:
        # Keep handles to the sub-configs that are read repeatedly below.
        self.vllm_config = vllm_config
        self.observability_config = vllm_config.observability_config
        self.model_config = vllm_config.model_config
        self.cache_config = vllm_config.cache_config
        self.log_stats = log_stats

        parallel_config = vllm_config.parallel_config
        executor_backend = parallel_config.distributed_executor_backend
        # True when DP > 1 and the ranks were launched externally (e.g. by a
        # launcher such as torchrun); in that mode the DP group is reused from
        # the distributed state instead of being created here (see below).
        self.external_launcher_dp = (
            parallel_config.data_parallel_size > 1
            and executor_backend == "external_launcher"
        )

        # important: init dp group before init the engine_core
        # In the decoupled engine case this is handled in EngineCoreProc.
        if (
            not multiprocess_mode
            and parallel_config.data_parallel_size > 1
            and not self.external_launcher_dp
        ):
            self.dp_group = parallel_config.stateless_init_dp_group()
        else:
            self.dp_group = None
        # Set by has_unfinished_requests_dp() when other DP ranks still have
        # work; consumed by step() to run a dummy batch for collective ops.
        self.should_execute_dummy_batch = False

        if self.model_config.skip_tokenizer_init:
            tokenizer = None
        else:
            tokenizer = cached_tokenizer_from_config(self.model_config)

        # InputProcessor (converts raw prompts --> EngineCoreRequest).
        self.input_processor = InputProcessor(self.vllm_config, tokenizer)
        self.io_processor = get_io_processor(
            self.vllm_config,
            self.model_config.io_processor_plugin,
        )

        # OutputProcessor (convert EngineCoreOutputs --> RequestOutput).
        self.output_processor = OutputProcessor(
            self.tokenizer,
            log_stats=self.log_stats,
            stream_interval=self.vllm_config.scheduler_config.stream_interval,
        )
        endpoint = self.observability_config.otlp_traces_endpoint
        if endpoint is not None:
            tracer = init_tracer("vllm.llm_engine", endpoint)
            self.output_processor.tracer = tracer

        # EngineCore (gets EngineCoreRequests and gives EngineCoreOutputs)
        self.engine_core = EngineCoreClient.make_client(
            multiprocess_mode=multiprocess_mode,
            asyncio_mode=False,
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_stats=self.log_stats,
        )

        self.logger_manager: StatLoggerManager | None = None
        if self.log_stats:
            self.logger_manager = StatLoggerManager(
                vllm_config=vllm_config,
                custom_stat_loggers=stat_loggers,
                enable_default_loggers=log_stats,
                aggregate_engine_logging=aggregate_engine_logging,
            )
            self.logger_manager.log_engine_initialized()

        if not multiprocess_mode:
            # for v0 compatibility
            self.model_executor = self.engine_core.engine_core.model_executor  # type: ignore

        if self.external_launcher_dp:
            # If we use DP in external launcher mode, we reuse the
            # existing DP group used for data communication.
            self.dp_group = get_dp_group().cpu_group

        # Don't keep the dummy data in memory
        self.reset_mm_cache()

    @classmethod
    def from_vllm_config(
        cls,
        vllm_config: VllmConfig,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: list[StatLoggerFactory] | None = None,
        disable_log_stats: bool = False,
    ) -> "LLMEngine":
        """Alternate constructor from an already-built VllmConfig."""
        return cls(
            vllm_config=vllm_config,
            executor_class=Executor.get_class(vllm_config),
            log_stats=(not disable_log_stats),
            usage_context=usage_context,
            stat_loggers=stat_loggers,
            multiprocess_mode=envs.VLLM_ENABLE_V1_MULTIPROCESSING,
        )

    @classmethod
    def from_engine_args(
        cls,
        engine_args: EngineArgs,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: list[StatLoggerFactory] | None = None,
        enable_multiprocessing: bool = False,
    ) -> "LLMEngine":
        """Creates an LLM engine from the engine arguments."""
        # Create the engine configs.
        vllm_config = engine_args.create_engine_config(usage_context)
        executor_class = Executor.get_class(vllm_config)

        # The env var, when set, overrides the caller's multiprocessing choice.
        if envs.VLLM_ENABLE_V1_MULTIPROCESSING:
            logger.debug("Enabling multiprocessing for LLMEngine.")
            enable_multiprocessing = True

        # Create the LLMEngine.
        return cls(
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_stats=not engine_args.disable_log_stats,
            usage_context=usage_context,
            stat_loggers=stat_loggers,
            multiprocess_mode=enable_multiprocessing,
        )

    def get_num_unfinished_requests(self) -> int:
        """Number of requests still tracked by the OutputProcessor."""
        return self.output_processor.get_num_unfinished_requests()

    def has_unfinished_requests(self) -> bool:
        """Whether this engine (or, with DP, any DP rank) has pending work."""
        has_unfinished = self.output_processor.has_unfinished_requests()
        if self.dp_group is None:
            return has_unfinished or self.engine_core.dp_engines_running()
        return self.has_unfinished_requests_dp(has_unfinished)

    def has_unfinished_requests_dp(self, has_unfinished: bool) -> bool:
        """Aggregate `has_unfinished` across the DP group.

        If any other rank has work while this one does not, schedule a dummy
        batch so this rank still participates in collective operations.
        """
        aggregated_has_unfinished = ParallelConfig.has_unfinished_dp(
            self.dp_group, has_unfinished
        )
        if not has_unfinished and aggregated_has_unfinished:
            self.should_execute_dummy_batch = True
        return aggregated_has_unfinished

    @classmethod
    def validate_outputs(cls, outputs, output_type):
        # No-op passthrough; returns `outputs` unchanged.
        return outputs

    def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        """Tasks (e.g. generate/pooling) the engine core supports."""
        return self.engine_core.get_supported_tasks()

    def abort_request(self, request_ids: list[str], internal: bool = False) -> None:
        """Remove request_ids from EngineCore and Detokenizer."""
        request_ids = self.output_processor.abort_requests(request_ids, internal)
        self.engine_core.abort_requests(request_ids)

    def add_request(
        self,
        request_id: str,
        prompt: EngineCoreRequest | PromptType,
        params: SamplingParams | PoolingParams,
        arrival_time: float | None = None,
        lora_request: LoRARequest | None = None,
        tokenization_kwargs: dict[str, Any] | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        prompt_text: str | None = None,
    ) -> None:
        """Process a prompt into a request and hand it to the engine core.

        For `SamplingParams.n > 1`, fans the request out into `n` child
        requests under a shared ParentRequest.
        """
        # Validate the request_id type.
        if not isinstance(request_id, str):
            raise TypeError(f"request_id must be a string, got {type(request_id)}")

        # Process raw inputs into the request.
        if isinstance(prompt, EngineCoreRequest):
            # Pre-processed request passed through directly; its embedded id
            # takes precedence over the request_id argument.
            request = prompt
            if request_id != request.request_id:
                logger.warning_once(
                    "AsyncLLM.add_request() was passed a request_id parameter that "
                    "does not match the EngineCoreRequest.request_id attribute. The "
                    "latter will be used, and the former will be ignored."
                )
        else:
            assert prompt_text is None
            request = self.input_processor.process_inputs(
                request_id,
                prompt,
                params,
                arrival_time,
                lora_request,
                tokenization_kwargs,
                trace_headers,
                priority,
            )
            if isinstance(prompt, str):
                prompt_text = prompt
            elif isinstance(prompt, Mapping):
                prompt_text = cast(str | None, prompt.get("prompt"))

        self.input_processor.assign_request_id(request)

        # Use cloned params that may have been updated in process_inputs()
        params = request.params

        n = params.n if isinstance(params, SamplingParams) else 1

        if n == 1:
            # Make a new RequestState and queue.
            self.output_processor.add_request(request, prompt_text, None, 0)
            # Add the request to EngineCore.
            self.engine_core.add_request(request)
            return

        # Fan out child requests (for n>1).
        parent_req = ParentRequest(request)
        for idx in range(n):
            request_id, child_params = parent_req.get_child_info(idx)
            # Reuse the original request object for the last child; copy for
            # the others so each child carries its own id/params.
            child_request = request if idx == n - 1 else copy(request)
            child_request.request_id = request_id
            child_request.sampling_params = child_params

            # Make a new RequestState and queue.
            self.output_processor.add_request(
                child_request, prompt_text, parent_req, idx
            )
            # Add the request to EngineCore.
            self.engine_core.add_request(child_request)

    def step(self) -> list[RequestOutput | PoolingRequestOutput]:
        """Run one engine iteration and return any finished/streamed outputs."""
        if self.should_execute_dummy_batch:
            # This DP rank is idle but others are not; run a dummy batch so
            # collectives stay in sync, and return no outputs.
            self.should_execute_dummy_batch = False
            self.engine_core.execute_dummy_batch()
            return []

        # 1) Get EngineCoreOutput from the EngineCore.
        with record_function_or_nullcontext("llm_engine step: get_output"):
            outputs = self.engine_core.get_output()

        # 2) Process EngineCoreOutputs.
        with record_function_or_nullcontext("llm_engine step: process_outputs"):
            iteration_stats = IterationStats() if self.log_stats else None
            processed_outputs = self.output_processor.process_outputs(
                outputs.outputs,
                engine_core_timestamp=outputs.timestamp,
                iteration_stats=iteration_stats,
            )
            self.output_processor.update_scheduler_stats(outputs.scheduler_stats)

        # 3) Abort any reqs that finished due to stop strings.
        with record_function_or_nullcontext("llm_engine step: abort_requests"):
            self.engine_core.abort_requests(processed_outputs.reqs_to_abort)

        # 4) Record stats
        with record_function_or_nullcontext("llm_engine step: record_stats"):
            if self.logger_manager is not None and outputs.scheduler_stats is not None:
                self.logger_manager.record(
                    scheduler_stats=outputs.scheduler_stats,
                    iteration_stats=iteration_stats,
                    mm_cache_stats=self.input_processor.stat_mm_cache(),
                )
                self.do_log_stats_with_interval()

        return processed_outputs.request_outputs

    def start_profile(self):
        self.engine_core.profile(True)

    def stop_profile(self):
        self.engine_core.profile(False)

    def reset_mm_cache(self):
        """Clear the multimodal caches in both the frontend and engine core."""
        self.input_processor.clear_mm_cache()
        self.engine_core.reset_mm_cache()

    def reset_prefix_cache(
        self, reset_running_requests: bool = False, reset_connector: bool = False
    ) -> bool:
        return self.engine_core.reset_prefix_cache(
            reset_running_requests, reset_connector
        )

    def sleep(self, level: int = 1):
        self.engine_core.sleep(level)
        if self.logger_manager is not None:
            self.logger_manager.record_sleep_state(1, level)

    def wake_up(self, tags: list[str] | None = None):
        self.engine_core.wake_up(tags)
        if self.logger_manager is not None:
            self.logger_manager.record_sleep_state(0, 0)

    def is_sleeping(self) -> bool:
        return self.engine_core.is_sleeping()

    def get_metrics(self) -> list[Metric]:
        """Snapshot of engine metrics; requires stat logging to be enabled."""
        assert self.log_stats, "Stat logging disabled"
        return get_metrics_snapshot()

    @property
    def tokenizer(self) -> TokenizerLike | None:
        return self.input_processor.tokenizer

    def get_tokenizer(self) -> TokenizerLike:
        """Return the tokenizer, raising if tokenizer init was skipped."""
        if self.tokenizer is None:
            raise ValueError(
                "Unable to get tokenizer because `skip_tokenizer_init=True`"
            )

        return self.tokenizer

    def do_log_stats(self) -> None:
        """Log stats if logging is enabled."""
        if self.logger_manager:
            self.logger_manager.log()

    def do_log_stats_with_interval(self) -> None:
        """Log stats when the time interval has passed."""
        now = time.time()
        # Lazily initialized on first call.
        if not hasattr(self, "_last_log_time"):
            self._last_log_time = now
        if now - self._last_log_time >= envs.VLLM_LOG_STATS_INTERVAL:
            self.do_log_stats()
            self._last_log_time = now

    def add_lora(self, lora_request: LoRARequest) -> bool:
        """Load a new LoRA adapter into the engine for future requests."""
        return self.engine_core.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        """Remove an already loaded LoRA adapter."""
        return self.engine_core.remove_lora(lora_id)

    def list_loras(self) -> set[int]:
        """List all registered adapters."""
        return self.engine_core.list_loras()

    def pin_lora(self, lora_id: int) -> bool:
        """Prevent an adapter from being evicted."""
        return self.engine_core.pin_lora(lora_id)

    def collective_rpc(
        self,
        method: str | Callable[[WorkerBase], _R],
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict[str, Any] | None = None,
    ) -> list[_R]:
        """Invoke `method` on every worker and collect the results."""
        return self.engine_core.collective_rpc(method, timeout, args, kwargs)

    def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
        """Run `func` against the model on each worker via collective RPC."""
        return self.collective_rpc("apply_model", args=(func,))

    def __del__(self):
        # Tear down the stateless DP group we created ourselves; the external
        # launcher's group is owned elsewhere and must not be destroyed here.
        dp_group = getattr(self, "dp_group", None)
        if dp_group is not None and not self.external_launcher_dp:
            stateless_destroy_torch_distributed_process_group(dp_group)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/async_llm.py
vllm/v1/engine/async_llm.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import asyncio
import os
import socket
import time
import warnings
from collections.abc import AsyncGenerator, Iterable, Mapping
from copy import copy
from typing import Any, cast

import numpy as np
import torch

import vllm.envs as envs
from vllm.config import VllmConfig
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.protocol import EngineClient
from vllm.entrypoints.utils import _validate_truncation_size
from vllm.inputs import PromptType
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
from vllm.outputs import PoolingRequestOutput, RequestOutput
from vllm.plugins.io_processors import get_io_processor
from vllm.pooling_params import PoolingParams
from vllm.sampling_params import SamplingParams
from vllm.tasks import SupportedTask
from vllm.tokenizers import TokenizerLike, cached_tokenizer_from_config
from vllm.tracing import init_tracer
from vllm.transformers_utils.config import maybe_register_config_serialize_by_value
from vllm.usage.usage_lib import UsageContext
from vllm.utils.async_utils import cancel_task_threadsafe
from vllm.utils.collection_utils import as_list
from vllm.utils.math_utils import cdiv
from vllm.v1.engine import EngineCoreRequest
from vllm.v1.engine.core_client import EngineCoreClient
from vllm.v1.engine.exceptions import EngineDeadError, EngineGenerateError
from vllm.v1.engine.input_processor import InputProcessor
from vllm.v1.engine.output_processor import OutputProcessor, RequestOutputCollector
from vllm.v1.engine.parallel_sampling import ParentRequest
from vllm.v1.executor import Executor
from vllm.v1.metrics.loggers import (
    StatLoggerFactory,
    StatLoggerManager,
    load_stat_logger_plugin_factories,
)
from vllm.v1.metrics.prometheus import shutdown_prometheus
from vllm.v1.metrics.stats import IterationStats

logger = init_logger(__name__)


class AsyncLLM(EngineClient):
    def __init__(
        self,
        vllm_config: VllmConfig,
        executor_class: type[Executor],
        log_stats: bool,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
        use_cached_outputs: bool = False,
        log_requests: bool = True,
        start_engine_loop: bool = True,
        stat_loggers: list[StatLoggerFactory] | None = None,
        aggregate_engine_logging: bool = False,
        client_addresses: dict[str, str] | None = None,
        client_count: int = 1,
        client_index: int = 0,
    ) -> None:
        """
        Create an AsyncLLM.

        Args:
            vllm_config: global configuration.
            executor_class: an Executor impl, e.g. MultiprocExecutor.
            log_stats: Whether to log stats.
            usage_context: Usage context of the LLM.
            mm_registry: Multi-modal registry.
            use_cached_outputs: Whether to use cached outputs.
            log_requests: Whether to log requests.
            start_engine_loop: Whether to start the engine loop.
            stat_loggers: customized stat loggers for the engine.
                If not provided, default stat loggers will be used.
                PLEASE BE AWARE THAT STAT LOGGER IS NOT STABLE IN V1,
                AND ITS BASE CLASS INTERFACE MIGHT CHANGE.

        Returns:
            None
        """
        # Ensure we can serialize custom transformer configs
        maybe_register_config_serialize_by_value()

        self.model_config = vllm_config.model_config
        self.vllm_config = vllm_config
        self.observability_config = vllm_config.observability_config
        self.log_requests = log_requests

        # Custom/plugin stat loggers force stats collection on even when the
        # caller disabled it; default loggers stay controlled by log_stats.
        custom_stat_loggers = list(stat_loggers or [])
        custom_stat_loggers.extend(load_stat_logger_plugin_factories())
        has_custom_loggers = bool(custom_stat_loggers)
        self.log_stats = log_stats or has_custom_loggers
        if not log_stats and has_custom_loggers:
            logger.info(
                "AsyncLLM created with log_stats=False, "
                "but custom stat loggers were found; "
                "enabling logging without default stat loggers."
            )

        if self.model_config.skip_tokenizer_init:
            tokenizer = None
        else:
            tokenizer = cached_tokenizer_from_config(self.model_config)

        # InputProcessor (converts raw prompts --> EngineCoreRequest).
        self.input_processor = InputProcessor(self.vllm_config, tokenizer)
        self.io_processor = get_io_processor(
            self.vllm_config,
            self.model_config.io_processor_plugin,
        )

        # OutputProcessor (converts EngineCoreOutputs --> RequestOutput).
        self.output_processor = OutputProcessor(
            self.tokenizer,
            log_stats=self.log_stats,
            stream_interval=self.vllm_config.scheduler_config.stream_interval,
        )
        endpoint = self.observability_config.otlp_traces_endpoint
        if endpoint is not None:
            tracer = init_tracer("vllm.llm_engine", endpoint)
            self.output_processor.tracer = tracer

        # EngineCore (starts the engine in background process).
        self.engine_core = EngineCoreClient.make_async_mp_client(
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_stats=self.log_stats,
            client_addresses=client_addresses,
            client_count=client_count,
            client_index=client_index,
        )

        # Loggers.
        self.logger_manager: StatLoggerManager | None = None
        if self.log_stats:
            self.logger_manager = StatLoggerManager(
                vllm_config=vllm_config,
                engine_idxs=self.engine_core.engine_ranks_managed,
                custom_stat_loggers=custom_stat_loggers,
                enable_default_loggers=log_stats,
                client_count=client_count,
                aggregate_engine_logging=aggregate_engine_logging,
            )
            self.logger_manager.log_engine_initialized()

        # Pause / resume state for async RL workflows.
        self._pause_cond = asyncio.Condition()
        self._paused = False

        self.output_handler: asyncio.Task | None = None
        try:
            # Start output handler eagerly if we are in the asyncio eventloop.
            asyncio.get_running_loop()
            self._run_output_handler()
        except RuntimeError:
            # No running loop yet; the handler starts on first add_request().
            pass

        if (
            vllm_config.profiler_config.profiler == "torch"
            and not vllm_config.profiler_config.ignore_frontend
        ):
            # CPU-only profiling of this frontend process; traces are written
            # via the tensorboard trace handler.
            profiler_dir = vllm_config.profiler_config.torch_profiler_dir
            logger.info(
                "Torch profiler enabled. AsyncLLM CPU traces will be collected under %s",  # noqa: E501
                profiler_dir,
            )
            worker_name = f"{socket.gethostname()}_{os.getpid()}.async_llm"
            self.profiler = torch.profiler.profile(
                activities=[
                    torch.profiler.ProfilerActivity.CPU,
                ],
                with_stack=vllm_config.profiler_config.torch_profiler_with_stack,
                on_trace_ready=torch.profiler.tensorboard_trace_handler(
                    profiler_dir,
                    worker_name=worker_name,
                    use_gzip=vllm_config.profiler_config.torch_profiler_use_gzip,
                ),
            )
        else:
            self.profiler = None

    @classmethod
    def from_vllm_config(
        cls,
        vllm_config: VllmConfig,
        start_engine_loop: bool = True,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: list[StatLoggerFactory] | None = None,
        enable_log_requests: bool = False,
        aggregate_engine_logging: bool = False,
        disable_log_stats: bool = False,
        client_addresses: dict[str, str] | None = None,
        client_count: int = 1,
        client_index: int = 0,
    ) -> "AsyncLLM":
        """Alternate constructor from an already-built VllmConfig."""
        # Create the LLMEngine.
        return cls(
            vllm_config=vllm_config,
            executor_class=Executor.get_class(vllm_config),
            start_engine_loop=start_engine_loop,
            stat_loggers=stat_loggers,
            log_requests=enable_log_requests,
            log_stats=not disable_log_stats,
            aggregate_engine_logging=aggregate_engine_logging,
            usage_context=usage_context,
            client_addresses=client_addresses,
            client_count=client_count,
            client_index=client_index,
        )

    @classmethod
    def from_engine_args(
        cls,
        engine_args: AsyncEngineArgs,
        start_engine_loop: bool = True,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: list[StatLoggerFactory] | None = None,
    ) -> "AsyncLLM":
        """Create an AsyncLLM from the EngineArgs."""

        # Create the engine configs.
        vllm_config = engine_args.create_engine_config(usage_context)
        executor_class = Executor.get_class(vllm_config)

        # Create the AsyncLLM.
        return cls(
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_requests=engine_args.enable_log_requests,
            log_stats=not engine_args.disable_log_stats,
            start_engine_loop=start_engine_loop,
            usage_context=usage_context,
            stat_loggers=stat_loggers,
        )

    def __del__(self):
        self.shutdown()

    def shutdown(self):
        """Shutdown, cleaning up the background proc and IPC."""
        shutdown_prometheus()

        # getattr guards: shutdown() may run from __del__ on a partially
        # constructed instance.
        if engine_core := getattr(self, "engine_core", None):
            engine_core.shutdown()

        handler = getattr(self, "output_handler", None)
        if handler is not None:
            cancel_task_threadsafe(handler)

    async def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        """Tasks (e.g. generate/pooling) the engine core supports."""
        return await self.engine_core.get_supported_tasks_async()

    async def add_request(
        self,
        request_id: str,
        prompt: EngineCoreRequest | PromptType,
        params: SamplingParams | PoolingParams,
        arrival_time: float | None = None,
        lora_request: LoRARequest | None = None,
        tokenization_kwargs: dict[str, Any] | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        data_parallel_rank: int | None = None,
        prompt_text: str | None = None,
    ) -> RequestOutputCollector:
        """Add new request to the AsyncLLM."""

        if self.errored:
            raise EngineDeadError()

        is_pooling = isinstance(params, PoolingParams)

        if (
            self.vllm_config.cache_config.kv_sharing_fast_prefill
            and not is_pooling
            and params.prompt_logprobs
        ):
            raise ValueError(
                "--kv-sharing-fast-prefill produces incorrect logprobs for "
                "prompt tokens, please disable it when the requests need "
                "prompt logprobs"
            )

        if tokenization_kwargs is None:
            tokenization_kwargs = {}
        _validate_truncation_size(
            self.model_config.max_model_len,
            params.truncate_prompt_tokens,
            tokenization_kwargs,
        )

        # Convert Input --> Request.
        if isinstance(prompt, EngineCoreRequest):
            # Pre-processed request passed through directly; its embedded id
            # takes precedence over the request_id argument.
            request = prompt
            if request_id != request.request_id:
                logger.warning_once(
                    "AsyncLLM.add_request() was passed a request_id parameter that "
                    "does not match the EngineCoreRequest.request_id attribute. The "
                    "latter will be used, and the former will be ignored."
                )
        else:
            if prompt_text is not None:
                raise ValueError(
                    "should only provide prompt_text with EngineCoreRequest"
                )
            request = self.input_processor.process_inputs(
                request_id,
                prompt,
                params,
                arrival_time,
                lora_request,
                tokenization_kwargs,
                trace_headers,
                priority,
                data_parallel_rank,
            )
            if isinstance(prompt, str):
                prompt_text = prompt
            elif isinstance(prompt, Mapping):
                prompt_text = cast(str | None, prompt.get("prompt"))

        self.input_processor.assign_request_id(request)

        # We start the output_handler on the first call to add_request() so
        # we can call __init__ before the event loop, which enables us
        # to handle startup failure gracefully in the OpenAI server.
        self._run_output_handler()

        # Respect pause state before accepting new requests.
        async with self._pause_cond:
            await self._pause_cond.wait_for(lambda: not self._paused)

        # Create a new output collector for the request.
        queue = RequestOutputCollector(params.output_kind, request.request_id)

        # Use cloned params that may have been updated in process_inputs()
        params = request.params

        if is_pooling or params.n == 1:
            await self._add_request(request, prompt_text, None, 0, queue)
            return queue

        parent_params = params
        assert isinstance(parent_params, SamplingParams)

        # Fan out child requests (for n>1).
        parent_request = ParentRequest(request)
        for idx in range(parent_params.n):
            request_id, child_params = parent_request.get_child_info(idx)
            # Reuse the original request object for the last child; copy for
            # the others so each child carries its own id/params.
            child_request = request if idx == parent_params.n - 1 else copy(request)
            child_request.request_id = request_id
            child_request.sampling_params = child_params
            await self._add_request(
                child_request, prompt_text, parent_request, idx, queue
            )
        return queue

    async def _add_request(
        self,
        request: EngineCoreRequest,
        prompt: str | None,
        parent_req: ParentRequest | None,
        index: int,
        queue: RequestOutputCollector,
    ):
        """Register one request locally, then ship it to the engine core."""
        # Add the request to OutputProcessor (this process).
        self.output_processor.add_request(request, prompt, parent_req, index, queue)

        # Add the EngineCoreRequest to EngineCore (separate process).
        await self.engine_core.add_request_async(request)

        if self.log_requests:
            logger.info("Added request %s.", request.request_id)

    # TODO: we should support multiple prompts in one call, as you
    # can do with LLM.generate. So that for multi-prompt completion
    # requests we don't need to send multiple messages to core proc,
    # and so we don't need multiple streams which then get
    # re-multiplexed in the API server anyhow.
    async def generate(
        self,
        prompt: EngineCoreRequest | PromptType,
        sampling_params: SamplingParams,
        request_id: str,
        *,
        prompt_text: str | None = None,
        lora_request: LoRARequest | None = None,
        tokenization_kwargs: dict[str, Any] | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        data_parallel_rank: int | None = None,
    ) -> AsyncGenerator[RequestOutput, None]:
        """
        Main function called by the API server to kick off a request
            * 1) Making an AsyncStream corresponding to the Request.
            * 2) Processing the Input.
            * 3) Adding the Request to the Detokenizer.
            * 4) Adding the Request to the EngineCore (separate process).

        A separate output_handler loop runs in a background AsyncIO task,
        pulling outputs from EngineCore and putting them into the
        per-request AsyncStream.

        The caller of generate() iterates the returned AsyncGenerator,
        returning the RequestOutput back to the caller.
        """
        q: RequestOutputCollector | None = None
        try:
            q = await self.add_request(
                request_id,
                prompt,
                sampling_params,
                lora_request=lora_request,
                tokenization_kwargs=tokenization_kwargs,
                trace_headers=trace_headers,
                priority=priority,
                data_parallel_rank=data_parallel_rank,
                prompt_text=prompt_text,
            )

            # The output_handler task pushes items into the queue.
            # This task pulls from the queue and yields to caller.
            finished = False
            while not finished:
                # Note: drain queue without await if possible (avoids
                # task switching under load which helps performance).
                out = q.get_nowait() or await q.get()

                # Note: both OutputProcessor and EngineCore handle their
                # own request cleanup based on finished.
                finished = out.finished
                assert isinstance(out, RequestOutput)
                yield out

        # If the request is disconnected by the client, generate()
        # is cancelled or the generator is garbage collected. So,
        # we abort the request if we end up here.
        except (asyncio.CancelledError, GeneratorExit):
            if q is not None:
                await self.abort(q.request_id, internal=True)
                if self.log_requests:
                    logger.info("Request %s aborted.", request_id)
            raise

        # Engine is dead. Do not abort since we shut down.
        except EngineDeadError:
            if self.log_requests:
                logger.info("Request %s failed (engine dead).", request_id)
            raise

        # Request validation error.
        except ValueError:
            if self.log_requests:
                logger.info("Request %s failed (bad request).", request_id)
            raise

        # Unexpected error in the generate() task (possibly recoverable).
        except Exception as e:
            if q is not None:
                await self.abort(q.request_id, internal=True)
            if self.log_requests:
                logger.info("Request %s failed.", request_id)
            raise EngineGenerateError() from e

    def _run_output_handler(self):
        """Background loop: pulls from EngineCore and pushes to AsyncStreams."""

        if self.output_handler is not None:
            return

        # Ensure that the task doesn't have a circular ref back to the AsyncLLM
        # object, or else it won't be garbage collected and cleaned up properly.
        engine_core = self.engine_core
        output_processor = self.output_processor
        log_stats = self.log_stats
        logger_manager = self.logger_manager
        input_processor = self.input_processor

        async def output_handler():
            try:
                while True:
                    # 1) Pull EngineCoreOutputs from the EngineCore.
                    outputs = await engine_core.get_output_async()
                    num_outputs = len(outputs.outputs)

                    iteration_stats = (
                        IterationStats() if (log_stats and num_outputs) else None
                    )

                    # Split outputs into chunks of at most
                    # VLLM_V1_OUTPUT_PROC_CHUNK_SIZE, so that we don't block the
                    # event loop for too long.
                    if num_outputs <= envs.VLLM_V1_OUTPUT_PROC_CHUNK_SIZE:
                        slices = (outputs.outputs,)
                    else:
                        slices = np.array_split(
                            outputs.outputs,
                            cdiv(num_outputs, envs.VLLM_V1_OUTPUT_PROC_CHUNK_SIZE),
                        )

                    for i, outputs_slice in enumerate(slices):
                        # 2) Process EngineCoreOutputs.
                        processed_outputs = output_processor.process_outputs(
                            outputs_slice, outputs.timestamp, iteration_stats
                        )
                        # NOTE: RequestOutputs are pushed to their queues.
                        assert not processed_outputs.request_outputs

                        # Allow other asyncio tasks to run between chunks
                        if i + 1 < len(slices):
                            await asyncio.sleep(0)

                        # 3) Abort any reqs that finished due to stop strings.
                        await engine_core.abort_requests_async(
                            processed_outputs.reqs_to_abort
                        )

                    output_processor.update_scheduler_stats(outputs.scheduler_stats)

                    # 4) Logging.
                    # TODO(rob): make into a coroutine and launch it in
                    # background thread once Prometheus overhead is non-trivial.
                    if logger_manager:
                        logger_manager.record(
                            engine_idx=outputs.engine_index,
                            scheduler_stats=outputs.scheduler_stats,
                            iteration_stats=iteration_stats,
                            mm_cache_stats=input_processor.stat_mm_cache(),
                        )
            except Exception as e:
                logger.exception("AsyncLLM output_handler failed.")
                output_processor.propagate_error(e)

        self.output_handler = asyncio.create_task(output_handler())

    async def abort(
        self, request_id: str | Iterable[str], internal: bool = False
    ) -> None:
        """Abort RequestId in OutputProcessor and EngineCore."""
        request_ids = (
            (request_id,) if isinstance(request_id, str) else as_list(request_id)
        )
        all_request_ids = self.output_processor.abort_requests(request_ids, internal)
        await self.engine_core.abort_requests_async(all_request_ids)

        if self.log_requests:
            logger.info("Aborted request(s) %s.", ",".join(request_ids))

    async def pause_generation(
        self,
        *,
        wait_for_inflight_requests: bool = False,
        clear_cache: bool = True,
    ) -> None:
        """
        Pause generation to allow model weight updates.
        New generation/encoding requests are blocked until resume.

        Args:
            wait_for_inflight_requests: When ``True`` waits for in-flight
                requests to finish before pausing. When ``False`` (default),
                immediately aborts any in-flight requests.
            clear_cache: Whether to clear KV cache and prefix cache after
                draining. Set to ``False`` to preserve cache for faster
                resume. Default is ``True`` (clear caches).
        """
        async with self._pause_cond:
            if self._paused:
                return
            self._paused = True

        if not wait_for_inflight_requests:
            request_ids = list(self.output_processor.request_states.keys())
            if request_ids:
                await self.abort(request_ids, internal=True)

        # Wait for running requests to drain before clearing cache.
        if self.output_processor.has_unfinished_requests():
            await self.output_processor.wait_for_requests_to_drain()

        # Clear cache
        if clear_cache:
            await self.reset_prefix_cache()
            await self.reset_mm_cache()

    async def resume_generation(self) -> None:
        """Resume generation after :meth:`pause_generation`."""
        async with self._pause_cond:
            self._paused = False
            self._pause_cond.notify_all()  # Wake up all waiting requests

    async def is_paused(self) -> bool:
        """Return whether the engine is currently paused."""
        async with self._pause_cond:
            return self._paused

    async def encode(
        self,
        prompt: PromptType,
        pooling_params: PoolingParams,
        request_id: str,
        lora_request: LoRARequest | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        truncate_prompt_tokens: int | None = None,
        tokenization_kwargs: dict[str, Any] | None = None,
    ) -> AsyncGenerator[PoolingRequestOutput, None]:
        """
        Main function called by the API server to kick off a request
            * 1) Making an AsyncStream corresponding to the Request.
            * 2) Processing the Input.
            * 3) Adding the Request to the EngineCore (separate process).

        A separate output_handler loop runs in a background AsyncIO task,
        pulling outputs from EngineCore and putting them into the
        per-request AsyncStream.

        The caller of generate() iterates the returned AsyncGenerator,
        returning the RequestOutput back to the caller.

        NOTE: truncate_prompt_tokens is deprecated in v0.14.
        TODO: Remove truncate_prompt_tokens in v0.15.
        """
        q: RequestOutputCollector | None = None
        try:
            if truncate_prompt_tokens is not None:
                warnings.warn(
                    "The `truncate_prompt_tokens` parameter in `AsyncLLM.encode()` "
                    "is deprecated and will be removed in v0.15. "
                    "Please use `pooling_params.truncate_prompt_tokens` instead.",
                    DeprecationWarning,
                    stacklevel=2,
                )
            q = await self.add_request(
                request_id,
                prompt,
                pooling_params,
                lora_request=lora_request,
                tokenization_kwargs=tokenization_kwargs,
                trace_headers=trace_headers,
                priority=priority,
            )

            # The output_handler task pushes items into the queue.
            # This task pulls from the queue and yields to caller.
            finished = False
            while not finished:
                # Note: drain queue without await if possible (avoids
                # task switching under load which helps performance).
                out = q.get_nowait() or await q.get()
                assert isinstance(out, PoolingRequestOutput)
                # Note: both OutputProcessor and EngineCore handle their
                # own request cleanup based on finished.
                finished = out.finished
                yield out

        # If the request is disconnected by the client, generate()
        # is cancelled. So, we abort the request if we end up here.
        except asyncio.CancelledError:
            if q is not None:
                await self.abort(q.request_id, internal=True)
            if self.log_requests:
                logger.info("Request %s aborted.", request_id)
            raise

        # Engine is dead. Do not abort since we shut down.
        except EngineDeadError:
            if self.log_requests:
                logger.info("Request %s failed (engine dead).", request_id)
            raise

        # Request validation error.
        except ValueError:
            if self.log_requests:
                logger.info("Request %s failed (bad request).", request_id)
            raise

        # Unexpected error in the generate() task (possibly recoverable).
        except Exception as e:
            if q is not None:
                await self.abort(q.request_id, internal=True)
            if self.log_requests:
                logger.info("Request %s failed.", request_id)
            raise EngineGenerateError() from e

    @property
    def tokenizer(self) -> TokenizerLike | None:
        return self.input_processor.tokenizer

    async def get_tokenizer(self) -> TokenizerLike:
        """Return the tokenizer, raising if tokenizer init was skipped."""
        if self.tokenizer is None:
            raise ValueError(
                "Unable to get tokenizer because `skip_tokenizer_init=True`"
            )

        return self.tokenizer

    async def is_tracing_enabled(self) -> bool:
        return self.observability_config.otlp_traces_endpoint is not None  # type: ignore

    async def do_log_stats(self) -> None:
        if self.logger_manager:
            self.logger_manager.log()

    async def check_health(self) -> None:
        logger.debug("Called check_health.")
        if self.errored:
            raise self.dead_error

    async def start_profile(self) -> None:
        # Start engine-core profiling and (if configured) the local torch
        # profiler concurrently; profiler start/stop runs in a thread.
        coros = [self.engine_core.profile_async(True)]
        if self.profiler is not None:
            coros.append(asyncio.to_thread(self.profiler.start))
        await asyncio.gather(*coros)

    async def stop_profile(self) -> None:
        coros = [self.engine_core.profile_async(False)]
        if self.profiler is not None:
            coros.append(asyncio.to_thread(self.profiler.stop))
        await asyncio.gather(*coros)

    async def reset_mm_cache(self) -> None:
        """Clear the multimodal caches in both the frontend and engine core."""
        self.input_processor.clear_mm_cache()
        await self.engine_core.reset_mm_cache_async()

    async def reset_prefix_cache(
        self, reset_running_requests: bool = False, reset_connector: bool = False
    ) -> bool:
        return await self.engine_core.reset_prefix_cache_async(
            reset_running_requests, reset_connector
        )

    async def sleep(self, level: int = 1) -> None:
        # Prefix cache is reset before sleeping.
        await self.reset_prefix_cache()
        await self.engine_core.sleep_async(level)
        if self.logger_manager is not None:
            self.logger_manager.record_sleep_state(1, level)

    async def wake_up(self, tags: list[str] | None = None) -> None:
        await self.engine_core.wake_up_async(tags)
        if self.logger_manager is not None:
            self.logger_manager.record_sleep_state(0, 0)

    async def is_sleeping(self) -> bool:
        return await self.engine_core.is_sleeping_async()

    async def add_lora(self, lora_request: LoRARequest) -> bool:
        """Load a new LoRA adapter into the engine for future requests."""
        return await self.engine_core.add_lora_async(lora_request)

    async def remove_lora(self, lora_id: int) -> bool:
        """Remove an already loaded LoRA adapter."""
        return await self.engine_core.remove_lora_async(lora_id)

    async def list_loras(self) -> set[int]:
        """List all registered adapters."""
        return await self.engine_core.list_loras_async()

    async def pin_lora(self, lora_id: int) -> bool:
        """Prevent an adapter from being evicted."""
        return await self.engine_core.pin_lora_async(lora_id)

    async def collective_rpc(
        self,
        method: str,
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict | None = None,
    ):
        """
        Perform a collective RPC call to the given path.
        """
        return await self.engine_core.collective_rpc_async(
            method, timeout, args, kwargs
        )

    async def wait_for_requests_to_drain(self, drain_timeout: int = 300):
        """Wait for all requests to be drained."""
        start_time = time.time()
        while time.time() - start_time < drain_timeout:
            if not self.engine_core.dp_engines_running():
                logger.info("Engines are idle, requests have been drained")
                return

            logger.info("Engines are still running, waiting for requests to drain...")
            await asyncio.sleep(1)  # Wait 1 second before checking again

        raise TimeoutError(
            f"Timeout reached after {drain_timeout} seconds "
            "waiting for requests to drain."
        )

    async def scale_elastic_ep(
        self, new_data_parallel_size: int, drain_timeout: int = 300
    ):
        """
        Scale up or down the data parallel size by adding
        or removing engine cores.
        """
        # NOTE(review): the body of this method lies beyond the end of the
        # visible chunk (the source dump is truncated here) — see the full
        # file for the implementation.
Args: new_data_parallel_size: The new number of data parallel workers drain_timeout: Maximum time to wait for requests to drain (seconds) """ old_data_parallel_size = self.vllm_config.parallel_config.data_parallel_size if old_data_parallel_size == new_data_parallel_size: logger.info( "Data parallel size is already %s, skipping scale", new_data_parallel_size, ) return logger.info( "Waiting for requests to drain before scaling up to %s engines...", new_data_parallel_size, ) await self.wait_for_requests_to_drain(drain_timeout) logger.info( "Requests have been drained, proceeding with scale to %s engines", new_data_parallel_size, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/core_client.py
vllm/v1/engine/core_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import contextlib import multiprocessing import queue import sys import uuid import weakref from abc import ABC, abstractmethod from collections import defaultdict, deque from collections.abc import Awaitable, Callable, Sequence from concurrent.futures import Future from dataclasses import dataclass from threading import Thread from typing import Any, TypeAlias, TypeVar import msgspec.msgpack import zmq import zmq.asyncio from vllm.config import VllmConfig from vllm.envs import VLLM_ENGINE_READY_TIMEOUT_S from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.tasks import SupportedTask from vllm.utils.async_utils import in_loop from vllm.utils.network_utils import ( close_sockets, get_open_port, get_open_zmq_inproc_path, make_zmq_socket, ) from vllm.v1.engine import ( EngineCoreOutputs, EngineCoreRequest, EngineCoreRequestType, ReconfigureDistributedRequest, ReconfigureRankType, UtilityOutput, ) from vllm.v1.engine.coordinator import DPCoordinator from vllm.v1.engine.core import EngineCore, EngineCoreProc from vllm.v1.engine.exceptions import EngineDeadError from vllm.v1.engine.utils import ( CoreEngineActorManager, CoreEngineProcManager, launch_core_engines, ) from vllm.v1.executor import Executor from vllm.v1.serial_utils import MsgpackDecoder, MsgpackEncoder, bytestr logger = init_logger(__name__) AnyFuture: TypeAlias = asyncio.Future[Any] | Future[Any] _R = TypeVar("_R") # Return type for collective_rpc EngineIdentity = bytes class EngineCoreClient(ABC): """ EngineCoreClient: subclasses handle different methods for pushing and pulling from the EngineCore for asyncio / multiprocessing. 
Subclasses: * InprocClient: In process EngineCore (for V0-style LLMEngine use) * SyncMPClient: ZMQ + background proc EngineCore (for LLM) * AsyncMPClient: ZMQ + background proc EngineCore w/ asyncio (for AsyncLLM) """ @staticmethod def make_client( multiprocess_mode: bool, asyncio_mode: bool, vllm_config: VllmConfig, executor_class: type[Executor], log_stats: bool, ) -> "EngineCoreClient": # TODO: support this for debugging purposes. if asyncio_mode and not multiprocess_mode: raise NotImplementedError( "Running EngineCore in asyncio without multiprocessing " "is not currently supported." ) if multiprocess_mode and asyncio_mode: return EngineCoreClient.make_async_mp_client( vllm_config, executor_class, log_stats ) if multiprocess_mode and not asyncio_mode: return SyncMPClient(vllm_config, executor_class, log_stats) return InprocClient(vllm_config, executor_class, log_stats) @staticmethod def make_async_mp_client( vllm_config: VllmConfig, executor_class: type[Executor], log_stats: bool, client_addresses: dict[str, str] | None = None, client_count: int = 1, client_index: int = 0, ) -> "MPClient": parallel_config = vllm_config.parallel_config client_args = ( vllm_config, executor_class, log_stats, client_addresses, client_count, client_index, ) if parallel_config.data_parallel_size > 1: if parallel_config.data_parallel_external_lb: # External load balancer - client per DP rank. return DPAsyncMPClient(*client_args) # Internal load balancer - client balances to all DP ranks. return DPLBAsyncMPClient(*client_args) return AsyncMPClient(*client_args) @abstractmethod def shutdown(self): ... 
def get_output(self) -> EngineCoreOutputs: raise NotImplementedError def get_supported_tasks(self) -> tuple[SupportedTask, ...]: raise NotImplementedError def add_request(self, request: EngineCoreRequest) -> None: raise NotImplementedError def profile(self, is_start: bool = True) -> None: raise NotImplementedError def reset_mm_cache(self) -> None: raise NotImplementedError def reset_prefix_cache( self, reset_running_requests: bool = False, reset_connector: bool = False ) -> bool: raise NotImplementedError def sleep(self, level: int = 1) -> None: raise NotImplementedError def wake_up(self, tags: list[str] | None = None) -> None: raise NotImplementedError def is_sleeping(self) -> bool: raise NotImplementedError def execute_dummy_batch(self) -> None: raise NotImplementedError async def execute_dummy_batch_async(self) -> None: raise NotImplementedError def abort_requests(self, request_ids: list[str]) -> None: raise NotImplementedError def add_lora(self, lora_request: LoRARequest) -> bool: raise NotImplementedError def remove_lora(self, lora_id: int) -> bool: raise NotImplementedError def list_loras(self) -> set[int]: raise NotImplementedError def pin_lora(self, lora_id: int) -> bool: raise NotImplementedError def save_sharded_state( self, path: str, pattern: str | None = None, max_size: int | None = None ) -> None: raise NotImplementedError def collective_rpc( self, method: str | Callable[..., _R], timeout: float | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, ) -> list[_R]: raise NotImplementedError def dp_engines_running(self) -> bool: """Returns True id data parallel engines are collectively in a running state.""" raise NotImplementedError async def scale_elastic_ep(self, new_data_parallel_size: int) -> None: raise NotImplementedError async def get_output_async(self) -> EngineCoreOutputs: raise NotImplementedError async def get_supported_tasks_async(self) -> tuple[SupportedTask, ...]: raise NotImplementedError async def add_request_async(self, 
request: EngineCoreRequest) -> None: raise NotImplementedError async def profile_async(self, is_start: bool = True) -> None: raise NotImplementedError async def reset_mm_cache_async(self) -> None: raise NotImplementedError async def reset_prefix_cache_async( self, reset_running_requests: bool = False, reset_connector: bool = False ) -> bool: raise NotImplementedError async def sleep_async(self, level: int = 1) -> None: raise NotImplementedError async def wake_up_async(self, tags: list[str] | None = None) -> None: raise NotImplementedError async def is_sleeping_async(self) -> bool: raise NotImplementedError async def abort_requests_async(self, request_ids: list[str]) -> None: raise NotImplementedError async def add_lora_async(self, lora_request: LoRARequest) -> bool: raise NotImplementedError async def remove_lora_async(self, lora_id: int) -> bool: raise NotImplementedError async def list_loras_async(self) -> set[int]: raise NotImplementedError async def pin_lora_async(self, lora_id: int) -> bool: raise NotImplementedError async def save_sharded_state_async( self, path: str, pattern: str | None = None, max_size: int | None = None ) -> None: raise NotImplementedError async def collective_rpc_async( self, method: str | Callable[..., _R], timeout: float | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, ) -> list[_R]: raise NotImplementedError class InprocClient(EngineCoreClient): """ InprocClient: client for in-process EngineCore. Intended for use in LLMEngine for V0-style add_request() and step() EngineCore setup in this process (no busy loop). 
* pushes EngineCoreRequest directly into the EngineCore * pulls EngineCoreOutputs by stepping the EngineCore """ def __init__(self, *args, **kwargs): self.engine_core = EngineCore(*args, **kwargs) def get_output(self) -> EngineCoreOutputs: outputs, model_executed = self.engine_core.step_fn() self.engine_core.post_step(model_executed=model_executed) return outputs and outputs.get(0) or EngineCoreOutputs() def get_supported_tasks(self) -> tuple[SupportedTask, ...]: return self.engine_core.get_supported_tasks() def add_request(self, request: EngineCoreRequest) -> None: req, request_wave = self.engine_core.preprocess_add_request(request) self.engine_core.add_request(req, request_wave) def abort_requests(self, request_ids: list[str]) -> None: if len(request_ids) > 0: self.engine_core.abort_requests(request_ids) def shutdown(self) -> None: self.engine_core.shutdown() def profile(self, is_start: bool = True) -> None: self.engine_core.profile(is_start) def reset_mm_cache(self) -> None: self.engine_core.reset_mm_cache() def reset_prefix_cache( self, reset_running_requests: bool = False, reset_connector: bool = False ) -> bool: return self.engine_core.reset_prefix_cache( reset_running_requests, reset_connector ) def sleep(self, level: int = 1) -> None: self.engine_core.sleep(level) def wake_up(self, tags: list[str] | None = None) -> None: self.engine_core.wake_up(tags) def is_sleeping(self) -> bool: return self.engine_core.is_sleeping() def execute_dummy_batch(self) -> None: self.engine_core.execute_dummy_batch() def add_lora(self, lora_request: LoRARequest) -> bool: return self.engine_core.add_lora(lora_request) def remove_lora(self, lora_id: int) -> bool: return self.engine_core.remove_lora(lora_id) def list_loras(self) -> set[int]: return self.engine_core.list_loras() def pin_lora(self, lora_id: int) -> bool: return self.engine_core.pin_lora(lora_id) def save_sharded_state( self, path: str, pattern: str | None = None, max_size: int | None = None ) -> None: 
self.engine_core.save_sharded_state(path, pattern, max_size) def collective_rpc( self, method: str | Callable[..., _R], timeout: float | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, ) -> list[_R]: return self.engine_core.collective_rpc(method, timeout, args, kwargs) def dp_engines_running(self) -> bool: return False @dataclass class BackgroundResources: """Used as a finalizer for clean shutdown, avoiding circular reference back to the client object.""" ctx: zmq.Context # If CoreEngineProcManager, it manages local engines; # if CoreEngineActorManager, it manages all engines. engine_manager: CoreEngineProcManager | CoreEngineActorManager | None = None coordinator: DPCoordinator | None = None output_socket: zmq.Socket | zmq.asyncio.Socket | None = None input_socket: zmq.Socket | zmq.asyncio.Socket | None = None first_req_send_socket: zmq.asyncio.Socket | None = None first_req_rcv_socket: zmq.asyncio.Socket | None = None stats_update_socket: zmq.asyncio.Socket | None = None output_queue_task: asyncio.Task | None = None stats_update_task: asyncio.Task | None = None shutdown_path: str | None = None # Set if any of the engines are dead. Here so that the output # processing threads can access it without holding a ref to the client. engine_dead: bool = False def __call__(self): """Clean up background resources.""" self.engine_dead = True if self.engine_manager is not None: self.engine_manager.close() if self.coordinator is not None: self.coordinator.close() if isinstance(self.output_socket, zmq.asyncio.Socket): # Async case. 
loop = self.output_queue_task._loop if self.output_queue_task else None sockets = ( self.output_socket, self.input_socket, self.first_req_send_socket, self.first_req_rcv_socket, self.stats_update_socket, ) tasks = (self.output_queue_task, self.stats_update_task) def close_sockets_and_tasks(): close_sockets(sockets) for task in tasks: if task is not None and not task.done(): with contextlib.suppress(Exception): task.cancel() if loop is not None: if in_loop(loop): close_sockets_and_tasks() elif not loop.is_closed(): loop.call_soon_threadsafe(close_sockets_and_tasks) else: # Loop has been closed, try to clean up directly. del tasks del close_sockets_and_tasks close_sockets(sockets) del self.output_queue_task del self.stats_update_task else: # Sync case. # ZMQ context termination can hang if the sockets # aren't explicitly closed first. close_sockets((self.output_socket, self.input_socket)) if self.shutdown_path is not None: # We must ensure that the sync output socket is # closed cleanly in its own thread. with self.ctx.socket(zmq.PAIR) as shutdown_sender: shutdown_sender.connect(self.shutdown_path) # Send shutdown signal. shutdown_sender.send(b"") def validate_alive(self, frames: Sequence[zmq.Frame]): if len(frames) == 1 and (frames[0].buffer == EngineCoreProc.ENGINE_CORE_DEAD): self.engine_dead = True raise EngineDeadError() class MPClient(EngineCoreClient): """ MPClient: base client for multi-proc EngineCore. EngineCore runs in a background process busy loop, getting new EngineCoreRequests and returning EngineCoreOutputs * pushes EngineCoreRequests via input_socket * pulls EngineCoreOutputs via output_socket * AsyncMPClient subclass for AsyncLLM usage * SyncMPClient subclass for LLM usage """ def __init__( self, asyncio_mode: bool, vllm_config: VllmConfig, executor_class: type[Executor], log_stats: bool, client_addresses: dict[str, str] | None = None, ): self.vllm_config = vllm_config # Serialization setup. 
self.encoder = MsgpackEncoder() self.decoder = MsgpackDecoder(EngineCoreOutputs) # ZMQ setup. sync_ctx = zmq.Context(io_threads=2) self.ctx = zmq.asyncio.Context(sync_ctx) if asyncio_mode else sync_ctx # This will ensure resources created so far are closed # when the client is garbage collected, even if an # exception is raised mid-construction. self.resources = BackgroundResources(ctx=sync_ctx) self._finalizer = weakref.finalize(self, self.resources) success = False try: # State used for data parallel. self.engines_running = False self.stats_update_address: str | None = None if client_addresses: # Engines are managed externally to this client. input_address = client_addresses["input_address"] output_address = client_addresses["output_address"] self.stats_update_address = client_addresses.get("stats_update_address") else: # Engines are managed by this client. with launch_core_engines(vllm_config, executor_class, log_stats) as ( engine_manager, coordinator, addresses, ): self.resources.coordinator = coordinator self.resources.engine_manager = engine_manager (input_address,) = addresses.inputs (output_address,) = addresses.outputs self.stats_update_address = addresses.frontend_stats_publish_address if coordinator is not None: assert self.stats_update_address == ( coordinator.get_stats_publish_address() ) # Create input and output sockets. self.input_socket = self.resources.input_socket = make_zmq_socket( self.ctx, input_address, zmq.ROUTER, bind=True ) self.resources.output_socket = make_zmq_socket( self.ctx, output_address, zmq.PULL ) parallel_config = vllm_config.parallel_config dp_size = parallel_config.data_parallel_size dp_rank = parallel_config.data_parallel_index dp_local_size = parallel_config.data_parallel_size_local offline_mode = parallel_config.data_parallel_rank_local is not None # Client manages local+remote EngineCores in pure internal LB case. # Client manages local EngineCores in hybrid and external LB case. 
local_engines_only = ( parallel_config.data_parallel_hybrid_lb or parallel_config.data_parallel_external_lb ) num_ranks = dp_local_size if local_engines_only else dp_size self.engine_ranks_managed = ( [dp_rank] if offline_mode else list(range(dp_rank, dp_rank + num_ranks)) ) assert parallel_config.data_parallel_size_local <= len( self.engine_ranks_managed ) # ZMQ identity of each engine that this client will talk to. self.core_engines: list[EngineIdentity] = [ rank.to_bytes(2, "little") for rank in self.engine_ranks_managed ] # Wait for ready messages from each engine on the input socket. identities = set(self.core_engines) sync_input_socket = zmq.Socket.shadow(self.input_socket) while identities: if not sync_input_socket.poll( timeout=VLLM_ENGINE_READY_TIMEOUT_S * 1000 # convert to ms ): raise TimeoutError( "Timed out waiting for engines to send" "initial message on input socket." ) identity, _ = sync_input_socket.recv_multipart() identities.remove(identity) self.core_engine: EngineIdentity = self.core_engines[0] self.utility_results: dict[int, AnyFuture] = {} # Request objects which may contain pytorch-allocated tensors # that we need to keep references to until zmq is done with the # underlying data. self.pending_messages = deque[tuple[zmq.MessageTracker, Any]]() # Start monitoring engine core processes for unexpected failures self.start_engine_core_monitor() success = True finally: if not success: self._finalizer() def shutdown(self): # Terminate background resources. 
self._finalizer() def _format_exception(self, e: Exception) -> Exception: """If errored, use EngineDeadError so root cause is clear.""" return ( EngineDeadError(suppress_context=True) if self.resources.engine_dead else e ) def ensure_alive(self): if self.resources.engine_dead: raise EngineDeadError() def add_pending_message(self, tracker: zmq.MessageTracker, msg: Any): if not tracker.done: self.pending_messages.appendleft((tracker, msg)) def free_pending_messages(self): while self.pending_messages and self.pending_messages[-1][0].done: self.pending_messages.pop() def dp_engines_running(self) -> bool: return self.engines_running def start_engine_core_monitor(self): """Start a monitor thread for engine core processes.""" engine_manager = self.resources.engine_manager if ( engine_manager is None or not hasattr(engine_manager, "processes") or not engine_manager.processes ): # No engine processes to monitor return engine_processes = engine_manager.processes self_ref = weakref.ref(self) # Monitor engine core process liveness. If any die unexpectedly, # logs an error, shuts down the client and invokes the failure # callback to inform the engine. 
def monitor_engine_cores(): sentinels = [proc.sentinel for proc in engine_processes] died = multiprocessing.connection.wait(sentinels) _self = self_ref() if not _self or _self.resources.engine_dead: return _self.resources.engine_dead = True proc_name = next( proc.name for proc in engine_processes if proc.sentinel == died[0] ) logger.error( "Engine core proc %s died unexpectedly, shutting down client.", proc_name, ) _self.shutdown() # Note: For MPClient, we don't have a failure callback mechanism # like MultiprocExecutor, but we set engine_dead flag which will # cause subsequent operations to raise EngineDeadError Thread( target=monitor_engine_cores, daemon=True, name="MPClientEngineMonitor" ).start() def _process_utility_output( output: UtilityOutput, utility_results: dict[int, AnyFuture] ): """Set the result from a utility method in the waiting future.""" future = utility_results.pop(output.call_id) failure_message = output.failure_message try: if failure_message is not None: future.set_exception(Exception(failure_message)) else: assert output.result is not None future.set_result(output.result.result) except asyncio.InvalidStateError: # This can happen if the future is cancelled due to the # original calling task being cancelled. if failure_message is not None: logger.error( "Cancelled call to utility method failed with error: %s", failure_message, ) class SyncMPClient(MPClient): """Synchronous client for multi-proc EngineCore.""" def __init__( self, vllm_config: VllmConfig, executor_class: type[Executor], log_stats: bool ): super().__init__( asyncio_mode=False, vllm_config=vllm_config, executor_class=executor_class, log_stats=log_stats, ) self.is_dp = self.vllm_config.parallel_config.data_parallel_size > 1 self.outputs_queue = queue.Queue[EngineCoreOutputs | Exception]() # Ensure that the outputs socket processing thread does not have # a ref to the client which prevents gc. 
ctx = self.ctx out_socket = self.resources.output_socket decoder = self.decoder utility_results = self.utility_results outputs_queue = self.outputs_queue shutdown_path = get_open_zmq_inproc_path() resources = self.resources resources.shutdown_path = shutdown_path def process_outputs_socket(): assert isinstance(out_socket, zmq.Socket) shutdown_socket = ctx.socket(zmq.PAIR) try: shutdown_socket.bind(shutdown_path) poller = zmq.Poller() poller.register(shutdown_socket, zmq.POLLIN) poller.register(out_socket, zmq.POLLIN) while True: socks = poller.poll() if not socks: continue if len(socks) == 2 or socks[0][0] == shutdown_socket: # shutdown signal, exit thread. break frames = out_socket.recv_multipart(copy=False) resources.validate_alive(frames) outputs: EngineCoreOutputs = decoder.decode(frames) if outputs.utility_output: _process_utility_output(outputs.utility_output, utility_results) else: outputs_queue.put_nowait(outputs) except Exception as e: outputs_queue.put_nowait(e) finally: # Close sockets. shutdown_socket.close(linger=0) out_socket.close(linger=0) # Process outputs from engine in separate thread. self.output_queue_thread = Thread( target=process_outputs_socket, name="EngineCoreOutputQueueThread", daemon=True, ) self.output_queue_thread.start() # The thread takes on responsibility for closing the socket. self.resources.output_socket = None def get_output(self) -> EngineCoreOutputs: # If an exception arises in process_outputs_socket task, # it is forwarded to the outputs_queue so we can raise it # from this (run_output_handler) task to shut down the server. 
outputs = self.outputs_queue.get() if isinstance(outputs, Exception): raise self._format_exception(outputs) from None if outputs.wave_complete is not None: self.engines_running = False return outputs def _send_input(self, request_type: EngineCoreRequestType, request: Any): self.ensure_alive() self.free_pending_messages() # (Identity, RequestType, SerializedRequest) msg = (self.core_engine, request_type.value, *self.encoder.encode(request)) if len(msg) <= 3: # No auxiliary buffers => no tensor backing buffers in request. self.input_socket.send_multipart(msg, copy=False) return tracker = self.input_socket.send_multipart(msg, copy=False, track=True) self.add_pending_message(tracker, request) def call_utility(self, method: str, *args) -> Any: call_id = uuid.uuid1().int >> 64 future: Future[Any] = Future() self.utility_results[call_id] = future self._send_input(EngineCoreRequestType.UTILITY, (0, call_id, method, args)) return future.result() def get_supported_tasks(self) -> tuple[SupportedTask, ...]: return self.call_utility("get_supported_tasks") def add_request(self, request: EngineCoreRequest) -> None: if self.is_dp: self.engines_running = True self._send_input(EngineCoreRequestType.ADD, request) def abort_requests(self, request_ids: list[str]) -> None: if request_ids and not self.resources.engine_dead: self._send_input(EngineCoreRequestType.ABORT, request_ids) def profile(self, is_start: bool = True) -> None: self.call_utility("profile", is_start) def reset_mm_cache(self) -> None: self.call_utility("reset_mm_cache") def reset_prefix_cache( self, reset_running_requests: bool = False, reset_connector: bool = False ) -> bool: return self.call_utility( "reset_prefix_cache", reset_running_requests, reset_connector ) def add_lora(self, lora_request: LoRARequest) -> bool: return self.call_utility("add_lora", lora_request) def remove_lora(self, lora_id: int) -> bool: return self.call_utility("remove_lora", lora_id) def list_loras(self) -> set[int]: return 
self.call_utility("list_loras") def pin_lora(self, lora_id: int) -> bool: return self.call_utility("pin_lora", lora_id) def sleep(self, level: int = 1) -> None: self.call_utility("sleep", level) def wake_up(self, tags: list[str] | None = None) -> None: self.call_utility("wake_up", tags) def is_sleeping(self) -> bool: return self.call_utility("is_sleeping") def execute_dummy_batch(self) -> None: self.call_utility("execute_dummy_batch") def collective_rpc( self, method: str | Callable[..., _R], timeout: float | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, ) -> list[_R]: return self.call_utility("collective_rpc", method, timeout, args, kwargs) def save_sharded_state( self, path: str, pattern: str | None = None, max_size: int | None = None ) -> None: self.call_utility("save_sharded_state", path, pattern, max_size) class AsyncMPClient(MPClient): """Asyncio-compatible client for multi-proc EngineCore.""" def __init__( self, vllm_config: VllmConfig, executor_class: type[Executor], log_stats: bool, client_addresses: dict[str, str] | None = None, client_count: int = 1, client_index: int = 0, ): super().__init__( asyncio_mode=True, vllm_config=vllm_config, executor_class=executor_class, log_stats=log_stats, client_addresses=client_addresses, ) self.client_count = client_count self.client_index = client_index self.outputs_queue = asyncio.Queue[EngineCoreOutputs | Exception]() try: # If we are running in an asyncio event loop, start the queue task. # Otherwise, it will be started lazily. If it is not started here, # we could miss EXECUTOR_FAILED messages from engine core if they # occur prior to any requests being sent. asyncio.get_running_loop() self._ensure_output_queue_task() except RuntimeError: pass def _ensure_output_queue_task(self): resources = self.resources if resources.output_queue_task is not None: return # Perform IO in separate task to parallelize as much as possible. # Avoid task having direct reference back to the client. 
decoder = self.decoder utility_results = self.utility_results outputs_queue = self.outputs_queue output_handler: ( Callable[[AsyncMPClient, EngineCoreOutputs], Awaitable[None]] | None ) = getattr(self.__class__, "process_engine_outputs", None) _self_ref = weakref.ref(self) if output_handler else None output_socket = resources.output_socket assert output_socket is not None async def process_outputs_socket(): try: while True: frames = await output_socket.recv_multipart(copy=False) resources.validate_alive(frames) outputs: EngineCoreOutputs = decoder.decode(frames) if outputs.utility_output: _process_utility_output(outputs.utility_output, utility_results) continue if output_handler is not None: assert _self_ref is not None _self = _self_ref() if not _self: # Client has been garbage collected, abort. return await output_handler(_self, outputs) if outputs.outputs or outputs.scheduler_stats: outputs_queue.put_nowait(outputs) except Exception as e: outputs_queue.put_nowait(e) except asyncio.CancelledError: outputs_queue.put_nowait(EngineDeadError()) resources.output_queue_task = asyncio.create_task( process_outputs_socket(), name="EngineCoreOutputQueueTask" ) async def get_output_async(self) -> EngineCoreOutputs: self._ensure_output_queue_task() # If an exception arises in process_outputs_socket task, # it is forwarded to the outputs_queue so we can raise it # from this (run_output_handler) task to shut down the server. assert self.outputs_queue is not None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/detokenizer.py
vllm/v1/engine/detokenizer.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from abc import ABC, abstractmethod import tokenizers from packaging import version from tokenizers import Tokenizer from tokenizers.decoders import DecodeStream from transformers import PreTrainedTokenizerFast from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike from vllm.tokenizers.detokenizer_utils import ( convert_prompt_ids_to_tokens, detokenize_incrementally, ) from vllm.utils import length_from_prompt_token_ids_or_embeds from vllm.v1.engine import EngineCoreRequest logger = init_logger(__name__) # Only tokenizers >= 0.21.1 supports DecodeStream used for # FastIncrementalDetokenizer. USE_FAST_DETOKENIZER = version.parse(tokenizers.__version__) >= version.parse("0.21.1") # Error string from https://github.com/huggingface/tokenizers/blob/909fdde2a4ffedd9295206f705eb612be2a91b12/tokenizers/src/tokenizer/mod.rs#L1042 INVALID_PREFIX_ERR_MSG = "Invalid prefix encountered" class IncrementalDetokenizer: def __init__(self): self.token_ids: list[int] = [] @property def output_token_ids(self) -> list[int]: return self.token_ids def update(self, new_token_ids: list[int], stop_terminated: bool) -> str | None: self.token_ids.extend(new_token_ids) return None def get_next_output_text(self, finished: bool, delta: bool) -> str: return "" @classmethod def from_new_request( cls, tokenizer: TokenizerLike | None, request: EngineCoreRequest, ) -> "IncrementalDetokenizer": assert request.sampling_params is not None if tokenizer is None: # No tokenizer => skipping detokenization. return IncrementalDetokenizer() if USE_FAST_DETOKENIZER and isinstance(tokenizer, PreTrainedTokenizerFast): # Fast tokenizer => use tokenizers library DecodeStream. return FastIncrementalDetokenizer(tokenizer, request) # Fall back to slow python-based incremental detokenization. 
return SlowIncrementalDetokenizer(tokenizer, request) class BaseIncrementalDetokenizer(IncrementalDetokenizer, ABC): def __init__(self, request: EngineCoreRequest): super().__init__() # Stop strings params = request.sampling_params assert params is not None stop_list: list[str] if params.stop is None: stop_list = [] elif isinstance(params.stop, str): stop_list = [params.stop] else: stop_list = params.stop self.stop = stop_list self.min_tokens = params.min_tokens self.include_stop_str_in_output = params.include_stop_str_in_output # Number of chars to hold back when stop strings are to be excluded # from streamed output. if self.stop and not self.include_stop_str_in_output: self.stop_buffer_length = max(len(s) for s in self.stop) - 1 else: self.stop_buffer_length = 0 self._last_output_text_offset: int = 0 # Generation data self.output_text = "" def update(self, new_token_ids: list[int], stop_terminated: bool) -> str | None: """ Update RequestState for the request_id by: 1) Detokenize the new token ids incrementally. 2) Evaluate stop criteria. Return matched stop string or None. """ if not new_token_ids: # Skip detokenization if no new token ids. return None if stop_terminated and not self.include_stop_str_in_output: # If stop-terminated, exclude last token from detokenization # based on include_stop_str_in_output parameter. skipped_stop_token_id = new_token_ids[-1] new_token_ids = new_token_ids[:-1] else: skipped_stop_token_id = None # 1) Detokenize the new token ids incrementally. # TODO(woosuk): This method becomes very inefficient when the number of # new_token_ids is more than 1. We need to optimize this. 
stop_check_offset = len(self.output_text) for new_token_id in new_token_ids: self.token_ids.append(new_token_id) self.output_text += self.decode_next(new_token_id) # Support min_tokens, see https://github.com/vllm-project/vllm/pull/22014 if self.min_tokens and len(self.output_token_ids) <= self.min_tokens: stop_check_offset = len(self.output_text) if skipped_stop_token_id is not None: # Cleanup after skipping detokenization. self.token_ids.append(skipped_stop_token_id) # 2) Evaluate stop strings. stop_string = None if self.stop and len(self.output_token_ids) > self.min_tokens: stop = check_stop_strings( output_text=self.output_text, new_char_count=len(self.output_text) - stop_check_offset, stop=self.stop, include_in_output=self.include_stop_str_in_output, ) if stop is not None: stop_string, truncate_to = stop if truncate_to != -1: self.output_text = self.output_text[:truncate_to] return stop_string @abstractmethod def decode_next(self, next_token_id: int) -> str: raise NotImplementedError def get_next_output_text(self, finished: bool, delta: bool) -> str: """If delta is True, only new text since the last call to this method is returned""" # We return the full output text if the sequence is finished. 
buffer_length = 0 if finished else self.stop_buffer_length if not delta: return ( self.output_text[:-buffer_length] if buffer_length else (self.output_text) ) length = len(self.output_text) - buffer_length last_offset = self._last_output_text_offset if last_offset < length: self._last_output_text_offset = length return self.output_text[last_offset:length] return "" class FastIncrementalDetokenizer(BaseIncrementalDetokenizer): def __init__(self, tokenizer: PreTrainedTokenizerFast, request: EngineCoreRequest): super().__init__(request) sampling_params = request.sampling_params assert sampling_params is not None self.request_id = request.request_id self.skip_special_tokens = sampling_params.skip_special_tokens self.stream = DecodeStream(skip_special_tokens=self.skip_special_tokens) self.tokenizer: Tokenizer = tokenizer._tokenizer # Find a safe place to start. prompt_token_ids = request.prompt_token_ids or [] prompt_suffix = prompt_token_ids prompt_len = len(prompt_suffix) if prompt_len > 4: for i in range(4, min(prompt_len + 1, 24)): suffix = prompt_token_ids[-i:] if "�" not in self.tokenizer.decode(suffix): prompt_suffix = suffix break # Prime the stream. for tid in prompt_suffix: self._protected_step(tid) self.spaces_between_special_tokens = ( sampling_params.skip_special_tokens or sampling_params.spaces_between_special_tokens ) if not self.spaces_between_special_tokens: # Store dict of added token ids so that we can suppress # the spaces between them. if ( added_token_ids := getattr(self.tokenizer, "added_token_ids", None) ) is None: self.tokenizer.added_token_ids = added_token_ids = { tid: tok.content for tid, tok in self.tokenizer.get_added_tokens_decoder().items() } if added_token_ids: self.last_special = False self.added_token_ids = added_token_ids else: # No added tokens. 
self.spaces_between_special_tokens = True def decode_next(self, next_token_id: int) -> str: token = self._protected_step(next_token_id) if not self.spaces_between_special_tokens: special_token = self.added_token_ids.get(next_token_id) is_special = special_token is not None if is_special and self.last_special: # Return raw token string without any prefixed spaces. token = special_token self.last_special = is_special return token or "" def _protected_step(self, next_token_id: int) -> str | None: try: token = self.stream.step(self.tokenizer, next_token_id) except (OverflowError, TypeError): # Handle rare observed overflow, still to be diagnosed. # See https://github.com/vllm-project/vllm/issues/21951. logger.exception("Encountered invalid token id: %r", next_token_id) token = None except Exception as e: if not str(e).startswith(INVALID_PREFIX_ERR_MSG): raise e # Recover from edge case where tokenizer can produce non-monotonic, # invalid UTF-8 output, which breaks the internal state of # tokenizers' DecodeStream. # See https://github.com/vllm-project/vllm/issues/17448. logger.warning( "Encountered invalid prefix detokenization error" " for request %s, resetting decode stream.", self.request_id, ) self.stream = DecodeStream(skip_special_tokens=self.skip_special_tokens) token = self.stream.step(self.tokenizer, next_token_id) return token class SlowIncrementalDetokenizer(BaseIncrementalDetokenizer): def __init__(self, tokenizer: TokenizerLike, request: EngineCoreRequest): super().__init__(request) self.tokenizer = tokenizer params = request.sampling_params assert params is not None self.prompt_len = length_from_prompt_token_ids_or_embeds( request.prompt_token_ids, request.prompt_embeds ) # Metadata for incremental detokenization. 
if request.prompt_token_ids is not None: self.tokens, self.prefix_offset, self.read_offset = ( convert_prompt_ids_to_tokens( tokenizer=tokenizer, prompt_ids=request.prompt_token_ids, skip_special_tokens=params.skip_special_tokens, ) ) else: # Prompt embedding requests cannot be detokenized, in general. self.tokens = [""] * self.prompt_len self.prefix_offset = 0 self.read_offest = 0 self.token_ids.extend(request.prompt_token_ids or [0] * self.prompt_len) self.skip_special_tokens = params.skip_special_tokens self.spaces_between_special_tokens = params.spaces_between_special_tokens @property def output_token_ids(self) -> list[int]: return ( self.token_ids if not self.prompt_len else (self.token_ids[self.prompt_len :]) ) def decode_next(self, next_token_id: int) -> str: new_tokens, decoded_text, prefix_offset, read_offset = detokenize_incrementally( tokenizer=self.tokenizer, all_input_ids=self.token_ids, prev_tokens=self.tokens, prefix_offset=self.prefix_offset, read_offset=self.read_offset, skip_special_tokens=self.skip_special_tokens, spaces_between_special_tokens=self.spaces_between_special_tokens, ) self.tokens.extend(new_tokens) self.prefix_offset = prefix_offset self.read_offset = read_offset return decoded_text def check_stop_strings( output_text: str, new_char_count: int, stop: list[str], include_in_output: bool, ) -> tuple[str, int] | None: """Check if any stop strings are matched and truncate sequence output text accordingly. Returns tuple (stop_string, offset) if matched or else None. Where stop_string is the matched stop string and offset is the length to which output_text should be truncated, or -1 for no truncation. """ if not new_char_count or not stop: return None for stop_str in stop: stop_string_len = len(stop_str) # Avoid searching already-searched text. stop_index = output_text.find(stop_str, 1 - new_char_count - stop_string_len) if stop_index == -1: continue if include_in_output: # Truncate to end of stop string. 
stop_index += stop_string_len if stop_index >= len(output_text): # No truncation required. return stop_str, -1 # Truncate the output text to either the beginning # or end of the stop string. return stop_str, stop_index return None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/exceptions.py
vllm/v1/engine/exceptions.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project class EngineGenerateError(Exception): """Raised when a AsyncLLM.generate() fails. Recoverable.""" pass class EngineDeadError(Exception): """Raised when the EngineCore dies. Unrecoverable.""" def __init__(self, *args, suppress_context: bool = False, **kwargs): ENGINE_DEAD_MESSAGE = "EngineCore encountered an issue. See stack trace (above) for the root cause." # noqa: E501 super().__init__(ENGINE_DEAD_MESSAGE, *args, **kwargs) # Make stack trace clearer when using with LLMEngine by # silencing irrelevant ZMQError. self.__suppress_context__ = suppress_context
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/core.py
vllm/v1/engine/core.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os import queue import signal import threading import time from collections import deque from collections.abc import Callable, Generator from concurrent.futures import Future from contextlib import ExitStack, contextmanager from inspect import isclass, signature from logging import DEBUG from typing import Any, TypeVar, cast import msgspec import zmq from vllm.config import ParallelConfig, VllmConfig from vllm.distributed import stateless_destroy_torch_distributed_process_group from vllm.envs import enable_envs_cache from vllm.logger import init_logger from vllm.logging_utils.dump_input import dump_engine_exception from vllm.lora.request import LoRARequest from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.cache import engine_receiver_cache_from_config from vllm.tasks import POOLING_TASKS, SupportedTask from vllm.transformers_utils.config import maybe_register_config_serialize_by_value from vllm.utils.gc_utils import ( freeze_gc_heap, maybe_attach_gc_debug_callback, ) from vllm.utils.hashing import get_hash_fn_by_name from vllm.utils.network_utils import make_zmq_socket from vllm.utils.system_utils import decorate_logs, set_process_title from vllm.v1.core.kv_cache_utils import ( BlockHash, generate_scheduler_kv_cache_config, get_kv_cache_configs, get_request_block_hasher, init_none_hash, ) from vllm.v1.core.sched.interface import SchedulerInterface from vllm.v1.core.sched.output import SchedulerOutput from vllm.v1.engine import ( EngineCoreOutput, EngineCoreOutputs, EngineCoreRequest, EngineCoreRequestType, FinishReason, ReconfigureDistributedRequest, ReconfigureRankType, UtilityOutput, UtilityResult, ) from vllm.v1.engine.utils import ( EngineHandshakeMetadata, EngineZmqAddresses, get_device_indices, ) from vllm.v1.executor import Executor from vllm.v1.kv_cache_interface import KVCacheConfig from vllm.v1.metrics.stats import 
SchedulerStats from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.request import Request, RequestStatus from vllm.v1.serial_utils import MsgpackDecoder, MsgpackEncoder from vllm.v1.structured_output import StructuredOutputManager from vllm.version import __version__ as VLLM_VERSION logger = init_logger(__name__) POLLING_TIMEOUT_S = 2.5 HANDSHAKE_TIMEOUT_MINS = 5 _R = TypeVar("_R") # Return type for collective_rpc class EngineCore: """Inner loop of vLLM's Engine.""" def __init__( self, vllm_config: VllmConfig, executor_class: type[Executor], log_stats: bool, executor_fail_callback: Callable | None = None, include_finished_set: bool = False, ): # plugins need to be loaded at the engine/scheduler level too from vllm.plugins import load_general_plugins load_general_plugins() self.vllm_config = vllm_config if not vllm_config.parallel_config.data_parallel_rank_local: logger.info( "Initializing a V1 LLM engine (v%s) with config: %s", VLLM_VERSION, vllm_config, ) self.log_stats = log_stats # Setup Model. self.model_executor = executor_class(vllm_config) if executor_fail_callback is not None: self.model_executor.register_failure_callback(executor_fail_callback) self.available_gpu_memory_for_kv_cache = -1 # Setup KV Caches and update CacheConfig after profiling. num_gpu_blocks, num_cpu_blocks, kv_cache_config = self._initialize_kv_caches( vllm_config ) vllm_config.cache_config.num_gpu_blocks = num_gpu_blocks vllm_config.cache_config.num_cpu_blocks = num_cpu_blocks self.collective_rpc("initialize_cache", args=(num_gpu_blocks, num_cpu_blocks)) self.structured_output_manager = StructuredOutputManager(vllm_config) # Setup scheduler. Scheduler = vllm_config.scheduler_config.get_scheduler_cls() if len(kv_cache_config.kv_cache_groups) == 0: # noqa: SIM102 # Encoder models without KV cache don't support # chunked prefill. But do SSM models? 
if vllm_config.scheduler_config.enable_chunked_prefill: logger.warning("Disabling chunked prefill for model without KVCache") vllm_config.scheduler_config.enable_chunked_prefill = False scheduler_block_size = ( vllm_config.cache_config.block_size * vllm_config.parallel_config.decode_context_parallel_size * vllm_config.parallel_config.prefill_context_parallel_size ) self.scheduler: SchedulerInterface = Scheduler( vllm_config=vllm_config, kv_cache_config=kv_cache_config, structured_output_manager=self.structured_output_manager, include_finished_set=include_finished_set, log_stats=self.log_stats, block_size=scheduler_block_size, ) self.use_spec_decode = vllm_config.speculative_config is not None if self.scheduler.connector is not None: # type: ignore self.model_executor.init_kv_output_aggregator(self.scheduler.connector) # type: ignore self.mm_registry = mm_registry = MULTIMODAL_REGISTRY self.mm_receiver_cache = engine_receiver_cache_from_config( vllm_config, mm_registry ) # If a KV connector is initialized for scheduler, we want to collect # handshake metadata from all workers so the connector in the scheduler # will have the full context kv_connector = self.scheduler.get_kv_connector() if kv_connector is not None: # Collect and store KV connector xfer metadata from workers # (after KV cache registration) xfer_handshake_metadata = ( self.model_executor.get_kv_connector_handshake_metadata() ) if xfer_handshake_metadata: # xfer_handshake_metadata is list of dicts from workers # Each dict already has structure {tp_rank: metadata} # Merge all worker dicts into a single dict content: dict[int, Any] = {} for worker_dict in xfer_handshake_metadata: if worker_dict is not None: content.update(worker_dict) kv_connector.set_xfer_handshake_metadata(content) # Setup batch queue for pipeline parallelism. # Batch queue for scheduled batches. This enables us to asynchronously # schedule and execute batches, and is required by pipeline parallelism # to eliminate pipeline bubbles. 
self.batch_queue_size = self.model_executor.max_concurrent_batches self.batch_queue: ( deque[tuple[Future[ModelRunnerOutput], SchedulerOutput]] | None ) = None if self.batch_queue_size > 1: logger.info("Batch queue is enabled with size %d", self.batch_queue_size) self.batch_queue = deque(maxlen=self.batch_queue_size) self.is_ec_producer = ( vllm_config.ec_transfer_config is not None and vllm_config.ec_transfer_config.is_ec_producer ) self.is_pooling_model = vllm_config.model_config.runner_type == "pooling" self.request_block_hasher: Callable[[Request], list[BlockHash]] | None = None if vllm_config.cache_config.enable_prefix_caching or kv_connector is not None: caching_hash_fn = get_hash_fn_by_name( vllm_config.cache_config.prefix_caching_hash_algo ) init_none_hash(caching_hash_fn) self.request_block_hasher = get_request_block_hasher( scheduler_block_size, caching_hash_fn ) self.step_fn = ( self.step if self.batch_queue is None else self.step_with_batch_queue ) self.async_scheduling = vllm_config.scheduler_config.async_scheduling self.aborts_queue = queue.Queue[list[str]]() # Mark the startup heap as static so that it's ignored by GC. # Reduces pause times of oldest generation collections. freeze_gc_heap() # If enable, attach GC debugger after static variable freeze. maybe_attach_gc_debug_callback() # Enable environment variable cache (e.g. 
assume no more # environment variable overrides after this point) enable_envs_cache() def _initialize_kv_caches( self, vllm_config: VllmConfig ) -> tuple[int, int, KVCacheConfig]: start = time.time() # Get all kv cache needed by the model kv_cache_specs = self.model_executor.get_kv_cache_specs() has_kv_cache = any(kv_cache_spec for kv_cache_spec in kv_cache_specs) if has_kv_cache: if os.environ.get("VLLM_ELASTIC_EP_SCALE_UP_LAUNCH") == "1": dp_group = getattr(self, "dp_group", None) assert dp_group is not None self.available_gpu_memory_for_kv_cache = ( ParallelConfig.sync_kv_cache_memory_size(dp_group, -1) ) available_gpu_memory = [self.available_gpu_memory_for_kv_cache] * len( kv_cache_specs ) else: # Profiles the peak memory usage of the model to determine how # much memory can be allocated for kv cache. available_gpu_memory = self.model_executor.determine_available_memory() self.available_gpu_memory_for_kv_cache = available_gpu_memory[0] else: # Attention free models don't need memory for kv cache available_gpu_memory = [0] * len(kv_cache_specs) assert len(kv_cache_specs) == len(available_gpu_memory) # Track max_model_len before KV cache config to detect auto-fit changes max_model_len_before = vllm_config.model_config.max_model_len kv_cache_configs = get_kv_cache_configs( vllm_config, kv_cache_specs, available_gpu_memory ) # If auto-fit reduced max_model_len, sync the new value to workers. # This is needed because workers were spawned before memory profiling # and have the original (larger) max_model_len cached. 
max_model_len_after = vllm_config.model_config.max_model_len if max_model_len_after != max_model_len_before: self.collective_rpc("update_max_model_len", args=(max_model_len_after,)) scheduler_kv_cache_config = generate_scheduler_kv_cache_config(kv_cache_configs) num_gpu_blocks = scheduler_kv_cache_config.num_blocks num_cpu_blocks = 0 # Initialize kv cache and warmup the execution self.model_executor.initialize_from_config(kv_cache_configs) elapsed = time.time() - start logger.info_once( "init engine (profile, create kv cache, warmup model) took %.2f seconds", elapsed, scope="local", ) return num_gpu_blocks, num_cpu_blocks, scheduler_kv_cache_config def get_supported_tasks(self) -> tuple[SupportedTask, ...]: return self.model_executor.supported_tasks def add_request(self, request: Request, request_wave: int = 0): """Add request to the scheduler. `request_wave`: indicate which wave of requests this is expected to belong to in DP case """ # Validate the request_id type. if not isinstance(request.request_id, str): raise TypeError( f"request_id must be a string, got {type(request.request_id)}" ) if pooling_params := request.pooling_params: supported_pooling_tasks = [ task for task in self.get_supported_tasks() if task in POOLING_TASKS ] if pooling_params.task not in supported_pooling_tasks: raise ValueError( f"Unsupported task: {pooling_params.task!r} " f"Supported tasks: {supported_pooling_tasks}" ) if request.kv_transfer_params is not None and ( not self.scheduler.get_kv_connector() ): logger.warning( "Got kv_transfer_params, but no KVConnector found. " "Disabling KVTransfer for this request." ) self.scheduler.add_request(request) def abort_requests(self, request_ids: list[str]): """Abort requests from the scheduler.""" # TODO: The scheduler doesn't really need to know the # specific finish reason, TBD whether we propagate that # (i.e. client-aborted vs stop criteria met). 
self.scheduler.finish_requests(request_ids, RequestStatus.FINISHED_ABORTED) @contextmanager def log_error_detail(self, scheduler_output: SchedulerOutput): """Execute the model and log detailed info on failure.""" try: yield except Exception as err: # We do not want to catch BaseException here since we're only # interested in dumping info when the exception is due to an # error from execute_model itself. # NOTE: This method is exception-free dump_engine_exception( self.vllm_config, scheduler_output, self.scheduler.make_stats() ) raise err def _log_err_callback(self, scheduler_output: SchedulerOutput): """Log error details of a future that's not expected to return a result.""" def callback(f, sched_output=scheduler_output): with self.log_error_detail(sched_output): result = f.result() assert result is None return callback def step(self) -> tuple[dict[int, EngineCoreOutputs], bool]: """Schedule, execute, and make output. Returns tuple of outputs and a flag indicating whether the model was executed. """ # Check for any requests remaining in the scheduler - unfinished, # or finished and not yet removed from the batch. if not self.scheduler.has_requests(): return {}, False scheduler_output = self.scheduler.schedule() future = self.model_executor.execute_model(scheduler_output, non_block=True) grammar_output = self.scheduler.get_grammar_bitmask(scheduler_output) with self.log_error_detail(scheduler_output): model_output = future.result() if model_output is None: model_output = self.model_executor.sample_tokens(grammar_output) # Before processing the model output, process any aborts that happened # during the model execution. 
self._process_aborts_queue() engine_core_outputs = self.scheduler.update_from_output( scheduler_output, model_output ) return engine_core_outputs, scheduler_output.total_num_scheduled_tokens > 0 def post_step(self, model_executed: bool) -> None: # When using async scheduling we can't get draft token ids in advance, # so we update draft token ids in the worker process and don't # need to update draft token ids here. if not self.async_scheduling and self.use_spec_decode and model_executed: # Take the draft token ids. draft_token_ids = self.model_executor.take_draft_token_ids() if draft_token_ids is not None: self.scheduler.update_draft_token_ids(draft_token_ids) def step_with_batch_queue( self, ) -> tuple[dict[int, EngineCoreOutputs] | None, bool]: """Schedule and execute batches with the batch queue. Note that if nothing to output in this step, None is returned. The execution flow is as follows: 1. Try to schedule a new batch if the batch queue is not full. If a new batch is scheduled, directly return an empty engine core output. In other words, fulfilling the batch queue has a higher priority than getting model outputs. 2. If there is no new scheduled batch, meaning that the batch queue is full or no other requests can be scheduled, we block until the first batch in the job queue is finished. 3. Update the scheduler from the output. """ batch_queue = self.batch_queue assert batch_queue is not None # Try to schedule a new batch if the batch queue is not full, but # the scheduler may return an empty batch if all requests are scheduled. # Note that this is not blocking. 
assert len(batch_queue) < self.batch_queue_size model_executed = False deferred_scheduler_output = None if self.scheduler.has_requests(): scheduler_output = self.scheduler.schedule() exec_future = self.model_executor.execute_model( scheduler_output, non_block=True ) if not self.is_ec_producer: model_executed = scheduler_output.total_num_scheduled_tokens > 0 if self.is_pooling_model or not model_executed: # No sampling required (no requests scheduled). future = cast(Future[ModelRunnerOutput], exec_future) else: exec_future.add_done_callback(self._log_err_callback(scheduler_output)) if not scheduler_output.pending_structured_output_tokens: # We aren't waiting for any tokens, get any grammar output # and sample immediately. grammar_output = self.scheduler.get_grammar_bitmask( scheduler_output ) future = self.model_executor.sample_tokens( grammar_output, non_block=True ) else: # We need to defer sampling until we have processed the model output # from the prior step. deferred_scheduler_output = scheduler_output if not deferred_scheduler_output: # Add this step's future to the queue. batch_queue.appendleft((future, scheduler_output)) if ( model_executed and len(batch_queue) < self.batch_queue_size and not batch_queue[-1][0].done() ): # Don't block on next worker response unless the queue is full # or there are no more requests to schedule. return None, True elif not batch_queue: # Queue is empty. We should not reach here since this method should # only be called when the scheduler contains requests or the queue # is non-empty. return None, False # Block until the next result is available. future, scheduler_output = batch_queue.pop() with self.log_error_detail(scheduler_output): model_output = future.result() # Before processing the model output, process any aborts that happened # during the model execution. 
self._process_aborts_queue() engine_core_outputs = self.scheduler.update_from_output( scheduler_output, model_output ) # NOTE(nick): We can either handle the deferred tasks here or save # in a field and do it immediately once step_with_batch_queue is # re-called. The latter slightly favors TTFT over TPOT/throughput. if deferred_scheduler_output: # We now have the tokens needed to compute the bitmask for the # deferred request. Get the bitmask and call sample tokens. grammar_output = self.scheduler.get_grammar_bitmask( deferred_scheduler_output ) future = self.model_executor.sample_tokens(grammar_output, non_block=True) batch_queue.appendleft((future, deferred_scheduler_output)) return engine_core_outputs, model_executed def _process_aborts_queue(self): if not self.aborts_queue.empty(): request_ids = [] while not self.aborts_queue.empty(): ids = self.aborts_queue.get_nowait() # Should be a list here, but also handle string just in case. request_ids.extend((ids,) if isinstance(ids, str) else ids) # More efficient to abort all as a single batch. self.abort_requests(request_ids) def shutdown(self): self.structured_output_manager.clear_backend() if self.model_executor: self.model_executor.shutdown() if self.scheduler: self.scheduler.shutdown() def profile(self, is_start: bool = True): self.model_executor.profile(is_start) def reset_mm_cache(self): # NOTE: Since this is mainly for debugging, we don't attempt to # re-sync the internal caches (P0 sender, P1 receiver) if self.scheduler.has_unfinished_requests(): logger.warning( "Resetting the multi-modal cache when requests are " "in progress may lead to desynced internal caches." 
) # The cache either exists in EngineCore or WorkerWrapperBase if self.mm_receiver_cache is not None: self.mm_receiver_cache.clear_cache() self.model_executor.reset_mm_cache() def reset_prefix_cache( self, reset_running_requests: bool = False, reset_connector: bool = False ) -> bool: return self.scheduler.reset_prefix_cache( reset_running_requests, reset_connector ) def sleep(self, level: int = 1): self.model_executor.sleep(level) def wake_up(self, tags: list[str] | None = None): self.model_executor.wake_up(tags) def is_sleeping(self) -> bool: return self.model_executor.is_sleeping def execute_dummy_batch(self): self.model_executor.execute_dummy_batch() def add_lora(self, lora_request: LoRARequest) -> bool: return self.model_executor.add_lora(lora_request) def remove_lora(self, lora_id: int) -> bool: return self.model_executor.remove_lora(lora_id) def list_loras(self) -> set[int]: return self.model_executor.list_loras() def pin_lora(self, lora_id: int) -> bool: return self.model_executor.pin_lora(lora_id) def save_sharded_state( self, path: str, pattern: str | None = None, max_size: int | None = None, ) -> None: self.model_executor.save_sharded_state( path=path, pattern=pattern, max_size=max_size ) def collective_rpc( self, method: str | Callable[..., _R], timeout: float | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, ) -> list[_R]: return self.model_executor.collective_rpc(method, timeout, args, kwargs) def preprocess_add_request(self, request: EngineCoreRequest) -> tuple[Request, int]: """Preprocess the request. This function could be directly used in input processing thread to allow request initialization running in parallel with Model forward """ # Note on thread safety: no race condition. # `mm_receiver_cache` is reset at the end of LLMEngine init, # and will only be accessed in the input processing thread afterwards. 
if self.mm_receiver_cache is not None and request.mm_features: request.mm_features = self.mm_receiver_cache.get_and_update_features( request.mm_features ) req = Request.from_engine_core_request(request, self.request_block_hasher) if req.use_structured_output: # Note on thread safety: no race condition. # `grammar_init` is only invoked in input processing thread. For # `structured_output_manager`, each request is independent and # grammar compilation is async. Scheduler always checks grammar # compilation status before scheduling request. self.structured_output_manager.grammar_init(req) return req, request.current_wave class EngineCoreProc(EngineCore): """ZMQ-wrapper for running EngineCore in background process.""" ENGINE_CORE_DEAD = b"ENGINE_CORE_DEAD" def __init__( self, vllm_config: VllmConfig, local_client: bool, handshake_address: str, executor_class: type[Executor], log_stats: bool, client_handshake_address: str | None = None, *, engine_index: int = 0, ): self.input_queue = queue.Queue[tuple[EngineCoreRequestType, Any]]() self.output_queue = queue.Queue[tuple[int, EngineCoreOutputs] | bytes]() executor_fail_callback = lambda: self.input_queue.put_nowait( (EngineCoreRequestType.EXECUTOR_FAILED, b"") ) self.engine_index = engine_index identity = self.engine_index.to_bytes(length=2, byteorder="little") self.engines_running = False with self._perform_handshakes( handshake_address, identity, local_client, vllm_config, client_handshake_address, ) as addresses: self.client_count = len(addresses.outputs) # Set up data parallel environment. 
self.has_coordinator = addresses.coordinator_output is not None self.frontend_stats_publish_address = ( addresses.frontend_stats_publish_address ) logger.debug( "Has DP Coordinator: %s, stats publish address: %s", self.has_coordinator, self.frontend_stats_publish_address, ) internal_dp_balancing = ( self.has_coordinator and not vllm_config.parallel_config.data_parallel_external_lb ) # Only publish request queue stats to coordinator for "internal" # and "hybrid" LB modes. self.publish_dp_lb_stats = internal_dp_balancing self._init_data_parallel(vllm_config) super().__init__( vllm_config, executor_class, log_stats, executor_fail_callback, internal_dp_balancing, ) # Background Threads and Queues for IO. These enable us to # overlap ZMQ socket IO with GPU since they release the GIL, # and to overlap some serialization/deserialization with the # model forward pass. # Threads handle Socket <-> Queues and core_busy_loop uses Queue. ready_event = threading.Event() input_thread = threading.Thread( target=self.process_input_sockets, args=( addresses.inputs, addresses.coordinator_input, identity, ready_event, ), daemon=True, ) input_thread.start() self.output_thread = threading.Thread( target=self.process_output_sockets, args=( addresses.outputs, addresses.coordinator_output, self.engine_index, ), daemon=True, ) self.output_thread.start() # Don't complete handshake until DP coordinator ready message is # received. while not ready_event.wait(timeout=10): if not input_thread.is_alive(): raise RuntimeError("Input socket thread died during startup") assert addresses.coordinator_input is not None logger.info("Waiting for READY message from DP Coordinator...") @contextmanager def _perform_handshakes( self, handshake_address: str, identity: bytes, local_client: bool, vllm_config: VllmConfig, client_handshake_address: str | None, ) -> Generator[EngineZmqAddresses, None, None]: """ Perform startup handshakes. For DP=1 or offline mode, this is with the colocated front-end process. 
For DP>1 with internal load-balancing this is with the shared front-end process which may reside on a different node. For DP>1 with external or hybrid load-balancing, two handshakes are performed: - With the rank 0 front-end process which retrieves the DP Coordinator ZMQ addresses and DP process group address. - With the colocated front-end process which retrieves the client input/output socket addresses. with the exception of the rank 0 and colocated engines themselves which don't require the second handshake. Here, "front-end" process can mean the process containing the engine core client (which is the API server process in the case the API server is not scaled out), OR the launcher process running the run_multi_api_server() function in serve.py. """ input_ctx = zmq.Context() is_local = local_client and client_handshake_address is None headless = not local_client handshake = self._perform_handshake( input_ctx, handshake_address, identity, is_local, headless, vllm_config, vllm_config.parallel_config, ) if client_handshake_address is None: with handshake as addresses: yield addresses else: assert local_client local_handshake = self._perform_handshake( input_ctx, client_handshake_address, identity, True, False, vllm_config ) with handshake as addresses, local_handshake as client_addresses: addresses.inputs = client_addresses.inputs addresses.outputs = client_addresses.outputs yield addresses # Update config which may have changed from the handshake vllm_config.__post_init__() @contextmanager def _perform_handshake( self, ctx: zmq.Context, handshake_address: str, identity: bytes, local_client: bool, headless: bool, vllm_config: VllmConfig, parallel_config_to_update: ParallelConfig | None = None, ) -> Generator[EngineZmqAddresses, None, None]: with make_zmq_socket( ctx, handshake_address, zmq.DEALER, identity=identity, linger=5000, bind=False, ) as handshake_socket: # Register engine with front-end. 
addresses = self.startup_handshake( handshake_socket, local_client, headless, parallel_config_to_update ) yield addresses # Send ready message. num_gpu_blocks = vllm_config.cache_config.num_gpu_blocks # We pass back the coordinator stats update address here for the # external LB case for our colocated front-end to use (coordinator # only runs with rank 0). dp_stats_address = self.frontend_stats_publish_address # Include config hash for DP configuration validation ready_msg = { "status": "READY", "local": local_client, "headless": headless, "num_gpu_blocks": num_gpu_blocks, "dp_stats_address": dp_stats_address, } if vllm_config.parallel_config.data_parallel_size > 1: ready_msg["parallel_config_hash"] = ( vllm_config.parallel_config.compute_hash() ) handshake_socket.send(msgspec.msgpack.encode(ready_msg)) @staticmethod def startup_handshake( handshake_socket: zmq.Socket, local_client: bool, headless: bool, parallel_config: ParallelConfig | None = None, ) -> EngineZmqAddresses: # Send registration message. handshake_socket.send( msgspec.msgpack.encode( { "status": "HELLO", "local": local_client,
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/logprobs.py
vllm/v1/engine/logprobs.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import itertools from dataclasses import dataclass from vllm.logger import init_logger from vllm.logprobs import ( PromptLogprobs, SampleLogprobs, append_logprobs_for_next_position, create_prompt_logprobs, create_sample_logprobs, ) from vllm.tokenizers.detokenizer_utils import ( TokenizerLike, convert_ids_list_to_tokens, ) from vllm.v1.engine import EngineCoreOutput, EngineCoreRequest from vllm.v1.outputs import LogprobsLists, LogprobsTensors logger = init_logger(__name__) NONES = itertools.repeat(None) @dataclass class LogprobsProcessor: # Tokenizer for this request, # None if detokenization is disabled. tokenizer: TokenizerLike | None # Logprobs for this request logprobs: SampleLogprobs | None prompt_logprobs: PromptLogprobs | None cumulative_logprob: float | None num_logprobs: int | None num_prompt_logprobs: int | None @classmethod def from_new_request( cls, tokenizer: TokenizerLike | None, request: EngineCoreRequest, ) -> "LogprobsProcessor": sampling_params = request.sampling_params assert sampling_params is not None num_logprobs = sampling_params.logprobs num_prompt_logprobs = sampling_params.prompt_logprobs return cls( tokenizer=tokenizer, cumulative_logprob=(None if num_logprobs is None else 0.0), logprobs=( None if num_logprobs is None else create_sample_logprobs(sampling_params.flat_logprobs) ), prompt_logprobs=( None if num_prompt_logprobs is None else create_prompt_logprobs(sampling_params.flat_logprobs) ), num_prompt_logprobs=num_prompt_logprobs, num_logprobs=num_logprobs, ) def _update_sample_logprobs(self, logprobs_lists: LogprobsLists) -> None: """Update with sample logprobs from EngineCore. Outer lists are only of len > 1 if EngineCore made >1 tokens in prior step (e.g. in spec decoding). Args: logprobs_lists: the lists of logprob tokens, logprobs, and ranks. 
""" assert self.num_logprobs is not None assert self.logprobs is not None assert self.cumulative_logprob is not None token_ids_lst, logprobs_lst, ranks_lst, _ = logprobs_lists for rank_np, logprobs_np, token_ids_np in zip( ranks_lst, logprobs_lst, token_ids_lst ): rank = rank_np.tolist() logprobs = logprobs_np.tolist() token_ids = token_ids_np.tolist() # Detokenize (non-incrementally). decoded_tokens = ( NONES if self.tokenizer is None else (convert_ids_list_to_tokens(self.tokenizer, token_ids)) ) # Sampler puts the sampled logprob in first. sampled_token_logprob = logprobs[0] self.cumulative_logprob += sampled_token_logprob # Update with the Logprob container for this pos. append_logprobs_for_next_position( self.logprobs, token_ids, logprobs, decoded_tokens, rank, self.num_logprobs, ) def _update_prompt_logprobs( self, prompt_logprobs_tensors: LogprobsTensors, ) -> None: """Update with prompt logprobs from EngineCore. Args: prompt_logprobs_tensors: tuple containing the prompt logprobs tensors. """ # Prompt logprobs are enabled. assert self.num_prompt_logprobs is not None assert self.prompt_logprobs is not None token_ids, logprobs, ranks = prompt_logprobs_tensors # Detokenize non-incrementally. # Output is flat: [num_tok, num_lps] -> [num_tok * num_lps] decoded_tokens = ( None if self.tokenizer is None else ( convert_ids_list_to_tokens(self.tokenizer, token_ids.flatten().tolist()) ) ) # Recover shapes. num_prompt_tokens, num_logprobs = logprobs.shape # Pythonize the torch tensors. prompt_token_ranks = ranks.tolist() prompt_logprobs = logprobs.tolist() token_ids = token_ids.tolist() # Make Logprob for each position. for pos in range(num_prompt_tokens): # Handle flattening. offset = pos * num_logprobs offset_end = offset + num_logprobs decoded_tokens_for_pos = ( NONES if decoded_tokens is None else decoded_tokens[offset:offset_end] ) # Update with the Logprob container for this pos. 
append_logprobs_for_next_position( self.prompt_logprobs, token_ids[pos], prompt_logprobs[pos], decoded_tokens_for_pos, prompt_token_ranks[pos], self.num_prompt_logprobs, ) def pop_prompt_logprobs(self) -> PromptLogprobs | None: """Pop and return all request prompt logprobs The logprobs processor aggregates prompt chunk logprobs over one or more prefill chunks. This method returns all prompt logprobs at once and then forgets them. Ensures correct RequestOutputKind.DELTA semantics wherein all prompt logprobs are returned at once at the end of prefill. Returns: None if prompt logprobs are disabled for this request. List of all prompt logprobs, otherwise. """ plp = self.prompt_logprobs if plp: self.prompt_logprobs = [] return plp def update_from_output(self, output: EngineCoreOutput) -> None: if output.new_logprobs is not None: self._update_sample_logprobs(output.new_logprobs) if output.new_prompt_logprobs_tensors is not None: self._update_prompt_logprobs(output.new_prompt_logprobs_tensors)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/coordinator.py
vllm/v1/engine/coordinator.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import copy import multiprocessing import time import weakref import msgspec.msgpack import zmq from vllm.config import ParallelConfig from vllm.logger import init_logger from vllm.utils.network_utils import make_zmq_socket from vllm.utils.system_utils import get_mp_context, set_process_title from vllm.v1.engine import EngineCoreOutputs, EngineCoreRequestType from vllm.v1.serial_utils import MsgpackDecoder from vllm.v1.utils import get_engine_client_zmq_addr, shutdown logger = init_logger(__name__) class DPCoordinator: """Coordinator process used for data-parallel deployments (DP>1). Intermediates between multiple DP engine rank processes and one or more front-end API server processes. * Collects stats from each DP engine (currently just waiting and running queue lengths), and publishes these to all front-ends for use in load-balancing decisions. * Keeps track of the current DP "request wave" number and running state of the engines. This is received from the DP rank 0 engine and published to the front-end processes along with the current load stats. The engines alternate between a global running/paused state. The global "request wave" number is a count of the number of times that the workers collectively move from a running state to a paused state. This transition is synchronized via the all-reduce operation performed in the DPEngineCoreProc._has_global_unfinished_reqs method. * Broadcasts the START_DP_WAVE message to engines to move them from paused to running state when one engine receives a new request. This can happen in two cases: 1) A front-end sending a new request while the engines are paused will concurrently notify the coordinator. 2) An engine receiving a request for a stale request wave while in paused state will notify the coordinator. Engines will move into running state when receiving a new request or START_DP_WAVE message. 
Note that when deployed in External LB mode, no stats will be published by the engines and thus updates will only be sent to front-ends when the request wave / running state changes. """ def __init__( self, parallel_config: ParallelConfig, enable_wave_coordination: bool = True ): dp_size = parallel_config.data_parallel_size assert dp_size > 1, "Coordinator only used for data parallel" host = parallel_config.data_parallel_master_ip external_lb = parallel_config.data_parallel_external_lb hybrid_lb = parallel_config.data_parallel_hybrid_lb # Assume coordinator is colocated with front-end procs when not in # either external or hybrid DP LB mode. local_only = not (external_lb or hybrid_lb) front_publish_address = get_engine_client_zmq_addr( local_only=local_only, host=host ) local_only_eng = dp_size == parallel_config.data_parallel_size_local back_publish_address = get_engine_client_zmq_addr(local_only_eng, host) back_output_address = get_engine_client_zmq_addr(local_only_eng, host) context = get_mp_context() self.proc: multiprocessing.Process = context.Process( target=DPCoordinatorProc.run_coordinator, name="VLLM_DP_Coordinator", kwargs={ "engine_count": parallel_config.data_parallel_size, "front_publish_address": front_publish_address, "back_output_address": back_output_address, "back_publish_address": back_publish_address, "enable_wave_coordination": enable_wave_coordination, }, daemon=True, ) self.proc.start() self.stats_publish_address = front_publish_address self.coord_in_address = back_publish_address self.coord_out_address = back_output_address self._finalizer = weakref.finalize(self, shutdown, [self.proc]) def get_stats_publish_address(self) -> str: return self.stats_publish_address def get_engine_socket_addresses(self) -> tuple[str, str]: """Returns tuple of ZMQ input address, output address.""" return self.coord_in_address, self.coord_out_address def close(self): self._finalizer() class EngineState: def __init__(self): self.request_counts = [0, 0] # [waiting, 
running] class DPCoordinatorProc: def __init__( self, engine_count: int, min_stats_update_interval_ms: int = 100, enable_wave_coordination: bool = True, ): set_process_title("DPCoordinator") self.ctx = zmq.Context() self.engines = [EngineState() for _ in range(engine_count)] self.stats_update_interval_ms = min_stats_update_interval_ms self.enable_wave_coordination = enable_wave_coordination @staticmethod def run_coordinator( engine_count: int, front_publish_address: str, back_output_address: str, back_publish_address: str, min_stats_update_interval_ms: int = 100, enable_wave_coordination: bool = True, ): coordinator = DPCoordinatorProc( engine_count=engine_count, min_stats_update_interval_ms=min_stats_update_interval_ms, enable_wave_coordination=enable_wave_coordination, ) try: coordinator.process_input_socket( front_publish_address, back_output_address, back_publish_address, ) except KeyboardInterrupt: logger.info("DP Coordinator process exiting") def process_input_socket( self, front_publish_address: str, back_output_address: str, back_publish_address: str, ): decoder = MsgpackDecoder(EngineCoreOutputs) # For tracking request wave progression. current_wave = 0 engines_running = False # For tracking request counts for internal load-balancing. stats_changed = False last_stats_step = -1 last_stats_wave = -1 last_step_counts: list[list[int]] | None = None with ( make_zmq_socket( path=front_publish_address, # IPC ctx=self.ctx, socket_type=zmq.XPUB, bind=True, ) as publish_front, make_zmq_socket( path=back_output_address, # IPC or TCP ctx=self.ctx, socket_type=zmq.PULL, bind=True, ) as output_back, make_zmq_socket( path=back_publish_address, # IPC or TCP ctx=self.ctx, socket_type=zmq.XPUB, bind=True, ) as publish_back, ): # Wait until all engines subscribe. for _ in self.engines: if publish_back.recv() != b"\x01": logger.error( "DP Coordinator received unexpected message while " "waiting for engines to subscribe" ) return # Send ready message to engines. 
publish_back.send(b"READY") logger.info("All engine subscriptions received by DP coordinator") poller = zmq.Poller() poller.register(publish_front, zmq.POLLIN) poller.register(output_back, zmq.POLLIN) last_publish_time = 0 while True: elapsed = int(time.time() * 1000) - last_publish_time # Send at stats_update_interval_ms interval if the stats have # changed, or otherwise every 5 seconds. wait_for = self.stats_update_interval_ms if stats_changed else 5000 # Wait at least 50ms to ensure we've received all stats for # the current step. min_timeout = 50 if last_step_counts is None else 0 events = poller.poll(timeout=max(min_timeout, wait_for - elapsed)) if not events: # Poller timeout - publish current stats to front-ends. if last_step_counts is not None: engine_req_counts_list = last_step_counts last_step_counts = None else: engine_req_counts_list = self._get_engine_counts() stats_changed = False to_publish = (engine_req_counts_list, current_wave, engines_running) publish_front.send(msgspec.msgpack.encode(to_publish)) last_publish_time = int(time.time() * 1000) continue events = dict(events) wave_state_changed = False if publish_front in events: buffer = publish_front.recv() if buffer in (b"\x01", b"\x00"): # Ignore subscription messages. 
continue decoded = msgspec.msgpack.decode(buffer) if ( isinstance(decoded, (list, tuple)) and len(decoded) == 2 and decoded[0] == "SCALE_ELASTIC_EP" ): # Handle scale up notification new_engine_count = decoded[1] current_count = len(self.engines) if new_engine_count > current_count: for _ in range(new_engine_count - current_count): self.engines.append(EngineState()) # NOTE(yongji): handle the case # where newly started engines have current_wave = 0 # if existing engines just finished a wave # and engine_running isn't updated yet at # CoordinatorProc requests routed to newly started # engines may not wake up existing engines, as long # as 0 < request.wave < existing engines' # current_wave # we note that 0 is the wave number for the new # engine engines_running = False logger.info( "DPCoordinator scaled up from %s to %s engines", current_count, new_engine_count, ) else: self.engines = self.engines[:new_engine_count] logger.info( "DPCoordinator scaled down from %s to %s engines", current_count, new_engine_count, ) continue # Skip normal engine notification processing # Wave coordination: handle new-request messages from front-end. # Only process these when wave coordination is enabled if self.enable_wave_coordination: # We received a message on the front-end XPUB socket, # from an API server sending a new request while the # engines are paused, so that we can wake the other # engines. engine_to_exclude, wave = decoded if not engines_running: if wave < current_wave: # If the wave number is stale, ensure the message # is handled by all the engines. engine_to_exclude = None engines_running = True wave_state_changed = True self._send_start_wave( publish_back, current_wave, engine_to_exclude ) if output_back in events: # We received a message from one of the engines. 
buffer = output_back.recv() outputs: EngineCoreOutputs = decoder.decode(buffer) assert not outputs.outputs assert outputs.utility_output is None eng_index = outputs.engine_index scheduler_stats = outputs.scheduler_stats if scheduler_stats: # 1. Updated request load stats - update our local # state with these. stats = self.engines[eng_index].request_counts stats_step = scheduler_stats.step_counter stats_wave = scheduler_stats.current_wave if ( stats_wave > last_stats_wave or stats_wave == last_stats_wave and stats_step > last_stats_step ): if stats_changed: last_step_counts = self._get_engine_counts(do_copy=True) last_stats_step = stats_step last_stats_wave = stats_wave elif stats_wave != last_stats_wave or ( stats_step != last_stats_step ): logger.warning( "Received stats for out-of-order " "step (%d, %d) from engine %d (expected " "> (%d, %d))", stats_wave, stats_step, eng_index, last_stats_wave, last_stats_step, ) stats[0] = scheduler_stats.num_waiting_reqs stats[1] = scheduler_stats.num_running_reqs stats_changed = True # Wave coordination: handle wave completion and start notifications # Only process these when wave coordination is enabled if self.enable_wave_coordination: if (wave := outputs.wave_complete) is not None: # 2. Notification from rank 0 engine that we've # moved into the global paused state # (engines_running==False). if current_wave <= wave: new_wave = wave + 1 logger.debug( "Moving DP wave from %d to %d.", current_wave, new_wave, ) current_wave = new_wave engines_running = False wave_state_changed = True elif (wave := outputs.start_wave) is not None and ( wave > current_wave or (wave == current_wave and not engines_running) ): # 3. The engine received request for a non-current wave # so we must ensure that other engines progress to the # next wave (race condition handling). 
logger.debug( "Starting wave %d after notification of " "stale wave request from engine.", wave, ) current_wave = wave engines_running = True wave_state_changed = True self._send_start_wave(publish_back, wave, eng_index) if wave_state_changed: message = (None, current_wave, engines_running) publish_front.send(msgspec.msgpack.encode(message)) @staticmethod def _send_start_wave( socket: zmq.Socket, wave: int, exclude_engine_index: int | None ): """Broadcast the START_DP_WAVE message to all the engines. It includes the current wave number and index of engine which has already received a request with this wave number and so doesn't require additional notification. """ wave_encoded = msgspec.msgpack.encode((wave, exclude_engine_index)) socket.send_multipart((EngineCoreRequestType.START_DP_WAVE.value, wave_encoded)) def _get_engine_counts(self, do_copy=False) -> list[list[int]]: """Return list of [waiting, running] count lists for each engine.""" if do_copy: return [copy.copy(e.request_counts) for e in self.engines] return [e.request_counts for e in self.engines]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/utils.py
vllm/v1/engine/utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import contextlib import os import weakref from collections.abc import Callable, Iterator from dataclasses import dataclass from enum import Enum, auto from multiprocessing import Process, connection from multiprocessing.process import BaseProcess from typing import TYPE_CHECKING from unittest.mock import patch import msgspec import zmq from vllm import envs from vllm.config import CacheConfig, ParallelConfig, VllmConfig from vllm.logger import init_logger from vllm.platforms import current_platform from vllm.ray.ray_env import get_env_vars_to_copy from vllm.utils.network_utils import get_open_zmq_ipc_path, zmq_socket_ctx from vllm.utils.system_utils import get_mp_context from vllm.v1.engine.coordinator import DPCoordinator from vllm.v1.executor import Executor from vllm.v1.utils import get_engine_client_zmq_addr, shutdown if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup logger = init_logger(__name__) STARTUP_POLL_PERIOD_MS = 10000 class CoreEngineState(Enum): NEW = auto() CONNECTED = auto() READY = auto() class CoreEngine: """One per data parallel rank, used to track state during handshaking.""" def __init__(self, index: int = 0, local: bool = True): self.local = local self.identity = index.to_bytes(2, "little") self.state = CoreEngineState.NEW @dataclass class EngineZmqAddresses: # ZMQ input socket addresses for each front-end client (requests) inputs: list[str] # ZMQ output socket addresses for each front-end client (responses) outputs: list[str] # ZMQ input socket address of DP coordinator if applicable coordinator_input: str | None = None # ZMQ output socket address of DP coordinator if applicable coordinator_output: str | None = None # ZMQ socket for front-end to connect to DP coordinator. # Not used by engine, just relayed to front-end in handshake response. # Only required for external DP LB case. 
frontend_stats_publish_address: str | None = None @dataclass class EngineHandshakeMetadata: """Metadata sent to each engine process during startup handshake, including addresses of the front-end ZMQ queues that they should connect to. """ addresses: EngineZmqAddresses parallel_config: dict[str, int | str | list[int]] class CoreEngineProcManager: """ Utility class to handle creation, readiness, and shutdown of background processes used by the AsyncLLM and LLMEngine. """ def __init__( self, target_fn: Callable, local_engine_count: int, start_index: int, local_start_index: int, vllm_config: VllmConfig, local_client: bool, handshake_address: str, executor_class: type[Executor], log_stats: bool, client_handshake_address: str | None = None, ): context = get_mp_context() common_kwargs = { "vllm_config": vllm_config, "local_client": local_client, "handshake_address": handshake_address, "executor_class": executor_class, "log_stats": log_stats, } if client_handshake_address: common_kwargs["client_handshake_address"] = client_handshake_address self.processes: list[BaseProcess] = [] local_dp_ranks = [] for index in range(local_engine_count): local_index = local_start_index + index global_index = start_index + index # Start EngineCore in background process. 
local_dp_ranks.append(local_index) self.processes.append( context.Process( target=target_fn, name=f"EngineCore_DP{global_index}", kwargs=common_kwargs | { "dp_rank": global_index, "local_dp_rank": local_index, }, ) ) self._finalizer = weakref.finalize(self, shutdown, self.processes) data_parallel = vllm_config.parallel_config.data_parallel_size > 1 try: for proc, local_dp_rank in zip(self.processes, local_dp_ranks): # Adjust device control in DP for non-CUDA platforms # as well as external and ray launchers # For CUDA platforms, we use torch.cuda.set_device() with ( set_device_control_env_var(vllm_config, local_dp_rank) if ( data_parallel and ( not current_platform.is_cuda_alike() or vllm_config.parallel_config.use_ray ) ) else contextlib.nullcontext() ): proc.start() finally: # Kill other procs if not all are running. if self.finished_procs(): self.close() def close(self): """Shutdown all procs.""" self._finalizer() def join_first(self): """Wait for any process to exit.""" connection.wait(proc.sentinel for proc in self.processes) def sentinels(self) -> list: return [proc.sentinel for proc in self.processes] def finished_procs(self) -> dict[str, int]: """Returns dict of proc name -> exit code for any finished procs.""" return { proc.name: proc.exitcode for proc in self.processes if proc.exitcode is not None } @contextlib.contextmanager def set_device_control_env_var( vllm_config: VllmConfig, local_dp_rank: int ) -> Iterator[None]: """ Temporarily set CUDA_VISIBLE_DEVICES or equivalent for engine subprocess. 
""" world_size = vllm_config.parallel_config.world_size local_world_size = vllm_config.parallel_config.local_world_size evar = current_platform.device_control_env_var value = get_device_indices(evar, local_dp_rank, world_size, local_world_size) with patch.dict(os.environ, values=((evar, value),)): yield def get_device_indices( device_control_env_var: str, local_dp_rank: int, world_size: int, local_world_size: int | None = None, ): """ Returns a comma-separated string of device indices for the specified data parallel rank. For example, if world_size=2 and local_dp_rank=1, and there are 4 devices, this will select devices 2 and 3 for local_dp_rank=1. """ if local_world_size is None: local_world_size = world_size try: value = ",".join( str(current_platform.device_id_to_physical_device_id(i)) for i in range( local_dp_rank * world_size, local_dp_rank * world_size + local_world_size, ) ) except IndexError as e: raise Exception( f"Error setting {device_control_env_var}: " f"local range: [{local_dp_rank * world_size}, " f"{(local_dp_rank + 1) * world_size}) " "base value: " f'"{os.getenv(device_control_env_var)}"' ) from e return value class CoreEngineActorManager: """ Utility class to handle creation, readiness, and shutdown of core engine Ray actors used by the AsyncLLM and LLMEngine. Different from CoreEngineProcManager, this class manages core engines for both local and remote nodes. 
""" def __init__( self, vllm_config: VllmConfig, addresses: EngineZmqAddresses, executor_class: type[Executor], log_stats: bool, placement_groups: list["PlacementGroup"] | None = None, local_dp_ranks: list[int] | None = None, ): import copy import ray from ray.runtime_env import RuntimeEnv from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy from vllm.v1.engine.core import DPMoEEngineCoreActor, EngineCoreActor dp_size = vllm_config.parallel_config.data_parallel_size actor_class = ( DPMoEEngineCoreActor if dp_size > 1 and vllm_config.model_config.is_moe else EngineCoreActor ) self.local_engine_actors: list[ray.ActorHandle] = [] self.remote_engine_actors: list[ray.ActorHandle] = [] env_vars_list = get_env_vars_to_copy(destination=actor_class.__name__) self.env_vars_dict = { name: os.environ[name] for name in env_vars_list if name in os.environ } runtime_env = RuntimeEnv(env_vars=self.env_vars_dict) self.addresses = addresses self.executor_class = executor_class self.log_stats = log_stats local_engine_count = vllm_config.parallel_config.data_parallel_size_local world_size = vllm_config.parallel_config.world_size if ray.is_initialized(): logger.info("Ray is already initialized. 
Skipping Ray initialization.") else: ray.init() if placement_groups is not None: assert local_dp_ranks is not None, ( "local_dp_ranks must be provided if placement_groups is provided" ) assert len(placement_groups) == len(local_dp_ranks), ( "placement_groups and local_dp_ranks must have the same length" ) logger.info("Using provided placement groups") # TODO(rui): validate passed-in placement groups self.created_placement_groups = [] else: placement_groups, local_dp_ranks = ( CoreEngineActorManager.create_dp_placement_groups(vllm_config) ) self.created_placement_groups = placement_groups assert len(placement_groups) == dp_size, ( "Number of placement groups must match data parallel size" ) self.placement_group_is_local = [] refs = [] for index, local_index, pg in zip( range(dp_size), local_dp_ranks, placement_groups ): dp_vllm_config = copy.deepcopy(vllm_config) dp_vllm_config.parallel_config.placement_group = pg local_client = index < local_engine_count # Ray XPU known issue: dpctl initializes the GPU runtime early, so # setting device env vars in Ray actor's initialization method # will not affect device selection. 
See: # https://github.com/ray-project/ray/blob/master/python/ray/_private/accelerators/intel_gpu.py#L56 # noqa: E501 if current_platform.is_xpu(): device_evar = current_platform.device_control_env_var device_indices = get_device_indices( device_evar, local_index, world_size ) actor_env_vars = self.env_vars_dict.copy() actor_env_vars[device_evar] = device_indices runtime_env = RuntimeEnv(env_vars=actor_env_vars) actor = ( ray.remote(actor_class) .options( scheduling_strategy=PlacementGroupSchedulingStrategy( placement_group=pg, placement_group_bundle_index=world_size, ), runtime_env=runtime_env, ) .remote( vllm_config=dp_vllm_config, executor_class=executor_class, log_stats=log_stats, local_client=local_client, addresses=addresses, dp_rank=index, local_dp_rank=local_index, ) ) if local_client: self.local_engine_actors.append(actor) else: self.remote_engine_actors.append(actor) self.placement_group_is_local.append(local_client) refs.append(actor.wait_for_init.remote()) ray.get(refs) self.run_refs = [] for actor in self.local_engine_actors + self.remote_engine_actors: self.run_refs.append(actor.run.remote()) @staticmethod def create_dp_placement_groups( vllm_config: VllmConfig, ) -> tuple[list["PlacementGroup"], list[int]]: """ Create placement groups for data parallel. """ import ray from ray._private.state import available_resources_per_node logger.info("Creating placement groups for data parallel") dp_master_ip = vllm_config.parallel_config.data_parallel_master_ip dp_size = vllm_config.parallel_config.data_parallel_size dp_size_local = vllm_config.parallel_config.data_parallel_size_local available_resources = available_resources_per_node() world_size = vllm_config.parallel_config.world_size placement_groups: list[PlacementGroup] = [] local_dp_ranks: list[int] = [] dp_master_ip_key = f"node:{dp_master_ip}" nodes = sorted( available_resources.values(), key=lambda x: dp_master_ip_key not in x ) assert len(nodes) > 0, "No nodes with resources found in Ray cluster." 
assert dp_master_ip_key in nodes[0], ( f"The DP master node (ip: {dp_master_ip}) is missing or dead" ) device_str = current_platform.ray_device_key n_node_devices: list[int] = [ int(node_resources[device_str]) for node_resources in nodes if device_str in node_resources ] assert n_node_devices, f"No {device_str} found in Ray cluster." max_device_per_node = max(n_node_devices) pack_strategy = envs.VLLM_RAY_DP_PACK_STRATEGY _supported_pack_strategies = ("strict", "fill", "span") if pack_strategy not in _supported_pack_strategies: raise ValueError( f"{envs.VLLM_RAY_DP_PACK_STRATEGY} is not supported. " "Make sure to set `VLLM_RAY_DP_PACK_STRATEGY` " f"to one of {_supported_pack_strategies}" ) all2all_backend = vllm_config.parallel_config.all2all_backend if pack_strategy == "fill" and ( all2all_backend == "deepep_high_throughput" or all2all_backend == "deepep_low_latency" ): raise ValueError( "DeepEP kernels require EP ranks [0,7] (same for [8,15], ...) " "to be on the same node, but VLLM_RAY_DP_PACK_STRATEGY=fill " "does not guarantee that. " "Please use VLLM_RAY_DP_PACK_STRATEGY=strict instead." ) if pack_strategy in ("strict", "fill"): placement_strategy = "STRICT_PACK" else: placement_strategy = "PACK" assert world_size > max_device_per_node, ( f"World size {world_size} is smaller than the " "maximum number of devices per node " f"{max_device_per_node}. Make sure to set " "`VLLM_RAY_DP_PACK_STRATEGY` to `strict` or `fill`" ) # if we need multiple nodes per dp group, we require for now that # available nodes are homogenous assert set(n_node_devices) == {max_device_per_node}, ( f"Nodes are not homogenous, {nodes}" ) assert world_size % max_device_per_node == 0, ( f"For multi-node data parallel groups, world_size ({world_size}) must " f"be a multiple of number of devices per node ({max_device_per_node})." 
) assert len(n_node_devices) * max_device_per_node >= world_size * dp_size, ( f"Not enough total available nodes ({len(n_node_devices)}) " f"and devices per node ({max_device_per_node}) " f"to satisfy required world size {world_size} and data parallel size " f"{dp_size}" ) assert dp_size_local == 1, ( f"data-parallel-size-local {dp_size_local} should be set as the " "default (1) for VLLM_RAY_DP_PACK_STRATEGY=span. " "The actual data-parallel-size-local will be auto determined." ) # bundles collected for a single DP rank from multiple nodes, # for "span" pack strategy collected_bundles = [] for node_resources in nodes: node_ip_keys = [ key for key in node_resources if key != "node:__internal_head__" and key.startswith("node:") ] assert len(node_ip_keys) == 1, ( f"Zero or multiple node IP keys found in node resources: {node_ip_keys}" ) node_ip_key = node_ip_keys[0] node_ip = node_ip_key.split(":")[1] n_device_on_node = int(node_resources.get(device_str, 0)) if pack_strategy == "span" and n_device_on_node != 0: # Strictly speaking, # dp_size_available = n_device_on_node / world_size # and is a fraction, but we use 1 for easier processing dp_size_available = 1 else: dp_size_available = n_device_on_node // world_size if node_ip == dp_master_ip: if dp_size_available < dp_size_local: raise ValueError( f"Not enough resources to allocate {dp_size_local} DP ranks " f"on DP master node {dp_master_ip}, possible to fit " f"{dp_size_available} DP ranks." 
) dp_size_to_allocate = dp_size_local elif pack_strategy == "strict": if dp_size_available < dp_size_local: logger.info( "Skipping node %s as %s DP ranks could not fit, " "possible to fit %s DP ranks", node_ip, dp_size_local, dp_size_available, ) continue dp_size_to_allocate = dp_size_local else: # for "pack_strategy" in "fill" and "span" # we always take everything that's available dp_size_to_allocate = dp_size_available for i in range(dp_size_to_allocate): device_bundle = [{device_str: 1.0, "node:" + node_ip: 0.001}] if pack_strategy == "span": collected_bundles += device_bundle * n_device_on_node assert len(collected_bundles) <= world_size, ( "collected_bundles should be <= world_size, " f"but got {len(collected_bundles)=} and {world_size=}" ) # we only create a placement group if we collected enough devices if len(collected_bundles) < world_size: continue bundles = collected_bundles + [{"CPU": 1.0}] collected_bundles = [] else: bundles = device_bundle * world_size + [{"CPU": 1.0}] pg = ray.util.placement_group( name=f"dp_rank_{len(placement_groups)}", strategy=placement_strategy, bundles=bundles, ) placement_groups.append(pg) local_dp_ranks.append(i) if len(placement_groups) == dp_size: break if len(placement_groups) < dp_size: raise ValueError( f"Not enough resources to allocate {dp_size} " "placement groups, only created " f"{len(placement_groups)} placement groups. " "Available resources: " f"{available_resources}" ) assert len(placement_groups) == dp_size, ( f"Created {len(placement_groups)} DP placement groups, expected {dp_size}" ) assert len(local_dp_ranks) == dp_size, ( f"local_dp_ranks length {len(local_dp_ranks)} does not match " f"expected {dp_size}" ) return placement_groups, local_dp_ranks @staticmethod def add_dp_placement_groups( old_vllm_config: VllmConfig, new_data_parallel_size: int ) -> tuple[list["PlacementGroup"], list[int]]: """ Add placement groups for new data parallel size. 
""" import ray from ray._private.state import ( available_resources_per_node, total_resources_per_node, ) from ray.util.state import list_nodes old_dp_size = old_vllm_config.parallel_config.data_parallel_size num_pg_to_create = new_data_parallel_size - old_dp_size if num_pg_to_create <= 0: return [], [] dp_master_ip = old_vllm_config.parallel_config.data_parallel_master_ip world_size = old_vllm_config.parallel_config.world_size nodes = list_nodes() nodes = sorted(nodes, key=lambda node: node.node_ip != dp_master_ip) assert nodes[0].node_ip == dp_master_ip, "The first node must be the head node" assert len(nodes) == 1 or nodes[1].node_ip != dp_master_ip, ( "There can only be one head node" ) available_resources = available_resources_per_node() total_resources = total_resources_per_node() placement_groups = [] local_dp_ranks = [] num_pg_created = 0 device_str = current_platform.ray_device_key for node in nodes: if num_pg_created >= num_pg_to_create: break node_ip = node.node_ip node_id = node.node_id available_gpus = int(available_resources[node_id][device_str]) # Get total GPUs on this node from the node's resources # Ray stores node resources with node ID as key total_gpus = int(total_resources[node_id][device_str]) # Calculate used GPUs and used engines on this node used_gpus = max(0, total_gpus - available_gpus) used_engines_on_node = used_gpus // world_size # Calculate how many new engines this node can accommodate available_engine_count = available_gpus // world_size # Create placement groups for new engines on this node for i in range(available_engine_count): if num_pg_created >= num_pg_to_create: break rank = old_dp_size + num_pg_created # Create bundles with node constraint for master node if node_ip == dp_master_ip: bundles = [ {device_str: 1.0, "node:" + dp_master_ip: 0.001} ] * world_size + [{"CPU": 1.0}] else: bundles = [{device_str: 1.0}] * world_size + [{"CPU": 1.0}] pg = ray.util.placement_group( name=f"dp_rank_{rank}", strategy="STRICT_PACK", 
bundles=bundles, ) placement_groups.append(pg) # Local rank starts from the number of engines already used # on this node local_rank = used_engines_on_node + i local_dp_ranks.append(local_rank) num_pg_created += 1 return placement_groups, local_dp_ranks def scale_up_elastic_ep( self, cur_vllm_config: VllmConfig, new_data_parallel_size: int ) -> None: import copy import ray from ray.runtime_env import RuntimeEnv from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy from vllm.v1.engine.core import DPMoEEngineCoreActor, EngineCoreActor actor_class = ( DPMoEEngineCoreActor if cur_vllm_config.model_config.is_moe else EngineCoreActor ) cur_data_parallel_size = len(self.local_engine_actors) + len( self.remote_engine_actors ) assert new_data_parallel_size > cur_data_parallel_size, ( f"New data parallel size {new_data_parallel_size} must be greater " f"than current data parallel size {cur_data_parallel_size} " "for scale up" ) placement_groups, local_dp_ranks = self.add_dp_placement_groups( cur_vllm_config, new_data_parallel_size ) world_size = cur_vllm_config.parallel_config.world_size dp_master_ip = cur_vllm_config.parallel_config.data_parallel_master_ip new_local_engines = 0 runtime_env = RuntimeEnv( env_vars=self.env_vars_dict | {"VLLM_ELASTIC_EP_SCALE_UP_LAUNCH": "1"} ) for i, (pg, local_rank) in enumerate(zip(placement_groups, local_dp_ranks)): rank = cur_data_parallel_size + i dp_vllm_config = copy.deepcopy(cur_vllm_config) dp_vllm_config.parallel_config.data_parallel_size = new_data_parallel_size dp_vllm_config.parallel_config.placement_group = pg # Check if this placement group is on the head node local_client = any( bundle.get("node:" + dp_master_ip, 0) > 0 for bundle in pg.bundle_specs ) if local_client: new_local_engines += 1 # Update data_parallel_size_local dp_vllm_config.parallel_config.data_parallel_size_local = ( cur_vllm_config.parallel_config.data_parallel_size_local + new_local_engines ) actor = ( ray.remote(actor_class) .options( 
scheduling_strategy=PlacementGroupSchedulingStrategy( placement_group=pg, placement_group_bundle_index=world_size, ), runtime_env=runtime_env, ) .remote( vllm_config=dp_vllm_config, executor_class=self.executor_class, log_stats=self.log_stats, local_client=local_client, addresses=self.addresses, dp_rank=rank, local_dp_rank=local_rank, ) ) if local_client: self.local_engine_actors.append(actor) else: self.remote_engine_actors.append(actor) self.created_placement_groups.append(pg) self.placement_group_is_local.append(local_client) ray.get( [ actor.wait_for_init.remote() for actor in ( self.local_engine_actors[-new_local_engines:] if new_local_engines > 0 else [] ) + self.remote_engine_actors[ -(len(placement_groups) - new_local_engines) : ] ] ) actors = ( self.local_engine_actors[-new_local_engines:] if new_local_engines > 0 else [] ) + self.remote_engine_actors[-(len(placement_groups) - new_local_engines) :] for actor in actors: self.run_refs.append(actor.run.remote()) cur_vllm_config.parallel_config.data_parallel_size = new_data_parallel_size # Update old_vllm_config with new data_parallel_size_local if any new # local engines were added if new_local_engines > 0: cur_vllm_config.parallel_config.data_parallel_size_local += ( new_local_engines ) def scale_down_elastic_ep( self, cur_data_parallel_size: int, new_data_parallel_size: int ) -> None: import ray assert cur_data_parallel_size > new_data_parallel_size, ( f"cur_data_parallel_size {cur_data_parallel_size} must be greater " f"than new_data_parallel_size {new_data_parallel_size} " "for scale down" ) for _ in range(cur_data_parallel_size - new_data_parallel_size): pg = self.created_placement_groups.pop() is_local = self.placement_group_is_local.pop() if is_local: self.local_engine_actors.pop() else: self.remote_engine_actors.pop() ray.util.remove_placement_group(pg) def get_run_refs(self): return self.run_refs def close(self): import ray for actor in self.local_engine_actors + self.remote_engine_actors: 
ray.kill(actor) for pg in self.created_placement_groups: ray.util.remove_placement_group(pg) @contextlib.contextmanager def launch_core_engines( vllm_config: VllmConfig, executor_class: type[Executor], log_stats: bool, num_api_servers: int = 1, ) -> Iterator[ tuple[ CoreEngineProcManager | CoreEngineActorManager | None, DPCoordinator | None, EngineZmqAddresses, ] ]: """Launch engine and DP coordinator processes as needed.""" parallel_config = vllm_config.parallel_config dp_size = parallel_config.data_parallel_size local_engine_count = parallel_config.data_parallel_size_local local_start_index = parallel_config.data_parallel_rank_local dp_rank = parallel_config.data_parallel_rank host = parallel_config.data_parallel_master_ip local_engines_only = ( parallel_config.data_parallel_hybrid_lb or parallel_config.data_parallel_external_lb ) # In offline mode there is an LLM instance per DP rank and # one core engine per LLM, see # examples/offline_inference/data_parallel.py. offline_mode = local_start_index is not None # client_local_only = True for cases where this front-end # sends requests only to colocated engines. client_local_only = ( offline_mode or local_engines_only or (local_engine_count == dp_size) ) # Set up input and output addresses. addresses = EngineZmqAddresses( inputs=[ get_engine_client_zmq_addr(client_local_only, host) for _ in range(num_api_servers) ], outputs=[ get_engine_client_zmq_addr(client_local_only, host) for _ in range(num_api_servers) ], ) # Run the DP Coordinator process with rank 0 when in online DP mode. # The coordinator is needed for: # 1. Internal/hybrid LB: collecting and publishing queue stats for load balancing # 2. 
MoE models: wave coordination in addition to stats run_coordinator = ( vllm_config.needs_dp_coordinator and not offline_mode and dp_rank == 0 ) if run_coordinator: coordinator = DPCoordinator( parallel_config, enable_wave_coordination=vllm_config.model_config.is_moe, ) addresses.coordinator_input, addresses.coordinator_output = ( coordinator.get_engine_socket_addresses() ) addresses.frontend_stats_publish_address = ( coordinator.get_stats_publish_address() ) logger.info("Started DP Coordinator process (PID: %d)", coordinator.proc.pid) else: coordinator = None if parallel_config.data_parallel_backend == "ray": logger.info("Starting ray-based data parallel backend") engine_actor_manager = CoreEngineActorManager( vllm_config=vllm_config, addresses=addresses, executor_class=executor_class, log_stats=log_stats, ) yield engine_actor_manager, coordinator, addresses return if offline_mode: assert local_engine_count == 1 engines_to_handshake = [CoreEngine(index=dp_rank, local=True)] elif dp_rank == 0: # Rank 0 holds Coordinator, so it handshakes with all Cores # in both external dplb and internal dplb mode. # Note this also covers the case where we have zero local engines # and rank 0 is headless. engines_to_handshake = [ CoreEngine(index=i, local=(i < local_engine_count)) for i in range(dp_size) ] else: # Rank > 0 handshakes with just the local cores it is managing.
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/parallel_sampling.py
vllm/v1/engine/parallel_sampling.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from copy import copy from typing import Optional, cast from vllm.outputs import CompletionOutput from vllm.sampling_params import RequestOutputKind, SamplingParams from vllm.v1.engine import EngineCoreRequest from vllm.v1.metrics.stats import IterationStats class ParentRequest: """Info, state & processing for parallel sampling request. Store parent request ID and sampling params. Facilitate generating child request sampling params. """ request_id: str external_req_id: str sampling_params: SamplingParams # To track the completion of child requests child_requests: set[str] # To aggregate child completions when not streaming output_aggregator: list[CompletionOutput] # To find the max number of generated tokens across all children max_num_generation_tokens: int # To efficiently obtain child sampling params cached_child_sampling_params: SamplingParams | None def __init__(self, request: EngineCoreRequest) -> None: assert request.external_req_id is not None sampling_params = request.params self.request_id = request.request_id self.external_req_id = request.external_req_id self.sampling_params = sampling_params self.child_requests = set() self.output_aggregator = ( [cast(CompletionOutput, None)] * sampling_params.n if (sampling_params.output_kind == RequestOutputKind.FINAL_ONLY) else [] ) self.max_num_generation_tokens = 0 self.cached_child_sampling_params = None def _get_child_sampling_params( self, index: int, ) -> SamplingParams: """Efficiently obtain child `sampling_params` If `sampling_params.seed` is not `None` then each child request requires a unique clone of parent `sampling_params` with a unique seed. Args: index: index within `n` child requests Returns: Child `sampling_params` instance. 
""" seed = self.sampling_params.seed if self.cached_child_sampling_params: # Reuse child sampling_params data structure return self.cached_child_sampling_params # Build child sampling_params child_sampling_params = copy(self.sampling_params) child_sampling_params.n = 1 if seed is None: # Cache child sampling_params for later reuse self.cached_child_sampling_params = child_sampling_params else: # Each child gets a clone with a unique seed child_sampling_params.seed = seed + index return child_sampling_params def get_child_info(self, index: int) -> tuple[str, SamplingParams]: """Get child request ID and sampling params. Args: index: index within `n` child requests. Returns: (request ID, sampling_params) tuple """ child_req_id = f"{index}_{self.request_id}" self.child_requests.add(child_req_id) return child_req_id, self._get_child_sampling_params(index) @property def n(self) -> int: return self.sampling_params.n def get_outputs( self, child_request_id: str, completion_output: CompletionOutput, ) -> tuple[list[CompletionOutput], bool]: already_finished_and_returned: bool = False if completion_output.finished(): if child_request_id in self.child_requests: self.child_requests.remove(child_request_id) else: # child request ID is not available in child_requests # which means the request had finished in previous # batch step and returned to the client earlier already_finished_and_returned = True if self.sampling_params.output_kind != RequestOutputKind.FINAL_ONLY: # If streaming, just return the current output # # DO NOT output finished and already returned child request to client again outputs = [] if already_finished_and_returned else [completion_output] else: # If not streaming, aggregate the n final outputs. 
self.output_aggregator[completion_output.index] = completion_output outputs = [] if self.child_requests else self.output_aggregator finished = not self.child_requests return outputs, finished def observe_num_generation_tokens(self, num_generation_tokens: int): self.max_num_generation_tokens = max( num_generation_tokens, self.max_num_generation_tokens ) return self.max_num_generation_tokens @staticmethod def observe_finished_request( parent_req: Optional["ParentRequest"], iteration_stats: IterationStats, num_generation_tokens: int, ): n_param = parent_req.n if parent_req is not None else 1 if parent_req is not None: num_generation_tokens = parent_req.observe_num_generation_tokens( num_generation_tokens ) # Child requests finished, we can now record to iteration stats if parent_req is None or not parent_req.child_requests: iteration_stats.max_num_generation_tokens_iter.append(num_generation_tokens) iteration_stats.n_params_iter.append(n_param)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/__init__.py
vllm/v1/engine/__init__.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import enum import time from collections.abc import Mapping from typing import Any import msgspec import torch from vllm.lora.request import LoRARequest from vllm.multimodal.inputs import MultiModalFeatureSpec from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams from vllm.v1.metrics.stats import SchedulerStats from vllm.v1.outputs import LogprobsLists, LogprobsTensors from vllm.v1.serial_utils import UtilityResult # These are possible values of RequestOutput.finish_reason, # so form part of the external API. FINISH_REASON_STRINGS = ("stop", "length", "abort", "error") class FinishReason(enum.IntEnum): """ Reason a request finished - stop, length, abort, or error. Int rather than Str for more compact serialization. stop - a stop string was emitted length - max_tokens was consumed, or max_model_len was reached abort - aborted by client error - retryable request-level internal error (e.g., KV load failure). Invariant: always converted to 500 Internal Server Error. """ STOP = 0 LENGTH = 1 ABORT = 2 ERROR = 3 def __str__(self): return FINISH_REASON_STRINGS[self.value] class EngineCoreRequest( msgspec.Struct, array_like=True, # type: ignore[call-arg] omit_defaults=True, # type: ignore[call-arg] gc=False, ): # type: ignore[call-arg] request_id: str prompt_token_ids: list[int] | None mm_features: list[MultiModalFeatureSpec] | None sampling_params: SamplingParams | None pooling_params: PoolingParams | None eos_token_id: int | None arrival_time: float lora_request: LoRARequest | None cache_salt: str | None data_parallel_rank: int | None prompt_embeds: torch.Tensor | None = None # Index of the client, used to ensure outputs are sent back to the same # client for this request when scaling out the front-end. 
client_index: int = 0 # Used in DP case to indicate which wave of requests this is expected to # belong to, to cover a race condition where the request is sent before # a wave finished notification is received. current_wave: int = 0 priority: int = 0 trace_headers: Mapping[str, str] | None = None # The user-provided request ID. This field is set internally, # copied from the provided request_id that's originally assigned # to the request_id field, see InputProcessor.assign_request_id(). # Used in outputs and to support abort(req_id, internal=False). external_req_id: str | None = None @property def params(self) -> SamplingParams | PoolingParams: """Return the processed params (sampling or pooling).""" if self.sampling_params is not None: return self.sampling_params assert self.pooling_params is not None return self.pooling_params class EngineCoreEventType(enum.IntEnum): """The type of engine core request event.""" QUEUED = 1 SCHEDULED = 2 PREEMPTED = 3 class EngineCoreEvent(msgspec.Struct): """A timestamped engine core event associated with a request. The timestamp is a monotonic timestamps and is used for by the engine frontend to calculate intervals between engine core events. These timestamps should not be compared with timestamps from other processes. 
""" type: EngineCoreEventType timestamp: float @classmethod def new_event( cls, event_type: EngineCoreEventType, timestamp: float | None = None ) -> "EngineCoreEvent": timestamp = time.monotonic() if timestamp is None else timestamp return cls(event_type, timestamp) class EngineCoreOutput( msgspec.Struct, array_like=True, # type: ignore[call-arg] omit_defaults=True, # type: ignore[call-arg] gc=False, ): # type: ignore[call-arg] request_id: str new_token_ids: list[int] new_logprobs: LogprobsLists | None = None new_prompt_logprobs_tensors: LogprobsTensors | None = None pooling_output: torch.Tensor | None = None finish_reason: FinishReason | None = None stop_reason: int | str | None = None events: list[EngineCoreEvent] | None = None kv_transfer_params: dict[str, Any] | None = None trace_headers: Mapping[str, str] | None = None # The number of tokens with prefix cache hits. num_cached_tokens: int = 0 # The number of NaNs in logits. # A value greater than 0 indicates that the output is corrupted. num_nans_in_logits: int = 0 @property def finished(self) -> bool: return self.finish_reason is not None class UtilityOutput( msgspec.Struct, array_like=True, # type: ignore[call-arg] gc=False, ): # type: ignore[call-arg] call_id: int # Non-None implies the call failed, result should be None. failure_message: str | None = None result: UtilityResult | None = None class EngineCoreOutputs( msgspec.Struct, array_like=True, # type: ignore[call-arg] omit_defaults=True, # type: ignore[call-arg] gc=False, ): # type: ignore[call-arg] # NOTE(Nick): We could consider ways to make this more compact, # e.g. columnwise layout engine_index: int = 0 # [num_reqs] outputs: list[EngineCoreOutput] = [] scheduler_stats: SchedulerStats | None = None timestamp: float = 0.0 utility_output: UtilityOutput | None = None finished_requests: set[str] | None = None # In DP case, used to signal that the current wave of requests # has finished and the engines are paused. 
wave_complete: int | None = None # In DP case, used to signal that a request was received for an # "old" wave, so the next wave needs to be started in other engines. start_wave: int | None = None def __post_init__(self): if self.timestamp == 0.0: self.timestamp = time.monotonic() class EngineCoreRequestType(enum.Enum): """ Request types defined as hex byte strings, so it can be sent over sockets without separate encoding step. """ ADD = b"\x00" ABORT = b"\x01" START_DP_WAVE = b"\x02" UTILITY = b"\x03" # Sentinel used within EngineCoreProc. EXECUTOR_FAILED = b"\x04" class ReconfigureDistributedRequest(msgspec.Struct): new_data_parallel_size: int new_data_parallel_rank: int new_data_parallel_rank_local: int new_data_parallel_master_ip: str new_data_parallel_master_port: int class ReconfigureRankType(enum.IntEnum): """ Rank type for reconfiguring distributed request. """ KEEP_CURRENT_RANK = -1 SHUTDOWN_CURRENT_RANK = -2
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/input_processor.py
vllm/v1/engine/input_processor.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import time from collections.abc import Mapping from typing import Any, Literal, cast from vllm.config import VllmConfig from vllm.inputs import ProcessorInputs, PromptType, SingletonInputs from vllm.inputs.parse import split_enc_dec_inputs from vllm.inputs.preprocess import InputPreprocessor from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry from vllm.multimodal.cache import processor_cache_from_config from vllm.multimodal.inputs import MultiModalFeatureSpec, MultiModalUUIDDict from vllm.multimodal.parse import MultiModalDataParser from vllm.multimodal.processing import EncDecMultiModalProcessor, set_request_id from vllm.multimodal.utils import argsort_mm_positions from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams from vllm.tokenizers import TokenizerLike from vllm.tokenizers.mistral import MistralTokenizer from vllm.utils import length_from_prompt_token_ids_or_embeds, random_uuid from vllm.v1.engine import EngineCoreRequest from vllm.v1.metrics.stats import MultiModalCacheStats from vllm.v1.structured_output.backend_guidance import ( has_guidance_unsupported_json_features, validate_guidance_grammar, ) from vllm.v1.structured_output.backend_lm_format_enforcer import ( validate_structured_output_request_lm_format_enforcer, ) from vllm.v1.structured_output.backend_outlines import ( validate_structured_output_request_outlines, ) from vllm.v1.structured_output.backend_xgrammar import validate_xgrammar_grammar logger = init_logger(__name__) class InputProcessor: def __init__( self, vllm_config: VllmConfig, tokenizer: TokenizerLike | None, mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY, ) -> None: self.vllm_config = vllm_config self.model_config = vllm_config.model_config self.cache_config = vllm_config.cache_config 
self.lora_config = vllm_config.lora_config self.structured_outputs_config = vllm_config.structured_outputs_config self.generation_config_fields = self.model_config.try_get_generation_config() self.mm_registry = mm_registry self.mm_processor_cache = processor_cache_from_config(vllm_config, mm_registry) self.input_preprocessor = InputPreprocessor( self.model_config, tokenizer, self.vllm_config.observability_config, mm_registry, mm_processor_cache=self.mm_processor_cache, ) @property def tokenizer(self) -> TokenizerLike | None: return self.input_preprocessor.tokenizer def _validate_logprobs( self, params: SamplingParams, ) -> None: max_logprobs = self.model_config.max_logprobs if max_logprobs == -1: max_logprobs = self.model_config.get_vocab_size() # Validate sample logprobs. if params.logprobs: num_logprobs = params.logprobs if num_logprobs == -1: num_logprobs = self.model_config.get_vocab_size() if num_logprobs > max_logprobs: raise ValueError( f"Requested sample logprobs of {num_logprobs}, " f"which is greater than max allowed: {max_logprobs}" ) # Validate prompt logprobs. 
if params.prompt_logprobs: num_prompt_logprobs = params.prompt_logprobs if num_prompt_logprobs == -1: num_prompt_logprobs = self.model_config.get_vocab_size() if num_prompt_logprobs > max_logprobs: raise ValueError( f"Requested prompt logprobs of {num_prompt_logprobs}, " f"which is greater than max allowed: {max_logprobs}" ) def _validate_sampling_params( self, params: SamplingParams, ) -> None: self._validate_structured_output(params) self._validate_logit_bias(params) if params.allowed_token_ids is None: return if not params.allowed_token_ids: raise ValueError("allowed_token_ids is not None and empty!") if self.tokenizer is None: # When skip_tokenizer_init=True, we can't validate token IDs # Skip validation and let the model handle invalid tokens return vocab_size = len(self.tokenizer) if not all(0 <= tid < vocab_size for tid in params.allowed_token_ids): raise ValueError("allowed_token_ids contains out-of-vocab token id!") def _validate_logit_bias( self, params: SamplingParams, ) -> None: """Validate logit_bias token IDs are within vocabulary range.""" if not params.logit_bias: return vocab_size = self.model_config.get_vocab_size() invalid_token_ids = [] for token_id in params.logit_bias: if token_id < 0 or token_id >= vocab_size: invalid_token_ids.append(token_id) if invalid_token_ids: raise ValueError( f"token_id(s) {invalid_token_ids} in logit_bias contain " f"out-of-vocab token ids. Vocabulary size: {vocab_size}" ) def _validate_supported_sampling_params( self, params: SamplingParams, ) -> None: # Logits processors not supported. if params.logits_processors: raise ValueError( "vLLM V1 does not support per request user provided logits processors." ) # Async scheduling + spec decode currently incompatible with some # sampling parameters. 
if ( self.vllm_config.speculative_config is not None and self.vllm_config.scheduler_config.async_scheduling and ( params.frequency_penalty != 0.0 or params.presence_penalty != 0.0 or params.repetition_penalty != 1.0 or params.bad_words_token_ids or params.structured_outputs ) ): raise ValueError( "async scheduling with spec decoding doesn't yet support " "penalties, bad words or structured outputs in sampling parameters." ) def _validate_params( self, params: SamplingParams | PoolingParams, ): """ Validate supported SamplingParam. Should raise ValueError if unsupported for API Server. """ if isinstance(params, PoolingParams): return self._validate_logprobs(params) self._validate_sampling_params(params) self._validate_supported_sampling_params(params) def _validate_multi_modal_uuids(self, prompt: PromptType) -> None: """ Validate that user-provided multi_modal_uuids align with multi_modal_data in the incoming request prompt(s). Only checks lengths; `None` entries are allowed and will be auto-hashed downstream. """ def _validate_single_prompt(single_prompt: dict | str) -> None: if not isinstance(single_prompt, dict): return mm_data = single_prompt.get("multi_modal_data") mm_uuids = single_prompt.get("multi_modal_uuids") if not mm_data or not mm_uuids: return import torch def _get_len(items: object): if isinstance(items, dict): # Embedding inputs return _get_len(next(iter(items.values()))) if items else 1 if isinstance(items, list): return len(items) if isinstance(items, torch.Tensor): # To keep backwards compatibility for single item embedding input return 1 if getattr(items, "_is_single_item", False) else len(items) return 1 for modality, items in mm_data.items(): if modality in mm_uuids: data_len = _get_len(items) uuid_len = _get_len(mm_uuids[modality]) if uuid_len != data_len: raise ValueError( f"multi_modal_uuids for modality {modality!r} " "must have same length as data: got " f"{uuid_len} uuids vs {data_len} items." 
) else: raise ValueError( f"multi_modal_uuids for modality {modality!r} must " "be provided if multi_modal_data is provided." ) # Handle explicit encoder/decoder prompts or singleton prompt if isinstance(prompt, dict) and "encoder_prompt" in prompt: enc = prompt.get("encoder_prompt") dec = prompt.get("decoder_prompt") if enc is not None: _validate_single_prompt(cast(dict | str, enc)) if dec is not None: _validate_single_prompt(cast(dict | str, dec)) else: _validate_single_prompt(prompt) # type: ignore[arg-type] def _validate_lora(self, lora_request: LoRARequest | None) -> None: if lora_request is None: return # LoRA request passed in while LoRA is not enabled if not self.lora_config: raise ValueError( f"Got lora_request {lora_request} but LoRA is not enabled!" ) if self.tokenizer is not None: logger.warning_once( "vLLM has deprecated support for supporting different " "tokenizers for different LoRAs. By default, vLLM uses base " "model's tokenizer. If you are using a LoRA " "with its own tokenizer, consider specifying `--tokenizer " "[lora_path]` to use the LoRA tokenizer." ) def _validate_structured_output(self, params: SamplingParams) -> None: if not params.structured_outputs or not self.structured_outputs_config: return if self.model_config.skip_tokenizer_init and params.structured_outputs: raise ValueError( "Structured outputs requires a tokenizer so it can't be used with 'skip_tokenizer_init'" # noqa: E501 ) backend = self.structured_outputs_config.backend if _backend := params.structured_outputs._backend: # Request-level backend selection is not supported. # The values may differ if `params` is reused and was set # to a specific backend based on `auto` behavior in a previous # request. We remember that it was set as a result of `auto` # using the `_backend_was_auto` field set in the params. 
if backend != _backend and not ( backend == "auto" and params.structured_outputs._backend_was_auto ): raise ValueError( "Request-level structured output backend selection is not " f"supported. The request specified '{_backend}', but vLLM " f"was initialised with '{backend}'. This error can be " "resolved by removing '_backend' from the request." ) else: params.structured_outputs._backend = backend # Request content validation if ( isinstance(params.structured_outputs.choice, list) and not params.structured_outputs.choice ): # It is invalid for choice to be an empty list raise ValueError( f"Choice '{params.structured_outputs.choice}' cannot be an empty list" # noqa: E501 ) # Reject empty string grammar early to avoid engine-side crashes if ( isinstance(params.structured_outputs.grammar, str) and params.structured_outputs.grammar.strip() == "" ): raise ValueError("structured_outputs.grammar cannot be an empty string") if backend.startswith("xgrammar"): # xgrammar with no fallback validate_xgrammar_grammar(params) elif backend.startswith("guidance"): # TODO: ideally we would have the LLTokenizer here as Lark syntax # allows <|special_token|> and similar, see # https://github.com/guidance-ai/llguidance/blob/main/docs/syntax.md#special-tokens # Without tokenizer these are disallowed in grammars. if isinstance(self.tokenizer, MistralTokenizer): raise ValueError( "Mistral tokenizer is not supported for the 'guidance' " "structured output backend. Please use ['xgrammar', 'outlines'] " "backends or tokenizer_mode='hf' instead." ) validate_guidance_grammar(params, tokenizer=None) elif backend == "outlines": # outlines backend validate_structured_output_request_outlines(params) elif backend == "lm-format-enforcer": # lm format enforcer backend if isinstance(self.tokenizer, MistralTokenizer): raise ValueError( "Mistral tokenizer is not supported for the 'lm-format-enforcer' " "structured output backend. 
Please use ['xgrammar', 'outlines'] " "backends or tokenizer_mode='hf' instead." ) validate_structured_output_request_lm_format_enforcer(params) else: # NOTE: backend must be "auto" here, because we have # checked supported_backends above. # In this mode, we set opinionated defaults based on what we think # will satisfy the most use cases without having to worry about # this setting. We include fallback behavior here, but not with any # other setting where a specific backend was specified. try: validate_xgrammar_grammar(params) params.structured_outputs._backend = "xgrammar" except ValueError: # The request either failed validation # or includes some jsonschema feature(s) that # are not supported in xgrammar. # Check if schema has features unsupported by guidance so_params = params.structured_outputs skip_guidance = False if so_params.json: if isinstance(so_params.json, str): import json schema = json.loads(so_params.json) else: schema = so_params.json skip_guidance = has_guidance_unsupported_json_features(schema) if isinstance(self.tokenizer, MistralTokenizer) or skip_guidance: # Fall back to outlines if the tokenizer is Mistral # or if schema contains features unsupported by guidance validate_structured_output_request_outlines(params) params.structured_outputs._backend = "outlines" else: # Fall back to guidance by default. validate_guidance_grammar(params, tokenizer=None) params.structured_outputs._backend = "guidance" # Remember that this backend was set automatically params.structured_outputs._backend_was_auto = True def _maybe_build_mm_uuids( self, request_id: str, prompt: PromptType, ) -> MultiModalUUIDDict | None: """Build per-item multimodal hash overrides when enabled. In this case, multimodal data items are identified by their request id, modality and index rather than their content. Returns a dictionary of modality -> list[str] of overrides, or None if disabled or no multimodal data is present. 
""" def _extract_mm_data(p: PromptType): if isinstance(p, dict) and "encoder_prompt" in p: enc = p.get("encoder_prompt") if isinstance(enc, dict): return enc.get("multi_modal_data") return None if isinstance(p, dict): return p.get("multi_modal_data") return None mm_data = _extract_mm_data(prompt) if not mm_data: return None mm_uuids: dict[str, list[str | None] | str] = {} for modality, data in mm_data.items(): # Hash each item for embedding inputs. n = ( len(data) if isinstance(data, list) or MultiModalDataParser.is_embeddings(data) else 1 ) mm_uuids[modality] = [f"{request_id}-{modality}-{i}" for i in range(n)] return mm_uuids def _get_mm_identifier( self, mm_hash: str, lora_request: LoRARequest | None, ) -> str: """ When enable_tower_connector_lora is True, multi-modal embeddings vary depending on the LoRA request. Therefore, the mm_hash must be generated based on the LoRA request to prevent incorrect cache hits. """ if ( lora_request is None or self.lora_config is None or not self.lora_config.enable_tower_connector_lora ): return mm_hash return f"{lora_request.lora_name}:{mm_hash}" @staticmethod def assign_request_id(request: EngineCoreRequest): """Replace the externally supplied request ID with an internal request ID that adds 8 random characters in order to ensure uniquness. """ if request.external_req_id is not None: raise ValueError( "The external_req_id field should not be set on EngineCoreRequests" " passed to vLLM; use the request_id field." 
) request.external_req_id = request.request_id request.request_id = f"{request.external_req_id}-{random_uuid():.8}" def process_inputs( self, request_id: str, prompt: PromptType, params: SamplingParams | PoolingParams, arrival_time: float | None = None, lora_request: LoRARequest | None = None, tokenization_kwargs: dict[str, Any] | None = None, trace_headers: Mapping[str, str] | None = None, priority: int = 0, data_parallel_rank: int | None = None, ) -> EngineCoreRequest: self._validate_lora(lora_request) self._validate_params(params) data_parallel_size = self.vllm_config.parallel_config.data_parallel_size if data_parallel_rank is not None and not ( 0 <= data_parallel_rank < data_parallel_size ): raise ValueError( f"data_parallel_rank {data_parallel_rank} " f"is out of range [0, {data_parallel_size})." ) if arrival_time is None: arrival_time = time.time() # Optionally generate multimodal hash overrides to avoid hashing # multimodal data items by their content as their identifiers. # NOTE: when users explicitly turn off BOTH prefix caching and input # processing caching, no multimodal features or embeddings will be # reused across requests, therefore identifying multimodal data items # by their content is no longer necessary, and we create uuids with # request id-modality-index as multimodal hash overrides. if ( self.model_config.multimodal_config and self.model_config.multimodal_config.mm_processor_cache_gb == 0 and not self.cache_config.enable_prefix_caching ): mm_uuids = self._maybe_build_mm_uuids(request_id, prompt) else: # Otherwise, use user-provided uuids as multimodal hash overrides # if provided. self._validate_multi_modal_uuids(prompt) if isinstance(prompt, dict): mm_uuids = cast( MultiModalUUIDDict | None, prompt.get("multi_modal_uuids") ) else: mm_uuids = None # Process inputs, which includes: # 1. Tokenize text prompt, with LoRA request if one exists. # 2. 
For multimodal models with a merged preprocessor, preprocess # multimodal data and expand prompt token ids accordingly. with set_request_id(request_id): processed_inputs: ProcessorInputs = self.input_preprocessor.preprocess( prompt, tokenization_kwargs=tokenization_kwargs, mm_uuids=mm_uuids, ) from vllm.platforms import current_platform current_platform.validate_request( prompt=prompt, params=params, processed_inputs=processed_inputs, ) eos_token_id = self.input_preprocessor.get_eos_token_id() encoder_inputs, decoder_inputs = split_enc_dec_inputs(processed_inputs) self._validate_model_inputs(encoder_inputs, decoder_inputs) # Mypy can be conservative for TypedDict unions; normalize access. if decoder_inputs["type"] == "embeds": prompt_token_ids = None prompt_embeds = decoder_inputs["prompt_embeds"] else: prompt_token_ids = decoder_inputs["prompt_token_ids"] prompt_embeds = None sampling_params = None pooling_params = None if isinstance(params, SamplingParams): # TODO: can we avoid cloning here in multiproc case? sampling_params = params.clone() # If unset max tokens, then generate up to the max_model_len. if sampling_params.max_tokens is None: seq_len = length_from_prompt_token_ids_or_embeds( prompt_token_ids, prompt_embeds ) sampling_params.max_tokens = self.model_config.max_model_len - seq_len sampling_params.update_from_generation_config( self.generation_config_fields, eos_token_id ) if self.tokenizer is not None: sampling_params.update_from_tokenizer(self.tokenizer) else: pooling_params = params.clone() # Multimodal related. mm_features: list[MultiModalFeatureSpec] | None = None if decoder_inputs["type"] == "multimodal": decoder_mm_inputs = decoder_inputs["mm_kwargs"] decoder_mm_positions = decoder_inputs["mm_placeholders"] decoder_mm_hashes = decoder_inputs["mm_hashes"] # Merge and flatten multimodal placeholders, hashes and inputs # from dictionaries to lists, and sort them by each item's position # in the input sequence. 
sorted_mm_idxs = argsort_mm_positions(decoder_mm_positions) mm_features = [] for modality, idx in sorted_mm_idxs: mm_features.append( MultiModalFeatureSpec( data=decoder_mm_inputs[modality][idx], modality=modality, identifier=self._get_mm_identifier( decoder_mm_hashes[modality][idx], lora_request, ), mm_position=decoder_mm_positions[modality][idx], ) ) return EngineCoreRequest( request_id=request_id, prompt_token_ids=prompt_token_ids, prompt_embeds=prompt_embeds, mm_features=mm_features, sampling_params=sampling_params, pooling_params=pooling_params, eos_token_id=eos_token_id, arrival_time=arrival_time, lora_request=lora_request, cache_salt=decoder_inputs.get("cache_salt"), priority=priority, data_parallel_rank=data_parallel_rank, trace_headers=trace_headers, ) def _validate_model_inputs( self, encoder_inputs: SingletonInputs | None, decoder_inputs: SingletonInputs ): if encoder_inputs is not None: self._validate_model_input(encoder_inputs, prompt_type="encoder") self._validate_model_input(decoder_inputs, prompt_type="decoder") def _validate_model_input( self, prompt_inputs: SingletonInputs, *, prompt_type: Literal["encoder", "decoder"], ): model_config = self.model_config prompt_ids = ( None if prompt_inputs["type"] == "embeds" else prompt_inputs["prompt_token_ids"] ) prompt_embeds = ( prompt_inputs["prompt_embeds"] if prompt_inputs["type"] == "embeds" else None ) prompt_len = length_from_prompt_token_ids_or_embeds(prompt_ids, prompt_embeds) if not prompt_ids: if prompt_type == "encoder" and model_config.is_multimodal_model: pass # Mllama may have empty encoder inputs for text-only data elif prompt_inputs["type"] == "embeds": pass # Prompt embeds should not have prompt_ids. 
else: raise ValueError(f"The {prompt_type} prompt cannot be empty") tokenizer = self.tokenizer if tokenizer is not None: max_input_id = max(prompt_ids or (), default=0) # NOTE: tokenizer.max_token_id is the tokenizer’s vocab size while # self.model_config.get_vocab_size() is the model’s vocab size. # For Qwen3 models, the language model has extra tokens that do # not exist in the tokenizer, and vice versa for multimodal # placeholder tokens in some multimodal models. # See https://github.com/QwenLM/Qwen3/issues/29#issuecomment-1933720399 # noqa: E501 # and https://github.com/vllm-project/vllm/pull/22471#discussion_r2312251421 # noqa: E501 # Here we take the max of the two to determine if a token id is # truly out-of-vocabulary. if max_input_id > max( tokenizer.max_token_id, self.model_config.get_vocab_size() - 1 ): raise ValueError(f"Token id {max_input_id} is out of vocabulary") max_prompt_len = self.model_config.max_model_len if prompt_len > max_prompt_len: if prompt_type == "encoder" and model_config.is_multimodal_model: mm_registry = self.input_preprocessor.mm_registry mm_processor = mm_registry.create_processor( model_config, self.vllm_config.observability_config, tokenizer=tokenizer, ) assert isinstance(mm_processor, EncDecMultiModalProcessor) if mm_processor.pad_dummy_encoder_prompt: return # Skip encoder length check for Whisper if model_config.is_multimodal_model: suggestion = ( "Make sure that `max_model_len` is no smaller than the " "number of text tokens plus multimodal tokens. For image " "inputs, the number of image tokens depends on the number " "of images, and possibly their aspect ratios as well." ) else: suggestion = ( "Make sure that `max_model_len` is no smaller than the " "number of text tokens." ) raise ValueError( f"The {prompt_type} prompt (length {prompt_len}) is " f"longer than the maximum model length of {max_prompt_len}. 
" f"{suggestion}" ) # TODO: Find out how many placeholder tokens are there so we can # check that chunked prefill does not truncate them # max_batch_len = self.scheduler_config.max_num_batched_tokens if ( prompt_len == max_prompt_len and prompt_type == "decoder" and not model_config.is_multimodal_model and self.model_config.runner_type != "pooling" ): suggestion = ( "Make sure that `max_model_len` is no smaller than the " "number of text tokens (prompt + requested output tokens)." ) raise ValueError( f"The {prompt_type} prompt (length {prompt_len}) plus the number of " f"requested output tokens (at least 1) is longer than the maximum " f"model length of {max_prompt_len}. {suggestion}" ) def stat_mm_cache(self) -> MultiModalCacheStats | None: return self.input_preprocessor.stat_mm_cache() def clear_mm_cache(self) -> None: self.input_preprocessor.clear_mm_cache()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/engine/output_processor.py
vllm/v1/engine/output_processor.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio from collections import defaultdict from collections.abc import Iterable from dataclasses import dataclass from typing import Any, cast import torch from vllm.lora.request import LoRARequest from vllm.outputs import ( CompletionOutput, PoolingOutput, PoolingRequestOutput, RequestOutput, ) from vllm.sampling_params import RequestOutputKind from vllm.tokenizers import TokenizerLike from vllm.tracing import SpanAttributes, SpanKind, Tracer, extract_trace_context from vllm.utils import length_from_prompt_token_ids_or_embeds from vllm.v1.engine import EngineCoreOutput, EngineCoreRequest, FinishReason from vllm.v1.engine.detokenizer import IncrementalDetokenizer from vllm.v1.engine.logprobs import LogprobsProcessor from vllm.v1.engine.parallel_sampling import ParentRequest from vllm.v1.metrics.stats import ( IterationStats, LoRARequestStates, RequestStateStats, SchedulerStats, ) class RequestOutputCollector: """ Collects streamed RequestOutputs per individual request, for hand-off to the consuming asyncio generate task. When streaming deltas, RequestOutputs are merged if the producer gets ahead of the consumer. """ def __init__(self, output_kind: RequestOutputKind, request_id: str): self.aggregate = output_kind == RequestOutputKind.DELTA self.request_id = request_id self.output: RequestOutput | PoolingRequestOutput | Exception | None = None self.ready = asyncio.Event() def put(self, output: RequestOutput | PoolingRequestOutput | Exception) -> None: """Non-blocking put operation.""" if self.output is None or isinstance(output, Exception): self.output = output self.ready.set() elif isinstance(self.output, RequestOutput) and isinstance( output, RequestOutput ): # This ensures that request outputs with different request indexes # (if n > 1) do not override each other. 
self.output.add(output, aggregate=self.aggregate) elif isinstance(self.output, PoolingRequestOutput) and isinstance( output, PoolingRequestOutput ): self.output = output async def get(self) -> RequestOutput | PoolingRequestOutput: """Get operation blocks on put event.""" while (output := self.output) is None: await self.ready.wait() self.output = None self.ready.clear() if isinstance(output, Exception): raise output return output def get_nowait(self) -> RequestOutput | PoolingRequestOutput | None: """Non-blocking get operation.""" output = self.output if output is not None: self.output = None self.ready.clear() if isinstance(output, Exception): raise output return output @dataclass class OutputProcessorOutput: request_outputs: list[RequestOutput | PoolingRequestOutput] reqs_to_abort: list[str] class RequestState: def __init__( self, request_id: str, external_req_id: str, parent_req: ParentRequest | None, request_index: int, lora_request: LoRARequest | None, output_kind: RequestOutputKind, prompt: str | None, prompt_token_ids: list[int] | None, prompt_embeds: torch.Tensor | None, logprobs_processor: LogprobsProcessor | None, detokenizer: IncrementalDetokenizer | None, max_tokens_param: int | None, arrival_time: float, queue: RequestOutputCollector | None, log_stats: bool, stream_interval: int, top_p: float | None = None, n: int | None = None, temperature: float | None = None, ): self.request_id = request_id self.external_req_id = external_req_id self.parent_req = parent_req self.request_index = request_index self.lora_request = lora_request self.lora_name = lora_request.lora_name if lora_request is not None else None self.output_kind = output_kind self.prompt = prompt self.prompt_token_ids = prompt_token_ids self.prompt_embeds = prompt_embeds self.prompt_len = length_from_prompt_token_ids_or_embeds( self.prompt_token_ids, self.prompt_embeds ) self.logprobs_processor = logprobs_processor self.detokenizer = detokenizer self.max_tokens_param = max_tokens_param 
self.top_p = top_p self.n = n self.temperature = temperature self.is_prefilling = True self.queue = queue self.num_cached_tokens = 0 self.stats = RequestStateStats(arrival_time=arrival_time) if log_stats else None # Stream Interval self.stream_interval = stream_interval self.sent_tokens_offset = 0 # Offset of sent tokens @classmethod def from_new_request( cls, tokenizer: TokenizerLike | None, request: EngineCoreRequest, prompt: str | None, parent_req: ParentRequest | None, request_index: int, queue: RequestOutputCollector | None, log_stats: bool, stream_interval: int, ) -> "RequestState": if sampling_params := request.sampling_params: if not sampling_params.detokenize: tokenizer = None output_kind = sampling_params.output_kind logprobs_processor = LogprobsProcessor.from_new_request( tokenizer=tokenizer, request=request, ) detokenizer = IncrementalDetokenizer.from_new_request( tokenizer=tokenizer, request=request, ) max_tokens_param = sampling_params.max_tokens top_p = sampling_params.top_p n = sampling_params.n temperature = sampling_params.temperature else: logprobs_processor = None detokenizer = None max_tokens_param = None top_p = None n = None temperature = None assert request.pooling_params is not None output_kind = request.pooling_params.output_kind assert request.external_req_id is not None return cls( request_id=request.request_id, external_req_id=request.external_req_id, parent_req=parent_req, request_index=request_index, lora_request=request.lora_request, output_kind=output_kind, prompt=prompt, prompt_token_ids=request.prompt_token_ids, prompt_embeds=request.prompt_embeds, logprobs_processor=logprobs_processor, detokenizer=detokenizer, max_tokens_param=max_tokens_param, top_p=top_p, n=n, temperature=temperature, arrival_time=request.arrival_time, queue=queue, log_stats=log_stats, stream_interval=stream_interval, ) def make_request_output( self, new_token_ids: list[int], pooling_output: torch.Tensor | None, finish_reason: FinishReason | None, stop_reason: 
int | str | None, kv_transfer_params: dict[str, Any] | None = None, ) -> RequestOutput | PoolingRequestOutput | None: finished = finish_reason is not None final_only = self.output_kind == RequestOutputKind.FINAL_ONLY if not finished and final_only: # Only the final output is required in FINAL_ONLY mode. return None if self.stream_interval > 1: assert self.detokenizer is not None # Send output request only when # 1. It has finished, or # 2. It is the first token, or # 3. It has reached the stream interval number of tokens if not ( finished or self.sent_tokens_offset == 0 or len(self.detokenizer.output_token_ids) - self.sent_tokens_offset >= self.stream_interval ): return None if self.output_kind == RequestOutputKind.DELTA: # Send tokens from the offset in DELTA mode, otherwise all # tokens are sent. new_token_ids = self.detokenizer.output_token_ids[ self.sent_tokens_offset : ] self.sent_tokens_offset = len(self.detokenizer.output_token_ids) external_req_id = self.external_req_id if pooling_output is not None: return self._new_request_output( external_req_id, [self._new_pooling_output(pooling_output)], finished, ) output = self._new_completion_output(new_token_ids, finish_reason, stop_reason) if self.parent_req is None: outputs = [output] else: outputs, finished = self.parent_req.get_outputs(self.request_id, output) if not outputs: return None external_req_id = self.parent_req.external_req_id return self._new_request_output( external_req_id, outputs, finished, kv_transfer_params ) def _new_request_output( self, external_req_id: str, outputs: list[CompletionOutput] | list[PoolingOutput], finished: bool, kv_transfer_params: dict[str, Any] | None = None, ) -> RequestOutput | PoolingRequestOutput: first_output = outputs[0] if isinstance(first_output, PoolingOutput): assert len(outputs) == 1 # Prompt embeddings are currently not supported by pooling requests. 
assert self.prompt_token_ids is not None return PoolingRequestOutput( request_id=external_req_id, outputs=first_output, num_cached_tokens=self.num_cached_tokens, prompt_token_ids=self.prompt_token_ids, finished=finished, ) assert self.logprobs_processor is not None if self.output_kind == RequestOutputKind.DELTA: # Side effect: logprobs processor forgets prompt logprobs prompt_logprobs = self.logprobs_processor.pop_prompt_logprobs() else: prompt_logprobs = self.logprobs_processor.prompt_logprobs # If prompt embeds were used, put placeholder prompt token ids prompt_token_ids = self.prompt_token_ids if prompt_token_ids is None and self.prompt_embeds is not None: prompt_token_ids = [0] * len(self.prompt_embeds) return RequestOutput( request_id=external_req_id, # request_id is what was provided externally lora_request=self.lora_request, prompt=self.prompt, prompt_token_ids=prompt_token_ids, prompt_logprobs=prompt_logprobs, outputs=cast(list[CompletionOutput], outputs), finished=finished, kv_transfer_params=kv_transfer_params, num_cached_tokens=self.num_cached_tokens, metrics=self.stats, ) def _new_completion_output( self, token_ids: list[int], finish_reason: FinishReason | None, stop_reason: int | str | None, ) -> CompletionOutput: assert self.detokenizer is not None assert self.logprobs_processor is not None finished = finish_reason is not None delta = self.output_kind == RequestOutputKind.DELTA # Prepare text and token_ids, based on delta mode text = self.detokenizer.get_next_output_text(finished, delta) if not delta: token_ids = self.detokenizer.output_token_ids # Prepare logprobs, based on delta mode logprobs = self.logprobs_processor.logprobs if delta and logprobs: logprobs = logprobs[-len(token_ids) :] return CompletionOutput( index=self.request_index, text=text, token_ids=token_ids, logprobs=logprobs, cumulative_logprob=self.logprobs_processor.cumulative_logprob, finish_reason=str(finish_reason) if finished else None, stop_reason=stop_reason if finished else 
None, ) def _new_pooling_output(self, pooling_output: torch.Tensor) -> PoolingOutput: return PoolingOutput(data=pooling_output) class OutputProcessor: """Process EngineCoreOutputs into RequestOutputs.""" def __init__( self, tokenizer: TokenizerLike | None, log_stats: bool, stream_interval: int = 1, ): self.log_stats = log_stats self.tokenizer = tokenizer self.stream_interval = stream_interval self.request_states: dict[str, RequestState] = {} self.parent_requests: dict[str, ParentRequest] = {} self.external_req_ids: defaultdict[str, list[str]] = defaultdict(list) self.lora_states = LoRARequestStates(log_stats) self.tracer: Tracer | None = None self._requests_drained = asyncio.Event() self._requests_drained.set() def get_num_unfinished_requests(self): return len(self.request_states) def has_unfinished_requests(self) -> bool: return len(self.request_states) > 0 async def wait_for_requests_to_drain(self) -> None: if not self.request_states: return await self._requests_drained.wait() def propagate_error(self, e: Exception): """Propagate error to all generate() tasks.""" for _, state in self.request_states.items(): assert state.queue is not None state.queue.put(e) def abort_requests(self, request_ids: Iterable[str], internal: bool) -> list[str]: """Abort a list of requests. The request_ids may be either external request IDs (those passed to InputProcessor.process_inputs()) or internal request IDs (those randomly generated when creating the EngineCoreRequest). If an external request ID is provided, and that external request ID was used for multiple requests, all requests associated with that external request ID are aborted. In the case of parallel sampling, a request ID may be used to identify a parent request, in which case the associated child requests are aborted also. 
""" internal_req_ids = [] for request_id in request_ids: if internal: # Internal ID - this may be a parent request internal_req_ids.append(request_id) # Remove internal ID from the external->internal mapping if req_state := self.request_states.get(request_id): external_req_id = req_state.external_req_id internal_ids = self.external_req_ids[external_req_id] internal_ids.remove(request_id) if not internal_ids: del self.external_req_ids[external_req_id] elif internal_ids := self.external_req_ids.pop(request_id, []): # External ID - abort all requests in the external->internal mapping internal_req_ids.extend(internal_ids) request_ids_to_abort = [] for request_id in internal_req_ids: req_state = self.request_states.pop(request_id, None) if req_state is not None: self.lora_states.request_finished(request_id, req_state.lora_name) request_ids_to_abort.append(request_id) # Produce final abort output. if req_state.queue is not None and ( request_output := req_state.make_request_output( new_token_ids=[], # Set pooling_output is not None to # correctly enter the abort pooling branch pooling_output=torch.randn(0, device="cpu") if req_state.detokenizer is None else None, finish_reason=FinishReason.ABORT, stop_reason=None, kv_transfer_params=None, ) ): req_state.queue.put(request_output) elif parent := self.parent_requests.get(request_id): # Abort children prior to removing the parent. 
if parent.child_requests: child_reqs = list(parent.child_requests) child_reqs = self.abort_requests(child_reqs, internal=True) request_ids_to_abort.extend(child_reqs) self.parent_requests.pop(request_id, None) if not self.request_states: self._requests_drained.set() return request_ids_to_abort def add_request( self, request: EngineCoreRequest, prompt: str | None, parent_req: ParentRequest | None = None, request_index: int = 0, queue: RequestOutputCollector | None = None, ) -> None: request_id = request.request_id if request_id in self.request_states: raise ValueError(f"Request id {request_id} already running.") req_state = RequestState.from_new_request( tokenizer=self.tokenizer, request=request, prompt=prompt, parent_req=parent_req, request_index=request_index, queue=queue, log_stats=self.log_stats, stream_interval=self.stream_interval, ) if self._requests_drained.is_set(): self._requests_drained.clear() self.request_states[request_id] = req_state if parent_req: self.parent_requests[parent_req.request_id] = parent_req # Track the external_req_id -> [internal_req_id, ...] mapping self.external_req_ids[req_state.external_req_id].append(request_id) def process_outputs( self, engine_core_outputs: list[EngineCoreOutput], engine_core_timestamp: float | None = None, iteration_stats: IterationStats | None = None, ) -> OutputProcessorOutput: """ Process the EngineCoreOutputs: 1) Compute stats for logging 2) Detokenize 3) Create and handle RequestOutput objects: * If there is a queue (for usage with AsyncLLM), put the RequestOutput objects into the queue for handling by the per-request generate() tasks. * If there is no queue (for usage with LLMEngine), return a list of RequestOutput objects. NOTE FOR DEVELOPERS vLLM V1 minimizes the number of python loops over the full batch to ensure system overheads are minimized. This is the only function that should loop over EngineCoreOutputs. If you need to touch every element of the batch, do it from within the loop below. 
""" request_outputs: list[RequestOutput | PoolingRequestOutput] = [] reqs_to_abort: list[str] = [] for engine_core_output in engine_core_outputs: req_id = engine_core_output.request_id req_state = self.request_states.get(req_id) if req_state is None: # Ignore output for already-aborted request. continue # 1) Compute stats for this iteration. self._update_stats_from_output( req_state, engine_core_output, engine_core_timestamp, iteration_stats ) new_token_ids = engine_core_output.new_token_ids pooling_output = engine_core_output.pooling_output finish_reason = engine_core_output.finish_reason stop_reason = engine_core_output.stop_reason kv_transfer_params = engine_core_output.kv_transfer_params req_state.num_cached_tokens = engine_core_output.num_cached_tokens req_state.is_prefilling = False if pooling_output is None: assert req_state.detokenizer is not None assert req_state.logprobs_processor is not None # 2) Detokenize the token ids into text and perform stop checks. stop_string = req_state.detokenizer.update( new_token_ids, finish_reason == FinishReason.STOP ) if stop_string: finish_reason = FinishReason.STOP stop_reason = stop_string # 3) Compute sample and prompt logprobs for request, # if required. req_state.logprobs_processor.update_from_output(engine_core_output) # 4) Create and handle RequestOutput objects. if request_output := req_state.make_request_output( new_token_ids, pooling_output, finish_reason, stop_reason, kv_transfer_params, ): if req_state.queue is not None: # AsyncLLM: put into queue for handling by generate(). req_state.queue.put(request_output) else: # LLMEngine: return list of RequestOutputs. request_outputs.append(request_output) # Free completed requests. if finish_reason is not None: self.request_states.pop(req_id) internal_ids = self.external_req_ids[req_state.external_req_id] internal_ids.remove(req_id) if not internal_ids: del self.external_req_ids[req_state.external_req_id] # Remove parent request if applicable. 
parent_req = req_state.parent_req if parent_req and not parent_req.child_requests: self.parent_requests.pop(parent_req.request_id, None) if not self.request_states: self._requests_drained.set() if not engine_core_output.finished: # If req not finished in EngineCore, but Detokenizer # detected stop string, abort needed in EngineCore. reqs_to_abort.append(req_id) # Track per-request stats self._update_stats_from_finished( req_state, finish_reason, iteration_stats ) if self.tracer: self.do_tracing(engine_core_output, req_state, iteration_stats) return OutputProcessorOutput( request_outputs=request_outputs, reqs_to_abort=reqs_to_abort, ) def update_scheduler_stats(self, scheduler_stats: SchedulerStats | None): self.lora_states.update_scheduler_stats(scheduler_stats) def do_tracing( self, engine_core_output: EngineCoreOutput, req_state: RequestState, iteration_stats: IterationStats | None, ) -> None: assert req_state.stats is not None assert iteration_stats is not None assert self.tracer is not None arrival_time_nano_seconds = int(req_state.stats.arrival_time * 1e9) trace_context = extract_trace_context(engine_core_output.trace_headers) prompt_length = length_from_prompt_token_ids_or_embeds( req_state.prompt_token_ids, req_state.prompt_embeds ) with self.tracer.start_as_current_span( "llm_request", kind=SpanKind.SERVER, context=trace_context, start_time=arrival_time_nano_seconds, ) as span: metrics = req_state.stats e2e_time = iteration_stats.iteration_timestamp - metrics.arrival_time queued_time = metrics.scheduled_ts - metrics.queued_ts prefill_time = metrics.first_token_ts - metrics.scheduled_ts decode_time = metrics.last_token_ts - metrics.first_token_ts inference_time = metrics.last_token_ts - metrics.scheduled_ts span.set_attribute( SpanAttributes.GEN_AI_LATENCY_TIME_TO_FIRST_TOKEN, metrics.first_token_latency, ) span.set_attribute(SpanAttributes.GEN_AI_LATENCY_E2E, e2e_time) span.set_attribute(SpanAttributes.GEN_AI_LATENCY_TIME_IN_QUEUE, queued_time) 
span.set_attribute(SpanAttributes.GEN_AI_USAGE_PROMPT_TOKENS, prompt_length) span.set_attribute( SpanAttributes.GEN_AI_USAGE_COMPLETION_TOKENS, metrics.num_generation_tokens, ) span.set_attribute( SpanAttributes.GEN_AI_LATENCY_TIME_IN_MODEL_PREFILL, prefill_time ) span.set_attribute( SpanAttributes.GEN_AI_LATENCY_TIME_IN_MODEL_DECODE, decode_time ) span.set_attribute( SpanAttributes.GEN_AI_LATENCY_TIME_IN_MODEL_INFERENCE, inference_time ) # meta span.set_attribute( SpanAttributes.GEN_AI_REQUEST_ID, req_state.external_req_id ) if req_state.top_p: span.set_attribute(SpanAttributes.GEN_AI_REQUEST_TOP_P, req_state.top_p) if req_state.max_tokens_param: span.set_attribute( SpanAttributes.GEN_AI_REQUEST_MAX_TOKENS, req_state.max_tokens_param ) if req_state.temperature: span.set_attribute( SpanAttributes.GEN_AI_REQUEST_TEMPERATURE, req_state.temperature ) if req_state.n: span.set_attribute(SpanAttributes.GEN_AI_REQUEST_N, req_state.n) def _update_stats_from_output( self, req_state: RequestState, engine_core_output: EngineCoreOutput, engine_core_timestamp: float | None, iteration_stats: IterationStats | None, ): if iteration_stats is None: return assert engine_core_timestamp is not None assert req_state.stats is not None iteration_stats.update_from_output( engine_core_output, engine_core_timestamp, req_state.is_prefilling, req_state.prompt_len, req_state.stats, self.lora_states, req_state.lora_name, ) def _update_stats_from_finished( self, req_state: RequestState, finish_reason: FinishReason | None, iteration_stats: IterationStats | None, ): if iteration_stats is None: return assert finish_reason is not None assert req_state.stats is not None iteration_stats.update_from_finished_request( finish_reason=finish_reason, num_prompt_tokens=req_state.prompt_len, max_tokens_param=req_state.max_tokens_param, req_stats=req_state.stats, num_cached_tokens=req_state.num_cached_tokens, ) self.lora_states.request_finished(req_state.request_id, req_state.lora_name) 
ParentRequest.observe_finished_request( req_state.parent_req, iteration_stats, req_state.stats.num_generation_tokens )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/sampler.py
vllm/v1/sample/sampler.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""A layer that samples the next tokens from the model's outputs."""

import torch
import torch.nn as nn

from vllm.config.model import LogprobsMode
from vllm.utils.platform_utils import is_pin_memory_available
from vllm.v1.outputs import LogprobsTensors, SamplerOutput
from vllm.v1.sample.metadata import SamplingMetadata
from vllm.v1.sample.ops.bad_words import apply_bad_words
from vllm.v1.sample.ops.logprobs import batched_count_greater_than
from vllm.v1.sample.ops.penalties import apply_all_penalties
from vllm.v1.sample.ops.topk_topp_sampler import TopKTopPSampler

# Temperatures below this threshold are treated as greedy (avoid div-by-~0).
_SAMPLING_EPS = 1e-5


class Sampler(nn.Module):
    """
    A layer that samples the next tokens from the model's outputs
    with the following steps in order:

    1. If logprobs are requested:
       a) If `logprobs_mode` is `raw_logprobs`, compute logprobs
          as the final logprobs to return.
       b) If `logprobs_mode` is `raw_logits`, clone the logits
          as the final logprobs to return.
    2. Convert logits to float32.
    3. Apply allowed token ids whitelist.
    4. Apply bad words exclusion.
    5. Apply logit processors which are not argmax-invariant,
       i.e. that can impact greedy sampling.
       a) Min tokens processor
       b) Logit bias processor
    6. Apply penalties
       a) Repetition penalty
       b) Frequency penalty
       c) Presence penalty
    7. Sample the next tokens. `sample` method performs the following steps:
       a) If not `all_random`, perform greedy sampling. If `all_greedy`,
          return the greedily sampled tokens and final logprobs if requested.
       b) Apply temperature.
       c) Apply logit processors which are argmax-invariant, by default
          the min_p processor.
       d) Apply top_k and/or top_p.
       e) Sample the next tokens with the probability distribution.
       f) If `all_random` or temperature >= epsilon (1e-5), return the
          randomly sampled tokens and final logprobs if requested. Else,
          return the greedily sampled tokens and logprobs if requested.
    8. Gather the logprobs of the top `max_num_logprobs` and sampled token
       (if requested). Note that if the sampled token is within the top
       `max_num_logprobs`, the logprob will be eventually merged in
       `LogprobsProcessor` during output processing. Therefore, the
       final output may contain either `max_num_logprobs + 1` or
       `max_num_logprobs` logprobs.
    9. Return the final `SamplerOutput`.
    """

    def __init__(self, logprobs_mode: LogprobsMode = "raw_logprobs"):
        super().__init__()
        self.topk_topp_sampler = TopKTopPSampler(logprobs_mode)
        self.pin_memory = is_pin_memory_available()
        self.logprobs_mode = logprobs_mode

    def forward(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
        predict_bonus_token: bool = False,
        logprobs_mode_override: LogprobsMode | None = None,
    ) -> SamplerOutput:
        """Sample next tokens (and optionally logprobs) from `logits`.

        Args:
            logits: Model output logits, one row per token position.
            sampling_metadata: Per-batch sampling parameters.
            predict_bonus_token: Whether speculative bonus-token prediction
                is active (affects penalty/bad-words bookkeeping).
            logprobs_mode_override: Optional override of the configured
                logprobs mode for this call.

        Returns:
            SamplerOutput with sampled token ids shaped [num_requests, 1]
            and optional logprobs tensors.
        """
        logprobs_mode = logprobs_mode_override or self.logprobs_mode
        # NOTE(woosuk): Use the original logits (before any penalties or
        # temperature scaling) for the top-k logprobs.
        # This is different from the V0 sampler, which uses the logits that
        # is used for sampling (after penalties and temperature scaling).
        num_logprobs = sampling_metadata.max_num_logprobs
        if num_logprobs is not None:
            if logprobs_mode == "raw_logprobs":
                raw_logprobs = self.compute_logprobs(logits)
            elif logprobs_mode == "raw_logits":
                if logits.dtype == torch.float32:
                    raw_logprobs = logits.clone()
                else:
                    raw_logprobs = logits.to(torch.float32)

        # Use float32 for the logits.
        logits = logits.to(torch.float32)
        logits = self.apply_logits_processors(
            logits, sampling_metadata, predict_bonus_token
        )
        # Sample the next token.
        # NOTE(review): `logprobs_mode_override` is not forwarded to
        # `sample()`, so "processed_*" overrides rely on `self.logprobs_mode`
        # — confirm this is intentional.
        sampled, processed_logprobs = self.sample(logits, sampling_metadata)
        if processed_logprobs is not None:
            raw_logprobs = processed_logprobs
        # Convert sampled token ids to int64 (long) type to ensure compatibility
        # with subsequent operations that may use these values as indices.
        # This conversion is necessary because FlashInfer sampling operations
        # return int32 (while PyTorch argmax and topk return int64).
        sampled = sampled.long()

        if num_logprobs is None:
            logprobs_tensors = None
        elif num_logprobs == -1:
            # Return the full unsorted and unranked logprobs.
            logprobs_tensors = LogprobsTensors(
                torch.empty(0), raw_logprobs, torch.empty(0)
            )
        else:
            # Gather the logprobs and ranks of the topk and sampled token.
            logprobs_tensors = self.gather_logprobs(
                raw_logprobs, num_logprobs, token_ids=sampled
            )

        # Use int32 to reduce the tensor size.
        sampled = sampled.to(torch.int32)

        # These are GPU tensors.
        sampler_output = SamplerOutput(
            # The sampled tokens are expanded to 2D tensor with shape
            # [num_requests, 1], where each row represents one generated
            # token per request.
            sampled_token_ids=sampled.unsqueeze(-1),
            logprobs_tensors=logprobs_tensors,
        )
        return sampler_output

    @staticmethod
    def apply_temperature(
        logits: torch.Tensor,
        temp: torch.Tensor,
        all_random: bool,
    ) -> torch.Tensor:
        # Use in-place division to avoid creating a new tensor.
        # Avoid division by zero if there are greedy requests.
        if not all_random:
            temp = torch.where(temp < _SAMPLING_EPS, 1.0, temp)
        return logits.div_(temp.unsqueeze(dim=1))

    @staticmethod
    def greedy_sample(logits: torch.Tensor) -> torch.Tensor:
        # Argmax over vocab, flattened to a 1D tensor of token ids.
        return logits.argmax(dim=-1).view(-1)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
        logprobs_mode_override: LogprobsMode | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Sample logits based on sampling metadata.

        The various logits processing functions called in this method may
        update the logits tensor in-place.
        """
        logprobs_mode = logprobs_mode_override or self.logprobs_mode
        assert not (sampling_metadata.all_greedy and sampling_metadata.all_random)
        if sampling_metadata.all_random:
            greedy_sampled = None
        else:
            greedy_sampled = self.greedy_sample(logits)
            if sampling_metadata.all_greedy:
                processed_logprobs = None
                if sampling_metadata.max_num_logprobs is not None:
                    if logprobs_mode == "processed_logits":
                        processed_logprobs = logits
                    elif logprobs_mode == "processed_logprobs":
                        processed_logprobs = self.compute_logprobs(logits)
                return greedy_sampled, processed_logprobs

        assert sampling_metadata.temperature is not None

        # Apply temperature.
        logits = self.apply_temperature(
            logits, sampling_metadata.temperature, sampling_metadata.all_random
        )

        # Apply logits processors that only apply to random sampling
        # (argmax invariant)
        for processor in sampling_metadata.logitsprocs.argmax_invariant:
            logits = processor.apply(logits)

        # Apply top_k and/or top_p.
        random_sampled, processed_logprobs = self.topk_topp_sampler(
            logits,
            sampling_metadata.generators,
            sampling_metadata.top_k,
            sampling_metadata.top_p,
        )
        if greedy_sampled is None:
            return random_sampled, processed_logprobs

        # Mixed batch: pick the greedy token where temperature ~ 0,
        # the random token otherwise.
        sampled = torch.where(
            sampling_metadata.temperature < _SAMPLING_EPS,
            greedy_sampled,
            random_sampled,
            out=greedy_sampled,  # Reuse tensor
        )
        return sampled, processed_logprobs

    @staticmethod
    def compute_logprobs(logits: torch.Tensor) -> torch.Tensor:
        return logits.log_softmax(dim=-1, dtype=torch.float32)

    @staticmethod
    def gather_logprobs(
        logprobs: torch.Tensor,
        num_logprobs: int,
        token_ids: torch.Tensor,
    ) -> LogprobsTensors:
        """
        Gather logprobs for topk and sampled/prompt token.

        Args:
            logprobs: (num tokens) x (vocab) tensor
            num_logprobs: minimum number of logprobs to retain per token
            token_ids: prompt tokens (if prompt logprobs) or sampled tokens
                (if sampled logprobs); 1D token ID tensor with (num tokens)
                elements. Must be int64.

        Returns:
            Top-k int indices tensor, (num tokens) x (num_logprobs + 1)
            Top-k float logprobs tensor, (num tokens) x (num_logprobs + 1)
            Sampled token rank tensor, (num tokens)
        """
        assert token_ids.dtype == torch.int64
        # Find the topK values.
        topk_logprobs, topk_indices = torch.topk(logprobs, num_logprobs, dim=-1)

        # Get with the logprob of the prompt or sampled token.
        token_ids = token_ids.unsqueeze(-1)
        token_logprobs = logprobs.gather(-1, token_ids)

        # Compute the ranks of the actual token.
        token_ranks = batched_count_greater_than(logprobs, token_logprobs)

        # Concatenate together with the topk.
        indices = torch.cat((token_ids, topk_indices), dim=1)
        logprobs = torch.cat((token_logprobs, topk_logprobs), dim=1)

        # Use int32 to reduce the tensor size.
        indices = indices.to(torch.int32)

        return LogprobsTensors(indices, logprobs, token_ranks)

    @staticmethod
    def _combine_outputs_with_spec_tokens(
        output_token_ids: list[list[int]],
        spec_token_ids: list[list[int]] | None = None,
    ) -> list[list[int]]:
        # Append each request's speculative draft tokens to its output
        # history so penalties/bad-words see the full candidate sequence.
        if spec_token_ids is None:
            return output_token_ids
        return [
            [*out, *spec] if spec else out
            for out, spec in zip(output_token_ids, spec_token_ids)
        ]

    def apply_logits_processors(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
        predict_bonus_token: bool,
    ) -> torch.Tensor:
        bad_words_token_ids = sampling_metadata.bad_words_token_ids
        any_penalties_or_bad_words = (
            bool(bad_words_token_ids) or not sampling_metadata.no_penalties
        )

        output_token_ids = sampling_metadata.output_token_ids
        if predict_bonus_token and any_penalties_or_bad_words:
            # Combine base outputs with spec tokens when speculative decoding
            # is enabled.
            output_token_ids = self._combine_outputs_with_spec_tokens(
                output_token_ids,
                sampling_metadata.spec_token_ids,
            )

        # Apply allowed token ids.
        if sampling_metadata.allowed_token_ids_mask is not None:
            logits.masked_fill_(sampling_metadata.allowed_token_ids_mask, float("-inf"))

        # Apply bad words exclusion.
        if bad_words_token_ids:
            apply_bad_words(logits, bad_words_token_ids, output_token_ids)

        # Apply logits processors which can impact greedy sampling.
        for processor in sampling_metadata.logitsprocs.non_argmax_invariant:
            logits = processor.apply(logits)

        # Apply penalties (e.g., freq_penalties).
        logits = self.apply_penalties(logits, sampling_metadata, output_token_ids)
        return logits

    @staticmethod
    def apply_penalties(
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
        output_token_ids: list[list[int]],
    ) -> torch.Tensor:
        if sampling_metadata.no_penalties:
            return logits
        assert sampling_metadata.prompt_token_ids is not None
        return apply_all_penalties(
            logits,
            sampling_metadata.prompt_token_ids,
            sampling_metadata.presence_penalties,
            sampling_metadata.frequency_penalties,
            sampling_metadata.repetition_penalties,
            output_token_ids,
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/metadata.py
vllm/v1/sample/metadata.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass

import torch

from vllm.v1.sample.logits_processor import LogitsProcessors


@dataclass
class SamplingMetadata:
    """Per-batch sampling parameters consumed by the Sampler."""

    temperature: torch.Tensor | None
    all_greedy: bool
    all_random: bool

    top_p: torch.Tensor | None
    top_k: torch.Tensor | None

    generators: dict[int, torch.Generator]

    # None means no logprobs, 0 means sampled token logprobs only
    max_num_logprobs: int | None

    no_penalties: bool
    prompt_token_ids: torch.Tensor | None
    frequency_penalties: torch.Tensor
    presence_penalties: torch.Tensor
    repetition_penalties: torch.Tensor

    output_token_ids: list[list[int]]

    # `allowed_token_ids_mask` is a 2D bool tensor of shape (max batch size,
    # vocab size).
    allowed_token_ids_mask: torch.Tensor | None

    # req_index -> bad_words_token_ids
    bad_words_token_ids: dict[int, list[list[int]]]

    # Loaded logits processors
    logitsprocs: LogitsProcessors

    # Speculative token ids
    spec_token_ids: list[list[int]] | None = None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/__init__.py
vllm/v1/sample/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/rejection_sampler.py
vllm/v1/sample/rejection_sampler.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections.abc import Sequence from dataclasses import replace import torch import torch.nn as nn from vllm.logger import init_logger from vllm.triton_utils import tl, triton from vllm.v1.outputs import LogprobsTensors, SamplerOutput from vllm.v1.sample.metadata import SamplingMetadata from vllm.v1.sample.ops.bad_words import apply_bad_words_with_drafts from vllm.v1.sample.ops.penalties import apply_all_penalties from vllm.v1.sample.ops.topk_topp_sampler import apply_top_k_top_p from vllm.v1.sample.sampler import Sampler from vllm.v1.spec_decode.metadata import SpecDecodeMetadata logger = init_logger(__name__) PLACEHOLDER_TOKEN_ID: tl.constexpr = -1 GREEDY_TEMPERATURE: tl.constexpr = 0 # Maximum number of speculative draft tokens allowed per request in a single # step. This value is chosen to be large enough to handle typical use cases. MAX_SPEC_LEN = 128 class RejectionSampler(nn.Module): """ The implementation strictly follows the algorithm described in https://arxiv.org/abs/2211.17192. However, we want to clarify the terminology used in the implementation: accepted tokens: tokens that are accepted based on the relationship between the "raw" draft and target probabilities. recovered tokens: tokens that are sampled based on the adjusted probability distribution, which is derived from both the draft and target probabilities. bonus tokens: If all proposed tokens are accepted, the bonus token is added to the end of the sequence. The bonus token is only sampled from the target probabilities. We pass in the bonus tokens instead of sampling them in the rejection sampler to allow for more flexibility in the sampling process. For example, we can use top_p, top_k sampling for bonus tokens, while spec decode does not support these sampling strategies. output tokens: Tokens are finally generated with the rejection sampler. 
output tokens = accepted tokens + recovered tokens + bonus tokens """ def __init__(self, sampler: Sampler): super().__init__() self.sampler = sampler logprobs_mode = self.sampler.logprobs_mode self.is_processed_logprobs_mode = logprobs_mode.startswith("processed") self.is_logits_logprobs_mode = logprobs_mode.endswith("logits") def forward( self, metadata: SpecDecodeMetadata, # [num_tokens, vocab_size] draft_probs: torch.Tensor | None, # [num_tokens + batch_size, vocab_size] logits: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> SamplerOutput: """ Args: metadata: Metadata for spec decoding. draft_probs (Optional[torch.Tensor]): Probability distribution for the draft tokens. Shape is [num_tokens, vocab_size]. Can be None if probabilities are not provided, which is the case for ngram spec decode. logits (torch.Tensor): Target model's logits probability distribution. Shape is [num_tokens + batch_size, vocab_size]. Here, probabilities from different requests are flattened into a single tensor because this is the shape of the output logits. NOTE: `logits` can be updated in place to save memory. sampling_metadata (vllm.v1.sample.metadata.SamplingMetadata): Additional metadata needed for sampling, such as temperature, top-k/top-p parameters, or other relevant information. Returns: SamplerOutput: Contains the final output token IDs and their logprobs if requested. """ assert metadata.max_spec_len <= MAX_SPEC_LEN bonus_logits_indices = metadata.bonus_logits_indices target_logits_indices = metadata.target_logits_indices # When indexing with a tensor (bonus_logits_indices), PyTorch # creates a new tensor with separate storage from the original # logits tensor. This means any in-place operations on bonus_logits # won't affect the original logits tensor. 
assert logits is not None bonus_logits = logits[bonus_logits_indices] bonus_sampler_output = self.sampler( logits=bonus_logits, sampling_metadata=replace( sampling_metadata, max_num_logprobs=-1, ), predict_bonus_token=True, # Override the logprobs mode to return logits because they are # needed later to compute the accepted token logprobs. logprobs_mode_override="processed_logits" if self.is_processed_logprobs_mode else "raw_logits", ) bonus_token_ids = bonus_sampler_output.sampled_token_ids # Just like `bonus_logits`, `target_logits` is a new tensor with # separate storage from the original `logits` tensor. Therefore, # it is safe to update `target_logits` in place. raw_target_logits = logits[target_logits_indices] # Use float32 for the target_logits. raw_target_logits = raw_target_logits.to(torch.float32) target_logits = raw_target_logits if not self.is_processed_logprobs_mode: # Clone raw_target_logits before applying processors to preserve # the original raw logits for logprobs computation, since # apply_logits_processors modifies the tensor in-place. target_logits = target_logits.clone() target_logits = self.apply_logits_processors( target_logits, sampling_metadata, metadata ) # [num_tokens, vocab_size] # NOTE(woosuk): `target_logits` can be updated in place inside the # `apply_sampling_constraints` function. target_logits = apply_sampling_constraints( target_logits, metadata.cu_num_draft_tokens, sampling_metadata, ) # Compute probability distribution from target logits. 
target_probs = target_logits.softmax(dim=-1, dtype=torch.float32) output_token_ids = rejection_sample( metadata.draft_token_ids, metadata.num_draft_tokens, metadata.max_spec_len, metadata.cu_num_draft_tokens, draft_probs, target_probs, bonus_token_ids, sampling_metadata, ) logprobs_tensors = None if sampling_metadata.max_num_logprobs is not None: logprobs_tensors = self._get_logprobs_tensors( sampling_metadata.max_num_logprobs, metadata, logits, target_logits if self.is_processed_logprobs_mode else raw_target_logits, bonus_sampler_output.logprobs_tensors.logprobs, output_token_ids, ) return SamplerOutput( sampled_token_ids=output_token_ids, logprobs_tensors=logprobs_tensors, ) def _get_logprobs_tensors( self, max_num_logprobs: int, metadata: SpecDecodeMetadata, logits: torch.Tensor, target_logits: torch.Tensor, bonus_logits: torch.Tensor, sampled_token_ids: torch.Tensor, ) -> LogprobsTensors: cu_num_sampled_tokens = torch.zeros_like(metadata.cu_num_sampled_tokens) cu_num_sampled_tokens[1:] = metadata.cu_num_sampled_tokens[:-1] # Collect target and bonus logits. bonus_logits_indices = metadata.bonus_logits_indices target_logits_indices = metadata.target_logits_indices final_logits = torch.zeros_like(logits, dtype=torch.float32) final_logits[target_logits_indices] = target_logits.to(torch.float32) final_logits[bonus_logits_indices] = bonus_logits.to(torch.float32) # Compute accepted token indices. accepted_mask = sampled_token_ids != PLACEHOLDER_TOKEN_ID num_accepted_tokens = accepted_mask.sum(dim=-1) accepted_logit_indices = accepted_mask.nonzero(as_tuple=True)[1] accepted_logit_indices += cu_num_sampled_tokens.repeat_interleave( num_accepted_tokens ) # Compute logprobs for accepted tokens. 
accepted_logits = final_logits[accepted_logit_indices] accepted_logprobs = ( accepted_logits if self.is_logits_logprobs_mode else self.sampler.compute_logprobs(accepted_logits) ) accepted_tokens = sampled_token_ids[accepted_mask] return self.sampler.gather_logprobs( accepted_logprobs, max_num_logprobs, accepted_tokens.to(torch.int64), ) @staticmethod def parse_output( output_token_ids: torch.Tensor, vocab_size: int, discard_req_indices: Sequence[int] = (), return_cu_num_tokens: bool = False, ) -> tuple[list[list[int]], list[int] | None]: """Parse the output of the rejection sampler. Args: output_token_ids: The sampled token IDs in shape [batch_size, max_spec_len + 1]. The rejected tokens are replaced with `PLACEHOLDER_TOKEN_ID` by the rejection sampler and will be filtered out in this function. vocab_size: The size of the vocabulary. discard_req_indices: Optional row indices to discard tokens in. return_cu_num_tokens: Whether to also return cumulative token counts. Returns: A list of lists of token IDs. """ output_token_ids_np = output_token_ids.cpu().numpy() # Create mask for valid tokens. 
valid_mask = (output_token_ids_np != PLACEHOLDER_TOKEN_ID) & ( output_token_ids_np < vocab_size ) cu_num_tokens = None if return_cu_num_tokens: cu_num_tokens = [0] + valid_mask.sum(axis=1).cumsum().tolist() if len(discard_req_indices) > 0: valid_mask[discard_req_indices] = False outputs = [ row[valid_mask[i]].tolist() for i, row in enumerate(output_token_ids_np) ] return outputs, cu_num_tokens def apply_logits_processors( self, logits: torch.Tensor, sampling_metadata: SamplingMetadata, metadata: SpecDecodeMetadata, ) -> torch.Tensor: has_penalties = not sampling_metadata.no_penalties any_penalties_or_bad_words = ( sampling_metadata.bad_words_token_ids or has_penalties ) output_token_ids = sampling_metadata.output_token_ids if any_penalties_or_bad_words: output_token_ids = self._combine_outputs_with_spec_tokens( output_token_ids, sampling_metadata.spec_token_ids, ) # Calculate indices of target logits. if sampling_metadata.allowed_token_ids_mask is not None or has_penalties: num_requests = len(sampling_metadata.output_token_ids) num_draft_tokens = torch.tensor(metadata.num_draft_tokens, device="cpu") original_indices = torch.arange(num_requests, device="cpu") repeat_indices_cpu = original_indices.repeat_interleave(num_draft_tokens) repeat_indices = repeat_indices_cpu.to( device=logits.device, non_blocking=True ) logits = self.apply_penalties( logits, sampling_metadata, metadata, repeat_indices, output_token_ids ) # Apply allowed token ids. if sampling_metadata.allowed_token_ids_mask is not None: token_mask = sampling_metadata.allowed_token_ids_mask[repeat_indices] logits.masked_fill_(token_mask, float("-inf")) # Apply bad words exclusion. 
if bad_words_token_ids := sampling_metadata.bad_words_token_ids: apply_bad_words_with_drafts( logits, bad_words_token_ids, output_token_ids, metadata.num_draft_tokens ) return logits @staticmethod def apply_penalties( logits: torch.Tensor, sampling_metadata: SamplingMetadata, metadata: SpecDecodeMetadata, repeat_indices: torch.Tensor, output_token_ids: list[list[int]], ) -> torch.Tensor: if sampling_metadata.no_penalties: return logits assert sampling_metadata.prompt_token_ids is not None prompt_token_ids = sampling_metadata.prompt_token_ids[repeat_indices] presence_penalties = sampling_metadata.presence_penalties[repeat_indices] frequency_penalties = sampling_metadata.frequency_penalties[repeat_indices] repetition_penalties = sampling_metadata.repetition_penalties[repeat_indices] logits = apply_all_penalties( logits, prompt_token_ids, presence_penalties, frequency_penalties, repetition_penalties, output_token_ids, ) return logits @staticmethod def _combine_outputs_with_spec_tokens( output_token_ids: list[list[int]], spec_token_ids: list[list[int]] | None = None, ) -> list[list[int]]: if spec_token_ids is None: return output_token_ids result = [] for out, spec in zip(output_token_ids, spec_token_ids): if len(spec) == 0: continue result.append(out) for i in range(len(spec) - 1): result.append([*result[-1], spec[i]]) return result def rejection_sample( # [num_tokens] draft_token_ids: torch.Tensor, # [batch_size] num_draft_tokens: list[int], max_spec_len: int, # [batch_size] cu_num_draft_tokens: torch.Tensor, # [num_tokens, vocab_size] draft_probs: torch.Tensor | None, # [num_tokens, vocab_size] target_probs: torch.Tensor, # [batch_size, 1] bonus_token_ids: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> torch.Tensor: assert draft_token_ids.ndim == 1 assert draft_probs is None or draft_probs.ndim == 2 assert cu_num_draft_tokens.ndim == 1 assert target_probs.ndim == 2 batch_size = len(num_draft_tokens) num_tokens = draft_token_ids.shape[0] vocab_size = 
target_probs.shape[-1] device = target_probs.device assert draft_token_ids.is_contiguous() assert draft_probs is None or draft_probs.is_contiguous() assert target_probs.is_contiguous() assert bonus_token_ids.is_contiguous() assert target_probs.shape == (num_tokens, vocab_size) # Create output buffer. output_token_ids = torch.full( (batch_size, max_spec_len + 1), PLACEHOLDER_TOKEN_ID, dtype=torch.int32, # Consistent with SamplerOutput.sampled_token_ids. device=device, ) if sampling_metadata.all_greedy: is_greedy = None else: is_greedy = sampling_metadata.temperature == GREEDY_TEMPERATURE if not sampling_metadata.all_random: # Rejection sampling for greedy sampling requests. target_argmax = target_probs.argmax(dim=-1) rejection_greedy_sample_kernel[(batch_size,)]( output_token_ids, cu_num_draft_tokens, draft_token_ids, target_argmax, bonus_token_ids, is_greedy, max_spec_len, ) if sampling_metadata.all_greedy: return output_token_ids # Generate uniform probabilities for rejection sampling. # [num_tokens] uniform_probs = generate_uniform_probs( num_tokens, num_draft_tokens, sampling_metadata.generators, device, ) # Sample recovered tokens for each position. # [num_tokens] recovered_token_ids = sample_recovered_tokens( max_spec_len, num_draft_tokens, cu_num_draft_tokens, draft_token_ids, draft_probs, target_probs, sampling_metadata, device, ) # Rejection sampling for random sampling requests. rejection_random_sample_kernel[(batch_size,)]( output_token_ids, cu_num_draft_tokens, draft_token_ids, draft_probs, target_probs, bonus_token_ids, recovered_token_ids, uniform_probs, is_greedy, max_spec_len, vocab_size, NO_DRAFT_PROBS=draft_probs is None, ) return output_token_ids def apply_sampling_constraints( logits: torch.Tensor, # [num_tokens, vocab_size] cu_num_draft_tokens: torch.Tensor, # [batch_size] sampling_metadata: SamplingMetadata, ) -> torch.Tensor: """Process logits based on sampling metadata. 
This function applies temperature scaling to the logits, as well as top-k and top-p. For greedy decoding, it returns the original logits. Args: logits: Input logits tensor to be processed. cu_num_draft_tokens: Cumulative number of draft tokens. sampling_metadata: Metadata containing sampling parameters such as temperature and whether greedy sampling is used. Returns: torch.Tensor: Processed logits if non-greedy sampling is used, otherwise returns the original logits. """ assert logits.ndim == 2 assert cu_num_draft_tokens.ndim == 1 if sampling_metadata.all_greedy: return logits num_tokens = logits.shape[0] temperature = expand_batch_to_tokens( sampling_metadata.temperature, cu_num_draft_tokens, num_tokens, replace_from=GREEDY_TEMPERATURE, replace_to=1, ) # NOTE(woosuk): Update `logits` in place to avoid allocating a new tensor. logits.div_(temperature.unsqueeze(-1)) # Get expanded top_k and top_p tensors. top_k = None if sampling_metadata.top_k is not None: top_k = expand_batch_to_tokens( sampling_metadata.top_k, cu_num_draft_tokens, num_tokens, ) top_p = None if sampling_metadata.top_p is not None: top_p = expand_batch_to_tokens( sampling_metadata.top_p, cu_num_draft_tokens, num_tokens, ) # NOTE(woosuk): `apply_top_k_top_p` uses sorting to calculate the mask, # which is slow for large vocab sizes. This may cause performance issues. return apply_top_k_top_p(logits, top_k, top_p) def expand_batch_to_tokens( x: torch.Tensor, # [batch_size] cu_num_tokens: torch.Tensor, # [batch_size] num_tokens: int, replace_from: int = 0, replace_to: int = 0, ) -> torch.Tensor: """Expand [batch_size] tensor to [num_tokens] tensor based on the number of tokens per batch in cu_num_tokens. For example, if x = [a, b, c] and cu_num_tokens = [2, 5, 6], then num_tokens = 6, and expanded_x = [a, a, b, b, b, c]. Args: x: [batch_size] tensor to expand. cu_num_tokens: [batch_size] tensor containing the cumulative number of tokens per batch. 
Each element represents the total number of tokens up to and including that batch. num_tokens: Total number of tokens. replace_from: int = 0 Value to be replaced if it is found in x. replace_to: int = 0 Value to replace with when replace_from is found. Returns: expanded_x: [num_tokens] tensor. """ batch_size = x.shape[0] assert cu_num_tokens.shape[0] == batch_size expanded_x = x.new_empty(num_tokens) expand_kernel[(batch_size,)]( expanded_x, x, cu_num_tokens, replace_from, replace_to, MAX_NUM_TOKENS=MAX_SPEC_LEN, # To avoid recompilation. ) return expanded_x def generate_uniform_probs( num_tokens: int, num_draft_tokens: list[int], generators: dict[int, torch.Generator], device: torch.device, ) -> torch.Tensor: """ Generates a batch of uniform random samples, with optional seeding if available. This method creates a tensor of shape `(num_tokens, )` filled with uniform random values in the range [0, 1). If `generators` is provided, the requests with their own seeds will use the provided `torch.Generator` for reproducibility. The samples for the other requests will be generated without a seed. Args: num_tokens: int Total number of tokens. num_draft_tokens: List[List[int]] Number of draft tokens per request. generators: Optional[Dict[int, torch.Generator]] A dictionary mapping indices in the batch to `torch.Generator` objects. device: torch.device The device on which to allocate the tensor. Returns: uniform_rand: torch.Tensor A tensor of shape `(num_tokens, )` containing uniform random values in the range [0, 1). """ # NOTE(woosuk): We deliberately use float64 instead of float32 here # because when using float32, there's a non-negligible chance that # uniform_prob is sampled to be exact 0.0 as reported in # https://github.com/pytorch/pytorch/issues/16706. Using float64 # mitigates the issue. 
uniform_probs = torch.rand( (num_tokens,), dtype=torch.float64, device=device, ) start_idx = 0 for req_idx, n in enumerate(num_draft_tokens): # Do not generate random numbers for requests with no draft tokens. # This can be important for reproducibility. if n == 0: continue end_idx = start_idx + n generator = generators.get(req_idx) if generator is not None: uniform_probs[start_idx:end_idx].uniform_(generator=generator) start_idx = end_idx return uniform_probs def sample_recovered_tokens( max_spec_len: int, num_draft_tokens: list[int], # [batch_size] cu_num_draft_tokens: torch.Tensor, # [num_tokens] draft_token_ids: torch.Tensor, # [num_tokens, vocab_size] draft_probs: torch.Tensor | None, # [num_tokens, vocab_size] target_probs: torch.Tensor, sampling_metadata: SamplingMetadata, device: torch.device, ) -> torch.Tensor: # NOTE(woosuk): Create only one distribution for each request. batch_size = len(num_draft_tokens) vocab_size = target_probs.shape[-1] q = torch.empty( (batch_size, vocab_size), dtype=torch.float32, device=device, ) q.exponential_() for i, generator in sampling_metadata.generators.items(): # Do not generate random numbers for requests with no draft tokens. # This can be important for reproducibility. if num_draft_tokens[i] > 0: q[i].exponential_(generator=generator) recovered_token_ids = torch.empty_like(draft_token_ids) sample_recovered_tokens_kernel[(batch_size, max_spec_len)]( recovered_token_ids, cu_num_draft_tokens, draft_token_ids, draft_probs, target_probs, q, vocab_size, triton.next_power_of_2(vocab_size), NO_DRAFT_PROBS=draft_probs is None, ) return recovered_token_ids # NOTE(woosuk): Avoid specialization to prevent unnecessary recompilation. 
@triton.jit(do_not_specialize=["max_spec_len"]) def rejection_greedy_sample_kernel( output_token_ids_ptr, # [batch_size, max_spec_len + 1] cu_num_draft_tokens_ptr, # [batch_size] draft_token_ids_ptr, # [num_tokens] target_argmax_ptr, # [num_tokens] bonus_token_ids_ptr, # [batch_size] is_greedy_ptr, # [batch_size] or None max_spec_len, ): req_idx = tl.program_id(0) # FIXME(woosuk): Because is_greedy_ptr is not None at profiling run, # re-compilation may happen during runtime when is_greedy_ptr is None. is_greedy = True if is_greedy_ptr is None else tl.load(is_greedy_ptr + req_idx) if not is_greedy: # Early exit for non-greedy sampling requests. return start_idx = 0 if req_idx == 0 else tl.load(cu_num_draft_tokens_ptr + req_idx - 1) end_idx = tl.load(cu_num_draft_tokens_ptr + req_idx) num_draft_tokens = end_idx - start_idx rejected = False for pos in range(num_draft_tokens): if not rejected: draft_token_id = tl.load(draft_token_ids_ptr + start_idx + pos) target_argmax_id = tl.load(target_argmax_ptr + start_idx + pos) tl.store( output_token_ids_ptr + req_idx * (max_spec_len + 1) + pos, target_argmax_id, ) if draft_token_id != target_argmax_id: # Reject. rejected = True if not rejected: # If all tokens are accepted, append the bonus token. bonus_token_id = tl.load(bonus_token_ids_ptr + req_idx) tl.store( output_token_ids_ptr + req_idx * (max_spec_len + 1) + num_draft_tokens, bonus_token_id, ) # NOTE(woosuk): Avoid specialization to prevent unnecessary recompilation. 
@triton.jit(do_not_specialize=["max_spec_len"]) def rejection_random_sample_kernel( output_token_ids_ptr, # [batch_size, max_spec_len + 1] cu_num_draft_tokens_ptr, # [batch_size] draft_token_ids_ptr, # [num_tokens] draft_probs_ptr, # [num_tokens, vocab_size] or None target_probs_ptr, # [num_tokens, vocab_size] bonus_token_ids_ptr, # [batch_size] recovered_token_ids_ptr, # [num_tokens] uniform_probs_ptr, # [num_tokens] is_greedy_ptr, # [batch_size] max_spec_len, vocab_size, NO_DRAFT_PROBS: tl.constexpr, ): req_idx = tl.program_id(0) is_greedy = tl.load(is_greedy_ptr + req_idx) if is_greedy: # Early exit for greedy sampling requests. return start_idx = 0 if req_idx == 0 else tl.load(cu_num_draft_tokens_ptr + req_idx - 1) end_idx = tl.load(cu_num_draft_tokens_ptr + req_idx) num_draft_tokens = end_idx - start_idx rejected = False for pos in range(num_draft_tokens): if not rejected: draft_token_id = tl.load(draft_token_ids_ptr + start_idx + pos) if NO_DRAFT_PROBS: draft_prob = 1 else: draft_prob = tl.load( draft_probs_ptr + (start_idx + pos) * vocab_size + draft_token_id ) target_prob = tl.load( target_probs_ptr + (start_idx + pos) * vocab_size + draft_token_id ) uniform_prob = tl.load(uniform_probs_ptr + start_idx + pos) # NOTE(woosuk): While the draft probability should never be 0, # we check it to avoid NaNs. If it happens to be 0, we reject. if draft_prob > 0 and target_prob / draft_prob >= uniform_prob: # Accept. token_id = draft_token_id else: # Reject. Use recovered token. rejected = True token_id = tl.load(recovered_token_ids_ptr + start_idx + pos) tl.store( output_token_ids_ptr + req_idx * (max_spec_len + 1) + pos, token_id ) if not rejected: # If all tokens are accepted, append the bonus token. bonus_token_id = tl.load(bonus_token_ids_ptr + req_idx) tl.store( output_token_ids_ptr + req_idx * (max_spec_len + 1) + num_draft_tokens, bonus_token_id, ) # NOTE(woosuk): Avoid specialization to prevent unnecessary recompilation. 
@triton.jit(do_not_specialize=["replace_from", "replace_to"]) def expand_kernel( output_ptr, # [num_tokens] input_ptr, # [batch_size] cu_num_tokens_ptr, # [batch_size] replace_from, replace_to, MAX_NUM_TOKENS: tl.constexpr, ): req_idx = tl.program_id(0) if req_idx == 0: # noqa: SIM108 start_idx = 0 else: start_idx = tl.load(cu_num_tokens_ptr + req_idx - 1) end_idx = tl.load(cu_num_tokens_ptr + req_idx) num_tokens = end_idx - start_idx src_val = tl.load(input_ptr + req_idx) src_val = tl.where(src_val == replace_from, replace_to, src_val) offset = tl.arange(0, MAX_NUM_TOKENS) tl.store(output_ptr + start_idx + offset, src_val, mask=offset < num_tokens) @triton.jit def sample_recovered_tokens_kernel( output_token_ids_ptr, # [num_tokens] cu_num_draft_tokens_ptr, # [batch_size] draft_token_ids_ptr, # [num_tokens] draft_probs_ptr, # [num_tokens, vocab_size] or None target_probs_ptr, # [num_tokens, vocab_size] q_ptr, # [batch_size, vocab_size] vocab_size, PADDED_VOCAB_SIZE: tl.constexpr, NO_DRAFT_PROBS: tl.constexpr, ): req_idx = tl.program_id(0) start_idx = 0 if req_idx == 0 else tl.load(cu_num_draft_tokens_ptr + req_idx - 1) end_idx = tl.load(cu_num_draft_tokens_ptr + req_idx) num_draft_tokens = end_idx - start_idx # Early exit for out-of-range positions. 
pos = tl.program_id(1) if pos >= num_draft_tokens: return vocab_offset = tl.arange(0, PADDED_VOCAB_SIZE) if NO_DRAFT_PROBS: draft_token_id = tl.load(draft_token_ids_ptr + start_idx + pos) prob = tl.load( target_probs_ptr + (start_idx + pos) * vocab_size + vocab_offset, mask=((vocab_offset < vocab_size) & (vocab_offset != draft_token_id)), other=0, ) else: draft_prob = tl.load( draft_probs_ptr + (start_idx + pos) * vocab_size + vocab_offset, mask=vocab_offset < vocab_size, other=0, ) target_prob = tl.load( target_probs_ptr + (start_idx + pos) * vocab_size + vocab_offset, mask=vocab_offset < vocab_size, other=0, ) prob = tl.maximum(target_prob - draft_prob, 0) # NOTE(woosuk): We don't need `prob = prob / tl.sum(prob)` here because # `tl.argmax` will select the maximum value. q = tl.load( q_ptr + req_idx * vocab_size + vocab_offset, mask=vocab_offset < vocab_size, other=float("-inf"), ) recovered_id = tl.argmax(prob / q, axis=-1) tl.store(output_token_ids_ptr + start_idx + pos, recovered_id)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/tpu/sampler.py
vllm/v1/sample/tpu/sampler.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Sampler layer implementing TPU supported operations.""" import torch import torch.nn as nn from vllm.v1.outputs import LogprobsTensors, SamplerOutput from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata _SAMPLING_EPS = 1e-5 class Sampler(nn.Module): def __init__(self): # TODO(houseroad): Add support for logprobs_mode. super().__init__() def forward( self, logits: torch.Tensor, sampling_metadata: TPUSupportedSamplingMetadata, ) -> SamplerOutput: # Use float32 for the logits. logits = logits.to(torch.float32) # Sample the next token. sampled = self.sample(logits, sampling_metadata) # These are TPU tensors. sampler_output = SamplerOutput( # The sampled tokens are expanded to 2D tensor with shape # [num_requests, 1], where each row represents one generated # token per request. sampled_token_ids=sampled.unsqueeze(-1), logprobs_tensors=None, ) return sampler_output def apply_temperature( self, logits: torch.Tensor, temp: torch.Tensor, all_random: bool = False, ) -> torch.Tensor: # Avoid division by zero for greedy sampling (temperature ~ 0.0). if not all_random: temp = torch.where(temp < _SAMPLING_EPS, 1.0, temp) return logits.div_(temp.unsqueeze(dim=1)) def greedy_sample(self, logits: torch.Tensor) -> torch.Tensor: return logits.argmax(dim=-1).view(-1) def sample( self, logits: torch.Tensor, sampling_metadata: TPUSupportedSamplingMetadata, ) -> torch.Tensor: greedy_sampled = self.greedy_sample(logits) assert sampling_metadata.temperature is not None # Apply temperature. logits = self.apply_temperature( logits, sampling_metadata.temperature, sampling_metadata.all_random ) # Apply min_p. if sampling_metadata.min_p is not None: logits = self.apply_min_p(logits, sampling_metadata.min_p) # Apply top_k and/or top_p. logits = apply_top_k_top_p( logits, sampling_metadata.top_k, sampling_metadata.top_p, ) # Random sample. 
probs = logits.softmax(dim=-1, dtype=torch.float32) random_sampled = self.random_sample(probs, sampling_metadata.generators) sampled = torch.where( sampling_metadata.temperature < _SAMPLING_EPS, greedy_sampled, random_sampled, ) return sampled def compute_logprobs(self, logits: torch.Tensor) -> torch.Tensor: return logits.log_softmax(dim=-1, dtype=torch.float32) def gather_logprobs( self, logprobs: torch.Tensor, num_logprobs: int, token_ids: torch.Tensor, ) -> LogprobsTensors: """ Gather logprobs for topk and sampled/prompt token. Args: logprobs: (num tokens) x (vocab) tensor num_logprobs: minimum number of logprobs to retain per token token_ids: prompt tokens (if prompt logprobs) or sampled tokens (if sampled logprobs); 1D token ID tensor with (num tokens) elements Returns: Top-k int indices tensor, (num tokens) x (num_logprobs + 1) Top-k float logprobs tensor, (num tokens) x (num_logprobs + 1) Sampled token rank tensor, (num tokens) """ # Find the topK values. topk_logprobs, topk_indices = torch.topk(logprobs, num_logprobs, dim=-1) # Get with the logprob of the prompt or sampled token. token_ids = token_ids.unsqueeze(-1) token_logprobs = logprobs.gather(-1, token_ids) # Compute the ranks of the actual token. token_ranks = (logprobs >= token_logprobs).sum(-1) # Concatenate together with the topk. indices = torch.cat((token_ids, topk_indices), dim=1) logprobs = torch.cat((token_logprobs, topk_logprobs), dim=1) # Use int32 to reduce the tensor size. indices = indices.to(torch.int32) return LogprobsTensors(indices, logprobs, token_ranks) def apply_min_p( self, logits: torch.Tensor, min_p: torch.Tensor, ) -> torch.Tensor: """ Filters logits using adaptive probability thresholding. 
""" # Convert logits to probability distribution probability_values = torch.nn.functional.softmax(logits, dim=-1) # Calculate maximum probabilities per sequence max_probabilities = torch.amax(probability_values, dim=-1, keepdim=True) # Reshape min_p for broadcasting adjusted_min_p = min_p.unsqueeze(1) * max_probabilities # Identify valid tokens using threshold comparison valid_token_mask = probability_values >= adjusted_min_p # Apply mask using boolean indexing (xla friendly) logits.masked_fill_(~valid_token_mask, -float("inf")) return logits def random_sample( self, probs: torch.Tensor, generators: dict[int, torch.Generator], ) -> torch.Tensor: q = torch.empty_like(probs) # NOTE(woosuk): To batch-process the requests without their own seeds, # which is the common case, we first assume that every request does # not have its own seed. Then, we overwrite the values for the requests # that have their own seeds. q.exponential_() if generators: for i, generator in generators.items(): q[i].exponential_(generator=generator) return probs.div_(q).argmax(dim=-1).view(-1) def apply_top_k_top_p( logits: torch.Tensor, k: torch.Tensor | None, p: torch.Tensor | None, ) -> torch.Tensor: """ Apply top-k and top-p optimized for TPU. This algorithm avoids using torch.scatter which is extremely slow on TPU. This is achieved by finding a "cut-off" element in the original logit, and after thresholding the logit using this cut-off, the remaining elements shall constitute the top-p set. Note: in the case of tie (i.e. multiple cut-off elements present in the logit), all tie elements are included in the top-p set. In other words, this function does not break ties. Instead, these tie tokens have equal chance of being chosen during final sampling, so we can consider the tie being broken then. 
""" probs = logits.softmax(dim=-1) probs_sort, _ = probs.sort(dim=-1, descending=False) if k is not None: top_k_count = probs_sort.size(1) - k.to(torch.long) # shape: (batch, ) top_k_count = top_k_count.unsqueeze(dim=1) top_k_cutoff = probs_sort.gather(-1, top_k_count) # Make sure the no top-k rows are no-op. no_top_k_mask = (k == logits.shape[1]).unsqueeze(dim=1) top_k_cutoff.masked_fill_(no_top_k_mask, -float("inf")) elements_to_discard = probs < top_k_cutoff logits.masked_fill_(elements_to_discard, -float("inf")) if p is not None: cumprob = torch.cumsum(probs_sort, dim=-1) top_p_mask = cumprob <= 1 - p.unsqueeze(dim=1) top_p_mask[:, -1] = False # at least one top_p_count = top_p_mask.sum(dim=-1).unsqueeze(1) top_p_cutoff = probs_sort.gather(-1, top_p_count) elements_to_discard = probs < top_p_cutoff logits.masked_fill_(elements_to_discard, -float("inf")) return logits
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/tpu/metadata.py
vllm/v1/sample/tpu/metadata.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from dataclasses import dataclass, field import torch from vllm.v1.worker.tpu_input_batch import InputBatch DEFAULT_SAMPLING_PARAMS = dict( temperature=-1.0, min_p=0.0, # strictly disabled for now top_k=0, top_p=1.0, # frequency_penalties=0.0, # presence_penalties=0.0, # repetition_penalties=0.0, ) @dataclass class TPUSupportedSamplingMetadata: # This class exposes a more xla-friendly interface than SamplingMetadata # on TPU, in particular all arguments should be traceable and no optionals # are allowed, to avoid graph recompilation on Nones. temperature: torch.Tensor = None min_p: torch.Tensor = None top_k: torch.Tensor = None top_p: torch.Tensor = None all_greedy: bool = True all_random: bool = False # Whether logprobs are to be gathered in this batch of request. To balance # out compile time and runtime, a fixed `max_number_logprobs` value is used # when gathering logprobs, regardless of the values specified in the batch. logprobs: bool = False # TODO No penalties for now no_penalties: bool = True prompt_token_ids = None frequency_penalties = None presence_penalties = None repetition_penalties = None # should use tensor output_token_ids: list[list[int]] = field(default_factory=lambda: list()) min_tokens = None # impl is not vectorized logit_bias: list[dict[int, float] | None] = field(default_factory=lambda: list()) allowed_token_ids_mask = None bad_words_token_ids = None # Generator not supported by xla _generators: dict[int, torch.Generator] = field(default_factory=lambda: dict()) @property def generators(self) -> dict[int, torch.Generator]: # Generator not supported by torch/xla. This field must be immutable. 
return self._generators @classmethod def from_input_batch( cls, input_batch: InputBatch, padded_num_reqs: int, xla_device: torch.device, generate_params_if_all_greedy: bool = False, ) -> "TPUSupportedSamplingMetadata": """ Copy sampling tensors slices from `input_batch` to on device tensors. `InputBatch._make_sampling_metadata` causes recompilation on XLA as it slices dynamic shapes on device tensors. This impl moves the dynamic ops to CPU and produces tensors of fixed `padded_num_reqs` size. Args: input_batch: The input batch containing sampling parameters. padded_num_reqs: The padded number of requests. xla_device: The XLA device. generate_params_if_all_greedy: If True, generate sampling parameters even if all requests are greedy. this is useful for cases where we want to pre-compile a graph with sampling parameters, even if they are not strictly needed for greedy decoding. """ needs_logprobs = ( input_batch.max_num_logprobs > 0 if input_batch.max_num_logprobs else False ) # Early return to avoid unnecessary cpu to tpu copy if input_batch.all_greedy is True and generate_params_if_all_greedy is False: return cls(all_greedy=True, logprobs=needs_logprobs) num_reqs = input_batch.num_reqs def fill_slice(cpu_tensor: torch.Tensor, fill_val) -> torch.Tensor: # Pad value is the default one. cpu_tensor[num_reqs:padded_num_reqs] = fill_val fill_slice( input_batch.temperature_cpu_tensor, DEFAULT_SAMPLING_PARAMS["temperature"] ) fill_slice(input_batch.min_p_cpu_tensor, DEFAULT_SAMPLING_PARAMS["min_p"]) fill_slice(input_batch.top_k_cpu_tensor, DEFAULT_SAMPLING_PARAMS["top_k"]) fill_slice(input_batch.top_p_cpu_tensor, DEFAULT_SAMPLING_PARAMS["top_p"]) # Slice persistent device tensors to a fixed pre-compiled padded shape. 
return cls( temperature=input_batch.temperature_cpu_tensor[:padded_num_reqs].to( xla_device ), all_greedy=input_batch.all_greedy, all_random=input_batch.all_random, # TODO enable more and avoid returning None values top_p=input_batch.top_p_cpu_tensor[:padded_num_reqs].to(xla_device), top_k=input_batch.top_k_cpu_tensor[:padded_num_reqs].to(xla_device), min_p=input_batch.min_p_cpu_tensor[:padded_num_reqs].to(xla_device), logprobs=needs_logprobs, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/tpu/__init__.py
vllm/v1/sample/tpu/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/logits_processor/builtin.py
vllm/v1/sample/logits_processor/builtin.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections.abc import Callable, Sequence from typing import TYPE_CHECKING, TypeVar import torch from vllm import SamplingParams from vllm.v1.sample.logits_processor.interface import ( BatchUpdate, LogitsProcessor, MoveDirectionality, ) if TYPE_CHECKING: from vllm.config import VllmConfig T = TypeVar("T") class MinPLogitsProcessor(LogitsProcessor): def __init__( self, vllm_config: "VllmConfig", device: torch.device, is_pin_memory: bool ): max_num_reqs = vllm_config.scheduler_config.max_num_seqs self.min_p_count: int = 0 self.min_p_cpu_tensor = torch.zeros( (max_num_reqs,), dtype=torch.float32, device="cpu", pin_memory=is_pin_memory ) self.min_p_cpu = self.min_p_cpu_tensor.numpy() self.use_double_tensor = torch.device(device).type != "cpu" if self.use_double_tensor: # Pre-allocated device tensor self.min_p_device: torch.Tensor = torch.empty( (max_num_reqs,), dtype=torch.float32, device=device ) else: self.min_p_device = self.min_p_cpu_tensor # Current slice of the device tensor self.min_p: torch.Tensor = self.min_p_device[:0] def is_argmax_invariant(self) -> bool: """Min-p never impacts greedy sampling""" return True def get_min_p_by_index(self, index: int) -> float: return float(self.min_p_cpu[index]) def update_state(self, batch_update: BatchUpdate | None): if not batch_update: return needs_update = False # Process added requests. for index, params, _, _ in batch_update.added: min_p = params.min_p min_p_before = self.min_p_cpu[index] if min_p_before != min_p: needs_update = True self.min_p_cpu[index] = min_p if min_p and not min_p_before: self.min_p_count += 1 elif not min_p and min_p_before: self.min_p_count -= 1 if self.min_p_count: # Process removed requests. 
if batch_update.removed: needs_update = True for index in batch_update.removed: if self.min_p_cpu[index]: self.min_p_cpu[index] = 0 self.min_p_count -= 1 # Process moved requests, unidirectional (a->b) and swap (a<->b). for adx, bdx, direct in batch_update.moved: min_p_a, min_p_b = self.min_p_cpu[adx], self.min_p_cpu[bdx] if min_p_a != min_p_b: needs_update = True self.min_p_cpu[bdx] = min_p_a if direct == MoveDirectionality.SWAP: self.min_p_cpu[adx] = min_p_b if direct == MoveDirectionality.UNIDIRECTIONAL: if min_p_a: self.min_p_cpu[adx] = 0 if min_p_b: self.min_p_count -= 1 # Update tensors if needed. size = batch_update.batch_size if self.min_p_count and (needs_update or self.min_p.shape[0] != size): self.min_p = self.min_p_device[:size] if self.use_double_tensor: self.min_p.copy_(self.min_p_cpu_tensor[:size], non_blocking=True) self.min_p.unsqueeze_(1) def apply(self, logits: torch.Tensor) -> torch.Tensor: if not self.min_p_count: return logits # Convert logits to probability distribution probability_values = torch.nn.functional.softmax(logits, dim=-1) # Calculate maximum probabilities per sequence max_probabilities = torch.amax(probability_values, dim=-1, keepdim=True) # Adjust min_p adjusted_min_p = max_probabilities.mul_(self.min_p) # Identify valid tokens using threshold comparison invalid_token_mask = probability_values < adjusted_min_p # Apply mask using boolean indexing logits.masked_fill_(invalid_token_mask, -float("inf")) return logits class LogitBiasLogitsProcessor(LogitsProcessor): def __init__(self, _, device: torch.device, is_pin_memory: bool): self.device = device self.pin_memory = is_pin_memory self.biases: dict[int, dict[int, float]] = {} self.bias_tensor: torch.Tensor = torch.tensor(()) self.logits_slice = ( self._device_tensor([], torch.int32), self._device_tensor([], torch.int32), ) def is_argmax_invariant(self) -> bool: """Logit bias can rebalance token probabilities and change the outcome of argmax in greedy sampling.""" return False def 
update_state(self, batch_update: BatchUpdate | None): needs_update = process_dict_updates( self.biases, batch_update, lambda params, _, __: params.logit_bias or None ) # Update tensors if needed. if needs_update: reqs: list[int] = [] tok_ids: list[int] = [] biases: list[float] = [] for req, lb in self.biases.items(): reqs.extend([req] * len(lb)) tok_ids.extend(lb.keys()) biases.extend(lb.values()) self.bias_tensor = self._device_tensor(biases, torch.float32) self.logits_slice = ( self._device_tensor(reqs, torch.int32), self._device_tensor(tok_ids, torch.int32), ) def _device_tensor(self, data: list, dtype: torch.dtype) -> torch.Tensor: return torch.tensor( data, device="cpu", dtype=dtype, pin_memory=self.pin_memory ).to(device=self.device, non_blocking=True) def apply(self, logits: torch.Tensor) -> torch.Tensor: if self.biases: logits[self.logits_slice] += self.bias_tensor return logits class MinTokensLogitsProcessor(LogitsProcessor): def __init__( self, vllm_config: "VllmConfig", device: torch.device, is_pin_memory: bool ): # index -> (min_toks, output_token_ids, stop_token_ids) self.device = device self.pin_memory = is_pin_memory self.min_toks: dict[int, tuple[int, Sequence[int], set[int]]] = {} # (req_idx_tensor,eos_tok_id_tensor) self.logits_slice: tuple[torch.Tensor, torch.Tensor] = ( self._device_tensor([], torch.int32), self._device_tensor([], torch.int32), ) self.neg_inf_tensor = torch.tensor( -float("inf"), dtype=torch.float32, device=self.device ) def is_argmax_invariant(self) -> bool: """By censoring stop tokens, min-tokens can change the outcome of the argmax operation in greedy sampling.""" return False @staticmethod def add_request( params: SamplingParams, _: list[int] | None, output_tok_ids: list[int] ) -> tuple[int, Sequence[int], set[int]] | None: min_tokens = params.min_tokens if not min_tokens or len(output_tok_ids) >= min_tokens: return None return min_tokens, output_tok_ids, params.all_stop_token_ids def update_state(self, batch_update: 
BatchUpdate | None): needs_update = process_dict_updates( self.min_toks, batch_update, self.add_request ) if self.min_toks: # Check for any requests that have attained their min tokens. to_remove = tuple( index for index, (min_toks, out_tok_ids, _) in self.min_toks.items() if len(out_tok_ids) >= min_toks ) if to_remove: needs_update = True for index in to_remove: del self.min_toks[index] # Update tensors if needed. if needs_update: reqs: list[int] = [] tok_ids: list[int] = [] for req, (_, _, stop_tok_ids) in self.min_toks.items(): reqs.extend([req] * len(stop_tok_ids)) tok_ids.extend(stop_tok_ids) self.logits_slice = ( self._device_tensor(reqs, torch.int32), self._device_tensor(tok_ids, torch.int32), ) def _device_tensor(self, data: list, dtype: torch.dtype) -> torch.Tensor: return torch.tensor( data, device="cpu", dtype=dtype, pin_memory=self.pin_memory ).to(device=self.device, non_blocking=True) def apply(self, logits: torch.Tensor) -> torch.Tensor: if self.min_toks: # Inhibit EOS token for requests which have not reached min length logits.index_put_(self.logits_slice, self.neg_inf_tensor) return logits def process_dict_updates( req_entries: dict[int, T], batch_update: BatchUpdate | None, new_state: Callable[[SamplingParams, list[int] | None, list[int]], T | None], ) -> bool: """Utility function to update dict state for sparse LogitsProcessors.""" if not batch_update: # Nothing to do. return False updated = False for index, params, prompt_tok_ids, output_tok_ids in batch_update.added: if (state := new_state(params, prompt_tok_ids, output_tok_ids)) is not None: req_entries[index] = state updated = True elif req_entries.pop(index, None) is not None: updated = True if req_entries: # Process removed requests. 
for index in batch_update.removed: if req_entries.pop(index, None): updated = True # Process moved requests, unidirectional (a->b) and # swapped (a<->b) for a_index, b_index, direct in batch_update.moved: a_entry = req_entries.pop(a_index, None) b_entry = req_entries.pop(b_index, None) if a_entry is not None: req_entries[b_index] = a_entry updated = True if b_entry is not None: updated = True if direct == MoveDirectionality.SWAP: req_entries[a_index] = b_entry return updated
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/logits_processor/state.py
vllm/v1/sample/logits_processor/state.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections.abc import Iterator from itertools import chain from typing import TYPE_CHECKING from vllm.v1.sample.logits_processor.interface import ( AddedRequest, BatchUpdate, MovedRequest, RemovedRequest, ) if TYPE_CHECKING: from vllm.v1.sample.logits_processor.interface import LogitsProcessor class BatchUpdateBuilder: """Helps track persistent batch state changes and build a batch update data structure for logitsprocs Assumptions: * All information about requests removed from persistent batch during a step is aggregated in self._removed through calls to self.removed_append() at the beginning of a step. This must happen before the first time that self.removed, self.pop_removed() or self.peek_removed() are invoked in a given step * After the first time that self.removed, self.pop_removed() or self.peek_removed() are read in a step, no new removals are registered using self.removed_append() * Elements of self._removed are never directly modified, added or removed (i.e. modification is only via self.removed_append() and self.pop_removed()) Guarantees under above assumptions: * self.removed is always sorted in descending order * self.pop_removed() and self.peek_removed() both return the lowest removed request index in the current step """ _removed: list[RemovedRequest] _is_removed_sorted: bool added: list[AddedRequest] moved: list[MovedRequest] def __init__( self, removed: list[RemovedRequest] | None = None, added: list[AddedRequest] | None = None, moved: list[MovedRequest] | None = None, ) -> None: self._removed = removed or [] self.added = added or [] self.moved = moved or [] self._is_removed_sorted = False # Used to track changes in the pooling case # where we don't populate the added list. self.batch_changed = False def _ensure_removed_sorted(self) -> None: """Sort removed request indices in descending order. 
Idempotent after first call in a given step, until reset. """ if not self._is_removed_sorted: self._removed.sort(reverse=True) self._is_removed_sorted = True @property def removed(self) -> list[RemovedRequest]: """Removed request indices sorted in descending order""" self._ensure_removed_sorted() return self._removed def removed_append(self, index: int) -> None: """Register the removal of a request from the persistent batch. Must not be called after the first time self.removed, self.pop_removed() or self.peek_removed() are invoked. Args: index: request index """ if self._is_removed_sorted: raise RuntimeError( "Cannot register new removed request after self.removed has been read." ) self._removed.append(index) self.batch_changed = True def has_removed(self) -> bool: return bool(self._removed) def peek_removed(self) -> int | None: """Return lowest removed request index""" if self.has_removed(): self._ensure_removed_sorted() return self._removed[-1] return None def pop_removed(self) -> int | None: """Pop lowest removed request index""" if self.has_removed(): self._ensure_removed_sorted() return self._removed.pop() return None def reset(self) -> bool: """Returns True if there were any changes to the batch.""" self._is_removed_sorted = False self._removed.clear() self.added.clear() self.moved.clear() batch_changed = self.batch_changed self.batch_changed = False return batch_changed def get_and_reset(self, batch_size: int) -> BatchUpdate | None: """Generate a logitsprocs batch update data structure and reset internal batch update builder state. 
Args: batch_size: current persistent batch size Returns: Frozen logitsprocs batch update instance; `None` if no updates """ # Reset removal-sorting logic self._is_removed_sorted = False self.batch_changed = False if not any((self._removed, self.moved, self.added)): # No update; short-circuit return None # Build batch state update batch_update = BatchUpdate( batch_size=batch_size, removed=self._removed, moved=self.moved, added=self.added, ) self._removed = [] self.moved = [] self.added = [] return batch_update class LogitsProcessors: """Encapsulates initialized logitsproc objects.""" def __init__(self, logitsprocs: Iterator["LogitsProcessor"] | None = None) -> None: self.argmax_invariant: list[LogitsProcessor] = [] self.non_argmax_invariant: list[LogitsProcessor] = [] if logitsprocs: for logitproc in logitsprocs: ( self.argmax_invariant if logitproc.is_argmax_invariant() else self.non_argmax_invariant ).append(logitproc) @property def all(self) -> Iterator["LogitsProcessor"]: """Iterator over all logits processors.""" return chain(self.argmax_invariant, self.non_argmax_invariant)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/logits_processor/interface.py
vllm/v1/sample/logits_processor/interface.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from abc import ABC, abstractmethod from collections.abc import Sequence from dataclasses import dataclass from enum import Enum, auto from typing import TYPE_CHECKING, Optional import torch from vllm import SamplingParams if TYPE_CHECKING: from vllm.config import VllmConfig class MoveDirectionality(Enum): # One-way i1->i2 req move within batch UNIDIRECTIONAL = auto() # Two-way i1<->i2 req swap within batch SWAP = auto() # Batch indices of any removed requests. RemovedRequest = int # (index, params, prompt_tok_ids, output_tok_ids) tuples for new # requests added to the batch. AddedRequest = tuple[int, SamplingParams, list[int] | None, list[int]] # (index 1, index 2, directionality) tuples representing # one-way moves or two-way swaps of requests in batch MovedRequest = tuple[int, int, MoveDirectionality] @dataclass(frozen=True) class BatchUpdate: """Persistent batch state change info for logitsprocs""" batch_size: int # Current num reqs in batch # Metadata for requests added to, removed from, and moved # within the persistent batch. # # Key assumption: the `output_tok_ids` list (which is an element of each # tuple in `added`) is a reference to the request's running output tokens # list; via this reference, the logits processors always see the latest # list of generated output tokens. # # NOTE: # * Added or moved requests may replace existing requests with the same # index. # * Operations should be processed in the following order: # - removed, added, moved removed: Sequence[RemovedRequest] added: Sequence[AddedRequest] moved: Sequence[MovedRequest] class LogitsProcessor(ABC): @classmethod def validate_params(cls, sampling_params: SamplingParams): """Validate sampling params for this logits processor. Raise ValueError for invalid ones. 
""" return None @abstractmethod def __init__( self, vllm_config: "VllmConfig", device: torch.device, is_pin_memory: bool ) -> None: raise NotImplementedError @abstractmethod def apply(self, logits: torch.Tensor) -> torch.Tensor: """Apply LogitsProcessor to batch logits tensor. The updated tensor must be returned but may be modified in-place. """ raise NotImplementedError @abstractmethod def is_argmax_invariant(self) -> bool: """True if logits processor has no impact on the argmax computation in greedy sampling. NOTE: may or may not have the same value for all instances of a given LogitsProcessor subclass, depending on subclass implementation. """ raise NotImplementedError @abstractmethod def update_state( self, batch_update: Optional["BatchUpdate"], ) -> None: """Called when there are new output tokens, prior to each forward pass. Args: batch_update: Non-None iff there have been changes to the batch makeup. """ raise NotImplementedError
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/logits_processor/__init__.py
vllm/v1/sample/logits_processor/__init__.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import importlib import inspect import itertools from abc import abstractmethod from collections.abc import Sequence from functools import lru_cache, partial from typing import TYPE_CHECKING import torch from vllm.logger import init_logger from vllm.logits_process import LogitsProcessor as RequestLogitsProcessor from vllm.sampling_params import SamplingParams from vllm.utils.torch_utils import guard_cuda_initialization from vllm.v1.sample.logits_processor.builtin import ( LogitBiasLogitsProcessor, MinPLogitsProcessor, MinTokensLogitsProcessor, process_dict_updates, ) from vllm.v1.sample.logits_processor.interface import ( BatchUpdate, LogitsProcessor, MoveDirectionality, ) from vllm.v1.sample.logits_processor.state import BatchUpdateBuilder, LogitsProcessors if TYPE_CHECKING: from vllm.config import VllmConfig logger = init_logger(__name__) # Error message when the user tries to initialize vLLM with a pooling model # and custom logitsproces STR_POOLING_REJECTS_LOGITSPROCS = ( "Pooling models do not support custom logits processors." ) # Error message when the user tries to initialize vLLM with a speculative # decoding enabled and custom logitsproces STR_SPEC_DEC_REJECTS_LOGITSPROCS = ( "Custom logits processors are not supported when speculative decoding is enabled." 
) LOGITSPROCS_GROUP = "vllm.logits_processors" BUILTIN_LOGITS_PROCESSORS: list[type[LogitsProcessor]] = [ MinTokensLogitsProcessor, LogitBiasLogitsProcessor, MinPLogitsProcessor, ] def _load_logitsprocs_plugins() -> list[type[LogitsProcessor]]: """Load all installed logit processor plugins""" from importlib.metadata import entry_points installed_logitsprocs_plugins = entry_points(group=LOGITSPROCS_GROUP) if len(installed_logitsprocs_plugins) == 0: logger.debug("No logitsprocs plugins installed (group %s).", LOGITSPROCS_GROUP) return [] # Load logitsprocs plugins logger.debug("Loading installed logitsprocs plugins (group %s):", LOGITSPROCS_GROUP) classes: list[type[LogitsProcessor]] = [] for entrypoint in installed_logitsprocs_plugins: try: logger.debug( "- Loading logitproc plugin entrypoint=%s target=%s", entrypoint.name, entrypoint.value, ) with guard_cuda_initialization(): classes.append(entrypoint.load()) except Exception as e: logger.error("Failed to load LogitsProcessor plugin %s: %s", entrypoint, e) raise RuntimeError( f"Failed to load LogitsProcessor plugin {entrypoint}" ) from e return classes def _load_logitsprocs_by_fqcns( logits_processors: Sequence[str | type[LogitsProcessor]] | None, ) -> list[type[LogitsProcessor]]: """Load logit processor types, identifying them by fully-qualified class names (FQCNs). Effectively, a mixed list of logitproc types and FQCN strings is converted into a list of entirely logitproc types, by loading from the FQCNs. FQCN syntax is <module>:<type> i.e. 
x.y.z:CustomLogitProc Already-loaded logitproc types must be subclasses of LogitsProcessor Args: logits_processors: Potentially mixed list of logitsprocs types and FQCN strings for logitproc types Returns: List of logitproc types """ if not logits_processors: return [] logger.debug( "%s additional custom logits processors specified, checking whether " "they need to be loaded.", len(logits_processors), ) classes: list[type[LogitsProcessor]] = [] for ldx, logitproc in enumerate(logits_processors): if isinstance(logitproc, type): logger.debug(" - Already-loaded logit processor: %s", logitproc.__name__) if not issubclass(logitproc, LogitsProcessor): raise ValueError( f"{logitproc.__name__} is not a subclass of LogitsProcessor" ) classes.append(logitproc) continue logger.debug("- Loading logits processor %s", logitproc) module_path, qualname = logitproc.split(":") try: # Load module with guard_cuda_initialization(): module = importlib.import_module(module_path) except Exception as e: logger.error( "Failed to load %sth LogitsProcessor plugin %s: %s", ldx, logitproc, e, ) raise RuntimeError( f"Failed to load {ldx}th LogitsProcessor plugin {logitproc}" ) from e # Walk down dotted name to get logitproc class obj = module for attr in qualname.split("."): obj = getattr(obj, attr) if not isinstance(obj, type): raise ValueError("Loaded logit processor must be a type.") if not issubclass(obj, LogitsProcessor): raise ValueError(f"{obj.__name__} must be a subclass of LogitsProcessor") classes.append(obj) return classes def _load_custom_logitsprocs( logits_processors: Sequence[str | type[LogitsProcessor]] | None, ) -> list[type[LogitsProcessor]]: """Load all custom logits processors. 
* First load all installed logitproc plugins * Second load custom logitsprocs pass by the user at initialization time Args: logits_processors: potentially mixed list of logitproc types and logitproc type fully-qualified names (FQCNs) which need to be loaded Returns: A list of all loaded logitproc types """ from vllm.platforms import current_platform if current_platform.is_tpu(): # No logitsprocs specified by caller # TODO(andy) - vLLM V1 on TPU does not support custom logitsprocs return [] return _load_logitsprocs_plugins() + _load_logitsprocs_by_fqcns(logits_processors) def build_logitsprocs( vllm_config: "VllmConfig", device: torch.device, is_pin_memory: bool, is_pooling_model: bool, custom_logitsprocs: Sequence[str | type[LogitsProcessor]] = (), ) -> LogitsProcessors: if is_pooling_model: if custom_logitsprocs: raise ValueError(STR_POOLING_REJECTS_LOGITSPROCS) logger.debug( "Skipping logits processor loading because pooling models" " do not support logits processors." ) return LogitsProcessors() # Check if speculative decoding is enabled. if vllm_config.speculative_config: if custom_logitsprocs: raise ValueError(STR_SPEC_DEC_REJECTS_LOGITSPROCS) logger.warning( "min_p, logit_bias, and min_tokens parameters won't currently work " "with speculative decoding enabled." 
) return LogitsProcessors() custom_logitsprocs_classes = _load_custom_logitsprocs(custom_logitsprocs) return LogitsProcessors( ctor(vllm_config, device, is_pin_memory) for ctor in itertools.chain( BUILTIN_LOGITS_PROCESSORS, custom_logitsprocs_classes ) ) cached_load_custom_logitsprocs = lru_cache(_load_custom_logitsprocs) def validate_logits_processors_parameters( logits_processors: Sequence[str | type[LogitsProcessor]] | None, sampling_params: SamplingParams, ): logits_processors = ( tuple(logits_processors) if logits_processors is not None else None ) for logits_procs in cached_load_custom_logitsprocs(logits_processors): logits_procs.validate_params(sampling_params) class AdapterLogitsProcessor(LogitsProcessor): """Wrapper for per-request logits processors To wrap a specific per-request logits processor, * Subclass `AdapterLogitsProcessor` * Implement `self.is_argmax_invariant()` base-class method * Implement `self.new_req_logits_processor(params)` `self.__init__(vllm_config, device, is_pin_memory)` does not need to be overridden in general. However, to implement custom constructor behavior - especially any logic which operates on or stores `vllm_config`, `device`, or `is_pin_memory` - `self.__init__(vllm_config, device, is_pin_memory)` must be overridden and the override must call `super().__init__(vllm_config, device, is_pin_memory)` """ def __init__( self, vllm_config: "VllmConfig", device: torch.device, is_pin_memory: bool ): """Subclass must invoke `super().__init__(vllm_config, device, is_pin_memory)`. Subclass constructor may find it useful to utilize the `vllm_config`, `device` and `is_pin_memory` argument. However regardless of whether these arguments are used, the vLLM logits processor interface requires all three arguments to be present. 
""" # Map req index -> logits processor state # # State representation is a partial[Tensor] comprising a request-level # logits processor with the output token ids argument and (if required) # the prompt token ids argument pre-populated # # Note that the partial carries a *reference* to output token ids, and # will thus always operate on the list as it is currently, not as it # was when the partial was created. self.req_info: dict[int, partial[torch.Tensor]] = {} @abstractmethod def new_req_logits_processor( self, params: SamplingParams, ) -> RequestLogitsProcessor | None: """Consume request info; return a per-request logits processor. Return None if logits processor does not need to be applied to request Args: params: request sampling params Returns: None if logits processor should not be applied to request; otherwise returns a `RequestLogitsProcessor` instance """ raise NotImplementedError def _new_state( self, params: SamplingParams, prompt_ids: list[int] | None, output_ids: list[int], ) -> partial[torch.Tensor] | None: """Return state representation for new request Returns None if logits processor is not applicable to request Args: params: request sampling params prompt_ids: request prompt token ids output_ids: decoded tokens so far for this request Returns: logits processor partial[Tensor] or None """ if req_lp := self.new_req_logits_processor(params): args = ( [prompt_ids, output_ids] if (len(inspect.signature(req_lp).parameters) == 3) else [output_ids] ) return partial(req_lp, *args) # type: ignore[misc] return None def update_state(self, batch_update: BatchUpdate | None): process_dict_updates( self.req_info, batch_update, self._new_state, ) def apply(self, logits: torch.Tensor) -> torch.Tensor: if self.req_info: # Apply per-request logits processors to corresponding rows of # logits tensor for req_idx, req_lp in self.req_info.items(): req_logits = logits[req_idx] new_logits = req_lp(req_logits) if new_logits is not req_logits: # Modify logits tensor row 
in-place if necessary logits[req_idx] = new_logits return logits __all__ = [ "LogitsProcessor", "LogitBiasLogitsProcessor", "MinPLogitsProcessor", "MinTokensLogitsProcessor", "BatchUpdate", "BatchUpdateBuilder", "MoveDirectionality", "LogitsProcessors", "build_logitsprocs", "STR_POOLING_REJECTS_LOGITSPROCS", "LOGITSPROCS_GROUP", "AdapterLogitsProcessor", ]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/ops/penalties.py
vllm/v1/sample/ops/penalties.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch from vllm.model_executor.layers.utils import apply_penalties from vllm.utils.platform_utils import is_pin_memory_available from vllm.utils.torch_utils import make_tensor_with_pad def apply_all_penalties( logits: torch.Tensor, prompt_token_ids: torch.Tensor, presence_penalties: torch.Tensor, frequency_penalties: torch.Tensor, repetition_penalties: torch.Tensor, output_token_ids: list[list[int]], ) -> torch.Tensor: """ Applies presence, frequency and repetition penalties to the logits. """ _, vocab_size = logits.shape output_tokens_t = _convert_to_tensors(output_token_ids, vocab_size, logits.device) # In the async scheduling case, rows that won't have penalties applied may contain # -1 placeholder token ids. We must replace these with valid token ids so that the # scatter done in apply_penalties is valid. # NOTE(nick): The penalties implementation is currently quite inefficient and # will be reworked anyhow. output_tokens_t.masked_fill_(output_tokens_t == -1, vocab_size) return apply_penalties( logits, prompt_token_ids, output_tokens_t, presence_penalties, frequency_penalties, repetition_penalties, ) def _convert_to_tensors( output_token_ids: list[list[int]], vocab_size: int, device: torch.device ) -> torch.Tensor: """ Convert the different list data structures to tensors. """ output_tokens_tensor = make_tensor_with_pad( output_token_ids, # Use the value of vocab_size as a pad since we don't have a # token_id of this value. pad=vocab_size, device="cpu", dtype=torch.int64, pin_memory=is_pin_memory_available(), ) return output_tokens_tensor.to(device, non_blocking=True)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/ops/logprobs.py
vllm/v1/sample/ops/logprobs.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Some utilities for logprobs, including logits.""" import torch from vllm.platforms import current_platform @torch.compile(dynamic=True, backend=current_platform.simple_compile_backend) def batched_count_greater_than(x: torch.Tensor, values: torch.Tensor) -> torch.Tensor: """ Counts elements in each row of x that are greater than the corresponding value in values. Use torch.compile to generate an optimized kernel for this function. otherwise, it will create additional copies of the input tensors and cause memory issues. Args: x (torch.Tensor): A 2D tensor of shape (batch_size, n_elements). values (torch.Tensor): A 2D tensor of shape (batch_size, 1). Returns: torch.Tensor: A 1D tensor of shape (batch_size,) with the counts. """ return (x >= values).sum(-1)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/ops/bad_words.py
vllm/v1/sample/ops/bad_words.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch _SMALLEST_LOGIT = float("-inf") def _apply_bad_words_single_batch( logits: torch.Tensor, bad_words_token_ids: list[list[int]], past_tokens_ids: list[int], ) -> None: for bad_word_ids in bad_words_token_ids: if len(bad_word_ids) > len(past_tokens_ids) + 1: continue prefix_length = len(bad_word_ids) - 1 last_token_id = bad_word_ids[-1] actual_prefix = past_tokens_ids[-prefix_length:] if prefix_length > 0 else [] expected_prefix = bad_word_ids[:prefix_length] assert len(actual_prefix) == len(expected_prefix) if actual_prefix == expected_prefix: logits[last_token_id] = _SMALLEST_LOGIT def apply_bad_words( logits: torch.Tensor, bad_words_token_ids: dict[int, list[list[int]]], past_tokens_ids: list[list[int]], ) -> None: for i, bad_words_ids in bad_words_token_ids.items(): _apply_bad_words_single_batch(logits[i], bad_words_ids, past_tokens_ids[i]) def apply_bad_words_with_drafts( logits: torch.Tensor, bad_words_token_ids: dict[int, list[list[int]]], past_tokens_ids: list[list[int]], num_draft_tokens: list[int], ) -> None: start_idx = 0 for i, bad_words_ids in bad_words_token_ids.items(): for draft_idx in range(num_draft_tokens[i]): _apply_bad_words_single_batch( logits[start_idx + draft_idx], bad_words_ids, past_tokens_ids[start_idx + draft_idx], ) start_idx += num_draft_tokens[i]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/ops/__init__.py
vllm/v1/sample/ops/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/v1/sample/ops/topk_topp_sampler.py
vllm/v1/sample/ops/topk_topp_sampler.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch import torch.nn as nn from packaging import version from vllm import envs from vllm._aiter_ops import rocm_aiter_ops from vllm.config.model import LogprobsMode from vllm.logger import init_logger from vllm.platforms import CpuArchEnum, current_platform logger = init_logger(__name__) class TopKTopPSampler(nn.Module): """ Module that performs optional top-k and top-p filtering followed by weighted random sampling of logits. Implementations may update the logits tensor in-place. """ def __init__(self, logprobs_mode: LogprobsMode = "raw_logprobs") -> None: super().__init__() self.logprobs_mode = logprobs_mode # flashinfer optimization does not apply if intermediate # logprobs/logits after top_k/top_p need to be returned if ( logprobs_mode not in ("processed_logits", "processed_logprobs") and current_platform.is_cuda() ): if envs.VLLM_USE_FLASHINFER_SAMPLER: from vllm.v1.attention.backends.flashinfer import FlashInferBackend capability = current_platform.get_device_capability() assert capability is not None if not FlashInferBackend.supports_compute_capability(capability): capability_str = capability.as_version_str() raise RuntimeError( "FlashInfer does not support compute capability " f"{capability_str}, unset VLLM_USE_FLASHINFER_SAMPLER=1." ) # Users must opt in explicitly via VLLM_USE_FLASHINFER_SAMPLER=1. logger.info_once( "Using FlashInfer for top-p & top-k sampling.", scope="global", ) self.forward = self.forward_cuda else: logger.debug_once( "FlashInfer top-p/top-k sampling is available but disabled " "by default. Set VLLM_USE_FLASHINFER_SAMPLER=1 to opt in " "after verifying accuracy for your workloads." ) self.forward = self.forward_native elif current_platform.is_cpu(): arch = current_platform.get_cpu_architecture() # Fall back to native implementation for POWERPC and RISCV. # On PowerPC argmax produces incorrect output with torch.compile. 
# PR: https://github.com/vllm-project/vllm/pull/26987 if arch in (CpuArchEnum.RISCV, CpuArchEnum.POWERPC): self.forward = self.forward_native else: self.forward = self.forward_cpu elif ( logprobs_mode not in ("processed_logits", "processed_logprobs") and rocm_aiter_ops.is_enabled() ): try: import aiter.ops.sampling # noqa: F401 self.aiter_ops = torch.ops.aiter logger.info_once( "Using aiter sampler on ROCm (lazy import, sampling-only)." ) self.forward = self.forward_hip except ImportError: logger.warning_once( "aiter.ops.sampling is not available on ROCm. " "Falling back to forward_native implementation." ) self.forward = self.forward_native else: self.forward = self.forward_native self.apply_top_k_top_p = apply_top_k_top_p def forward_native( self, logits: torch.Tensor, generators: dict[int, torch.Generator], k: torch.Tensor | None, p: torch.Tensor | None, ) -> tuple[torch.Tensor, torch.Tensor | None]: """ PyTorch-native implementation of top-k and top-p sampling. The logits tensor may be updated in-place. """ logits = self.apply_top_k_top_p(logits, k, p) logits_to_return = None if self.logprobs_mode == "processed_logits": logits_to_return = logits elif self.logprobs_mode == "processed_logprobs": logits_to_return = logits.log_softmax(dim=-1, dtype=torch.float32) probs = logits.softmax(dim=-1, dtype=torch.float32) return random_sample(probs, generators), logits_to_return def forward_cuda( self, logits: torch.Tensor, generators: dict[int, torch.Generator], k: torch.Tensor | None, p: torch.Tensor | None, ) -> tuple[torch.Tensor, torch.Tensor | None]: """More optimized implementation for top-k and top-p sampling.""" # We prefer `random_sample` over `flashinfer_sample` when sorting is # not needed. This is because `random_sample` does not require # CPU-GPU synchronization while `flashinfer_sample` does. if (k is None and p is None) or generators: if generators: logger.debug_once( "FlashInfer 0.2.3+ does not support " "per-request generators. 
Falling back to " "PyTorch-native implementation." ) return self.forward_native(logits, generators, k, p) assert self.logprobs_mode not in ("processed_logits", "processed_logprobs"), ( "FlashInfer does not support returning logits/logprobs" ) # flashinfer sampling functions expect contiguous logits. # In flex_attn/triton_attn fp32 inference, logits can be non-contiguous # because of slicing operation in logits_processor. return flashinfer_sample(logits.contiguous(), k, p, generators), None def forward_cpu( self, logits: torch.Tensor, generators: dict[int, torch.Generator], k: torch.Tensor | None, p: torch.Tensor | None, ) -> tuple[torch.Tensor, torch.Tensor | None]: """ PyTorch-native implementation of top-k and top-p sampling for CPU. The logits tensor may be updated in-place. """ logits = self.apply_top_k_top_p(logits, k, p) logits_to_return = None if self.logprobs_mode == "processed_logits": logits_to_return = logits elif self.logprobs_mode == "processed_logprobs": logits_to_return = logits.log_softmax(dim=-1, dtype=torch.float32) if len(generators) != logits.shape[0]: return compiled_random_sample(logits), logits_to_return else: probs = logits.softmax(dim=-1, dtype=torch.float32) q = torch.empty_like(probs) q.exponential_() for i, generator in generators.items(): q[i].exponential_(generator=generator) return probs.div_(q).argmax(dim=-1).view(-1), logits_to_return def forward_hip( self, logits: torch.Tensor, generators: dict[int, torch.Generator], k: torch.Tensor | None, p: torch.Tensor | None, ) -> tuple[torch.Tensor, torch.Tensor | None]: """Optimized ROCm/aiter path (same structure as forward_cuda).""" if (k is None and p is None) or generators: if generators: logger.warning_once( "aiter sampler does not support per-request generators; " "falling back to PyTorch-native." 
) return self.forward_native(logits, generators, k, p) assert self.logprobs_mode not in ( "processed_logits", "processed_logprobs", ), "aiter sampler does not support returning logits/logprobs." return self.aiter_sample(logits, k, p, generators), None def aiter_sample( self, logits: torch.Tensor, k: torch.Tensor | None, p: torch.Tensor | None, generators: dict[int, torch.Generator], ) -> torch.Tensor: """Sample from logits using aiter ops.""" use_top_k = k is not None use_top_p = p is not None # Joint k+p path if use_top_p and use_top_k: probs = logits.softmax(dim=-1, dtype=torch.float32).contiguous() next_token_ids = self.aiter_ops.top_k_top_p_sampling_from_probs( probs, None, *_to_tensor_scalar_tuple(k), *_to_tensor_scalar_tuple(p), deterministic=True, ) return next_token_ids.view(-1) # Top-p only path elif use_top_p: probs = logits.softmax(dim=-1, dtype=torch.float32).contiguous() next_token_ids = self.aiter_ops.top_p_sampling_from_probs( probs, None, *_to_tensor_scalar_tuple(p), deterministic=True ) return next_token_ids.view(-1) # Top-k only path elif use_top_k: probs = logits.softmax(dim=-1, dtype=torch.float32).contiguous() renorm_probs = self.aiter_ops.top_k_renorm_probs( probs, *_to_tensor_scalar_tuple(k) ) return torch.multinomial(renorm_probs, num_samples=1).view(-1) raise RuntimeError("aiter_sample was called with no active top-k or top-p.") # Note: this is a workaround for # https://github.com/pytorch/pytorch/pull/151218 @torch.compile(dynamic=True) def compiled_random_sample(logits: torch.Tensor) -> torch.Tensor: probs = logits.softmax(dim=-1, dtype=torch.float32) q = torch.empty_like(probs) q.exponential_() return probs.div(q).argmax(dim=-1).view(-1) def apply_top_k_top_p( logits: torch.Tensor, k: torch.Tensor | None, p: torch.Tensor | None, ) -> torch.Tensor: """Apply top-k and top-p masks to the logits. If a top-p is used, this function will sort the logits tensor, which can be slow for large batches. The logits tensor may be updated in-place. 
""" if p is None: if k is None: return logits # Avoid sorting vocab for top-k only case. return apply_top_k_only(logits, k) logits_sort, logits_idx = logits.sort(dim=-1, descending=False) if k is not None: # Apply top-k. top_k_mask = logits_sort.size(1) - k.to(torch.long) # shape: B # Get all the top_k values. top_k_mask = logits_sort.gather(1, top_k_mask.unsqueeze(dim=1)) top_k_mask = logits_sort < top_k_mask logits_sort.masked_fill_(top_k_mask, -float("inf")) if p is not None: # Apply top-p. probs_sort = logits_sort.softmax(dim=-1) probs_sum = torch.cumsum(probs_sort, dim=-1, out=probs_sort) top_p_mask = probs_sum <= 1 - p.unsqueeze(dim=1) # at least one top_p_mask[:, -1] = False logits_sort.masked_fill_(top_p_mask, -float("inf")) # Re-sort the probabilities. logits = logits_sort.scatter(dim=-1, index=logits_idx, src=logits_sort) return logits def apply_top_k_only( logits: torch.Tensor, k: torch.Tensor, ) -> torch.Tensor: """ Apply top-k mask to the logits. This implementation doesn't involve sorting the entire vocab. The logits tensor may be updated in-place. """ no_top_k_mask = k == logits.shape[1] # Set non-top-k rows to 1 so that we can gather. k = k.masked_fill(no_top_k_mask, 1) max_top_k = k.max() # topk.values tensor has shape [batch_size, max_top_k]. # Convert top k to 0-based index in range [0, max_top_k). k_index = k.sub_(1).unsqueeze(1) top_k_mask = logits.topk(max_top_k, dim=1).values.gather(1, k_index.long()) # Handle non-topk rows. top_k_mask.masked_fill_(no_top_k_mask.unsqueeze(1), -float("inf")) logits.masked_fill_(logits < top_k_mask, -float("inf")) return logits def random_sample( probs: torch.Tensor, generators: dict[int, torch.Generator], ) -> torch.Tensor: """Randomly sample from the probabilities. We use this function instead of torch.multinomial because torch.multinomial causes CPU-GPU synchronization. 
""" q = torch.empty_like(probs) # NOTE(woosuk): To batch-process the requests without their own seeds, # which is the common case, we first assume that every request does # not have its own seed. Then, we overwrite the values for the requests # that have their own seeds. if len(generators) != probs.shape[0]: q.exponential_() if generators: # TODO(woosuk): This can be slow because we handle each request # one by one. Optimize this. for i, generator in generators.items(): q[i].exponential_(generator=generator) return probs.div_(q).argmax(dim=-1).view(-1) def flashinfer_sample( logits: torch.Tensor, k: torch.Tensor | None, p: torch.Tensor | None, generators: dict[int, torch.Generator], ) -> torch.Tensor: """Sample from the logits using FlashInfer. Statistically, this function is equivalent to the `random_sample` function. However, this function is faster because it avoids sorting the logits tensor via rejection sampling. NOTE: The outputs of this function do not necessarily match the outputs of the `random_sample` function. It only guarantees that the outputs are statistically equivalent. NOTE: This function includes CPU-GPU synchronization, while `random_sample` does not. Call this function at the end of the forward pass to minimize the synchronization overhead. """ import flashinfer if version.parse(flashinfer.__version__) < version.parse("0.2.3"): raise ImportError( "FlashInfer version >= 0.2.3 required for top-k and top-p sampling. " ) assert not (k is None and p is None) if k is None: # Top-p only. probs = logits.softmax(dim=-1, dtype=torch.float32) next_token_ids = flashinfer.sampling.top_p_sampling_from_probs( probs, p, deterministic=True ) elif p is None: # Top-k only. probs = logits.softmax(dim=-1, dtype=torch.float32) next_token_ids = flashinfer.sampling.top_k_sampling_from_probs( probs, k, deterministic=True ) else: # Both top-k and top-p. 
next_token_ids = flashinfer.sampling.top_k_top_p_sampling_from_logits( logits, k, p, deterministic=True ) return next_token_ids.view(-1) def _to_tensor_scalar_tuple(x): if isinstance(x, torch.Tensor): return (x, 0) else: return (None, x)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/platforms/cuda.py
vllm/platforms/cuda.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Code inside this file can safely assume cuda platform, e.g. importing pynvml. However, it should not initialize cuda context. """ import os from collections.abc import Callable from functools import cache, wraps from typing import TYPE_CHECKING, Optional, TypeVar import torch from typing_extensions import ParamSpec # import custom ops, trigger op registration import vllm._C # noqa from vllm.attention.backends.registry import AttentionBackendEnum from vllm.logger import init_logger from vllm.utils.import_utils import import_pynvml from vllm.utils.torch_utils import cuda_device_count_stateless from .interface import DeviceCapability, Platform, PlatformEnum if TYPE_CHECKING: from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig from vllm.config.cache import CacheDType else: VllmConfig = None CacheDType = None logger = init_logger(__name__) _P = ParamSpec("_P") _R = TypeVar("_R") pynvml = import_pynvml() # pytorch 2.5 uses cudnn sdpa by default, which will cause crash on some models # see https://github.com/huggingface/diffusers/issues/9704 for details torch.backends.cuda.enable_cudnn_sdp(False) @cache def _get_backend_priorities( use_mla: bool, device_capability: DeviceCapability, ) -> list[AttentionBackendEnum]: """Get backend priorities with lazy import to avoid circular dependency.""" if use_mla: if device_capability.major == 10: return [ AttentionBackendEnum.CUTLASS_MLA, AttentionBackendEnum.FLASHINFER_MLA, AttentionBackendEnum.FLASH_ATTN_MLA, AttentionBackendEnum.FLASHMLA, AttentionBackendEnum.TRITON_MLA, AttentionBackendEnum.FLASHMLA_SPARSE, ] else: return [ AttentionBackendEnum.FLASH_ATTN_MLA, AttentionBackendEnum.FLASHMLA, AttentionBackendEnum.FLASHINFER_MLA, AttentionBackendEnum.TRITON_MLA, AttentionBackendEnum.FLASHMLA_SPARSE, ] else: if device_capability.major == 10: return [ AttentionBackendEnum.FLASHINFER, 
AttentionBackendEnum.FLASH_ATTN, AttentionBackendEnum.TRITON_ATTN, AttentionBackendEnum.FLEX_ATTENTION, ] else: return [ AttentionBackendEnum.FLASH_ATTN, AttentionBackendEnum.FLASHINFER, AttentionBackendEnum.TRITON_ATTN, AttentionBackendEnum.FLEX_ATTENTION, ] def with_nvml_context(fn: Callable[_P, _R]) -> Callable[_P, _R]: @wraps(fn) def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R: pynvml.nvmlInit() try: return fn(*args, **kwargs) finally: pynvml.nvmlShutdown() return wrapper class CudaPlatformBase(Platform): _enum = PlatformEnum.CUDA device_name: str = "cuda" device_type: str = "cuda" dispatch_key: str = "CUDA" ray_device_key: str = "GPU" dist_backend: str = "nccl" device_control_env_var: str = "CUDA_VISIBLE_DEVICES" @property def supported_dtypes(self) -> list[torch.dtype]: if self.has_device_capability(80): # Ampere and Hopper or later NVIDIA GPUs. return [torch.bfloat16, torch.float16, torch.float32] if self.has_device_capability(60): # Pascal, Volta and Turing NVIDIA GPUs, BF16 is not supported return [torch.float16, torch.float32] # Kepler and Maxwell NVIDIA GPUs, only FP32 is supported, # though vLLM doesn't support these GPUs. return [torch.float32] @classmethod def set_device(cls, device: torch.device) -> None: """ Set the device for the current platform. 
""" torch.cuda.set_device(device) # With this trick we can force the device to be set eagerly # see https://github.com/pytorch/pytorch/issues/155668 # for why and when it is needed _ = torch.zeros(1, device=device) @classmethod def get_device_capability(cls, device_id: int = 0) -> DeviceCapability | None: raise NotImplementedError @classmethod def get_device_name(cls, device_id: int = 0) -> str: raise NotImplementedError @classmethod def get_device_total_memory(cls, device_id: int = 0) -> int: raise NotImplementedError @classmethod def is_fully_connected(cls, device_ids: list[int]) -> bool: raise NotImplementedError @classmethod def log_warnings(cls): pass @classmethod def check_and_update_config(cls, vllm_config: "VllmConfig") -> None: from vllm.attention.backends.registry import AttentionBackendEnum parallel_config = vllm_config.parallel_config model_config = vllm_config.model_config if parallel_config.worker_cls == "auto": parallel_config.worker_cls = "vllm.v1.worker.gpu_worker.Worker" cache_config = vllm_config.cache_config if cache_config and cache_config.block_size is None: cache_config.block_size = 16 # TODO(lucas): handle this more gracefully # Note: model_config may be None during testing # Note: block_size is initialized in # HybridAttentionMambaModelConfig.verify_and_update_config # for models with both attention and mamba, # and doesn't need to be reinitialized here if ( model_config is not None and model_config.use_mla and cache_config.block_size is not None ): use_sparse = hasattr(vllm_config.model_config.hf_config, "index_topk") # If `--attention-config.backend` is not set and we are using MLA, # then we default to FlashMLA backend for non-blackwell GPUs, # else we default to CutlassMLA. For each case, we force the # required block_size. 
use_flashmla = False use_cutlass_mla = False use_flashinfer_mla = False if vllm_config.attention_config.backend is None: # Default case if cls.is_device_capability_family(100) and not use_sparse: # Blackwell => Force CutlassMLA (unless sparse, i.e. DSv3.2). use_cutlass_mla = True # Set the backend in AttentionConfig so it's used during # backend selection vllm_config.attention_config.backend = ( AttentionBackendEnum.CUTLASS_MLA ) else: # Not Blackwell use_flashmla = True else: # Forced case backend = vllm_config.attention_config.backend use_flashmla = backend == AttentionBackendEnum.FLASHMLA use_cutlass_mla = backend == AttentionBackendEnum.CUTLASS_MLA use_flashinfer_mla = backend == AttentionBackendEnum.FLASHINFER_MLA from vllm.attention.ops.flashmla import is_flashmla_dense_supported if ( use_flashmla and is_flashmla_dense_supported()[0] and cache_config.block_size % 64 != 0 ): cache_config.block_size = 64 logger.info("Forcing kv cache block size to 64 for FlashMLA backend.") if use_cutlass_mla and cache_config.block_size % 128 != 0: cache_config.block_size = 128 logger.info( "Forcing kv cache block size to 128 for CUTLASS_MLA backend." ) if ( use_flashinfer_mla and cache_config.block_size != 32 and cache_config.block_size % 64 != 0 ): cache_config.block_size = 64 logger.info( "Forcing kv cache block size to 64 for FlashInferMLA backend." ) # TODO(Chen): remove this hacky code if use_sparse and cache_config.block_size != 64: cache_config.block_size = 64 logger.info( "Forcing kv cache block size to 64 for FlashMLASparse backend." ) scheduler_config = vllm_config.scheduler_config # Note: model_config may be None during testing if ( model_config is not None and model_config.is_mm_prefix_lm and scheduler_config.is_multimodal_model and not scheduler_config.disable_chunked_mm_input ): logger.warning( "Forcing --disable_chunked_mm_input for models " "with multimodal-bidirectional attention." 
) scheduler_config.disable_chunked_mm_input = True @classmethod def get_current_memory_usage( cls, device: torch.types.Device | None = None ) -> float: torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats(device) return torch.cuda.max_memory_allocated(device) @classmethod def get_valid_backends( cls, device_capability: DeviceCapability, attn_selector_config: "AttentionSelectorConfig", ) -> tuple[ list[tuple["AttentionBackendEnum", int]], dict["AttentionBackendEnum", list[str]], ]: valid_backends_priorities = [] invalid_reasons = {} backend_priorities = _get_backend_priorities( attn_selector_config.use_mla, device_capability ) for priority, backend in enumerate(backend_priorities): try: backend_class = backend.get_class() invalid_reasons_i = backend_class.validate_configuration( device_capability=device_capability, **attn_selector_config._asdict(), ) except ImportError: invalid_reasons_i = ["ImportError"] if invalid_reasons_i: invalid_reasons[backend] = invalid_reasons_i else: valid_backends_priorities.append((backend, priority)) return valid_backends_priorities, invalid_reasons @classmethod def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", attn_selector_config: "AttentionSelectorConfig", ) -> str: device_capability = cls.get_device_capability() assert device_capability is not None attn_selector_config = attn_selector_config._replace(block_size=None) # First try checking just the selected backend, if there is one. if selected_backend is not None: try: backend_class = selected_backend.get_class() invalid_reasons = backend_class.validate_configuration( device_capability=device_capability, **attn_selector_config._asdict(), ) except ImportError: invalid_reasons = ["ImportError"] if invalid_reasons: raise ValueError( f"Selected backend {selected_backend} is not valid for " f"this configuration. 
Reason: {invalid_reasons}" ) else: logger.info("Using %s backend.", selected_backend) return selected_backend.get_path() # No selected backend or the selected backend is invalid, # so we try finding a valid backend. valid_backends_priorities, invalid_reasons = cls.get_valid_backends( device_capability=device_capability, attn_selector_config=attn_selector_config, ) reasons_str = ( "{" + ", ".join( f"{backend.name}: [{', '.join(reasons)}]" for backend, reasons in invalid_reasons.items() ) + "}" ) config_str = attn_selector_config.__repr__() logger.debug_once( f"Some attention backends are not valid for {cls.device_name} with " f"{config_str}. Reasons: {reasons_str}." ) if len(valid_backends_priorities) == 0: raise ValueError( f"No valid attention backend found for {cls.device_name} " f"with {config_str}. Reasons: {reasons_str}." ) # We have found some valid backends. Select the one with the # highest priority. sorted_indices = sorted( range(len(valid_backends_priorities)), key=lambda i: valid_backends_priorities[i][1], ) selected_index = sorted_indices[0] selected_backend = valid_backends_priorities[selected_index][0] logger.info_once( "Using %s attention backend out of potential backends: %s", selected_backend.name, tuple(b[0].name for b in valid_backends_priorities), scope="local", ) return selected_backend.get_path() @classmethod def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: return [ AttentionBackendEnum.TORCH_SDPA, AttentionBackendEnum.FLASH_ATTN, ] @classmethod def get_vit_attn_backend( cls, head_size: int, dtype: torch.dtype, backend: Optional["AttentionBackendEnum"] = None, ) -> "AttentionBackendEnum": if backend is not None: assert backend in cls.get_supported_vit_attn_backends(), ( f"Backend {backend} is not supported for vit attention. 
" f"Supported backends are: {cls.get_supported_vit_attn_backends()}" ) logger.info_once(f"Using backend {backend} for vit attention") return backend # Try FlashAttention first if (cc := cls.get_device_capability()) and cc.major >= 8: try: backend_class = AttentionBackendEnum.FLASH_ATTN.get_class() if backend_class.supports_head_size( head_size ) and backend_class.supports_dtype(dtype): return AttentionBackendEnum.FLASH_ATTN except ImportError: pass return AttentionBackendEnum.TORCH_SDPA @classmethod def get_punica_wrapper(cls) -> str: return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU" @classmethod def get_device_communicator_cls(cls) -> str: return ( "vllm.distributed.device_communicators.cuda_communicator.CudaCommunicator" # noqa ) @classmethod def supports_fp8(cls) -> bool: return cls.has_device_capability(89) @classmethod def use_custom_allreduce(cls) -> bool: return True @classmethod def opaque_attention_op(cls) -> bool: return True @classmethod def get_static_graph_wrapper_cls(cls) -> str: return "vllm.compilation.cuda_graph.CUDAGraphWrapper" @classmethod def device_count(cls) -> int: return cuda_device_count_stateless() @classmethod def check_if_supports_dtype(cls, dtype: torch.dtype): if dtype == torch.bfloat16: # noqa: SIM102 if not cls.has_device_capability(80): capability = cls.get_device_capability() gpu_name = cls.get_device_name() if capability is None: compute_str = "does not have a compute capability" else: version_str = capability.as_version_str() compute_str = f"has compute capability {version_str}" raise ValueError( "Bfloat16 is only supported on GPUs " "with compute capability of at least 8.0. " f"Your {gpu_name} GPU {compute_str}. " "You can use float16 instead by explicitly setting the " "`dtype` flag in CLI, for example: --dtype=half." 
) @classmethod def insert_blocks_to_device( cls, src_cache: torch.Tensor, dst_cache: torch.Tensor, src_block_indices: torch.Tensor, dst_block_indices: torch.Tensor, ) -> None: """Copy blocks from src_cache to dst_cache on GPU.""" _src_cache = src_cache[:, src_block_indices] dst_cache[:, dst_block_indices] = _src_cache.to(dst_cache.device) @classmethod def swap_out_blocks_to_host( cls, src_cache: torch.Tensor, dst_cache: torch.Tensor, src_block_indices: torch.Tensor, dst_block_indices: torch.Tensor, ) -> None: """Copy blocks from GPU to host (CPU).""" _src_cache = src_cache[:, src_block_indices] dst_cache[:, dst_block_indices] = _src_cache.cpu() @classmethod def support_hybrid_kv_cache(cls) -> bool: return True @classmethod def support_static_graph_mode(cls) -> bool: return True # NVML utils # Note that NVML is not affected by `CUDA_VISIBLE_DEVICES`, # all the related functions work on real physical device ids. # the major benefit of using NVML is that it will not initialize CUDA class NvmlCudaPlatform(CudaPlatformBase): @classmethod @cache @with_nvml_context def get_device_capability(cls, device_id: int = 0) -> DeviceCapability | None: try: physical_device_id = cls.device_id_to_physical_device_id(device_id) handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle) return DeviceCapability(major=major, minor=minor) except RuntimeError: return None @classmethod @with_nvml_context def has_device_capability( cls, capability: tuple[int, int] | int, device_id: int = 0, ) -> bool: try: return super().has_device_capability(capability, device_id) except RuntimeError: return False @classmethod @with_nvml_context def get_device_name(cls, device_id: int = 0) -> str: physical_device_id = cls.device_id_to_physical_device_id(device_id) return cls._get_physical_device_name(physical_device_id) @classmethod @with_nvml_context def get_device_uuid(cls, device_id: int = 0) -> str: physical_device_id = 
cls.device_id_to_physical_device_id(device_id) handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) return pynvml.nvmlDeviceGetUUID(handle) @classmethod @with_nvml_context def get_device_total_memory(cls, device_id: int = 0) -> int: physical_device_id = cls.device_id_to_physical_device_id(device_id) handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) return int(pynvml.nvmlDeviceGetMemoryInfo(handle).total) @classmethod @with_nvml_context def is_fully_connected(cls, physical_device_ids: list[int]) -> bool: """ query if the set of gpus are fully connected by nvlink (1 hop) """ handles = [pynvml.nvmlDeviceGetHandleByIndex(i) for i in physical_device_ids] for i, handle in enumerate(handles): for j, peer_handle in enumerate(handles): if i < j: try: p2p_status = pynvml.nvmlDeviceGetP2PStatus( handle, peer_handle, pynvml.NVML_P2P_CAPS_INDEX_NVLINK, ) if p2p_status != pynvml.NVML_P2P_STATUS_OK: return False except pynvml.NVMLError: logger.exception( "NVLink detection failed. This is normal if" " your machine has no NVLink equipped." ) return False return True @classmethod def _get_physical_device_name(cls, device_id: int = 0) -> str: handle = pynvml.nvmlDeviceGetHandleByIndex(device_id) return pynvml.nvmlDeviceGetName(handle) @classmethod @with_nvml_context def log_warnings(cls): device_ids: int = pynvml.nvmlDeviceGetCount() if device_ids > 1: device_names = [cls._get_physical_device_name(i) for i in range(device_ids)] if ( len(set(device_names)) > 1 and os.environ.get("CUDA_DEVICE_ORDER") != "PCI_BUS_ID" ): logger.warning( "Detected different devices in the system: %s. 
Please" " make sure to set `CUDA_DEVICE_ORDER=PCI_BUS_ID` to " "avoid unexpected behavior.", ", ".join(device_names), ) class NonNvmlCudaPlatform(CudaPlatformBase): @classmethod @cache def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: major, minor = torch.cuda.get_device_capability(device_id) return DeviceCapability(major=major, minor=minor) @classmethod def get_device_name(cls, device_id: int = 0) -> str: return torch.cuda.get_device_name(device_id) @classmethod def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.cuda.get_device_properties(device_id) return device_props.total_memory @classmethod def is_fully_connected(cls, physical_device_ids: list[int]) -> bool: logger.exception( "NVLink detection not possible, as context support was" " not found. Assuming no NVLink available." ) return False # Autodetect either NVML-enabled or non-NVML platform # based on whether NVML is available. nvml_available = False try: try: pynvml.nvmlInit() nvml_available = True except Exception: # On Jetson, NVML is not supported. nvml_available = False finally: if nvml_available: pynvml.nvmlShutdown() CudaPlatform = NvmlCudaPlatform if nvml_available else NonNvmlCudaPlatform CudaPlatform.log_warnings()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/platforms/xpu.py
vllm/platforms/xpu.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import contextlib import os from typing import TYPE_CHECKING, Optional import torch from vllm.attention.backends.registry import AttentionBackendEnum from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum if TYPE_CHECKING: from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig else: VllmConfig = None logger = init_logger(__name__) class XPUPlatform(Platform): _enum = PlatformEnum.XPU device_name: str = "xpu" device_type: str = "xpu" dispatch_key: str = "XPU" # Intel XPU's device key is "GPU" for Ray. # see https://github.com/ray-project/ray/blob/6a5eb5865eeb9ccf058a79b44f107e327e360673/python/ray/_private/accelerators/intel_gpu.py#L20 # noqa: E501 ray_device_key: str = "GPU" dist_backend: str = "ccl" # ccl | xccl device_control_env_var: str = "ZE_AFFINITY_MASK" @classmethod def import_kernels(cls) -> None: # Do not import vllm._C with contextlib.suppress(ImportError): import vllm._moe_C # noqa: F401 @classmethod def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", attn_selector_config: "AttentionSelectorConfig", ) -> str: from vllm.v1.attention.backends.utils import set_kv_cache_layout set_kv_cache_layout("NHD") logger.info( "Setting VLLM_KV_CACHE_LAYOUT to 'NHD' for XPU; " "only NHD layout is supported by XPU attention kernels." 
) if attn_selector_config.use_sparse: raise NotImplementedError("Sparse Attention is not supported on XPU.") if selected_backend == AttentionBackendEnum.TRITON_ATTN: logger.info_once("Using Triton backend.") return AttentionBackendEnum.TRITON_ATTN.get_path() elif selected_backend == AttentionBackendEnum.FLASH_ATTN: logger.info_once("Using Flash Attention backend.") return AttentionBackendEnum.FLASH_ATTN.get_path() elif selected_backend: raise ValueError( f"Invalid attention backend for {cls.device_name}, " f"with use_mla: {attn_selector_config.use_mla}" ) logger.info("Using Flash Attention backend.") return AttentionBackendEnum.FLASH_ATTN.get_path() @classmethod def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: # XPU only supports FLASH_ATTN for vision attention. return [ AttentionBackendEnum.FLASH_ATTN, ] @classmethod def get_vit_attn_backend( cls, head_size: int, dtype: torch.dtype, backend: Optional["AttentionBackendEnum"] = None, ) -> "AttentionBackendEnum": if backend is not None: assert backend in cls.get_supported_vit_attn_backends(), ( f"Backend {backend} is not supported for vit attention. " f"Supported backends are: " f"{cls.get_supported_vit_attn_backends()}." ) logger.info_once(f"Using backend {backend} for vit attention") return backend logger.info_once( f"Using backend {AttentionBackendEnum.FLASH_ATTN} for vit attention" ) return AttentionBackendEnum.FLASH_ATTN @classmethod def set_device(cls, device: torch.device) -> None: """ Set the device for the current platform. 
""" torch.xpu.set_device(device) @classmethod def get_device_capability( cls, device_id: int = 0, ) -> DeviceCapability | None: # capacity format differs from cuda's and will cause unexpected # failure, so use None directly return None @classmethod def get_device_name(cls, device_id: int = 0) -> str: return torch.xpu.get_device_name(device_id) @classmethod def get_punica_wrapper(cls) -> str: xpu_use_triton_kernel = os.getenv("XPU_USE_TRITON_KERNEL", "0") == "1" if not xpu_use_triton_kernel: return "vllm.lora.punica_wrapper.punica_xpu.PunicaWrapperXPU" else: return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU" @classmethod def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.xpu.get_device_properties(device_id) return device_props.total_memory @classmethod def inference_mode(cls): return torch.no_grad() @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: cache_config = vllm_config.cache_config model_config = vllm_config.model_config # in V1(or with ipex chunked prefill) block_size is 64 if cache_config and cache_config.block_size is None: cache_config.block_size = 64 # lazy import to avoid circular import from vllm.config import CompilationMode, CUDAGraphMode compilation_config = vllm_config.compilation_config if compilation_config.compile_sizes is None: compilation_config.compile_sizes = [] assert compilation_config.cudagraph_mode == CUDAGraphMode.NONE, ( "CUDA graph mode should be NONE on XPU" ) if vllm_config.lora_config is not None: compilation_config.mode = CompilationMode.NONE # decrease triton kernel compilation scratch space for speculative decoding if vllm_config.speculative_config is not None: os.environ["IGC_ForceOCLSIMDWidth"] = "16" # noqa: SIM112 # check and update parallel config parallel_config = vllm_config.parallel_config # Only override worker_cls if it's still the default "auto" # This allows custom workers (like vllm-omni workers) to be used on XPU if parallel_config.worker_cls == 
"auto": parallel_config.worker_cls = "vllm.v1.worker.xpu_worker.XPUWorker" if vllm_config.kv_transfer_config is not None: vllm_config.kv_transfer_config.enable_permute_local_kv = True if model_config and model_config.use_mla: logger.info( "MLA is enabled on a non-GPU platform; forcing chunked " "prefill and prefix caching to be disabled." ) vllm_config.scheduler_config.enable_chunked_prefill = False vllm_config.scheduler_config.max_num_batched_tokens = max( vllm_config.model_config.max_model_len, vllm_config.scheduler_config.DEFAULT_MAX_NUM_BATCHED_TOKENS, ) @classmethod def support_hybrid_kv_cache(cls) -> bool: return True @classmethod def support_static_graph_mode(cls) -> bool: return False @classmethod def is_pin_memory_available(cls): return True @classmethod def get_current_memory_usage( cls, device: torch.types.Device | None = None ) -> float: torch.xpu.reset_peak_memory_stats(device) return torch.xpu.max_memory_allocated(device) @classmethod def fp8_dtype(cls) -> torch.dtype: return torch.float8_e5m2 @classmethod def is_data_center_gpu(cls) -> bool: device_name = cls.get_device_name().lower() return device_name.count("data center gpu") > 0 @classmethod def get_device_communicator_cls(cls) -> str: return "vllm.distributed.device_communicators.xpu_communicator.XpuCommunicator" # noqa @classmethod def device_count(cls) -> int: return torch.xpu.device_count() @classmethod def check_if_supports_dtype(cls, dtype: torch.dtype): if dtype == torch.bfloat16: # noqa: SIM102 device_name = cls.get_device_name().lower() # client gpu a770 if device_name.count("a770") > 0: raise ValueError( "Intel Arc A770 have bfloat16 accuracy known issue. " "You can use float16 instead by explicitly setting the " "`dtype` flag in CLI, for example: --dtype=half." 
) @classmethod def opaque_attention_op(cls) -> bool: return True @classmethod def insert_blocks_to_device( cls, src_cache: torch.Tensor, dst_cache: torch.Tensor, src_block_indices: torch.Tensor, dst_block_indices: torch.Tensor, ) -> None: """Copy blocks from src_cache to dst_cache on XPU.""" _src_cache = src_cache[:, src_block_indices] dst_cache[:, dst_block_indices] = _src_cache.to(dst_cache.device) @classmethod def swap_out_blocks_to_host( cls, src_cache: torch.Tensor, dst_cache: torch.Tensor, src_block_indices: torch.Tensor, dst_block_indices: torch.Tensor, ) -> None: """Copy blocks from XPU to host (CPU).""" _src_cache = src_cache[:, src_block_indices] dst_cache[:, dst_block_indices] = _src_cache.cpu()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/platforms/cpu.py
vllm/platforms/cpu.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import glob import json import os import platform import subprocess import sys from dataclasses import dataclass from typing import TYPE_CHECKING import psutil import regex as re import torch from vllm import envs from vllm.attention.backends.registry import AttentionBackendEnum from vllm.logger import init_logger from .interface import CpuArchEnum, Platform, PlatformEnum logger = init_logger(__name__) if TYPE_CHECKING: from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig else: VllmConfig = None def get_max_threads(pid=0): if hasattr(os, "sched_getaffinity"): return len(os.sched_getaffinity(pid)) elif platform.system() == "Darwin": return os.cpu_count() else: raise NotImplementedError("Unsupported OS") @dataclass class LogicalCPUInfo: id: int = -1 physical_core: int = -1 numa_node: int = -1 @classmethod def _int(cls, value: str) -> int: try: int_value = int(value) except Exception: int_value = -1 return int_value @staticmethod def json_decoder(obj_dict: dict): id = obj_dict.get("cpu") physical_core = obj_dict.get("core") numa_node = obj_dict.get("node") if not (id is None or physical_core is None or numa_node is None): return LogicalCPUInfo( id=LogicalCPUInfo._int(id), physical_core=LogicalCPUInfo._int(physical_core), numa_node=LogicalCPUInfo._int(numa_node), ) else: return obj_dict class CpuPlatform(Platform): _enum = PlatformEnum.CPU device_name: str = "cpu" device_type: str = "cpu" dispatch_key: str = "CPU" dist_backend: str = "gloo" device_control_env_var = "CPU_VISIBLE_MEMORY_NODES" @property def supported_dtypes(self) -> list[torch.dtype]: if self.get_cpu_architecture() == CpuArchEnum.POWERPC: return [torch.bfloat16, torch.float32] elif self.get_cpu_architecture() == CpuArchEnum.ARM and sys.platform.startswith( "darwin" ): if ( subprocess.check_output( ["sysctl -n hw.optional.arm.FEAT_BF16"], shell=True ).strip() 
== b"1" ): return [torch.bfloat16, torch.float16, torch.float32] return [torch.float16, torch.float32] elif self.get_cpu_architecture() == CpuArchEnum.RISCV: # Workaround for Issue #25655: RISC-V scheduler bug with float16 # # Background: # - RISC-V currently uses scalar code path # - There is a latent bug in the vLLM scheduler that provides # invalid # physical_block_idx values under certain conditions # - This bug causes segmentation faults when using float16 # dtype on RISC-V # - Testing shows that forcing float32 successfully bypasses # this issue # # Technical details: # - The bug manifests as out-of-bounds physical_block_idx in # block_tables # - Only occurs on RISC-V hardware # tested on Sophgo SG2044 # - Does not reproduce on x86 or other architectures # - Root cause is in Python-level scheduling logic, # not C++ kernels # # This is a temporary workaround until the scheduler bug is fixed. # See: https://github.com/vllm-project/vllm/issues/25655 return [torch.float32] # x86/aarch64 CPU has supported both bf16 and fp16 natively. 
return [torch.bfloat16, torch.float16, torch.float32] @classmethod def get_device_name(cls, device_id: int = 0) -> str: return "cpu" @classmethod def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", attn_selector_config: "AttentionSelectorConfig", ) -> str: if selected_backend and selected_backend != AttentionBackendEnum.CPU_ATTN: logger.info("Cannot use %s backend on CPU.", selected_backend) if attn_selector_config.use_mla: raise NotImplementedError("MLA is not supported on CPU.") if attn_selector_config.use_sparse: raise NotImplementedError("Sparse Attention is not supported on CPU.") return AttentionBackendEnum.CPU_ATTN.get_path() @classmethod def get_device_total_memory(cls, device_id: int = 0) -> int: from vllm.utils.mem_constants import GiB_bytes kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE node_dir = "/sys/devices/system/node" if kv_cache_space is None: nodes = ( [d for d in os.listdir(node_dir) if d.startswith("node")] if os.path.exists(node_dir) else [] ) num_numa_nodes = len(nodes) or 1 free_cpu_memory = psutil.virtual_memory().total // num_numa_nodes DEFAULT_CPU_MEM_UTILIZATION = 0.5 kv_cache_space = int(free_cpu_memory * DEFAULT_CPU_MEM_UTILIZATION) kv_cache_space_gib = kv_cache_space / GiB_bytes logger.warning_once( "VLLM_CPU_KVCACHE_SPACE not set. Using " f"{kv_cache_space_gib:.2f} GiB for KV cache." ) else: kv_cache_space *= GiB_bytes return kv_cache_space @classmethod def set_device(cls, device: torch.device) -> None: """ Set the device for the current platform. 
""" torch.cpu.set_device(device) @classmethod def inference_mode(cls): return torch.no_grad() @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: model_config = vllm_config.model_config if model_config is not None: model_config.disable_cascade_attn = True cache_config = vllm_config.cache_config if cache_config.block_size is None: cache_config.block_size = 128 if cache_config.block_size % 32 != 0: logger.warning( "CPU backend prefers block_size is multiples of 32, " "otherwise the performance is not optimized." ) scheduler_config = vllm_config.scheduler_config # async scheduling is not required on CPU scheduler_config.async_scheduling = False if ( scheduler_config.enable_chunked_prefill or cache_config.enable_prefix_caching ) and cache_config.cache_dtype != "auto": raise RuntimeError( "Chunked-prefill and prefix-cache on the CPU " "backend is not compatible with FP8 KV cache." ) if cache_config.cache_dtype != "auto": logger.warning( "CPU backend doesn't support KV cache quantization fallback to auto." ) cache_config.cache_dtype = "auto" cache_config.cpu_kvcache_space_bytes = CpuPlatform.get_device_total_memory() parallel_config = vllm_config.parallel_config if ( parallel_config.world_size > 1 and parallel_config.distributed_executor_backend is not None and parallel_config.distributed_executor_backend != "mp" ): logger.warning( ( "%s is not supported on CPU, fallback to mp " "distributed executor backend." 
), parallel_config.distributed_executor_backend, ) parallel_config.distributed_executor_backend = "mp" if parallel_config.worker_cls == "auto": parallel_config.worker_cls = "vllm.v1.worker.cpu_worker.CPUWorker" # Disable DBO if parallel_config.enable_dbo: logger.warning("Dual-Batch Overlap is not supported on CPU, disabled.") parallel_config.enable_dbo = False # Note: workaround for v1 gpu_model_runner from vllm.config import CompilationMode vllm_config.compilation_config.cudagraph_capture_sizes = [] compilation_config = vllm_config.compilation_config if vllm_config.compilation_config.mode == CompilationMode.VLLM_COMPILE: # Note: vLLM V1 is using PIECEWISE level compilation, which will # take time to compile kernels just-in-time with the inductor # backend. For CPU CI tests, most of them are executed fast and # compilations consume too much time, even with torch compile # cache. So use VLLM_CPU_CI_ENV to indicate the CI environment, # and just execute model with dynamo + eager mode to save time. # VLLM_CPU_CI_ENV is only used as an internal variable. if os.environ.get("VLLM_CPU_CI_ENV", "0") != "0": backend = "eager" else: backend = "inductor" compilation_config.mode = CompilationMode.DYNAMO_TRACE_ONCE compilation_config.backend = backend compilation_config.inductor_compile_config.update( { "dce": True, "size_asserts": False, "nan_asserts": False, "epilogue_fusion": True, } ) if vllm_config.lora_config is not None: compilation_config.mode = CompilationMode.NONE assert vllm_config.device_config.device_type == "cpu" # # Environment variables for CPU executor # os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" # Note: to avoid the error 'nthreads cannot be larger than environment # variable "NUMEXPR_MAX_THREADS" (64)'. 
os.environ["NUMEXPR_MAX_THREADS"] = str(get_max_threads()) if envs.VLLM_CPU_OMP_THREADS_BIND != "nobind": # Set default threads num for OpenMP parallel os.environ["OMP_NUM_THREADS"] = str(torch.get_num_threads()) else: # In this case, setting the OpenMP configuration via # OMP_NUM_THREADS is up to the user. logger.info("Disabling binding processes to CPU cores...") # Disable torch async compiling which won't work with daemonic processes os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" # Disable multi-stream for shared experts as no Stream on CPU os.environ["VLLM_DISABLE_SHARED_EXPERTS_STREAM"] = "1" # Intel OpenMP setting ld_preload_str = os.getenv("LD_PRELOAD", "") if "libiomp5.so" in ld_preload_str: # The time(milliseconds) that a thread should wait after # completing the execution of a parallel region, before sleeping. os.environ["KMP_BLOCKTIME"] = "1" # Prevents the CPU to run into low performance state os.environ["KMP_TPAUSE"] = "0" # Provides fine granularity parallelism os.environ["KMP_FORKJOIN_BARRIER_PATTERN"] = "dist,dist" os.environ["KMP_PLAIN_BARRIER_PATTERN"] = "dist,dist" os.environ["KMP_REDUCTION_BARRIER_PATTERN"] = "dist,dist" if ( platform.system() == "Linux" and Platform.get_cpu_architecture() in (CpuArchEnum.ARM, CpuArchEnum.POWERPC) and not ("libomp" in ld_preload_str or "libgomp" in ld_preload_str) ): # We need to LD_PRELOAD PyTorch's libgomp, otherwise only # one core will be properly utilized when we thread-bind # See: https://github.com/vllm-project/vllm/issues/27369 # TODO: Remove once: # https://github.com/pytorch/pytorch/issues/166087 is fixed # We need to find the location of PyTorch's libgomp torch_pkg = os.path.dirname(torch.__file__) site_root = os.path.dirname(torch_pkg) # Search both torch.libs and torch/lib - See: https://github.com/vllm-project/vllm/issues/30470 torch_libs_paths = [ os.path.join(site_root, "torch.libs"), os.path.join(torch_pkg, "lib"), ] pytorch_libgomp_so_candidates = [] for torch_libs in torch_libs_paths: 
pytorch_libgomp_so_candidates.extend( glob.glob(os.path.join(torch_libs, "libgomp*.so*")) ) if pytorch_libgomp_so_candidates: pytorch_libgomp_so = pytorch_libgomp_so_candidates[0] if ld_preload_str: ld_preload_str += ":" ld_preload_str += pytorch_libgomp_so os.environ["LD_PRELOAD"] = ld_preload_str # To hint IPEX uses shared memory based AllReduce os.environ["LOCAL_WORLD_SIZE"] = str( vllm_config.parallel_config.tensor_parallel_size ) if model_config is not None and model_config.use_mla: logger.info( "MLA is enabled on a non-GPU platform; forcing chunked " "prefill and prefix caching to be disabled." ) vllm_config.scheduler_config.enable_chunked_prefill = False vllm_config.scheduler_config.max_num_batched_tokens = max( vllm_config.model_config.max_model_len, vllm_config.scheduler_config.DEFAULT_MAX_NUM_BATCHED_TOKENS, ) @classmethod def get_allowed_cpu_core_node_list(cls) -> tuple[list[int], list[LogicalCPUInfo]]: assert platform.system() == "Linux" # Init LogicalCPUInfo from lscpu lscpu_output = subprocess.check_output( "lscpu -J -e=CPU,CORE,NODE", shell=True, text=True ) lscpu_output = re.sub(r'"node":\s*-\s*(,|\n)', r'"node": 0\1', lscpu_output) logical_cpu_list: list[LogicalCPUInfo] = json.loads( lscpu_output, object_hook=LogicalCPUInfo.json_decoder )["cpus"] # Filter CPUs with invalid attributes logical_cpu_list = [ x for x in logical_cpu_list if -1 not in (x.id, x.physical_core, x.numa_node) ] # Filter allowed CPUs if hasattr(os, "sched_getaffinity"): allowed_cpu_id_list = os.sched_getaffinity(0) else: raise NotImplementedError("Unsupported OS") logical_cpu_list = [x for x in logical_cpu_list if x.id in allowed_cpu_id_list] # Get allowed NUMA nodes allowed_numa_nodes = set() for x in logical_cpu_list: allowed_numa_nodes.add(x.numa_node) # type: ignore allowed_numa_nodes_list = sorted(allowed_numa_nodes) env_key = CpuPlatform.device_control_env_var if env_key in os.environ and os.environ[env_key] != "": visible_nodes = [int(s) for s in 
os.environ[env_key].split(",")] allowed_numa_nodes_list = [ x for x in sorted(list(set(visible_nodes))) if x in allowed_numa_nodes ] return allowed_numa_nodes_list, logical_cpu_list @classmethod def is_pin_memory_available(cls) -> bool: return False @classmethod def get_punica_wrapper(cls) -> str: return "vllm.lora.punica_wrapper.punica_cpu.PunicaWrapperCPU" @classmethod def get_device_communicator_cls(cls) -> str: """ Get device specific communicator class for distributed communication. """ return "vllm.distributed.device_communicators.cpu_communicator.CpuCommunicator" # noqa @classmethod def supports_structured_output(cls) -> bool: return True @classmethod def opaque_attention_op(cls) -> bool: return True @classmethod def support_hybrid_kv_cache(cls) -> bool: return True
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/platforms/interface.py
vllm/platforms/interface.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import contextlib import enum import os import platform import random import sys from datetime import timedelta from typing import TYPE_CHECKING, Any, NamedTuple, Optional import numpy as np import torch from vllm.attention.backends.registry import AttentionBackendEnum from vllm.logger import init_logger if TYPE_CHECKING: from torch.distributed import PrefixStore, ProcessGroup from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig from vllm.inputs import ProcessorInputs, PromptType from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams from vllm.utils.argparse_utils import FlexibleArgumentParser else: FlexibleArgumentParser = object logger = init_logger(__name__) def in_wsl() -> bool: # Reference: https://github.com/microsoft/WSL/issues/4071 return "microsoft" in " ".join(platform.uname()).lower() class PlatformEnum(enum.Enum): CUDA = enum.auto() ROCM = enum.auto() TPU = enum.auto() XPU = enum.auto() CPU = enum.auto() OOT = enum.auto() UNSPECIFIED = enum.auto() class CpuArchEnum(enum.Enum): X86 = enum.auto() ARM = enum.auto() POWERPC = enum.auto() S390X = enum.auto() RISCV = enum.auto() OTHER = enum.auto() UNKNOWN = enum.auto() class DeviceCapability(NamedTuple): major: int minor: int def __lt__(self, other: Any) -> bool: if not isinstance(other, DeviceCapability): return NotImplemented return (self.major, self.minor) < (other.major, other.minor) def __le__(self, other: Any) -> bool: if not isinstance(other, DeviceCapability): return NotImplemented return (self.major, self.minor) <= (other.major, other.minor) def __eq__(self, other: Any) -> bool: if not isinstance(other, DeviceCapability): return NotImplemented return (self.major, self.minor) == (other.major, other.minor) def __ge__(self, other: Any) -> bool: if not isinstance(other, DeviceCapability): return NotImplemented return 
(self.major, self.minor) >= (other.major, other.minor) def __gt__(self, other: Any) -> bool: if not isinstance(other, DeviceCapability): return NotImplemented return (self.major, self.minor) > (other.major, other.minor) def as_version_str(self) -> str: return f"{self.major}.{self.minor}" def to_int(self) -> int: """ Express device capability as an integer `<major><minor>`. It is assumed that the minor version is always a single digit. """ assert 0 <= self.minor < 10 return self.major * 10 + self.minor class Platform: _enum: PlatformEnum device_name: str device_type: str # available dispatch keys: # check https://github.com/pytorch/pytorch/blob/313dac6c1ca0fa0cde32477509cce32089f8532a/torchgen/model.py#L134 # noqa # use "CPU" as a fallback for platforms not registered in PyTorch dispatch_key: str = "CPU" # available ray device keys: # https://github.com/ray-project/ray/blob/10ba5adadcc49c60af2c358a33bb943fb491a171/python/ray/_private/ray_constants.py#L438 # noqa # empty string means the device does not support ray ray_device_key: str = "" # platform-agnostic way to specify the device control environment variable, # .e.g. CUDA_VISIBLE_DEVICES for CUDA. # hint: search for "get_visible_accelerator_ids_env_var" in # https://github.com/ray-project/ray/tree/master/python/ray/_private/accelerators # noqa device_control_env_var: str = "VLLM_DEVICE_CONTROL_ENV_VAR_PLACEHOLDER" # The torch.compile backend for compiling simple and # standalone functions. The default value is "inductor" to keep # the same behavior as PyTorch. # NOTE: for the forward part of the model, vLLM has another separate # compilation strategy. simple_compile_backend: str = "inductor" # The backend used for distributed communication. 
dist_backend: str = "" supported_quantization: list[str] = [] additional_env_vars: list[str] = [] _global_graph_pool: Any | None = None @property def pass_key(self) -> str: """Inductor config key for the PassManager custom pass""" return "post_grad_custom_post_pass" @property def supported_dtypes(self) -> list[torch.dtype]: """Returns the supported dtypes for the current platform.""" # Be careful with the order of the dtypes. The first dtype will # be used as the default dtype fallback for the current platform, # when encountering unsupported dtypes in "auto" dtype. return [torch.bfloat16, torch.float16, torch.float32] def is_cuda(self) -> bool: return self._enum == PlatformEnum.CUDA def is_rocm(self) -> bool: return self._enum == PlatformEnum.ROCM def is_tpu(self) -> bool: return self._enum == PlatformEnum.TPU def is_xpu(self) -> bool: return self._enum == PlatformEnum.XPU def is_cpu(self) -> bool: return self._enum == PlatformEnum.CPU def is_out_of_tree(self) -> bool: return self._enum == PlatformEnum.OOT def is_unspecified(self) -> bool: return self._enum == PlatformEnum.UNSPECIFIED def get_max_output_tokens(self, prompt_len: int) -> int: return sys.maxsize def is_cuda_alike(self) -> bool: """Stateless version of [torch.cuda.is_available][].""" return self._enum in (PlatformEnum.CUDA, PlatformEnum.ROCM) def is_sleep_mode_available(self) -> bool: # TODO: Actually only mi3xx has the sleep mode support now # for ROCm, but currently we don't have a way to detect the # exact GPU model statelessly here. So we return True for # all ROCm platforms for now. return self._enum in (PlatformEnum.CUDA, PlatformEnum.ROCM) @classmethod def get_pass_manager_cls(cls) -> str: """ Get the pass manager class for this platform. It will be registered as a custom pass under the current_platform.pass_key. """ return "vllm.compilation.pass_manager.PostGradPassManager" @classmethod def get_compile_backend(cls) -> str: """ Get the custom compile backend for current platform. 
""" return cls.simple_compile_backend @classmethod def device_id_to_physical_device_id(cls, device_id: int): # Treat empty device control env var as unset. This is a valid # configuration in Ray setups where the engine is launched in # a CPU-only placement group located on a GPU node. if ( cls.device_control_env_var in os.environ and os.environ[cls.device_control_env_var] != "" ): device_ids = os.environ[cls.device_control_env_var].split(",") physical_device_id = device_ids[device_id] return int(physical_device_id) else: return device_id @classmethod def import_kernels(cls) -> None: """Import any platform-specific C kernels.""" try: import vllm._C # noqa: F401 except ImportError as e: logger.warning("Failed to import from vllm._C: %r", e) with contextlib.suppress(ImportError): import vllm._moe_C # noqa: F401 @classmethod def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", attn_selector_config: "AttentionSelectorConfig", ) -> str: """Get the attention backend class of a device.""" return "" @classmethod def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: return [ AttentionBackendEnum.TORCH_SDPA, ] @classmethod def get_vit_attn_backend( cls, head_size: int, dtype: torch.dtype, backend: Optional["AttentionBackendEnum"] = None, ) -> "AttentionBackendEnum": """ Get the vision attention backend class of a device. NOTE: ViT Attention should be checked and override in the platform-specific implementation. we should not override this in any other places, like the model_executor/models/<model_name>.py. We check if the backend is None or not: 1. If not, check if the backend is supported by the platform. 2. If None, continue to the default selection logic. 
""" if backend is not None: assert backend in cls.get_supported_vit_attn_backends(), ( f"Backend {backend} is not supported for vit attention" f"Supported backends are: {cls.get_supported_vit_attn_backends()}" ) logger.info_once(f"Using backend {backend} for vit attention") return backend logger.info_once( f"Using default backend {AttentionBackendEnum.TORCH_SDPA} for vit attention" ) return AttentionBackendEnum.TORCH_SDPA @classmethod def get_device_capability( cls, device_id: int = 0, ) -> DeviceCapability | None: """Stateless version of [torch.cuda.get_device_capability][].""" return None @classmethod def has_device_capability( cls, capability: tuple[int, int] | int, device_id: int = 0, ) -> bool: """ Test whether this platform is compatible with a device capability. The `capability` argument can either be: - A tuple `(major, minor)`. - An integer `<major><minor>`. (See [`DeviceCapability.to_int`][vllm.platforms.interface.DeviceCapability.to_int]) """ current_capability = cls.get_device_capability(device_id=device_id) if current_capability is None: return False if isinstance(capability, tuple): return current_capability >= capability return current_capability.to_int() >= capability @classmethod def is_device_capability( cls, capability: tuple[int, int] | int, device_id: int = 0, ) -> bool: """ Test whether this platform has exactly the specified device capability. The `capability` argument can either be: - A tuple `(major, minor)`. - An integer `<major><minor>`. (See [`DeviceCapability.to_int`][vllm.platforms.interface.DeviceCapability.to_int]) """ current_capability = cls.get_device_capability(device_id=device_id) if current_capability is None: return False if isinstance(capability, tuple): return current_capability == capability return current_capability.to_int() == capability @classmethod def is_device_capability_family( cls, capability: int, device_id: int = 0, ) -> bool: """ Returns True if the device capability is any <major>.x. 
Mirrors CUDA 13 'family' architecture semantics (e.g. 10.x, 11.x, 12.x). """ current_capability = cls.get_device_capability(device_id=device_id) if current_capability is None: return False return (current_capability.to_int() // 10) == (capability // 10) @classmethod def get_device_name(cls, device_id: int = 0) -> str: """Get the name of a device.""" raise NotImplementedError @classmethod def get_device_uuid(cls, device_id: int = 0) -> str: """Get the uuid of a device, e.g. the PCI bus ID.""" raise NotImplementedError @classmethod def get_device_total_memory(cls, device_id: int = 0) -> int: """Get the total memory of a device in bytes.""" raise NotImplementedError @classmethod def inference_mode(cls): """A device-specific wrapper of `torch.inference_mode`. This wrapper is recommended because some hardware backends such as TPU do not support `torch.inference_mode`. In such a case, they will fall back to `torch.no_grad` by overriding this method. """ return torch.inference_mode(mode=True) @classmethod def seed_everything(cls, seed: int | None = None) -> None: """ Set the seed of each random module. `torch.manual_seed` will set seed on all devices. Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20 """ if seed is not None: random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) @classmethod def set_device(cls, device: torch.device) -> None: """ Set the device for the current platform. """ raise NotImplementedError @classmethod def pre_register_and_update( cls, parser: FlexibleArgumentParser | None = None ) -> None: """ Do some pre-registration or update action for the current platform. This function is called before global VllmConfig is initialized or cli arguments are parsed. It's used for out-of-tree platforms to register or update the configuration. For example, the out-of-tree quantization config can be imported and registered here dynamically. 
""" pass @classmethod def check_and_update_config(cls, vllm_config: "VllmConfig") -> None: """ Check and update the configuration for the current platform. It can raise an exception if the configuration is not compatible with the current platform, or it can update the configuration to make it compatible with the current platform. The config is passed by reference, so it can be modified in place. """ pass @classmethod def verify_model_arch(cls, model_arch: str) -> None: """ Verify whether the current platform supports the specified model architecture. - This will raise an Error or Warning based on the model support on the current platform. - By default all models are considered supported. """ pass @classmethod def verify_quantization(cls, quant: str) -> None: """ Verify whether the quantization is supported by the current platform. """ if cls.supported_quantization and quant not in cls.supported_quantization: raise ValueError( f"{quant} quantization is currently not supported in {cls.device_name}." ) @classmethod def get_cpu_architecture(cls) -> CpuArchEnum: """ Determine the CPU architecture of the current system. Returns CpuArchEnum indicating the architecture type. """ machine = platform.machine().lower() if machine in ("x86_64", "amd64", "i386", "i686"): return CpuArchEnum.X86 elif machine.startswith("arm") or machine.startswith("aarch"): return CpuArchEnum.ARM elif machine.startswith("ppc"): return CpuArchEnum.POWERPC elif machine == "s390x": return CpuArchEnum.S390X elif machine.startswith("riscv"): return CpuArchEnum.RISCV return CpuArchEnum.OTHER if machine else CpuArchEnum.UNKNOWN @classmethod def is_pin_memory_available(cls) -> bool: """Checks whether pin memory is available on the current platform.""" if in_wsl(): # Pinning memory in WSL is not supported. # https://docs.nvidia.com/cuda/wsl-user-guide/index.html#known-limitations-for-linux-cuda-applications logger.warning( "Using 'pin_memory=False' as WSL is detected. 
" "This may slow down the performance." ) return False return True @classmethod def get_current_memory_usage( cls, device: torch.types.Device | None = None ) -> float: """ Return the memory usage in bytes. """ raise NotImplementedError @classmethod def get_punica_wrapper(cls) -> str: """ Return the punica wrapper for current platform. """ raise NotImplementedError @classmethod def get_infinity_values(cls, dtype: torch.dtype) -> tuple[float, float]: """ Return the platform specific values for (-inf, inf) """ return float("-inf"), float("inf") @classmethod def can_update_inplace(cls) -> bool: """ Checks if the platform allows inplace memory updates """ return True @classmethod def get_lora_vocab_padding_size(cls) -> int: """ Returns how much padding the LoRA logits need for kernels """ return 256 @classmethod def get_device_communicator_cls(cls) -> str: """ Get device specific communicator class for distributed communication. """ return "vllm.distributed.device_communicators.base_device_communicator.DeviceCommunicatorBase" # noqa @classmethod def supports_mx(cls) -> bool: """ Returns whether the current platform supports MX types. """ return False @classmethod def supports_fp8(cls) -> bool: """ Returns whether the current platform supports FP8 types. """ return False @classmethod def is_fp8_fnuz(cls) -> bool: """ Returns whether the preferred FP8 type is FNUZ on the current platform. There are two representations of FP8, OCP FP8 and FNUZ FP8. The OCP specification can be found at https://tinyurl.com/b7jvwpft. The FNUZ specification can be found at https://tinyurl.com/5n6hwwu5. AMD's MI300 and MI325 have native hardware support for FNUZ. All other hardware has converged on the OCP FP8 standard. """ return False @classmethod def fp8_dtype(cls) -> torch.dtype: """ Returns the preferred FP8 type on the current platform. See the documentation for is_fp8_fnuz for details. 
""" return torch.float8_e4m3fn @classmethod def use_all_gather(cls) -> bool: """ Whether to use allgather in LogitsProcessor to gather the logits. """ return True @classmethod def use_custom_allreduce(cls) -> bool: """ Returns if custom allreduce is supported on the current platform """ return False @classmethod def opaque_attention_op(cls) -> bool: """ Returns True if we register attention as one giant opaque custom op on the current platform """ return False @classmethod def validate_request( cls, prompt: "PromptType", params: "SamplingParams | PoolingParams", processed_inputs: "ProcessorInputs", ) -> None: """Raises if this request is unsupported on this platform""" def __getattr__(self, key: str): device = getattr(torch, self.device_type, None) if device is not None and hasattr(device, key): return getattr(device, key) else: logger.warning( "Current platform %s does not have '%s' attribute.", self.device_type, key, ) return None def get_global_graph_pool(self) -> Any: """ Return the global graph pool for this platform. """ cls = self.__class__ if cls._global_graph_pool is None: cls._global_graph_pool = self.graph_pool_handle() return cls._global_graph_pool @classmethod def get_static_graph_wrapper_cls(cls) -> str: """ Get static graph wrapper class for static graph. """ return "vllm.compilation.base_static_graph.AbstractStaticGraphWrapper" @classmethod def stateless_init_device_torch_dist_pg( cls, backend: str, prefix_store: "PrefixStore", group_rank: int, group_size: int, timeout: timedelta, ) -> "ProcessGroup": """ Init platform-specific torch distributed process group. """ raise NotImplementedError @classmethod def check_if_supports_dtype(cls, dtype: torch.dtype): """ Check if the dtype is supported by the current platform. """ raise NotImplementedError @classmethod def support_hybrid_kv_cache(cls) -> bool: """ Returns if the hybrid kv cache is supported by the current platform. 
""" return False @classmethod def support_static_graph_mode(cls) -> bool: """ Returns if the graph mode is supported by the current platform. """ return False @classmethod def use_sync_weight_loader(cls) -> bool: """ Returns if the current platform needs to sync weight loader. """ return False @classmethod def make_synced_weight_loader(cls, original_weight_loader): """ Wrap the original weight loader to make it synced. """ if not cls.use_sync_weight_loader(): return original_weight_loader def _synced_weight_loader(param, *args, **kwargs): out = original_weight_loader(param, *args, **kwargs) if param.device != torch.device("cpu"): torch._sync(param) return out return _synced_weight_loader @classmethod def get_nixl_supported_devices(cls) -> dict[str, tuple[str, ...]]: """ Returns a mapping from device_type to a tuple of supported kv_buffer_device for nixl. """ return {} @classmethod def get_nixl_memory_type(cls) -> str | None: """ Returns the nixl memory type for the current platform. """ return None @classmethod def check_max_model_len(cls, max_model_len: int) -> int: """ Check max_model_len for the current platform. """ return max_model_len class UnspecifiedPlatform(Platform): _enum = PlatformEnum.UNSPECIFIED device_type = ""
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/platforms/tpu.py
vllm/platforms/tpu.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import contextlib from typing import TYPE_CHECKING, Optional, cast import torch from tpu_info import device from vllm.attention.backends.registry import AttentionBackendEnum from vllm.inputs import ProcessorInputs, PromptType from vllm.logger import init_logger from .interface import Platform, PlatformEnum if TYPE_CHECKING: from typing import TypeAlias from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig from vllm.config.cache import BlockSize from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams ParamsType: TypeAlias = SamplingParams | PoolingParams else: BlockSize = None VllmConfig = None PoolingParams = None ParamsType = None logger = init_logger(__name__) USE_TPU_INFERENCE = False class TpuPlatform(Platform): _enum = PlatformEnum.TPU device_name: str = "tpu" device_type: str = "tpu" dispatch_key: str = "XLA" ray_device_key: str = "TPU" dist_backend: str = "gloo" device_control_env_var: str = "TPU_VISIBLE_CHIPS" simple_compile_backend: str = "openxla" supported_quantization: list[str] = ["fp8", "tpu_int8", "compressed-tensors"] additional_env_vars: list[str] = ["TPU_CHIPS_PER_HOST_BOUNDS", "TPU_HOST_BOUNDS"] @classmethod def import_kernels(cls) -> None: # Do not import vllm._C with contextlib.suppress(ImportError): import vllm._moe_C # noqa: F401 @classmethod def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", attn_selector_config: "AttentionSelectorConfig", ) -> str: if attn_selector_config.use_sparse: raise NotImplementedError("Sparse Attention is not supported on TPU.") if selected_backend != AttentionBackendEnum.PALLAS: logger.info("Cannot use %s backend on TPU.", selected_backend) logger.info("Using Pallas V1 backend.") return AttentionBackendEnum.PALLAS.get_path() @classmethod def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: 
return [ AttentionBackendEnum.PALLAS, ] @classmethod def get_vit_attn_backend( cls, head_size: int, dtype: torch.dtype, backend: Optional["AttentionBackendEnum"] = None, ) -> "AttentionBackendEnum": if backend is not None: assert backend in cls.get_supported_vit_attn_backends(), ( f"Backend {backend} is not supported for vit attention" f"Supported backends are: {cls.get_supported_vit_attn_backends()}." ) logger.info_once(f"Using backend {backend} for vit attention.") return backend logger.info_once( f"Using default backend {AttentionBackendEnum.PALLAS} for vit attention." ) return AttentionBackendEnum.PALLAS @classmethod def set_device(cls, device: torch.device) -> None: """ Set the device for the current platform. """ torch.tpu.set_device(device) @classmethod def get_device_name(cls, device_id: int = 0) -> str: chip_type, _ = device.get_local_chips() return f"TPU {chip_type.name}" @classmethod def get_device_total_memory(cls, device_id: int = 0) -> int: raise NotImplementedError @classmethod def get_punica_wrapper(cls) -> str: return "vllm.lora.punica_wrapper.punica_tpu.PunicaWrapperTPU" @classmethod def get_infinity_values(cls, dtype: torch.dtype) -> tuple[float, float]: return torch.finfo(dtype).min, torch.finfo(dtype).max @classmethod def can_update_inplace(cls): return False @classmethod def get_lora_vocab_padding_size(cls) -> int: return 1 @classmethod def inference_mode(cls): return torch.no_grad() @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: from vllm.config import CompilationMode, CUDAGraphMode cache_config = vllm_config.cache_config # For v0, the default block size is 16. 
if cache_config and cache_config.block_size is None: cache_config.block_size = cast(BlockSize, 16) compilation_config = vllm_config.compilation_config # TPU only supports DYNAMO_TRACE_ONCE compilation mode if compilation_config.mode != CompilationMode.DYNAMO_TRACE_ONCE: logger.info( "[TPU] Forcing DYNAMO_TRACE_ONCE compilation mode, and\ disabling cudagraph." ) compilation_config.mode = CompilationMode.DYNAMO_TRACE_ONCE if ( compilation_config.cudagraph_mode is None or compilation_config.cudagraph_mode.max_cudagraph_mode() != CUDAGraphMode.NONE ): logger.info( "[TPU] CUDA graph is not supported on TPU, disabling cudagraphs." ) compilation_config.cudagraph_mode = CUDAGraphMode.NONE if compilation_config.backend == "": compilation_config.backend = "openxla" assert vllm_config.speculative_config is None, ( "TPU does not support speculative decoding" ) model_config = vllm_config.model_config if model_config is not None and model_config.dtype in ( torch.float16, torch.float32, ): logger.warning( "The TPU backend currently does not support %s. " "Using bfloat16 instead.", model_config.dtype, ) model_config.dtype = torch.bfloat16 from vllm.v1.attention.backends.pallas import PallasAttentionBackend cache_config.block_size = PallasAttentionBackend.get_page_size(vllm_config) # type: ignore[assignment] parallel_config = vllm_config.parallel_config scheduler_config = vllm_config.scheduler_config if parallel_config.worker_cls == "auto": parallel_config.worker_cls = "vllm.v1.worker.tpu_worker.TPUWorker" assert not vllm_config.speculative_config, ( "Speculative decoding is not yet supported for TPU backend" ) if ( scheduler_config.is_multimodal_model and not scheduler_config.disable_chunked_mm_input ): logger.warning( "TPU does not support running Multimodal models" " without setting `--disable_chunked_mm_input`. " "Forcing --disable_chunked_mm_input." 
) scheduler_config.disable_chunked_mm_input = True if model_config and model_config.use_mla: logger.info( "MLA is enabled on a non-GPU platform; forcing chunked " "prefill and prefix caching to be disabled." ) vllm_config.scheduler_config.enable_chunked_prefill = False vllm_config.scheduler_config.max_num_batched_tokens = max( vllm_config.model_config.max_model_len, vllm_config.scheduler_config.DEFAULT_MAX_NUM_BATCHED_TOKENS, ) @classmethod def is_pin_memory_available(cls): logger.warning("Pin memory is not supported on TPU.") return False @classmethod def get_device_communicator_cls(cls) -> str: return "vllm.distributed.device_communicators.tpu_communicator.TpuCommunicator" # noqa @classmethod def validate_request( cls, prompt: PromptType, params: ParamsType, processed_inputs: ProcessorInputs, ) -> None: """Raises if this request is unsupported on this platform""" from vllm.sampling_params import SamplingParams, SamplingType if ( isinstance(params, SamplingParams) and params.sampling_type == SamplingType.RANDOM_SEED ): raise ValueError("Torch XLA does not support per-request seed.") @classmethod @torch.compile(backend="openxla") def insert_blocks_to_device( cls, src_cache: torch.Tensor, dst_cache: torch.Tensor, src_block_indices: torch.Tensor, dst_block_indices: torch.Tensor, ) -> None: torch.ops.xla.dynamo_set_buffer_donor_(dst_cache, True) dst_cache[dst_block_indices] = src_cache[src_block_indices].to(dst_cache.device) @classmethod @torch.compile(backend="openxla") def swap_out_blocks_to_host( cls, src_cache: torch.Tensor, dst_cache: torch.Tensor, src_block_indices: torch.Tensor, dst_block_indices: torch.Tensor, ) -> None: """tpu blocks to cpu blocks""" torch.ops.xla.dynamo_set_buffer_donor_(src_cache, True) dst_cache[dst_block_indices] = src_cache[src_block_indices].cpu() @classmethod def use_sync_weight_loader(cls) -> bool: return True @classmethod def check_max_model_len(cls, max_model_len: int) -> int: """ Check max_model_len for the current platform. 
""" logger.warning( "--max-model-len is not specified, " "it's currently using model's default length %d, " "which might be too large." "Please input with --max-model-len based on your " "request input length and output length, to avoid " "unnecessary degradation.", max_model_len, ) return max_model_len try: from tpu_inference.platforms import ( TpuPlatform as TpuInferencePlatform, ) TpuPlatform = TpuInferencePlatform # type: ignore USE_TPU_INFERENCE = True except ImportError: logger.info("tpu_inference not found, using vLLM's TpuPlatform") pass
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/platforms/rocm.py
vllm/platforms/rocm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os from functools import cache, lru_cache, wraps from typing import TYPE_CHECKING, Optional import torch import vllm.envs as envs from vllm.attention.backends.abstract import AttentionType from vllm.attention.backends.registry import AttentionBackendEnum from vllm.logger import init_logger from vllm.utils.torch_utils import cuda_device_count_stateless from .interface import DeviceCapability, Platform, PlatformEnum if TYPE_CHECKING: from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig logger = init_logger(__name__) try: from amdsmi import ( AmdSmiException, amdsmi_get_gpu_asic_info, amdsmi_get_processor_handles, amdsmi_init, amdsmi_shut_down, amdsmi_topo_get_link_type, ) except ImportError as e: logger.warning("Failed to import from amdsmi with %r", e) try: import vllm._C # noqa: F401 except ImportError as e: logger.warning("Failed to import from vllm._C with %r", e) # import custom ops, trigger op registration try: import vllm._rocm_C # noqa: F401 except ImportError as e: logger.warning("Failed to import from vllm._rocm_C with %r", e) # Models not supported by ROCm. _ROCM_UNSUPPORTED_MODELS: list[str] = [] # Models partially supported by ROCm. # Architecture -> Reason. 
_ROCM_SWA_REASON = () _ROCM_PARTIALLY_SUPPORTED_MODELS: dict[str, str] = {} _ROCM_DEVICE_ID_NAME_MAP: dict[str, str] = { "0x74a0": "AMD_Instinct_MI300A", "0x74a1": "AMD_Instinct_MI300X", "0x74b5": "AMD_Instinct_MI300X", # MI300X VF "0x74a2": "AMD_Instinct_MI308X", "0x74a5": "AMD_Instinct_MI325X", "0x74b9": "AMD_Instinct_MI325X", # MI325X VF "0x74a9": "AMD_Instinct_MI300X_HF", "0x74bd": "AMD_Instinct_MI300X_HF", "0x744c": "AMD_Radeon_RX7900XTX", } # Prevent use of clashing `{CUDA/HIP}_VISIBLE_DEVICES` if "HIP_VISIBLE_DEVICES" in os.environ: val = os.environ["HIP_VISIBLE_DEVICES"] if cuda_val := os.environ.get("CUDA_VISIBLE_DEVICES", None): assert val == cuda_val else: os.environ["CUDA_VISIBLE_DEVICES"] = val # AMDSMI utils # Note that NVML is not affected by `{CUDA/HIP}_VISIBLE_DEVICES`, # all the related functions work on real physical device ids. # the major benefit of using AMDSMI is that it will not initialize CUDA def with_amdsmi_context(fn): @wraps(fn) def wrapper(*args, **kwargs): amdsmi_init() try: return fn(*args, **kwargs) finally: amdsmi_shut_down() return wrapper @cache def on_gfx1x() -> bool: GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName return any(arch in GPU_ARCH for arch in ["gfx11", "gfx12"]) @cache def on_mi3xx() -> bool: GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName return any(arch in GPU_ARCH for arch in ["gfx942", "gfx950"]) @cache def on_gfx9() -> bool: GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName return any(arch in GPU_ARCH for arch in ["gfx90a", "gfx942", "gfx950"]) @cache def on_gfx950() -> bool: GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName return any(arch in GPU_ARCH for arch in ["gfx950"]) @cache def use_rocm_custom_paged_attention( qtype: torch.dtype, head_size: int, block_size: int, gqa_ratio: int, max_seq_len: int, sliding_window: int, kv_cache_dtype: str, alibi_slopes: torch.Tensor | None = None, sinks: torch.Tensor | None = None, ) -> bool: GPU_ARCH = 
torch.cuda.get_device_properties("cuda").gcnArchName ON_GFX9 = any(arch in GPU_ARCH for arch in ["gfx90a", "gfx942", "gfx950"]) ON_GFX11_GFX12 = any(arch in GPU_ARCH for arch in ["gfx11", "gfx12"]) # custom paged attn always supported on V0. On V1, requires sliding window # disabled due to observed numerical discrepancy. if ON_GFX9: return ( (sliding_window == 0 or sliding_window == (-1, -1)) and (qtype == torch.half or qtype == torch.bfloat16) and (head_size == 64 or head_size == 128) and (block_size == 16 or block_size == 32) and (gqa_ratio >= 1 and gqa_ratio <= 16) and max_seq_len <= 128 * 1024 and (envs.VLLM_ROCM_CUSTOM_PAGED_ATTN) and sinks is None ) else: return ( ON_GFX11_GFX12 and (sliding_window == 0 or sliding_window == (-1, -1)) and (qtype == torch.half or qtype == torch.bfloat16) and head_size == 128 and block_size == 16 and (gqa_ratio >= 3 and gqa_ratio <= 16) and max_seq_len <= 128 * 1024 and alibi_slopes is None and kv_cache_dtype == "auto" and envs.VLLM_ROCM_CUSTOM_PAGED_ATTN and sinks is None ) class RocmPlatform(Platform): _enum = PlatformEnum.ROCM device_name: str = "rocm" device_type: str = "cuda" dispatch_key: str = "CUDA" ray_device_key: str = "GPU" dist_backend: str = "nccl" # rocm shares the same device control env var as CUDA device_control_env_var: str = "CUDA_VISIBLE_DEVICES" supported_quantization: list[str] = [ "awq", "gptq", "fp8", "compressed-tensors", "fbgemm_fp8", "gguf", "quark", "ptpc_fp8", "mxfp4", "petit_nvfp4", "torchao", ] # bitsandbytes not supported on gfx9 (warp size 64 limitation) if not on_gfx9(): supported_quantization += ["bitsandbytes"] @classmethod def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", attn_selector_config: "AttentionSelectorConfig", ) -> str: from vllm._aiter_ops import rocm_aiter_ops block_size = attn_selector_config.block_size kv_cache_dtype = attn_selector_config.kv_cache_dtype if attn_selector_config.use_sparse: if kv_cache_dtype and kv_cache_dtype.startswith("fp8"): raise 
ValueError( "ROCMAiterMLASparseBackend doesn't support fp8 kv_cache_dtype." ) assert block_size == 1, ( "Sparse MLA backend on ROCm only supports block size 1 for now." ) logger.info_once("Using Sparse MLA backend.") return AttentionBackendEnum.ROCM_AITER_MLA_SPARSE.get_path() if attn_selector_config.use_mla: if selected_backend is None: selected_backend = ( AttentionBackendEnum.ROCM_AITER_MLA if rocm_aiter_ops.is_mla_enabled() or block_size == 1 else AttentionBackendEnum.TRITON_MLA ) if selected_backend == AttentionBackendEnum.TRITON_MLA: if block_size != 1: logger.info_once("Using Triton MLA backend.") return AttentionBackendEnum.TRITON_MLA.get_path() raise ValueError( f" The selected backend, {selected_backend.name}," f"does not support block size {block_size}." ) if selected_backend == AttentionBackendEnum.ROCM_AITER_MLA: logger.info("Using AITER MLA backend.") return AttentionBackendEnum.ROCM_AITER_MLA.get_path() if selected_backend == AttentionBackendEnum.ROCM_AITER_TRITON_MLA: logger.info("Using AITER TRITON MLA backend.") return AttentionBackendEnum.ROCM_AITER_TRITON_MLA.get_path() raise ValueError( f" The selected backend, {selected_backend.name}," f"is not MLA type while requested for MLA backend." ) if selected_backend == AttentionBackendEnum.FLEX_ATTENTION: logger.info("Using FlexAttention backend.") return AttentionBackendEnum.FLEX_ATTENTION.get_path() if selected_backend == AttentionBackendEnum.TRITON_ATTN: logger.info("Using Triton Attention backend.") return AttentionBackendEnum.TRITON_ATTN.get_path() if selected_backend == AttentionBackendEnum.ROCM_ATTN: logger.info("Using Rocm Attention backend.") return AttentionBackendEnum.ROCM_ATTN.get_path() if selected_backend == AttentionBackendEnum.ROCM_AITER_FA: if on_gfx9(): logger.info("Using Aiter Flash Attention backend.") return AttentionBackendEnum.ROCM_AITER_FA.get_path() else: raise ValueError( f"The selected backend, {selected_backend.name}, " "is only supported on gfx9 architectures." 
) if selected_backend == AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN: logger.info("Using Aiter Unified Attention backend.") return AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN.get_path() # Handle automatic backend selection based on environment variables if selected_backend is None: # Priority 1: Check for AITER Unified Attention (must check before MHA) if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION: logger.info("Using Aiter Unified Attention backend.") return AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN.get_path() # Priority 2: Check for AITER MHA (Flash Attention) # Only use if explicitly enabled (not just VLLM_ROCM_USE_AITER=1) if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA and on_gfx9(): logger.info("Using Aiter Flash Attention backend.") return AttentionBackendEnum.ROCM_AITER_FA.get_path() # Priority 3: Check for ROCM_ATTN (prefill-decode split) if envs.VLLM_V1_USE_PREFILL_DECODE_ATTENTION: logger.info("Using Rocm Attention backend.") return AttentionBackendEnum.ROCM_ATTN.get_path() # Priority 4: Check for AITER enabled without specific flags # This defaults to AITER FA only if MHA is not explicitly disabled if ( envs.VLLM_ROCM_USE_AITER and on_gfx9() and envs.VLLM_ROCM_USE_AITER_MHA is not False ): logger.info("Using Aiter Flash Attention backend.") return AttentionBackendEnum.ROCM_AITER_FA.get_path() # Priority 5: If model is Encoder-only self-attention type if ( attn_selector_config.attn_type is not None and attn_selector_config.attn_type == AttentionType.ENCODER_ONLY ): logger.info("Using FlexAttention backend.") return AttentionBackendEnum.FLEX_ATTENTION.get_path() # Default: Triton Unified Attention logger.info("Using Triton Attention backend.") return AttentionBackendEnum.TRITON_ATTN.get_path() raise RuntimeError( f"Attention backend {selected_backend.name} is not supported on " "ROCm. Note that V0 attention backends have been removed." 
) @classmethod def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: return [ AttentionBackendEnum.FLASH_ATTN, AttentionBackendEnum.ROCM_AITER_FA, AttentionBackendEnum.TORCH_SDPA, ] @classmethod def get_vit_attn_backend( cls, head_size: int, dtype: torch.dtype, backend: Optional["AttentionBackendEnum"] = None, ) -> "AttentionBackendEnum": if backend is not None: assert backend in cls.get_supported_vit_attn_backends(), ( f"Backend {backend} is not supported for vit attention. " f"Supported backends are: {cls.get_supported_vit_attn_backends()}" ) logger.info_once(f"Using backend {backend} for vit attention") return backend from importlib.util import find_spec from vllm._aiter_ops import rocm_aiter_ops if rocm_aiter_ops.is_enabled(): logger.info_once("Using AITER Flash Attention backend for ViT model.") return AttentionBackendEnum.ROCM_AITER_FA if ( on_gfx9() and find_spec("flash_attn") is not None and (dtype == torch.float16 or dtype == torch.bfloat16) ): logger.info_once("Using Flash Attention backend for ViT model.") return AttentionBackendEnum.FLASH_ATTN logger.info_once("Using Torch SDPA backend for ViT model.") return AttentionBackendEnum.TORCH_SDPA @classmethod def set_device(cls, device: torch.device) -> None: """ Set the device for the current platform. 
""" torch.cuda.set_device(device) @classmethod @lru_cache(maxsize=8) def get_device_capability(cls, device_id: int = 0) -> DeviceCapability | None: major, minor = torch.cuda.get_device_capability(device_id) return DeviceCapability(major=major, minor=minor) @classmethod @with_amdsmi_context def is_fully_connected(cls, physical_device_ids: list[int]) -> bool: """ Query if the set of gpus are fully connected by xgmi (1 hop) """ handles = [amdsmi_get_processor_handles()[i] for i in physical_device_ids] for i, handle in enumerate(handles): for j, peer_handle in enumerate(handles): if i < j: try: link_type = amdsmi_topo_get_link_type(handle, peer_handle) # type is 2 for XGMI if link_type["hops"] != 1 or link_type["type"] != 2: return False except AmdSmiException as error: logger.error("AMD 1 hop XGMI detection failed.", exc_info=error) return False return True @classmethod @with_amdsmi_context @lru_cache(maxsize=8) def get_device_name(cls, device_id: int = 0) -> str: physical_device_id = cls.device_id_to_physical_device_id(device_id) handle = amdsmi_get_processor_handles()[physical_device_id] asic_info = amdsmi_get_gpu_asic_info(handle) device_name: str = asic_info["device_id"] if device_name in _ROCM_DEVICE_ID_NAME_MAP: return _ROCM_DEVICE_ID_NAME_MAP[device_name] return asic_info["market_name"] @classmethod def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.cuda.get_device_properties(device_id) return device_props.total_memory @classmethod def check_and_update_config(cls, vllm_config: "VllmConfig") -> None: from vllm._aiter_ops import rocm_aiter_ops from vllm.config.compilation import CUDAGraphMode cache_config = vllm_config.cache_config compilation_config = vllm_config.compilation_config parallel_config = vllm_config.parallel_config is_eager_execution = compilation_config == CUDAGraphMode.NONE use_aiter_fused_moe = rocm_aiter_ops.is_fused_moe_enabled() use_aiter_rms_norm = rocm_aiter_ops.is_rmsnorm_enabled() use_aiter_fp8_linear = 
rocm_aiter_ops.is_linear_fp8_enabled() use_aiter_fused_se = rocm_aiter_ops.is_fusion_moe_shared_experts_enabled() if compilation_config.cudagraph_mode.has_full_cudagraphs(): # decode context parallel does not support full cudagraphs if parallel_config.decode_context_parallel_size > 1: logger.warning_once( "Decode context parallel (DCP) is enabled, which is " "incompatible with full CUDA graphs. " "Overriding cudagraph_mode to PIECEWISE." ) compilation_config.cudagraph_mode = CUDAGraphMode.PIECEWISE # prefill context parallel do not support full cudagraphs elif parallel_config.prefill_context_parallel_size > 1: logger.warning_once( "Prefill context parallel (PCP) is enabled, which is " "incompatible with full CUDA graphs. " "Overriding cudagraph_mode to PIECEWISE." ) compilation_config.cudagraph_mode = CUDAGraphMode.PIECEWISE if cache_config and cache_config.block_size is None: if ( envs.VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION and envs.VLLM_ROCM_USE_AITER # NOTE: This block has been deprecated # or get_env_variable_attn_backend() # == AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN # TODO: monitor https://github.com/vllm-project/vllm/pull/30396 # to see how we can transition to the new way of selecting # attention backends ): cache_config.block_size = 64 logger.warning( "[ROCM_AITER_UNIFIED_ATTN]: Setting kv cache block size to 64." ) else: cache_config.block_size = 16 if parallel_config.worker_cls == "auto": parallel_config.worker_cls = "vllm.v1.worker.gpu_worker.Worker" # Aiter rms norm perform best when CUDA Graph capture is enabled. 
if ( use_aiter_rms_norm and not is_eager_execution and "-rms_norm" not in compilation_config.custom_ops ): compilation_config.custom_ops.append("+rms_norm") if use_aiter_fp8_linear and "-quant_fp8" not in compilation_config.custom_ops: compilation_config.custom_ops.append("+quant_fp8") if use_aiter_fused_se and "-grouped_topk" in compilation_config.custom_ops: logger.warning_once( "VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS is enabled, which " "requires the 'grouped_topk' custom op. Overriding the " "user-provided '-grouped_topk'." ) compilation_config.custom_ops.remove("-grouped_topk") # Ensure grouped_topk is always enabled when using AITER if # its not disabled by user if ( use_aiter_fused_moe and "+grouped_topk" not in compilation_config.custom_ops and "-grouped_topk" not in compilation_config.custom_ops ): compilation_config.custom_ops.append("+grouped_topk") @classmethod def verify_model_arch(cls, model_arch: str) -> None: if model_arch in _ROCM_UNSUPPORTED_MODELS: raise ValueError( f"Model architecture '{model_arch}' is not supported by ROCm for now." ) if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS: msg = _ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch] logger.warning( "Model architecture '%s' is partially supported by ROCm: %s", model_arch, msg, ) @classmethod def verify_quantization(cls, quant: str) -> None: super().verify_quantization(quant) if quant == "awq" and not envs.VLLM_USE_TRITON_AWQ: logger.warning( "Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ" " is not set, enabling VLLM_USE_TRITON_AWQ." 
) os.environ["VLLM_USE_TRITON_AWQ"] = "1" @classmethod def get_punica_wrapper(cls) -> str: return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU" @classmethod def get_current_memory_usage( cls, device: torch.types.Device | None = None ) -> float: torch.cuda.reset_peak_memory_stats(device) return torch.cuda.mem_get_info(device)[1] - torch.cuda.mem_get_info(device)[0] @classmethod def get_device_communicator_cls(cls) -> str: return ( "vllm.distributed.device_communicators.cuda_communicator.CudaCommunicator" # noqa ) @classmethod def supports_mx(cls) -> bool: gcn_arch = torch.cuda.get_device_properties(0).gcnArchName return any(gfx in gcn_arch for gfx in ["gfx95"]) @classmethod def supports_fp8(cls) -> bool: gcn_arch = torch.cuda.get_device_properties(0).gcnArchName return any(gfx in gcn_arch for gfx in ["gfx94", "gfx95", "gfx12"]) @classmethod def is_fp8_fnuz(cls) -> bool: # only device 0 is checked, this assumes MI300 platforms are homogeneous return "gfx94" in torch.cuda.get_device_properties(0).gcnArchName @classmethod def fp8_dtype(cls) -> torch.dtype: if cls.is_fp8_fnuz(): return torch.float8_e4m3fnuz else: return torch.float8_e4m3fn @classmethod def use_custom_allreduce(cls) -> bool: # We only enable custom allreduce for MI300 series gcn_arch = torch.cuda.get_device_properties(0).gcnArchName supported_archs = ["gfx94", "gfx95"] return any(gfx in gcn_arch for gfx in supported_archs) @classmethod def opaque_attention_op(cls) -> bool: return True @classmethod def is_navi(cls) -> bool: return "gfx1" in torch.cuda.get_device_properties(0).gcnArchName @classmethod def get_static_graph_wrapper_cls(cls) -> str: return "vllm.compilation.cuda_graph.CUDAGraphWrapper" @classmethod def device_count(cls) -> int: return cuda_device_count_stateless() @classmethod def check_if_supports_dtype(cls, dtype: torch.dtype): if dtype == torch.bfloat16: # noqa: SIM102 if not cls.has_device_capability(80): capability = cls.get_device_capability() gpu_name = cls.get_device_name() 
if capability is None: compute_str = "does not have a compute capability" else: version_str = capability.as_version_str() compute_str = f"has compute capability {version_str}" raise ValueError( "Bfloat16 is only supported on GPUs " "with compute capability of at least 8.0. " f"Your {gpu_name} GPU {compute_str}. " "You can use float16 instead by explicitly setting the " "`dtype` flag in CLI, for example: --dtype=half." ) @classmethod def support_hybrid_kv_cache(cls) -> bool: return True @classmethod def support_static_graph_mode(cls) -> bool: return True
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/platforms/__init__.py
vllm/platforms/__init__.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import logging import traceback from itertools import chain from typing import TYPE_CHECKING from vllm import envs from vllm.plugins import PLATFORM_PLUGINS_GROUP, load_plugins_by_group from vllm.utils.import_utils import resolve_obj_by_qualname from vllm.utils.torch_utils import supports_xccl from .interface import CpuArchEnum, Platform, PlatformEnum logger = logging.getLogger(__name__) def vllm_version_matches_substr(substr: str) -> bool: """ Check to see if the vLLM version matches a substring. """ from importlib.metadata import PackageNotFoundError, version try: vllm_version = version("vllm") except PackageNotFoundError as e: logger.warning( "The vLLM package was not found, so its version could not be " "inspected. This may cause platform detection to fail." ) raise e return substr in vllm_version def tpu_platform_plugin() -> str | None: logger.debug("Checking if TPU platform is available.") # Check for Pathways TPU proxy if envs.VLLM_TPU_USING_PATHWAYS: logger.debug("Confirmed TPU platform is available via Pathways proxy.") return "tpu_inference.platforms.tpu_platform.TpuPlatform" # Check for libtpu installation try: # While it's technically possible to install libtpu on a # non-TPU machine, this is a very uncommon scenario. Therefore, # we assume that libtpu is installed only if the machine # has TPUs. import libtpu # noqa: F401 logger.debug("Confirmed TPU platform is available.") return "vllm.platforms.tpu.TpuPlatform" except Exception as e: logger.debug("TPU platform is not available because: %s", str(e)) return None def cuda_platform_plugin() -> str | None: is_cuda = False logger.debug("Checking if CUDA platform is available.") try: from vllm.utils.import_utils import import_pynvml pynvml = import_pynvml() pynvml.nvmlInit() try: # NOTE: Edge case: vllm cpu build on a GPU machine. 
# Third-party pynvml can be imported in cpu build, # we need to check if vllm is built with cpu too. # Otherwise, vllm will always activate cuda plugin # on a GPU machine, even if in a cpu build. is_cuda = ( pynvml.nvmlDeviceGetCount() > 0 and not vllm_version_matches_substr("cpu") ) if pynvml.nvmlDeviceGetCount() <= 0: logger.debug("CUDA platform is not available because no GPU is found.") if vllm_version_matches_substr("cpu"): logger.debug( "CUDA platform is not available because vLLM is built with CPU." ) if is_cuda: logger.debug("Confirmed CUDA platform is available.") finally: pynvml.nvmlShutdown() except Exception as e: logger.debug("Exception happens when checking CUDA platform: %s", str(e)) if "nvml" not in e.__class__.__name__.lower(): # If the error is not related to NVML, re-raise it. raise e # CUDA is supported on Jetson, but NVML may not be. import os def cuda_is_jetson() -> bool: return os.path.isfile("/etc/nv_tegra_release") or os.path.exists( "/sys/class/tegra-firmware" ) if cuda_is_jetson(): logger.debug("Confirmed CUDA platform is available on Jetson.") is_cuda = True else: logger.debug("CUDA platform is not available because: %s", str(e)) return "vllm.platforms.cuda.CudaPlatform" if is_cuda else None def rocm_platform_plugin() -> str | None: is_rocm = False logger.debug("Checking if ROCm platform is available.") try: import amdsmi amdsmi.amdsmi_init() try: if len(amdsmi.amdsmi_get_processor_handles()) > 0: is_rocm = True logger.debug("Confirmed ROCm platform is available.") else: logger.debug("ROCm platform is not available because no GPU is found.") finally: amdsmi.amdsmi_shut_down() except Exception as e: logger.debug("ROCm platform is not available because: %s", str(e)) return "vllm.platforms.rocm.RocmPlatform" if is_rocm else None def xpu_platform_plugin() -> str | None: is_xpu = False logger.debug("Checking if XPU platform is available.") try: # installed IPEX if the machine has XPUs. 
import intel_extension_for_pytorch # noqa: F401 import torch if supports_xccl(): dist_backend = "xccl" else: dist_backend = "ccl" import oneccl_bindings_for_pytorch # noqa: F401 if hasattr(torch, "xpu") and torch.xpu.is_available(): is_xpu = True from vllm.platforms.xpu import XPUPlatform XPUPlatform.dist_backend = dist_backend logger.debug("Confirmed %s backend is available.", XPUPlatform.dist_backend) logger.debug("Confirmed XPU platform is available.") except Exception as e: logger.debug("XPU platform is not available because: %s", str(e)) return "vllm.platforms.xpu.XPUPlatform" if is_xpu else None def cpu_platform_plugin() -> str | None: is_cpu = False logger.debug("Checking if CPU platform is available.") try: is_cpu = vllm_version_matches_substr("cpu") if is_cpu: logger.debug( "Confirmed CPU platform is available because vLLM is built with CPU." ) if not is_cpu: import sys is_cpu = sys.platform.startswith("darwin") if is_cpu: logger.debug( "Confirmed CPU platform is available because the machine is MacOS." 
) except Exception as e: logger.debug("CPU platform is not available because: %s", str(e)) return "vllm.platforms.cpu.CpuPlatform" if is_cpu else None builtin_platform_plugins = { "tpu": tpu_platform_plugin, "cuda": cuda_platform_plugin, "rocm": rocm_platform_plugin, "xpu": xpu_platform_plugin, "cpu": cpu_platform_plugin, } def resolve_current_platform_cls_qualname() -> str: platform_plugins = load_plugins_by_group(PLATFORM_PLUGINS_GROUP) activated_plugins = [] for name, func in chain(builtin_platform_plugins.items(), platform_plugins.items()): try: assert callable(func) platform_cls_qualname = func() if platform_cls_qualname is not None: activated_plugins.append(name) except Exception: pass activated_builtin_plugins = list( set(activated_plugins) & set(builtin_platform_plugins.keys()) ) activated_oot_plugins = list(set(activated_plugins) & set(platform_plugins.keys())) if len(activated_oot_plugins) >= 2: raise RuntimeError( "Only one platform plugin can be activated, but got: " f"{activated_oot_plugins}" ) elif len(activated_oot_plugins) == 1: platform_cls_qualname = platform_plugins[activated_oot_plugins[0]]() logger.info("Platform plugin %s is activated", activated_oot_plugins[0]) elif len(activated_builtin_plugins) >= 2: raise RuntimeError( "Only one platform plugin can be activated, but got: " f"{activated_builtin_plugins}" ) elif len(activated_builtin_plugins) == 1: platform_cls_qualname = builtin_platform_plugins[activated_builtin_plugins[0]]() logger.debug( "Automatically detected platform %s.", activated_builtin_plugins[0] ) else: platform_cls_qualname = "vllm.platforms.interface.UnspecifiedPlatform" logger.debug("No platform detected, vLLM is running on UnspecifiedPlatform") return platform_cls_qualname _current_platform = None _init_trace: str = "" if TYPE_CHECKING: current_platform: Platform def __getattr__(name: str): if name == "current_platform": # lazy init current_platform. # 1. 
out-of-tree platform plugins need `from vllm.platforms import # Platform` so that they can inherit `Platform` class. Therefore, # we cannot resolve `current_platform` during the import of # `vllm.platforms`. # 2. when users use out-of-tree platform plugins, they might run # `import vllm`, some vllm internal code might access # `current_platform` during the import, and we need to make sure # `current_platform` is only resolved after the plugins are loaded # (we have tests for this, if any developer violate this, they will # see the test failures). global _current_platform if _current_platform is None: platform_cls_qualname = resolve_current_platform_cls_qualname() _current_platform = resolve_obj_by_qualname(platform_cls_qualname)() global _init_trace _init_trace = "".join(traceback.format_stack()) return _current_platform elif name in globals(): return globals()[name] else: raise AttributeError(f"No attribute named '{name}' exists in {__name__}.") def __setattr__(name: str, value): if name == "current_platform": global _current_platform _current_platform = value elif name in globals(): globals()[name] = value else: raise AttributeError(f"No attribute named '{name}' exists in {__name__}.") __all__ = ["Platform", "PlatformEnum", "current_platform", "CpuArchEnum", "_init_trace"]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/distributed/tpu_distributed_utils.py
vllm/distributed/tpu_distributed_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections import OrderedDict from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F import torch_xla.distributed.spmd as xs from torch.nn.parameter import Parameter from vllm.logger import init_logger from vllm.model_executor.layers.linear import ( ColumnParallelLinear, QKVParallelLinear, RowParallelLinear, ) logger = init_logger(__name__) class XlaQKVParallelLinear(nn.Module): def __init__(self, qkv_linear: nn.Module, mesh: Optional["xs.Mesh"] = None): super().__init__() assert isinstance(qkv_linear, QKVParallelLinear) self.skip_bias_add = qkv_linear.skip_bias_add self.return_bias = qkv_linear.return_bias assert qkv_linear.tp_size == 1, "TP > 1 is only supported under SPMD." self.q_weight: Parameter self.k_weight: Parameter self.v_weight: Parameter self.q_bias: Parameter | None self.k_bias: Parameter | None self.v_bias: Parameter | None self._load_weights_from_qkv_linear(qkv_linear) if mesh is not None: self._shard_weight(mesh) def _shard_weight(self, mesh: "xs.Mesh"): self.q_weight = Parameter(self.q_weight.to("xla"), requires_grad=False) self.k_weight = Parameter(self.k_weight.to("xla"), requires_grad=False) self.v_weight = Parameter(self.v_weight.to("xla"), requires_grad=False) xs.mark_sharding(self.q_weight, mesh, ("x", None)) xs.mark_sharding(self.k_weight, mesh, ("x", None)) xs.mark_sharding(self.v_weight, mesh, ("x", None)) if self.q_bias is not None: assert self.k_bias is not None and self.v_bias is not None, ( "QKVParallelLinear should have q, k, and v biases together." 
) self.q_bias = Parameter(self.q_bias.to("xla"), requires_grad=False) xs.mark_sharding(self.q_bias, mesh, ("x",)) self.k_bias = Parameter(self.k_bias.to("xla"), requires_grad=False) xs.mark_sharding(self.k_bias, mesh, ("x",)) self.v_bias = Parameter(self.v_bias.to("xla"), requires_grad=False) xs.mark_sharding(self.v_bias, mesh, ("x",)) def _load_weights_from_qkv_linear(self, qkv_linear: nn.Module): q_proj_size, k_proj_size, _ = qkv_linear.output_sizes # The weight of qkv linear is a concatenation of q, k, and v weights # along the output dimension. qkv_weight = qkv_linear.weight.data.cpu() q_weight = Parameter(qkv_weight[:q_proj_size], requires_grad=False) k_weight = Parameter( qkv_weight[q_proj_size : q_proj_size + k_proj_size], requires_grad=False ) v_weight = Parameter( qkv_weight[q_proj_size + k_proj_size :], requires_grad=False ) self.register_parameter("q_weight", q_weight) self.register_parameter("k_weight", k_weight) self.register_parameter("v_weight", v_weight) if qkv_linear.bias is not None: q_bias = Parameter(qkv_linear.bias[:q_proj_size], requires_grad=False) k_bias = Parameter( qkv_linear.bias[q_proj_size : q_proj_size + k_proj_size], requires_grad=False, ) v_bias = Parameter( qkv_linear.bias[q_proj_size + k_proj_size :], requires_grad=False ) self.register_parameter("q_bias", q_bias) self.register_parameter("k_bias", k_bias) self.register_parameter("v_bias", v_bias) else: self.register_parameter("q_bias", None) self.register_parameter("k_bias", None) self.register_parameter("v_bias", None) def forward(self, input): # Same forward functionality as QKVParallelLinear, but doing qkv porj # separately. 
q_bias = self.q_bias if not self.skip_bias_add else None k_bias = self.k_bias if not self.skip_bias_add else None v_bias = self.v_bias if not self.skip_bias_add else None q_proj = F.linear(input, self.q_weight, q_bias) k_proj = F.linear(input, self.k_weight, k_bias) v_proj = F.linear(input, self.v_weight, v_bias) # The q/k/v projections will be split outside of the QKVParallelLinear. # Because we are replacing XlaQKVParallelLinear with the # QKVParallelLinear, we need to concatenate q, k, and v projections to # match the output shape of the QKVParallelLinear implementation even if # it seems to be redundant. # The concat and the following split will be noop, and should be # optimized away by the compiler. qkv_proj = torch.cat([q_proj, k_proj, v_proj], dim=-1) output_bias = ( torch.cat([q_bias, k_bias, v_bias], dim=-1) if self.skip_bias_add else None ) if not self.return_bias: return qkv_proj return qkv_proj, output_bias def partition_column_parallel_linear( layer: torch.nn.Module, mesh: xs.Mesh ) -> torch.nn.Module: assert isinstance(layer, ColumnParallelLinear) xs.mark_sharding(layer.weight, mesh, ("x", None)) logger.debug("Applied column-parallel sharding to %s", layer) return layer def partition_row_parallel_linear( layer: torch.nn.Module, mesh: xs.Mesh ) -> torch.nn.Module: assert isinstance(layer, RowParallelLinear) xs.mark_sharding(layer.weight, mesh, (None, "x")) logger.debug("Applied row-parallel sharding to %s", layer) return layer def partition_qkv_parallel_linear( layer: torch.nn.Module, mesh: xs.Mesh ) -> torch.nn.Module: assert isinstance(layer, QKVParallelLinear) xla_layer = XlaQKVParallelLinear(layer, mesh) logger.debug("Applied qkv parallel sharding to %s", layer) return xla_layer MODULE_TYPE_TO_WRAPPING_FUNC = OrderedDict( [ ("QKVParallelLinear", partition_qkv_parallel_linear), ("ColumnParallelLinear", partition_column_parallel_linear), ("RowParallelLinear", partition_row_parallel_linear), ] ) def get_fqn(module): # Get the fully qualified name of 
the module return module.__class__.__qualname__ def shard_model(model: torch.nn.Module, mesh: "xs.Mesh") -> None: """ Recursively check a PyTorch model and apply appropriate sharding based on the MODULE_TYPE_TO_WRAPPING_FUNC mapping. Args: model: torch.nn.Module to process mesh: An XLA SPMD mesh object used for sharding """ def _process_module(module, name=None, parent=None): for module_type, wrapping_func in MODULE_TYPE_TO_WRAPPING_FUNC.items(): if get_fqn(module) == module_type: wrapped_module = wrapping_func(module, mesh) assert parent is not None and name is not None, ( "Top Level module is not expected to be wrapped." ) if wrapped_module is not module: # Wrapped module and module are different py object. # The original module should be replaced by the # wrapped_module. logger.debug("replace %s with %s", module, wrapped_module) setattr(parent, name, wrapped_module) module = wrapped_module break for child_name, child_module in list(module.named_children()): _process_module(child_module, child_name, module) _process_module(model)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/distributed/kv_events.py
vllm/distributed/kv_events.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import queue import threading import time from abc import ABC, abstractmethod from collections import Counter, deque from collections.abc import Callable from dataclasses import asdict from itertools import count from queue import Queue from typing import Any import msgspec import zmq from vllm.config.kv_events import KVEventsConfig from vllm.logger import init_logger from vllm.v1.core.kv_cache_utils import ExternalBlockHash logger = init_logger(__name__) class EventBatch( msgspec.Struct, array_like=True, # type: ignore[call-arg] omit_defaults=True, # type: ignore[call-arg] gc=False, # type: ignore[call-arg] ): ts: float events: list[Any] data_parallel_rank: int | None = None class KVCacheEvent( msgspec.Struct, array_like=True, # type: ignore[call-arg] omit_defaults=True, # type: ignore[call-arg] gc=False, # type: ignore[call-arg] tag=True, ): """Base class for all KV cache-related events""" MEDIUM_GPU = "GPU" class BlockStored(KVCacheEvent): block_hashes: list[ExternalBlockHash] parent_block_hash: ExternalBlockHash | None token_ids: list[int] block_size: int lora_id: int | None """Deprecated: use `lora_name` for KV block key hash. Retained for backward compatibility. """ medium: str | None lora_name: str | None def __hash__(self) -> int: return hash( ( tuple(self.block_hashes), self.parent_block_hash, tuple(self.token_ids), self.block_size, self.lora_id, self.medium, ) ) class BlockRemoved(KVCacheEvent): block_hashes: list[ExternalBlockHash] medium: str | None def __hash__(self) -> int: return hash((tuple(self.block_hashes), self.medium)) class AllBlocksCleared(KVCacheEvent): pass class KVEventBatch(EventBatch): events: list[BlockStored | BlockRemoved | AllBlocksCleared] class KVEventAggregator: """ Aggregates KV events across multiple workers. Tracks how many times each event appears and returns only those that were emitted by all workers. 
""" __slots__ = ("_event_counter", "_num_workers") def __init__(self, num_workers: int) -> None: if num_workers <= 0: raise ValueError("num_workers must be greater than zero.") self._event_counter: Counter[KVCacheEvent] = Counter() self._num_workers: int = num_workers def add_events(self, events: list[KVCacheEvent]) -> None: """ Add events from a worker batch. :param events: List of KVCacheEvent objects. """ if not isinstance(events, list): raise TypeError("events must be a list of KVCacheEvent.") self._event_counter.update(events) def get_common_events(self) -> list[KVCacheEvent]: """ Return events that appeared in all workers. :return: List of events present in all workers. """ return [ event for event, count in self._event_counter.items() if count == self._num_workers ] def get_all_events(self) -> list[KVCacheEvent]: """ Return all events for all workers. :return: List of events for all workers. """ return list(self._event_counter.elements()) def clear_events(self) -> None: """ Clear all tracked events. """ self._event_counter.clear() def increment_workers(self, count: int = 1) -> None: """ Increment the number of workers contributing events. :param count: Number to increment the workers by. """ if count <= 0: raise ValueError("count must be positive.") self._num_workers += count def reset_workers(self) -> None: """ Reset the number of workers to 1. """ self._num_workers = 1 def get_number_of_workers(self) -> int: """ Return the number of workers. :return: int number of workers. """ return self._num_workers def __repr__(self) -> str: return ( f"<KVEventAggregator workers={self._num_workers}, " f"events={len(self._event_counter)}>" ) class KVConnectorKVEvents(ABC): """ Abstract base class for KV events. Acts as a container for KV events from the connector. 
""" @abstractmethod def add_events(self, events: list[KVCacheEvent]) -> None: raise NotImplementedError @abstractmethod def aggregate(self) -> "KVConnectorKVEvents": raise NotImplementedError @abstractmethod def increment_workers(self, count: int = 1) -> None: raise NotImplementedError @abstractmethod def get_all_events(self) -> list[KVCacheEvent]: raise NotImplementedError @abstractmethod def get_number_of_workers(self) -> int: raise NotImplementedError @abstractmethod def clear_events(self) -> None: raise NotImplementedError class EventPublisher(ABC): """Lightweight publisher for EventBatch batches with data parallelism support. In data parallel setups, each DP rank runs its own EventPublisher instance to avoid duplicate events and ensure proper event attribution: - Each DP rank creates a separate publisher - Publishers automatically annotate events with their data_parallel_rank - This allows consumers to distinguish events from different DP ranks The publisher is responsible for adding DP metadata since the scheduler operates independently of DP topology and shouldn't need DP awareness. """ def __init__(self, data_parallel_rank: int = 0) -> None: self._data_parallel_rank = data_parallel_rank @abstractmethod def publish(self, events: EventBatch) -> None: """Emit events in order. Implementations should guarantee at-least-once delivery and monotonic ordering (e.g., via sequence numbers). """ @abstractmethod def shutdown(self) -> None: """Shutdown the publisher.""" class NullEventPublisher(EventPublisher): """No-op implementation (default when disabled).""" def publish(self, events) -> None: return def shutdown(self) -> None: return class ZmqEventPublisher(EventPublisher): """Reliable PUB/ROUTER publisher with an in-memory replay buffer. Spawns a separate thread to handle publishing from a queue. Parameters ---------- endpoint: PUB address. Use `tcp://*:5557` to bind or `tcp://host:5557` to connect. replay_endpoint: Optional ROUTER address for replay requests. 
When given, subscribers can request missed batches by sending the starting sequence number as an 8-byte big-endian integer. buffer_steps: Number of past batches to keep for replay. hwm: ZeroMQ high-water-mark for PUB socket. max_queue_size: Maximum number of events to buffer in memory. topic: Topic to publish events to. """ SHUTDOWN_TIMEOUT: float = 1.0 END_SEQ = (-1).to_bytes(8, "big", signed=True) def __init__( self, data_parallel_rank: int, endpoint: str = "tcp://*:5557", replay_endpoint: str | None = None, buffer_steps: int = 10_000, hwm: int = 100_000, max_queue_size: int = 100_000, topic: str = "", ) -> None: # Storage super().__init__(data_parallel_rank) self._event_queue = Queue[EventBatch | None](maxsize=max_queue_size) self._buffer = deque[tuple[int, bytes]](maxlen=buffer_steps) # ZMQ sockets self._ctx = zmq.Context.instance() self._pub: zmq.Socket | None = None self._replay: zmq.Socket | None = None self._dp_rank = data_parallel_rank self._endpoint = self.offset_endpoint_port(endpoint, self._dp_rank) self._replay_endpoint = self.offset_endpoint_port( replay_endpoint, self._dp_rank ) self._hwm = hwm self._socket_setup() # Payload self._seq_gen = count() self._topic_bytes = topic.encode("utf-8") # Thread self._running = True logger.info("Starting ZMQ publisher thread") self._thread = threading.Thread( target=self._publisher_thread, daemon=True, name="zmq-publisher" ) self._thread.start() def publish(self, events: EventBatch) -> None: if not self._running: raise RuntimeError("Publisher is closed") if events.data_parallel_rank is None: events.data_parallel_rank = self._data_parallel_rank self._event_queue.put(events) def shutdown(self) -> None: """Stop the publisher thread and clean up resources.""" self._running = False self._event_queue.put_nowait(None) start = time.time() pending_items = True while pending_items and (time.time() - start < self.SHUTDOWN_TIMEOUT): pending_items = not self._event_queue.empty() if pending_items: time.sleep(0.1) if 
pending_items: logger.warning( "Warning: Queue still has %s items after %s seconds timeout", self._event_queue.qsize(), self.SHUTDOWN_TIMEOUT, ) if self._thread.is_alive(): self._thread.join(timeout=self.SHUTDOWN_TIMEOUT) # Clean up ZMQ resources try: if self._pub is not None: self._pub.close(linger=0) if self._replay is not None: self._replay.close(linger=0) finally: pass # Do not terminate context; other sockets may use it def _socket_setup(self) -> None: """Initialize sockets https://pyzmq.readthedocs.io/en/v19.0.0/morethanbindings.html#thread-safety """ if self._pub is None: self._pub = self._ctx.socket(zmq.PUB) self._pub.set_hwm(self._hwm) # Heuristic: bind if wildcard / * present, else connect. # bind stable, connect volatile convention if self._endpoint is not None and ( "*" in self._endpoint or "::" in self._endpoint or self._endpoint.startswith("ipc://") or self._endpoint.startswith("inproc://") ): self._pub.bind(self._endpoint) elif self._endpoint is not None: self._pub.connect(self._endpoint) # Set up replay socket: use ROUTER # 1) handles multiple REQ clients (identities) # 2) lets us send back one request → many replies (streamed events) # 3) works in our non‑blocking poll loop alongside PUB if self._replay_endpoint is not None: self._replay = self._ctx.socket(zmq.ROUTER) self._replay.bind(self._replay_endpoint) def _publisher_thread(self) -> None: """Background thread that processes the event queue.""" self._pack = msgspec.msgpack.Encoder() assert self._pub is not None # narrows type for mypy while self._running or self._event_queue.qsize() > 0: # --- replay (non-critical) --------------------------------- if self._replay is not None and self._replay.poll(0): try: self._service_replay() except Exception as e: logger.exception("Error in replay: %s", e) # --- main queue (critical) --------------------------------- try: event = self._event_queue.get(timeout=0.1) if event is None: break # Sentinel received, exit thread except queue.Empty: continue try: 
seq = next(self._seq_gen) payload = self._pack.encode(event) seq_bytes = seq.to_bytes(8, "big") self._pub.send_multipart((self._topic_bytes, seq_bytes, payload)) self._buffer.append((seq, payload)) self._event_queue.task_done() except Exception as e: # Publishing failed; back-off a bit to avoid a tight error loop logger.exception("Error in publisher thread: %s", e) time.sleep(0.1) def _service_replay(self) -> None: """If a replay request is waiting, send buffered batches.""" assert self._replay is not None # narrows type for mypy frame = self._replay.recv_multipart() if len(frame) != 3: logger.warning("Invalid replay request: %s", frame) return client_id, _, start_seq_bytes = frame start_seq = int.from_bytes(start_seq_bytes, "big") for seq, buf in self._buffer: if seq >= start_seq: # [identity, empty_delim, seq_bytes, payload] # (identity, empty_delim) are stripped off by the router # receiving payload is (seq_bytes, payload) self._replay.send_multipart( (client_id, b"", seq.to_bytes(8, "big"), buf) ) # Send end of sequence marker # receiving payload is (-1, b""") self._replay.send_multipart((client_id, b"", self.END_SEQ, b"")) @staticmethod def offset_endpoint_port( endpoint: str | None, data_parallel_rank: int ) -> str | None: """Helper function to offset the port in an endpoint by the data parallel rank. 
Args: endpoint: The endpoint string (e.g., "tcp://*:5557" or "inproc://cache") data_parallel_rank: The data parallel rank to offset by Returns: The endpoint with the port offset by data_parallel_rank or suffix appended """ # Do nothing if input is None or data_parallel_rank is 0 if not endpoint or data_parallel_rank == 0: return endpoint if "inproc" in endpoint: return f"{endpoint}_dp{data_parallel_rank}" if "tcp" in endpoint: if endpoint and ":" in endpoint: # Get everything after the last colon (the port) last_colon_idx = endpoint.rfind(":") base_addr = endpoint[:last_colon_idx] base_port = int(endpoint[last_colon_idx + 1 :]) new_port = base_port + data_parallel_rank return f"{base_addr}:{new_port}" return endpoint raise ValueError("Invalid endpoint: must contain 'inproc' or 'tcp'") class EventPublisherFactory: _registry: dict[str, Callable[..., EventPublisher]] = { "null": NullEventPublisher, "zmq": ZmqEventPublisher, } @classmethod def register_publisher(cls, name: str, ctor: Callable[..., EventPublisher]) -> None: if name in cls._registry: raise KeyError(f"publisher '{name}' already registered") cls._registry[name] = ctor @classmethod def create( cls, config: KVEventsConfig | None, data_parallel_rank: int = 0 ) -> EventPublisher: """Create publisher from a config mapping.""" if ( config is None or not config.enable_kv_cache_events or config.publisher == "null" ): return NullEventPublisher() config_dict = asdict(config) kind = config_dict.pop("publisher") config_dict.pop("enable_kv_cache_events") try: constructor = cls._registry[kind] except KeyError as exc: raise ValueError(f"Unknown event publisher '{kind}'") from exc return constructor(data_parallel_rank=data_parallel_rank, **config_dict)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/distributed/utils.py
vllm/distributed/utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # Copyright 2023 The vLLM team. # Adapted from # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/tensor_parallel/utils.py # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import dataclasses import os import pickle import socket import sys import time import uuid from collections import deque from collections.abc import Sequence from datetime import timedelta from typing import Any import torch from torch.distributed import ProcessGroup, TCPStore from torch.distributed.distributed_c10d import ( Backend, PrefixStore, _get_default_timeout, _unregister_process_group, ) from torch.distributed.rendezvous import rendezvous import vllm.envs as envs from vllm.logger import init_logger from vllm.utils.network_utils import get_tcp_uri from vllm.utils.system_utils import suppress_stdout from vllm.utils.torch_utils import is_torch_equal_or_newer logger = init_logger(__name__) # We prefer to use os.sched_yield as it results in tighter polling loops, # measured to be around 3e-7 seconds. 
However on earlier versions of Python # os.sched_yield() does not release the GIL, so we fall back to time.sleep(0) USE_SCHED_YIELD = (sys.version_info[:3] >= (3, 11, 1)) or ( sys.version_info[:2] == (3, 10) and sys.version_info[2] >= 8 ) def sched_yield(): if USE_SCHED_YIELD: os.sched_yield() else: time.sleep(0) def ensure_divisibility(numerator, denominator): """Ensure that numerator is divisible by the denominator.""" assert numerator % denominator == 0, "{} is not divisible by {}".format( numerator, denominator ) def divide(numerator, denominator): """Ensure that numerator is divisible by the denominator and return the division value.""" ensure_divisibility(numerator, denominator) return numerator // denominator def split_tensor_along_last_dim( tensor: torch.Tensor, num_partitions: int, contiguous_split_chunks: bool = False, ) -> Sequence[torch.Tensor]: """Split a tensor along its last dimension. Arguments: tensor: input tensor. num_partitions: number of partitions to split the tensor contiguous_split_chunks: If True, make each chunk contiguous in memory. Returns: A list of Tensors """ # Get the size and dimension. last_dim = tensor.dim() - 1 last_dim_size = divide(tensor.size()[last_dim], num_partitions) # Split. tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) # NOTE: torch.split does not create contiguous tensors by default. if contiguous_split_chunks: return tuple(chunk.contiguous() for chunk in tensor_list) return tensor_list def get_pp_indices( num_hidden_layers: int, pp_rank: int, pp_size: int ) -> tuple[int, int]: """Try to evenly distribute layers across partitions. If the number of layers is not divisible by the number of partitions, the remaining layers are evenly distributed across all but the last partition. The last partition is excluded because it often contains an additional norm layer and we are attempting to balance compute. 
If `pp_size > 2` and the number of remaining layers is `0 < x <= pp_size - 2` then the remaining layers are evenly distributed across the middle partitions. The first and last partitions are excluded because they contain the input and output embeddings respectively and we are attempting to reduce maximum memory consumption across partitions. """ partition_list_str = envs.VLLM_PP_LAYER_PARTITION if partition_list_str is not None: try: partitions = [int(layer) for layer in partition_list_str.split(",")] except ValueError as err: raise ValueError( "Invalid partition string: {}".format(partition_list_str) ) from err if len(partitions) != pp_size: raise ValueError(f"{len(partitions)=} does not match {pp_size=}.") if sum(partitions) != num_hidden_layers: raise ValueError(f"{sum(partitions)=} does not match {num_hidden_layers=}.") else: layers_per_partition = num_hidden_layers // pp_size partitions = [layers_per_partition for _ in range(pp_size)] if remaining_layers := num_hidden_layers % pp_size: for i in range(2, remaining_layers + 2): partitions[-i] += 1 logger.info( "Hidden layers were unevenly partitioned: [%s]. " "This can be manually overridden using the " "VLLM_PP_LAYER_PARTITION environment variable", ",".join(str(p) for p in partitions), ) start_layer = sum(partitions[:pp_rank]) end_layer = start_layer + partitions[pp_rank] return (start_layer, end_layer) @dataclasses.dataclass class StatelessProcessGroup: """A dataclass to hold a metadata store, and the rank, world_size of the group. Only use it to communicate metadata between processes. For data-plane communication, create NCCL-related objects. 
""" rank: int world_size: int store: torch._C._distributed_c10d.Store # stores a reference to the socket so that the file descriptor stays alive socket: socket.socket | None data_expiration_seconds: int = 3600 # 1 hour # dst rank -> counter send_dst_counter: dict[int, int] = dataclasses.field(default_factory=dict) # src rank -> counter recv_src_counter: dict[int, int] = dataclasses.field(default_factory=dict) broadcast_send_counter: int = 0 broadcast_recv_src_counter: dict[int, int] = dataclasses.field(default_factory=dict) # A deque to store the data entries, with key and timestamp. entries: deque[tuple[str, float]] = dataclasses.field(default_factory=deque) def __post_init__(self): assert self.rank < self.world_size self.send_dst_counter = {i: 0 for i in range(self.world_size)} self.recv_src_counter = {i: 0 for i in range(self.world_size)} self.broadcast_recv_src_counter = {i: 0 for i in range(self.world_size)} def send_obj(self, obj: Any, dst: int): """Send an object to a destination rank.""" self.expire_data() key = f"send_to/{dst}/{self.send_dst_counter[dst]}" self.store.set(key, pickle.dumps(obj)) self.send_dst_counter[dst] += 1 self.entries.append((key, time.time())) def expire_data(self): """Expire data that is older than `data_expiration_seconds` seconds.""" while self.entries: # check the oldest entry key, timestamp = self.entries[0] if time.time() - timestamp > self.data_expiration_seconds: self.store.delete_key(key) self.entries.popleft() else: break def recv_obj(self, src: int) -> Any: """Receive an object from a source rank.""" obj = pickle.loads( self.store.get(f"send_to/{self.rank}/{self.recv_src_counter[src]}") ) self.recv_src_counter[src] += 1 return obj def broadcast_obj(self, obj: Any | None, src: int) -> Any: """Broadcast an object from a source rank to all other ranks. It does not clean up after all ranks have received the object. Use it for limited times, e.g., for initialization. 
""" if self.rank == src: self.expire_data() key = f"broadcast_from/{src}/{self.broadcast_send_counter}" self.store.set(key, pickle.dumps(obj)) self.broadcast_send_counter += 1 self.entries.append((key, time.time())) return obj else: key = f"broadcast_from/{src}/{self.broadcast_recv_src_counter[src]}" recv_obj = pickle.loads(self.store.get(key)) self.broadcast_recv_src_counter[src] += 1 return recv_obj def all_gather_obj(self, obj: Any) -> list[Any]: """All gather an object from all ranks.""" gathered_objs = [] for i in range(self.world_size): if i == self.rank: gathered_objs.append(obj) self.broadcast_obj(obj, src=self.rank) else: recv_obj = self.broadcast_obj(None, src=i) gathered_objs.append(recv_obj) return gathered_objs def barrier(self, timeout: float = 30.0): """A robust barrier to synchronize all ranks. Uses a multi-phase approach to ensure all processes reach the barrier before proceeding: 1. Each process signals it has reached the barrier 2. Each process signals that it has confirmed the arrival of all other ranks. 3. Rank 0 waits for all other ranks to signal their departure to ensure that all ranks have departed the barrier first. Args: timeout: Maximum time in seconds to wait for each phase (in seconds) Raises: RuntimeError: If coordination fails or times out """ # Generate a barrier ID that is globally unique try: if self.rank == 0: barrier_id = f"barrier_{uuid.uuid4()}" self.broadcast_obj(barrier_id, src=0) else: barrier_id = self.broadcast_obj(None, src=0) except Exception as e: raise RuntimeError("Failed to broadcast barrier_id") from e # Phase 1: Signal arrival at barrier # Wait for all processes to arrive # We need all ranks to confirm the arrival of all other ranks. # This is the key synchronization point. 
arrival_key = f"arrival_{barrier_id}_{self.rank}" try: self.store.set(arrival_key, b"1") except Exception as e: raise RuntimeError("Failed to signal barrier arrival") from e start_time = time.time() processes_arrived: set[int] = set() while len(processes_arrived) < self.world_size: # Check for timeout cur_time = time.time() if cur_time - start_time > timeout: raise RuntimeError(f"Barrier timed out after {timeout:.2f} seconds") # Check for each process for i in range(self.world_size): if i in processes_arrived: continue key = f"arrival_{barrier_id}_{i}" try: # Try to get the key - if it exists, we'll get a value # If it doesn't exist, it will throw an exception self.store.get(key) processes_arrived.add(i) except KeyError: # Key doesn't exist yet pass except Exception as check_e: logger.debug("Error checking key existence: %s", check_e) sched_yield() # Short sleep to avoid tight polling if len(processes_arrived) < self.world_size: sched_yield() # Phase 2: Signal departure from barrier # We only care to block at this stage in rank 0, which runs the # server side of the TCPStore. We want to make sure that all # clients have departed the barrier before rank 0 in case the # next thing after the barrier is a shutdown, including tearing # down the TCPStore. Other ranks can exit the barrier immediately # after signaling their departure. 
departure_key = f"departure_{barrier_id}_{self.rank}" try: self.store.set(departure_key, b"1") except Exception as e: raise RuntimeError("Failed to signal barrier departure") from e if self.rank != 0: return # Make rank 0 wait for all processes to signal departure start_time = time.time() processes_departed: set[int] = set() while len(processes_departed) < self.world_size: # Check for timeout if time.time() - start_time > timeout: raise RuntimeError( f"Barrier departure timed out after {timeout:.2f} seconds" ) # Check for each process for i in range(self.world_size): if i in processes_departed: continue key = f"departure_{barrier_id}_{i}" try: # Try to get the key - if it exists, we'll get a value # If it doesn't exist, it will throw an exception self.store.get(key) processes_departed.add(i) except KeyError: # Key doesn't exist yet pass except Exception as check_e: logger.debug("Error checking key existence: %s", check_e) sched_yield() # Short sleep to avoid tight polling if len(processes_departed) < self.world_size: sched_yield() # Clean up keys to avoid leaking memory in the store for i in range(self.world_size): try: self.store.delete_key(f"arrival_{barrier_id}_{i}") except Exception: logger.debug("Error deleting key: %s", f"arrival_{barrier_id}_{i}") try: self.store.delete_key(f"departure_{barrier_id}_{i}") except Exception: logger.debug("Error deleting key: %s", f"departure_{barrier_id}_{i}") @staticmethod def create( host: str, port: int, rank: int, world_size: int, data_expiration_seconds: int = 3600, store_timeout: int = 300, ) -> "StatelessProcessGroup": """A replacement for `torch.distributed.init_process_group` that does not pollute the global state. If we have process A and process B called `torch.distributed.init_process_group` to form a group, and then we want to form another group with process A, B, C, D, it is not possible in PyTorch, because process A and process B have already formed a group, and process C and process D cannot join that group. 
This function is a workaround for this issue. `torch.distributed.init_process_group` is a global call, while this function is a stateless call. It will return a `StatelessProcessGroup` object that can be used for exchanging metadata. With this function, process A and process B can call `StatelessProcessGroup.create` to form a group, and then process A, B, C, and D can call `StatelessProcessGroup.create` to form another group. """ # noqa launch_server = rank == 0 if launch_server: # listen on the specified interface (instead of 0.0.0.0) listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listen_socket.bind((host, port)) listen_socket.listen() listen_fd = listen_socket.fileno() else: listen_socket = None listen_fd = None store = TCPStore( host_name=host, port=port, world_size=world_size, is_master=launch_server, timeout=timedelta(seconds=store_timeout), use_libuv=False, # for now: github.com/pytorch/pytorch/pull/150215 master_listen_fd=listen_fd, ) return StatelessProcessGroup( rank=rank, world_size=world_size, store=store, socket=listen_socket, data_expiration_seconds=data_expiration_seconds, ) def init_gloo_process_group( prefix_store: PrefixStore, group_rank: int, group_size: int, timeout: timedelta, ) -> ProcessGroup: """ Stateless init ProcessGroup with gloo backend compatible with different torch versions. 
""" with suppress_stdout(): if is_torch_equal_or_newer("2.6"): pg = ProcessGroup( prefix_store, group_rank, group_size, ) else: options = ProcessGroup.Options(backend="gloo") pg = ProcessGroup( prefix_store, group_rank, group_size, options, ) from torch.distributed.distributed_c10d import ProcessGroupGloo backend_class = ProcessGroupGloo( prefix_store, group_rank, group_size, timeout=timeout ) backend_type = ProcessGroup.BackendType.GLOO device = torch.device("cpu") if is_torch_equal_or_newer("2.6"): # _set_default_backend is supported in torch >= 2.6 pg._set_default_backend(backend_type) backend_class._set_sequence_number_for_group() pg._register_backend(device, backend_type, backend_class) return pg def stateless_init_torch_distributed_process_group( host: str, port: int, rank: int, world_size: int, backend: str ) -> ProcessGroup: """ A replacement for `torch.distributed.init_process_group` that does not pollute the global state. The created ProcessGroup object can be used for some operations such as `allreduce`, because it does not depend on the global rank. However, some operations such as `broadcast` cannot be used because it depends on the global rank. # TODO: ask for help from PyTorch team if we need the `broadcast` operation. This function is useful when we are not sure about the total number of processes in the process group. For example, we may have process 1, 2, ..., 8 who want to communicate, and process 9 might be the same process as process 1, or it might be a different process; process 10 might be the same process as process 5, or it might be a different process. In this case, how can we reliably form a communication channel within process 9 and 10, without affecting the communication channel within process 1, 2, ..., 8? One possible solution is to figure out if process 9 and 10 are the same as process 1 and 5 beforehand, and then form a communication channel based on the information, adjusting the ranks and world_size etc. 
However, figuring out the information is not always easy, and it will interfere with the main communication channel. Our solution is to always form a communication channel with process 1, 2, ..., 8, and then use this function to form another communication channel with process 9 and 10. This way, regardless of whether process 9 and 10 are the same as process 1 and 5, the main communication channel is always formed with process 1, 2, ..., 8, and the additional communication channel is formed with process 9 and 10. """ init_method = get_tcp_uri(host, port) backend = Backend(backend) # it is basically string timeout = _get_default_timeout(backend) store, rank, world_size = next( rendezvous(init_method, rank, world_size, timeout=timeout) ) store.set_timeout(timeout) group_rank = rank group_size = world_size # Use a PrefixStore to avoid accidental overrides of keys used by # different systems (e.g. RPC) in case the store is multi-tenant. prefix_store = PrefixStore(init_method, store) try: from vllm.platforms import current_platform return current_platform.stateless_init_device_torch_dist_pg( backend=backend, prefix_store=prefix_store, group_rank=group_rank, group_size=group_size, timeout=timeout, ) except NotImplementedError: # If platform doesn't implement stateless_init_device_torch_dist_pg, it # will raise a NotImplementedError. In this case, we fall back to gloo. return init_gloo_process_group( prefix_store=prefix_store, group_rank=group_rank, group_size=group_size, timeout=timeout, ) def stateless_destroy_torch_distributed_process_group(pg: ProcessGroup) -> None: """ Destroy ProcessGroup returned by stateless_init_torch_distributed_process_group(). """ if is_torch_equal_or_newer("2.7"): pg.shutdown() else: # Lazy import for non-CUDA backends. from torch.distributed.distributed_c10d import _shutdown_backend _shutdown_backend(pg) _unregister_process_group(pg.group_name)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false