Add files using upload-large-folder tool
Browse files- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/llm_engine.py +326 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/logprobs.py +201 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/mm_input_cache.py +121 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/output_processor.py +486 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/parallel_sampling.py +133 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/processor.py +420 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/utils.py +832 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/abstract.py +113 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/multiproc_executor.py +606 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/ray_distributed_executor.py +108 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/loggers.py +695 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/prometheus.py +82 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/ray_wrappers.py +133 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/reader.py +246 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/stats.py +244 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/pool/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/pool/metadata.py +25 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__init__.py +0 -0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/llm_engine.py
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from collections.abc import Mapping
|
| 5 |
+
from copy import copy
|
| 6 |
+
from typing import Any, Callable, Optional, Union
|
| 7 |
+
|
| 8 |
+
from typing_extensions import TypeVar
|
| 9 |
+
|
| 10 |
+
import vllm.envs as envs
|
| 11 |
+
from vllm.config import ParallelConfig, VllmConfig
|
| 12 |
+
from vllm.distributed import stateless_destroy_torch_distributed_process_group
|
| 13 |
+
from vllm.engine.arg_utils import EngineArgs
|
| 14 |
+
from vllm.inputs import PromptType
|
| 15 |
+
from vllm.logger import init_logger
|
| 16 |
+
from vllm.lora.request import LoRARequest
|
| 17 |
+
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
|
| 18 |
+
from vllm.outputs import PoolingRequestOutput, RequestOutput
|
| 19 |
+
from vllm.pooling_params import PoolingParams
|
| 20 |
+
from vllm.sampling_params import SamplingParams
|
| 21 |
+
from vllm.tasks import SupportedTask
|
| 22 |
+
from vllm.transformers_utils.tokenizer_group import (
|
| 23 |
+
TokenizerGroup, init_tokenizer_from_configs)
|
| 24 |
+
from vllm.usage.usage_lib import UsageContext
|
| 25 |
+
from vllm.utils import Device
|
| 26 |
+
from vllm.v1.engine.core_client import EngineCoreClient
|
| 27 |
+
from vllm.v1.engine.output_processor import OutputProcessor
|
| 28 |
+
from vllm.v1.engine.parallel_sampling import ParentRequest
|
| 29 |
+
from vllm.v1.engine.processor import Processor
|
| 30 |
+
from vllm.v1.executor.abstract import Executor
|
| 31 |
+
from vllm.v1.metrics.loggers import (PrometheusStatLogger, StatLoggerBase,
|
| 32 |
+
StatLoggerFactory)
|
| 33 |
+
from vllm.v1.metrics.reader import Metric, get_metrics_snapshot
|
| 34 |
+
from vllm.v1.metrics.stats import IterationStats
|
| 35 |
+
|
| 36 |
+
logger = init_logger(__name__)
|
| 37 |
+
|
| 38 |
+
_R = TypeVar("_R", default=Any)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class LLMEngine:
    """Legacy LLMEngine for backwards compatibility.

    Thin synchronous facade over the V1 engine components: a ``Processor``
    (raw inputs -> EngineCoreRequests), an ``EngineCoreClient`` (scheduling
    and model execution, possibly in a separate process), and an
    ``OutputProcessor`` (EngineCoreOutputs -> RequestOutputs).
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        executor_class: type[Executor],
        log_stats: bool,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: Optional[list[StatLoggerFactory]] = None,
        mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
        use_cached_outputs: bool = False,
        multiprocess_mode: bool = False,
    ) -> None:
        # V1-only entry point: refuse to run when the V1 env flag is off.
        if not envs.VLLM_USE_V1:
            raise ValueError(
                "Using V1 LLMEngine, but envs.VLLM_USE_V1=False. "
                "This should not happen. As a workaround, try using "
                "LLMEngine.from_vllm_config(...) or explicitly set "
                "VLLM_USE_V1=0 or 1 and report this issue on Github.")

        # Custom stat-logger factories are a V0 feature not yet ported.
        if stat_loggers is not None:
            raise NotImplementedError(
                "Passing StatLoggers to LLMEngine in V1 is not yet supported. "
                "Set VLLM_USE_V1=0 and file and issue on Github.")

        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.cache_config = vllm_config.cache_config

        self.log_stats = log_stats
        self.stat_logger: Optional[StatLoggerBase] = None
        if self.log_stats:
            self.stat_logger = PrometheusStatLogger(vllm_config)

        # important: init dp group before init the engine_core
        # In the decoupled engine case this is handled in EngineCoreProc.
        parallel_config = vllm_config.parallel_config
        if not multiprocess_mode and parallel_config.data_parallel_size > 1:
            self.dp_group = parallel_config.stateless_init_dp_group()
        else:
            self.dp_group = None
        # Set by has_unfinished_requests_dp() when another DP rank still has
        # work; step() then runs a dummy batch to keep ranks in lockstep.
        self.should_execute_dummy_batch = False

        if self.model_config.skip_tokenizer_init:
            self.tokenizer = None
        else:
            # Tokenizer (+ ensure liveness if running in another process).
            self.tokenizer = init_tokenizer_from_configs(
                model_config=vllm_config.model_config,
                scheduler_config=vllm_config.scheduler_config,
                lora_config=vllm_config.lora_config)

        # Processor (convert Inputs --> EngineCoreRequests)
        self.processor = Processor(vllm_config=vllm_config,
                                   tokenizer=self.tokenizer,
                                   mm_registry=mm_registry)

        # OutputProcessor (convert EngineCoreOutputs --> RequestOutput).
        self.output_processor = OutputProcessor(self.tokenizer,
                                                log_stats=self.log_stats)

        # EngineCore (gets EngineCoreRequests and gives EngineCoreOutputs)
        self.engine_core = EngineCoreClient.make_client(
            multiprocess_mode=multiprocess_mode,
            asyncio_mode=False,
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_stats=self.log_stats,
        )

        if not multiprocess_mode:
            # for v0 compatibility
            self.model_executor = self.engine_core.engine_core.model_executor  # type: ignore

        # Don't keep the dummy data in memory
        self.reset_mm_cache()

    @classmethod
    def from_vllm_config(
        cls,
        vllm_config: VllmConfig,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: Optional[list[StatLoggerFactory]] = None,
        disable_log_stats: bool = False,
    ) -> "LLMEngine":
        """Alternate constructor from a prebuilt ``VllmConfig``.

        Multiprocessing is decided by the VLLM_ENABLE_V1_MULTIPROCESSING
        env flag rather than an explicit argument.
        """
        return cls(vllm_config=vllm_config,
                   executor_class=Executor.get_class(vllm_config),
                   log_stats=(not disable_log_stats),
                   usage_context=usage_context,
                   stat_loggers=stat_loggers,
                   multiprocess_mode=envs.VLLM_ENABLE_V1_MULTIPROCESSING)

    @classmethod
    def from_engine_args(
        cls,
        engine_args: EngineArgs,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: Optional[list[StatLoggerFactory]] = None,
        enable_multiprocessing: bool = False,
    ) -> "LLMEngine":
        """Creates an LLM engine from the engine arguments."""

        # Create the engine configs.
        vllm_config = engine_args.create_engine_config(usage_context)
        executor_class = Executor.get_class(vllm_config)

        # Env flag can force multiprocessing on even if the caller did not
        # request it.
        if envs.VLLM_ENABLE_V1_MULTIPROCESSING:
            logger.debug("Enabling multiprocessing for LLMEngine.")
            enable_multiprocessing = True

        # Create the LLMEngine.
        return cls(vllm_config=vllm_config,
                   executor_class=executor_class,
                   log_stats=not engine_args.disable_log_stats,
                   usage_context=usage_context,
                   stat_loggers=stat_loggers,
                   multiprocess_mode=enable_multiprocessing)

    def get_num_unfinished_requests(self) -> int:
        """Return the number of requests still tracked by the output processor."""
        return self.output_processor.get_num_unfinished_requests()

    def has_unfinished_requests(self) -> bool:
        """Whether any request (on any DP rank, if applicable) is unfinished."""
        has_unfinished = self.output_processor.has_unfinished_requests()
        if self.dp_group is None:
            return has_unfinished or self.engine_core.dp_engines_running()
        return self.has_unfinished_requests_dp(has_unfinished)

    def has_unfinished_requests_dp(self, has_unfinished: bool) -> bool:
        """Aggregate the unfinished flag across the data-parallel group.

        If this rank is idle but some other rank still has work, schedule a
        dummy batch so all ranks keep stepping together.
        """
        aggregated_has_unfinished = ParallelConfig.has_unfinished_dp(
            self.dp_group, has_unfinished)
        if not has_unfinished and aggregated_has_unfinished:
            self.should_execute_dummy_batch = True
        return aggregated_has_unfinished

    @classmethod
    def validate_outputs(cls, outputs, output_type):
        # Pass-through: kept only for v0 API compatibility.
        return outputs

    def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        """Return the tasks (e.g. generate/pooling) the engine core supports."""
        return self.engine_core.get_supported_tasks()

    def abort_request(self, request_ids: list[str]) -> None:
        """Remove request_ids from EngineCore and Detokenizer."""

        request_ids = self.output_processor.abort_requests(request_ids)
        self.engine_core.abort_requests(request_ids)

    def add_request(
        self,
        request_id: str,
        prompt: PromptType,
        params: Union[SamplingParams, PoolingParams],
        arrival_time: Optional[float] = None,
        lora_request: Optional[LoRARequest] = None,
        tokenization_kwargs: Optional[dict[str, Any]] = None,
        trace_headers: Optional[Mapping[str, str]] = None,
        priority: int = 0,
    ) -> None:
        """Process a prompt into a request and hand it to the engine core.

        For ``SamplingParams`` with ``n > 1``, fans out into n child
        requests tied to a shared ``ParentRequest``.
        """
        # Validate the request_id type.
        if not isinstance(request_id, str):
            raise TypeError(
                f"request_id must be a string, got {type(request_id)}")

        # Process raw inputs into the request.
        prompt_str, request = self.processor.process_inputs(
            request_id, prompt, params, arrival_time, lora_request,
            tokenization_kwargs, trace_headers, priority)

        n = params.n if isinstance(params, SamplingParams) else 1

        if n == 1:
            # Make a new RequestState and queue.
            self.output_processor.add_request(request, prompt_str, None, 0)
            # Add the request to EngineCore.
            self.engine_core.add_request(request)
            return

        # Fan out child requests (for n>1).
        parent_req = ParentRequest(request_id, params)
        for idx in range(n):
            request_id, params = parent_req.get_child_info(idx)
            # Last child reuses the original request object; earlier
            # children get shallow copies.
            child_request = request if idx == n - 1 else copy(request)
            child_request.request_id = request_id
            child_request.sampling_params = params

            # Make a new RequestState and queue.
            self.output_processor.add_request(child_request, prompt_str,
                                              parent_req, idx)
            # Add the request to EngineCore.
            self.engine_core.add_request(child_request)

    def step(self) -> Union[list[RequestOutput], list[PoolingRequestOutput]]:
        """Run one engine iteration and return any finished/streamed outputs."""

        # Keep DP ranks in lockstep when this rank has no real work.
        if self.should_execute_dummy_batch:
            self.should_execute_dummy_batch = False
            self.engine_core.execute_dummy_batch()
            return []

        # 1) Get EngineCoreOutput from the EngineCore.
        outputs = self.engine_core.get_output()

        # 2) Process EngineCoreOutputs.
        iteration_stats = IterationStats() if self.log_stats else None
        processed_outputs = self.output_processor.process_outputs(
            outputs.outputs,
            engine_core_timestamp=outputs.timestamp,
            iteration_stats=iteration_stats)

        # 3) Abort any reqs that finished due to stop strings.
        self.engine_core.abort_requests(processed_outputs.reqs_to_abort)

        # 4) Record stats
        if self.stat_logger is not None:
            assert outputs.scheduler_stats is not None
            self.stat_logger.record(scheduler_stats=outputs.scheduler_stats,
                                    iteration_stats=iteration_stats)

        return processed_outputs.request_outputs

    def get_vllm_config(self):
        return self.vllm_config

    def get_model_config(self):
        return self.model_config

    def start_profile(self):
        self.engine_core.profile(True)

    def stop_profile(self):
        self.engine_core.profile(False)

    def reset_mm_cache(self):
        """Clear all multimodal caches (processor, P0 client, P1 server)."""
        self.processor.mm_registry.reset_processor_cache(self.model_config)
        self.processor.mm_input_cache_client.reset()
        self.engine_core.reset_mm_cache()

    def reset_prefix_cache(self, device: Optional[Device] = None):
        # NOTE(review): `device` is accepted for API compatibility but not
        # forwarded to the engine core.
        self.engine_core.reset_prefix_cache()

    def sleep(self, level: int = 1):
        self.engine_core.sleep(level)

    def wake_up(self, tags: Optional[list[str]] = None):
        self.engine_core.wake_up(tags)

    def is_sleeping(self) -> bool:
        return self.engine_core.is_sleeping()

    def get_metrics(self) -> list[Metric]:
        """Return a snapshot of Prometheus metrics; requires log_stats=True."""
        assert self.log_stats, "Stat logging disabled"
        return get_metrics_snapshot()

    def get_tokenizer_group(self) -> TokenizerGroup:
        """Return the tokenizer group, or raise if tokenizer init was skipped."""
        if self.tokenizer is None:
            raise ValueError("Unable to get tokenizer because "
                             "skip_tokenizer_init is True")

        return self.tokenizer

    def add_lora(self, lora_request: LoRARequest) -> bool:
        """Load a new LoRA adapter into the engine for future requests."""
        return self.engine_core.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        """Remove an already loaded LoRA adapter."""
        return self.engine_core.remove_lora(lora_id)

    def list_loras(self) -> set[int]:
        """List all registered adapters."""
        return self.engine_core.list_loras()

    def pin_lora(self, lora_id: int) -> bool:
        """Prevent an adapter from being evicted."""
        return self.engine_core.pin_lora(lora_id)

    def collective_rpc(self,
                       method: Union[str, Callable[..., _R]],
                       timeout: Optional[float] = None,
                       args: tuple = (),
                       kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
        """Invoke ``method`` on all workers via the engine core; returns per-worker results."""
        return self.engine_core.collective_rpc(method, timeout, args, kwargs)

    def __del__(self):
        # Tear down the stateless DP process group if we created one.
        if dp_group := getattr(self, "dp_group", None):
            stateless_destroy_torch_distributed_process_group(dp_group)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/logprobs.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import itertools
|
| 5 |
+
from collections.abc import Iterable
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
from vllm.logger import init_logger
|
| 10 |
+
from vllm.sequence import Logprob, PromptLogprobs, SampleLogprobs
|
| 11 |
+
from vllm.transformers_utils.detokenizer_utils import (
|
| 12 |
+
AnyTokenizer, convert_ids_list_to_tokens)
|
| 13 |
+
from vllm.v1.engine import EngineCoreOutput, EngineCoreRequest
|
| 14 |
+
from vllm.v1.outputs import LogprobsLists, LogprobsTensors
|
| 15 |
+
|
| 16 |
+
logger = init_logger(__name__)
|
| 17 |
+
|
| 18 |
+
NONES = itertools.repeat(None)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@dataclass
class LogprobsProcessor:
    """Accumulates sample and prompt logprobs for one request.

    Converts the engine core's raw logprob lists/tensors into the
    ``dict[token_id, Logprob]`` structures exposed on request outputs,
    optionally detokenizing the top tokens.
    """

    # Tokenizer for this request,
    # None if detokenization is disabled.
    tokenizer: Optional[AnyTokenizer]

    # Logprobs for this request
    logprobs: Optional[SampleLogprobs]
    prompt_logprobs: Optional[PromptLogprobs]
    cumulative_logprob: Optional[float]
    num_logprobs: Optional[int]
    num_prompt_logprobs: Optional[int]

    @classmethod
    def from_new_request(
        cls,
        tokenizer: Optional[AnyTokenizer],
        request: EngineCoreRequest,
    ) -> "LogprobsProcessor":
        """Build a processor from a new request's sampling params.

        Fields stay ``None`` when the corresponding logprobs were not
        requested, so the update methods can assert on enablement.
        """
        assert request.sampling_params is not None
        num_logprobs = request.sampling_params.logprobs
        num_prompt_logprobs = request.sampling_params.prompt_logprobs
        return cls(
            tokenizer=tokenizer,
            cumulative_logprob=(None if num_logprobs is None else 0.),
            logprobs=(None if num_logprobs is None else []),
            # NOTE: logprob of first prompt token is None.
            prompt_logprobs=(None if num_prompt_logprobs is None else [None]),
            num_prompt_logprobs=num_prompt_logprobs,
            num_logprobs=num_logprobs,
        )

    def _update_sample_logprobs(self, logprobs_lists: LogprobsLists) -> None:
        """Update with sample logprobs from EngineCore.

        Outer lists are only of len > 1 if EngineCore made
        >1 tokens in prior step (e.g. in spec decoding).

        Args:
          logprobs_lists: the lists of logprob tokens, logprobs, and ranks.

        """

        assert self.num_logprobs is not None
        assert self.logprobs is not None
        assert self.cumulative_logprob is not None

        token_ids_lst, logprobs_lst, ranks_lst = logprobs_lists

        for rank, logprobs, token_ids in zip(ranks_lst, logprobs_lst,
                                             token_ids_lst):

            # Detokenize (non-incrementally).
            decoded_tokens = NONES if self.tokenizer is None else (
                convert_ids_list_to_tokens(self.tokenizer, token_ids))

            # Sampler puts the sampled logprob in first.
            sampled_token_logprob = logprobs[0]
            self.cumulative_logprob += sampled_token_logprob

            # Update with the Logprob dictionary for this pos.
            self.logprobs.append(
                self._make_logprob_dict(
                    logprobs,
                    token_ids,
                    decoded_tokens,
                    rank,
                    self.num_logprobs,
                ))

    def _update_prompt_logprobs(
        self,
        prompt_logprobs_tensors: LogprobsTensors,
    ) -> None:
        """Update with prompt logprobs from EngineCore.

        Args:
          prompt_logprobs_tensors: tuple containing the prompt logprobs
                                   tensors.

        """

        # Prompt logprobs are enabled.
        assert self.num_prompt_logprobs is not None
        assert self.prompt_logprobs is not None

        token_ids, logprobs, ranks = prompt_logprobs_tensors

        # Detokenize non-incrementally.
        # Output is flat: [num_tok, num_lps] -> [num_tok * num_lps]
        decoded_tokens = None if self.tokenizer is None else (
            convert_ids_list_to_tokens(self.tokenizer,
                                       token_ids.flatten().tolist()))

        # Recover shapes.
        num_prompt_tokens, num_logprobs = logprobs.shape

        # Pythonize the torch tensors.
        prompt_token_ranks = ranks.tolist()
        prompt_logprobs = logprobs.tolist()
        token_ids = token_ids.tolist()

        # Make Logprob for each position.
        for pos in range(num_prompt_tokens):
            # Handle flattening.
            offset = pos * num_logprobs
            offset_end = offset + num_logprobs
            decoded_tokens_for_pos = NONES \
                if decoded_tokens is None else decoded_tokens[offset:offset_end]

            # Update with the Logprob dictionary for this pos.
            self.prompt_logprobs.append(
                self._make_logprob_dict(prompt_logprobs[pos], token_ids[pos],
                                        decoded_tokens_for_pos,
                                        prompt_token_ranks[pos],
                                        self.num_prompt_logprobs))

    def pop_prompt_logprobs(self) -> Optional[PromptLogprobs]:
        """Pop and return all request prompt logprobs

        The logprobs processor aggregates prompt chunk logprobs
        over one or more prefill chunks. This method returns
        all prompt logprobs at once and then forgets them.
        Ensures correct RequestOutputKind.DELTA semantics
        wherein all prompt logprobs are returned at once at
        the end of prefill.

        Returns:
          None if prompt logprobs are disabled for this request.
          List of all prompt logprobs, otherwise.
        """
        plp = self.prompt_logprobs
        if plp:
            self.prompt_logprobs = []
        return plp

    @staticmethod
    def _make_logprob_dict(
        logprobs: list[float],
        logprob_token_ids: list[int],
        decoded_tokens: Iterable[Optional[str]],
        rank: int,
        num_logprobs: int,
    ) -> dict[int, Logprob]:
        """Make a Logprob dictionary for a position.

        Args:
          logprobs: list of log probabilities
          logprob_token_ids: list of top token ids
          decoded_tokens: list of decoded top tokens
          rank: rank of the sampled token
          num_logprobs: number of logprobs requested
            by the user (in addition to sampled logprob)

        Returns:
          dict[token id, Logprob]
        """
        # -1 means "all logprobs": use however many were provided.
        if num_logprobs == -1:
            num_logprobs = len(logprobs)
        # We do not need a special case for the sampled token
        # being in the topk, since inserting duplicated data
        # into a dictionary twice is the same as doing it once.
        topk_ranks = range(1, num_logprobs + 1)
        ranks = itertools.chain((rank, ), topk_ranks)

        return {
            token_id: Logprob(
                logprob=logprob,
                rank=rank,
                decoded_token=token,
            )
            for token_id, logprob, rank, token in zip(
                logprob_token_ids, logprobs, ranks, decoded_tokens)
        }

    def update_from_output(self, output: EngineCoreOutput) -> None:
        """Fold a new engine-core output into the accumulated logprobs."""
        if output.new_logprobs is not None:
            self._update_sample_logprobs(output.new_logprobs)
        if output.new_prompt_logprobs_tensors is not None:
            self._update_prompt_logprobs(output.new_prompt_logprobs_tensors)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/mm_input_cache.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from collections.abc import Sequence
|
| 4 |
+
from typing import TYPE_CHECKING, Optional
|
| 5 |
+
|
| 6 |
+
from vllm.multimodal import MultiModalRegistry
|
| 7 |
+
from vllm.multimodal.cache import MultiModalCache, MultiModalCacheItemMetadata
|
| 8 |
+
from vllm.multimodal.inputs import MultiModalKwargsItem
|
| 9 |
+
from vllm.utils import is_list_of
|
| 10 |
+
|
| 11 |
+
if TYPE_CHECKING:
|
| 12 |
+
from vllm.config import ModelConfig
|
| 13 |
+
|
| 14 |
+
# The idea of multimodal input caching is based on having a client and
|
| 15 |
+
# a server, where the client executes in the frontend process (=P0) and the
|
| 16 |
+
# server in the core process (=P1).
|
| 17 |
+
#
|
| 18 |
+
# -- P0:
|
| 19 |
+
# - BaseMultiModalProcessor calls MultiModalHasher to get the `mm_hash` of
|
| 20 |
+
# each input multi-modal item (e.g. image),
|
| 21 |
+
# - BaseMultiModalProcessor processes the input items into `mm_kwargs`,
|
| 22 |
+
# which are MultiModalKwargsItem instances that each correspond to an
|
| 23 |
+
# input multi-modal item.
|
| 24 |
+
# - MultiModalInputCacheClient accepts the `mm_kwargs` and corresponding
|
| 25 |
+
# `mm_hash` for each item. It stores the `mm_hash` as keys and the size
|
| 26 |
+
# of `mm_kwargs`, but not the `mm_kwargs` themselves, to avoid taking
|
| 27 |
+
# up additional memory in P0.
|
| 28 |
+
# - The `mm_hash` is always sent to P1.
|
| 29 |
+
# - The corresponding `mm_kwargs` are only sent to P1 if they are not cached
|
| 30 |
+
# in MultiModalInputCacheServer.
|
| 31 |
+
#
|
| 32 |
+
# -- P1:
|
| 33 |
+
# - If the `mm_hash` is cached (i.e. `mm_kwargs` are not sent from P0),
|
| 34 |
+
# MultiModalInputCacheServer retrieves the corresponding `mm_kwargs`.
|
| 35 |
+
# - If the `mm_hash` is not cached (i.e. `mm_kwargs` are sent from P0),
|
| 36 |
+
# MultiModalInputCacheServer stores `mm_kwargs` under the key `mm_hash`.
|
| 37 |
+
# - Either way, the `mm_hash` and corresponding `mm_kwargs` are sent to
|
| 38 |
+
# the engine for model execution.
|
| 39 |
+
#
|
| 40 |
+
# Both Client and Server must perform cache update and eviction based on the
|
| 41 |
+
# same item size. This ensures that the keys of MultiModalInputCacheClient
|
| 42 |
+
# and MultiModalInputCacheServer are mirrored, allowing us to determine in P0
|
| 43 |
+
# whether a key is cached in MultiModalInputCacheServer by querying
|
| 44 |
+
# MultiModalInputCacheClient without having to communicate with P1.
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class MultiModalInputCacheClient:
    """Used by P0 to check whether multi-modal kwargs are cached in P1."""

    def __init__(self, model_config: "ModelConfig",
                 mm_registry: MultiModalRegistry) -> None:
        super().__init__()

        self.enabled = mm_registry.enable_mm_input_cache(model_config)
        # Stores only per-item size metadata (not the kwargs themselves) so
        # P0 mirrors P1's LRU eviction without duplicating the data in memory.
        self.mm_cache = MultiModalCache.get_lru_cache(
            model_config.get_mm_input_cache_gb(),
            MultiModalCacheItemMetadata,
        )

    def get_and_update(
        self,
        mm_kwargs: Sequence[MultiModalKwargsItem],
        mm_hashes: list[str],
    ) -> list[Optional[MultiModalKwargsItem]]:
        """Return per-item kwargs, replacing server-cached items with ``None``.

        ``None`` entries signal that P1 already holds the item, so it need
        not be re-sent; cache misses are recorded (by size metadata) and the
        item is passed through.
        """
        if not self.enabled:
            return list(mm_kwargs)

        assert len(mm_kwargs) == len(mm_hashes)

        out_mm_items = list[Optional[MultiModalKwargsItem]]()
        for mm_item, mm_hash in zip(mm_kwargs, mm_hashes):
            if self.mm_cache.get(mm_hash) is not None:
                # Hit: server has it; send only the hash.
                out_mm_items.append(None)
            else:
                # Miss: record metadata and send the full item.
                self.mm_cache[mm_hash] = \
                    MultiModalCacheItemMetadata.wraps(mm_item)
                out_mm_items.append(mm_item)

        return out_mm_items

    def reset(self) -> None:
        """Drop all cached metadata (must be mirrored by a server reset)."""
        self.mm_cache.clear()
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class MultiModalInputCacheServer:
    """Used by P1 to avoid requiring past multi-modal kwargs from P0."""

    def __init__(self, model_config: "ModelConfig",
                 mm_registry: MultiModalRegistry) -> None:
        super().__init__()

        self.enabled = mm_registry.enable_mm_input_cache(model_config)
        # Stores the full kwargs items, keyed by their hash.
        self.mm_cache = MultiModalCache.get_lru_cache(
            model_config.get_mm_input_cache_gb(),
            MultiModalKwargsItem,
        )

    def get_and_update(
        self,
        mm_kwargs: Sequence[Optional[MultiModalKwargsItem]],
        mm_hashes: list[str],
    ) -> list[MultiModalKwargsItem]:
        """Resolve None placeholders from the cache; store fresh items."""
        if not self.enabled:
            # Caching disabled: P0 must have sent every item in full.
            materialized = list(mm_kwargs)
            assert is_list_of(materialized, MultiModalKwargsItem)
            return materialized

        assert len(mm_kwargs) == len(mm_hashes)

        resolved: list[MultiModalKwargsItem] = []
        for item, item_hash in zip(mm_kwargs, mm_hashes):
            if item is None:
                # Placeholder: P0 determined this hash is already cached.
                resolved.append(self.mm_cache[item_hash])
            else:
                # New item: cache it under its hash and pass it through.
                self.mm_cache[item_hash] = item
                resolved.append(item)

        return resolved

    def reset(self) -> None:
        """Drop all cached multi-modal kwargs."""
        self.mm_cache.clear()
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/output_processor.py
ADDED
|
@@ -0,0 +1,486 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import asyncio
|
| 5 |
+
from collections.abc import Iterable
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import Any, Optional, Union, cast
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from vllm.outputs import (CompletionOutput, PoolingOutput,
|
| 12 |
+
PoolingRequestOutput, RequestOutput)
|
| 13 |
+
from vllm.sampling_params import RequestOutputKind
|
| 14 |
+
from vllm.transformers_utils.tokenizer import AnyTokenizer
|
| 15 |
+
from vllm.transformers_utils.tokenizer_group import TokenizerGroup
|
| 16 |
+
from vllm.v1.engine import EngineCoreOutput, EngineCoreRequest, FinishReason
|
| 17 |
+
from vllm.v1.engine.detokenizer import IncrementalDetokenizer
|
| 18 |
+
from vllm.v1.engine.logprobs import LogprobsProcessor
|
| 19 |
+
from vllm.v1.engine.parallel_sampling import ParentRequest
|
| 20 |
+
from vllm.v1.metrics.stats import (IterationStats, LoRARequestStates,
|
| 21 |
+
RequestStateStats)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class RequestOutputCollector:
    """
    Collects streamed RequestOutputs per individual request,
    for hand-off to the consuming asyncio generate task.

    When streaming deltas, RequestOutputs are merged if the
    producer gets ahead of the consumer.
    """

    def __init__(self, output_kind: RequestOutputKind):
        # Deltas are aggregated when the consumer lags behind.
        self.aggregate = output_kind == RequestOutputKind.DELTA
        self.output: Optional[Union[RequestOutput, PoolingRequestOutput,
                                    Exception]] = None
        self.ready = asyncio.Event()

    def put(self, output: Union[RequestOutput, PoolingRequestOutput,
                                Exception]) -> None:
        """Non-blocking put operation."""
        if self.output is None or isinstance(output, Exception):
            # Empty slot, or an error — errors always take precedence.
            self.output = output
            self.ready.set()
        elif isinstance(self.output, (RequestOutput, PoolingRequestOutput)):
            # Merge into the pending output so that outputs with different
            # request indexes (if n > 1) do not override each other.
            self.output.add(output, aggregate=self.aggregate)

    async def get(self) -> Union[RequestOutput, PoolingRequestOutput]:
        """Get operation blocks on put event."""
        while (pending := self.output) is None:
            await self.ready.wait()
        self.output = None
        self.ready.clear()
        if isinstance(pending, Exception):
            raise pending
        return pending

    def get_nowait(
            self) -> Optional[Union[RequestOutput, PoolingRequestOutput]]:
        """Non-blocking get operation."""
        pending = self.output
        if pending is None:
            return None
        self.output = None
        self.ready.clear()
        if isinstance(pending, Exception):
            raise pending
        return pending
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@dataclass
class OutputProcessorOutput:
    """Result of one OutputProcessor.process_outputs() call."""

    # Outputs to return directly to the caller (LLMEngine path; empty
    # when outputs are pushed onto per-request queues instead).
    request_outputs: list[Union[RequestOutput, PoolingRequestOutput]]
    # Request IDs finished locally (e.g. by a detected stop string) that
    # still need to be aborted inside the EngineCore.
    reqs_to_abort: list[str]
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class RequestState:
    """Output-processing state for a single engine request.

    Holds the detokenizer, logprobs processor, prompt data and stats
    needed to turn the stream of EngineCoreOutputs for one request into
    RequestOutput (or PoolingRequestOutput) objects.
    """

    def __init__(
        self,
        request_id: str,
        parent_req: Optional[ParentRequest],
        request_index: int,
        lora_name: Optional[str],
        output_kind: RequestOutputKind,
        prompt: Optional[str],
        prompt_token_ids: list[int],
        logprobs_processor: Optional[LogprobsProcessor],
        detokenizer: Optional[IncrementalDetokenizer],
        max_tokens_param: Optional[int],
        arrival_time: float,
        queue: Optional[RequestOutputCollector],
        log_stats: bool,
    ):
        self.request_id = request_id
        self.parent_req = parent_req
        self.request_index = request_index
        self.lora_name = lora_name
        self.output_kind = output_kind
        self.prompt = prompt
        self.prompt_token_ids = prompt_token_ids
        self.prompt_len = len(prompt_token_ids)
        self.logprobs_processor = logprobs_processor
        self.detokenizer = detokenizer
        self.max_tokens_param = max_tokens_param
        # True until the first EngineCoreOutput arrives for this request.
        self.is_prefilling = True
        self.queue = queue
        self.num_cached_tokens = 0

        # Per-request stats are only tracked when logging is enabled.
        self.stats = RequestStateStats(
            arrival_time=arrival_time) if log_stats else None

    @classmethod
    def from_new_request(
        cls,
        tokenizer: AnyTokenizer,
        request: EngineCoreRequest,
        prompt: Optional[str],
        parent_req: Optional[ParentRequest],
        request_index: int,
        queue: Optional[RequestOutputCollector],
        log_stats: bool,
    ) -> "RequestState":
        """Build a RequestState for a newly added request.

        Sampling requests get a logprobs processor and incremental
        detokenizer; pooling requests get neither.
        """
        if sampling_params := request.sampling_params:
            if not sampling_params.detokenize:
                # Detokenization explicitly disabled for this request.
                tokenizer = None
            output_kind = sampling_params.output_kind
            logprobs_processor = LogprobsProcessor.from_new_request(
                tokenizer=tokenizer,
                request=request,
            )
            detokenizer = IncrementalDetokenizer.from_new_request(
                tokenizer=tokenizer,
                request=request,
            )
            max_tokens_param = sampling_params.max_tokens
        else:
            # Pooling request: no token generation, so no detokenizer or
            # logprobs processing is needed.
            logprobs_processor = None
            detokenizer = None
            max_tokens_param = None
            assert request.pooling_params is not None
            output_kind = request.pooling_params.output_kind

        return cls(
            request_id=request.request_id,
            parent_req=parent_req,
            request_index=request_index,
            lora_name=(request.lora_request.name
                       if request.lora_request is not None else None),
            output_kind=output_kind,
            prompt=prompt,
            prompt_token_ids=request.prompt_token_ids,
            logprobs_processor=logprobs_processor,
            detokenizer=detokenizer,
            max_tokens_param=max_tokens_param,
            arrival_time=request.arrival_time,
            queue=queue,
            log_stats=log_stats,
        )

    def make_request_output(
        self,
        new_token_ids: list[int],
        pooling_output: Optional[torch.Tensor],
        finish_reason: Optional[FinishReason],
        stop_reason: Union[int, str, None],
        kv_transfer_params: Optional[dict[str, Any]] = None,
    ) -> Optional[Union[RequestOutput, PoolingRequestOutput]]:
        """Build the next output for this request.

        Returns None when nothing should be emitted yet: FINAL_ONLY mode
        before completion, or a parallel-sampling child whose parent is
        still aggregating sibling outputs.
        """
        finished = finish_reason is not None
        final_only = self.output_kind == RequestOutputKind.FINAL_ONLY

        if not finished and final_only:
            # Only the final output is required in FINAL_ONLY mode.
            return None

        request_id = self.request_id
        if pooling_output is not None:
            # Pooling path: wrap the tensor directly; no detokenization.
            return self._new_request_output(
                request_id, [self._new_pooling_output(pooling_output)],
                finished)

        output = self._new_completion_output(new_token_ids, finish_reason,
                                             stop_reason)

        if self.parent_req is None:
            outputs = [output]
        else:
            # Parallel sampling: the parent decides which outputs (and
            # which request ID) to surface, possibly aggregating children.
            request_id, outputs, finished = self.parent_req.get_outputs(
                request_id, output)
            if not outputs:
                return None

        return self._new_request_output(request_id, outputs, finished,
                                        kv_transfer_params)

    def _new_request_output(
        self,
        request_id: str,
        outputs: Union[list[CompletionOutput], list[PoolingOutput]],
        finished: bool,
        kv_transfer_params: Optional[dict[str, Any]] = None,
    ) -> Union[RequestOutput, PoolingRequestOutput]:
        """Wrap per-sequence outputs into a request-level output object."""

        first_output = outputs[0]
        if isinstance(first_output, PoolingOutput):
            # Pooling requests always carry exactly one output.
            assert len(outputs) == 1
            return PoolingRequestOutput(
                request_id=request_id,
                outputs=first_output,
                prompt_token_ids=self.prompt_token_ids,
                finished=finished,
            )
        assert self.logprobs_processor is not None
        if self.output_kind == RequestOutputKind.DELTA:
            # Side effect: logprobs processor forgets prompt logprobs
            prompt_logprobs = self.logprobs_processor.pop_prompt_logprobs()
        else:
            prompt_logprobs = self.logprobs_processor.prompt_logprobs

        return RequestOutput(
            request_id=request_id,
            prompt=self.prompt,
            prompt_token_ids=self.prompt_token_ids,
            prompt_logprobs=prompt_logprobs,
            outputs=cast(list[CompletionOutput], outputs),
            finished=finished,
            kv_transfer_params=kv_transfer_params,
            num_cached_tokens=self.num_cached_tokens,
        )

    def _new_completion_output(
        self,
        token_ids: list[int],
        finish_reason: Optional[FinishReason],
        stop_reason: Union[int, str, None],
    ) -> CompletionOutput:
        """Build a CompletionOutput for the newly generated tokens."""

        assert self.detokenizer is not None
        assert self.logprobs_processor is not None
        finished = finish_reason is not None
        delta = self.output_kind == RequestOutputKind.DELTA

        # Prepare text and token_ids, based on delta mode
        text = self.detokenizer.get_next_output_text(finished, delta)
        if not delta:
            # Cumulative mode: report the full output token sequence.
            token_ids = self.detokenizer.output_token_ids

        # Prepare logprobs, based on delta mode
        logprobs = self.logprobs_processor.logprobs
        if delta and logprobs:
            # Only the logprobs for this step's new tokens.
            logprobs = logprobs[-len(token_ids):]

        return CompletionOutput(
            index=self.request_index,
            text=text,
            token_ids=token_ids,
            logprobs=logprobs,
            cumulative_logprob=self.logprobs_processor.cumulative_logprob,
            finish_reason=str(finish_reason) if finished else None,
            stop_reason=stop_reason if finished else None)

    def _new_pooling_output(
        self,
        pooling_output: torch.Tensor,
    ) -> PoolingOutput:
        """Wrap a raw pooling tensor in a PoolingOutput."""

        return PoolingOutput(data=pooling_output)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
class OutputProcessor:
    """Process EngineCoreOutputs into RequestOutputs."""

    def __init__(
        self,
        tokenizer: TokenizerGroup,
        log_stats: bool,
    ):
        self.log_stats = log_stats
        self.tokenizer = tokenizer
        # Active request states, keyed by request ID.
        self.request_states: dict[str, RequestState] = {}
        # Parallel-sampling parents, keyed by parent request ID.
        self.parent_requests: dict[str, ParentRequest] = {}
        self.lora_states = LoRARequestStates()

    def get_num_unfinished_requests(self):
        """Return the number of requests still being processed."""
        return len(self.request_states)

    def has_unfinished_requests(self) -> bool:
        """Return True if any request is still being processed."""
        return len(self.request_states) > 0

    def propagate_error(self, e: Exception):
        """Propagate error to all generate() tasks."""

        for _, state in self.request_states.items():
            assert state.queue is not None
            state.queue.put(e)

    def abort_requests(
        self,
        request_ids: Iterable[str],
    ) -> list[str]:
        """Abort the given requests (and any parallel-sampling children).

        Returns the IDs that were actually aborted here; unknown IDs are
        silently skipped.
        """
        request_ids_to_abort = []
        for request_id in request_ids:
            req_state = self.request_states.pop(request_id, None)
            if req_state is not None:
                self.lora_states.abort_request(req_state)
                request_ids_to_abort.append(request_id)
                # Produce final abort output.
                if req_state.queue is not None and (
                        request_output := req_state.make_request_output(
                            [], None, FinishReason.ABORT, None, None)):
                    req_state.queue.put(request_output)
            elif parent := self.parent_requests.get(request_id):
                # Abort children prior to removing the parent.
                if parent.child_requests:
                    # Copy: abort_requests mutates parent.child_requests.
                    child_reqs = list(parent.child_requests)
                    child_reqs = self.abort_requests(child_reqs)
                    request_ids_to_abort.extend(child_reqs)
                self.parent_requests.pop(request_id, None)
        return request_ids_to_abort

    def add_request(
        self,
        request: EngineCoreRequest,
        prompt: Optional[str],
        parent_req: Optional[ParentRequest] = None,
        request_index: int = 0,
    queue: Optional[RequestOutputCollector] = None,
    ) -> None:
        """Register a new request for output processing.

        Raises:
            ValueError: if a request with the same ID is already running.
        """
        request_id = request.request_id
        if request_id in self.request_states:
            raise ValueError(f"Request id {request_id} already running.")

        # Resolve the (possibly LoRA-specific) tokenizer for this request.
        tokenizer = None if not self.tokenizer else \
            self.tokenizer.get_lora_tokenizer(request.lora_request)

        req_state = RequestState.from_new_request(tokenizer=tokenizer,
                                                  request=request,
                                                  prompt=prompt,
                                                  parent_req=parent_req,
                                                  request_index=request_index,
                                                  queue=queue,
                                                  log_stats=self.log_stats)
        self.request_states[request_id] = req_state
        self.lora_states.add_request(req_state)
        if parent_req:
            self.parent_requests[parent_req.request_id] = parent_req

    def process_outputs(
        self,
        engine_core_outputs: list[EngineCoreOutput],
        engine_core_timestamp: Optional[float] = None,
        iteration_stats: Optional[IterationStats] = None,
    ) -> OutputProcessorOutput:
        """
        Process the EngineCoreOutputs:
        1) Compute stats for logging
        2) Detokenize
        3) Create and handle RequestOutput objects:
            * If there is a queue (for usage with AsyncLLM),
              put the RequestOutput objects into the queue for
              handling by the per-request generate() tasks.

            * If there is no queue (for usage with LLMEngine),
              return a list of RequestOutput objects.

        NOTE FOR DEVELOPERS

        vLLM V1 minimizes the number of python loops over the full
        batch to ensure system overheads are minimized. This is the
        only function that should loop over EngineCoreOutputs.

        If you need to touch every element of the batch, do it from
        within the loop below.
        """

        request_outputs: Union[list[RequestOutput],
                               list[PoolingRequestOutput]] = []
        reqs_to_abort: list[str] = []
        for engine_core_output in engine_core_outputs:
            req_id = engine_core_output.request_id
            req_state = self.request_states.get(req_id)
            if req_state is None:
                # Ignore output for already-aborted request.
                continue

            # 1) Compute stats for this iteration.
            self._update_stats_from_output(req_state, engine_core_output,
                                           engine_core_timestamp,
                                           iteration_stats)

            new_token_ids = engine_core_output.new_token_ids
            pooling_output = engine_core_output.pooling_output
            finish_reason = engine_core_output.finish_reason
            stop_reason = engine_core_output.stop_reason
            kv_transfer_params = engine_core_output.kv_transfer_params
            req_state.num_cached_tokens = engine_core_output.num_cached_tokens
            req_state.is_prefilling = False

            if pooling_output is None:
                assert req_state.detokenizer is not None
                assert req_state.logprobs_processor is not None
                # 2) Detokenize the token ids into text and perform stop checks.
                stop_string = req_state.detokenizer.update(
                    new_token_ids, finish_reason == FinishReason.STOP)
                if stop_string:
                    # Detokenizer found a stop string the core didn't see.
                    finish_reason = FinishReason.STOP
                    stop_reason = stop_string

                # 3) Compute sample and prompt logprobs for request,
                # if required.
                req_state.logprobs_processor.update_from_output(
                    engine_core_output)

            # 4) Create and handle RequestOutput objects.
            if request_output := req_state.make_request_output(
                    new_token_ids, pooling_output, finish_reason, stop_reason,
                    kv_transfer_params):
                if req_state.queue is not None:
                    # AsyncLLM: put into queue for handling by generate().
                    req_state.queue.put(request_output)
                else:
                    # LLMEngine: return list of RequestOutputs.
                    request_outputs.append(request_output)

            # Free completed requests.
            if finish_reason is not None:
                self.request_states.pop(req_id)
                # Remove parent request if applicable.
                parent_req = req_state.parent_req
                if parent_req and not parent_req.child_requests:
                    self.parent_requests.pop(parent_req.request_id, None)
                if not engine_core_output.finished:
                    # If req not finished in EngineCore, but Detokenizer
                    # detected stop string, abort needed in EngineCore.
                    reqs_to_abort.append(req_id)

                # Track per-request stats
                self._update_stats_from_finished(req_state, finish_reason,
                                                 iteration_stats)

        self.lora_states.update_iteration_stats(iteration_stats)

        return OutputProcessorOutput(
            request_outputs=request_outputs,
            reqs_to_abort=reqs_to_abort,
        )

    def _update_stats_from_output(self, req_state: RequestState,
                                  engine_core_output: EngineCoreOutput,
                                  engine_core_timestamp: Optional[float],
                                  iteration_stats: Optional[IterationStats]):
        """Fold one engine output into the iteration stats (no-op when
        stats collection is disabled)."""
        if iteration_stats is None:
            return

        lora_stats = self.lora_states.get_stats(req_state)

        assert engine_core_timestamp is not None
        assert req_state.stats is not None
        iteration_stats.update_from_output(engine_core_output,
                                           engine_core_timestamp,
                                           req_state.is_prefilling,
                                           req_state.prompt_len,
                                           req_state.stats, lora_stats)

    def _update_stats_from_finished(self, req_state: RequestState,
                                    finish_reason: Optional[FinishReason],
                                    iteration_stats: Optional[IterationStats]):
        """Record final per-request stats once a request finishes (no-op
        when stats collection is disabled)."""
        if iteration_stats is None:
            return

        assert finish_reason is not None
        assert req_state.stats is not None
        iteration_stats.update_from_finished_request(
            finish_reason=finish_reason,
            num_prompt_tokens=len(req_state.prompt_token_ids),
            max_tokens_param=req_state.max_tokens_param,
            req_stats=req_state.stats)
        self.lora_states.finish_request(req_state)

        ParentRequest.observe_finished_request(
            req_state.parent_req, iteration_stats,
            req_state.stats.num_generation_tokens)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/parallel_sampling.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from copy import copy
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
from vllm.outputs import CompletionOutput
|
| 8 |
+
from vllm.sampling_params import RequestOutputKind, SamplingParams
|
| 9 |
+
from vllm.v1.metrics.stats import IterationStats
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class ParentRequest:
    """Info, state & processing for parallel sampling request.

    Store parent request ID and sampling params.
    Facilitate generating child request sampling params.
    """

    request_id: str
    sampling_params: SamplingParams

    # To track the completion of child requests
    child_requests: set[str]

    # To aggregate child completions when not streaming
    # NOTE(review): in FINAL_ONLY mode this is pre-sized with None
    # placeholders that are filled in by index, so the declared element
    # type only holds once all children have finished.
    output_aggregator: list[CompletionOutput]

    # To find the max number of generated tokens across all children
    max_num_generation_tokens: int

    # To efficiently obtain child sampling params
    cached_child_sampling_params: Optional[SamplingParams]

    def __init__(self, request_id: str,
                 sampling_params: SamplingParams) -> None:
        self.request_id = request_id
        self.sampling_params = sampling_params

        self.child_requests = set()
        # FINAL_ONLY mode pre-sizes n slots (filled by child index);
        # streaming modes don't aggregate, so the list stays empty.
        self.output_aggregator = [None] * sampling_params.n if (
            sampling_params.output_kind
            == RequestOutputKind.FINAL_ONLY) else []
        self.max_num_generation_tokens = 0
        self.cached_child_sampling_params = None

    def _get_child_sampling_params(
        self,
        index: int,
    ) -> SamplingParams:
        """Efficiently obtain child `sampling_params`

        If `sampling_params.seed` is not `None` then
        each child request requires a unique clone of
        parent `sampling_params` with a unique seed.

        Args:
          index: index within `n` child requests

        Returns:
          Child `sampling_params` instance.
        """
        seed = self.sampling_params.seed
        if self.cached_child_sampling_params:
            # Reuse child sampling_params data structure
            return self.cached_child_sampling_params
        # Build child sampling_params
        child_sampling_params = copy(self.sampling_params)
        child_sampling_params.n = 1
        if seed is None:
            # Cache child sampling_params for later reuse
            self.cached_child_sampling_params = child_sampling_params
        else:
            # Each child gets a clone with a unique seed
            child_sampling_params.seed = seed + index
        return child_sampling_params

    def get_child_info(self, index: int) -> tuple[str, SamplingParams]:
        """Get child request ID and sampling params.

        Args:
          index: index within `n` child requests.

        Returns:
          (request ID, sampling_params) tuple
        """
        child_req_id = f"{index}_{self.request_id}"
        self.child_requests.add(child_req_id)
        return child_req_id, self._get_child_sampling_params(index)

    @property
    def n(self) -> int:
        # Number of parallel samples requested by the parent.
        return self.sampling_params.n

    def get_outputs(
        self,
        child_request_id: str,
        completion_output: CompletionOutput,
    ) -> tuple[str, list[CompletionOutput], bool]:
        """Fold a child's output into the parent's view.

        Returns (parent request ID, outputs to emit now, parent finished).
        In streaming modes each child output is emitted immediately; in
        FINAL_ONLY mode outputs are held until every child has finished.
        """
        if completion_output.finished():
            self.child_requests.remove(child_request_id)

        if self.sampling_params.output_kind != RequestOutputKind.FINAL_ONLY:
            # If streaming, just return the current output.
            outputs = [completion_output]
        else:
            # If not streaming, aggregate the n final outputs.
            self.output_aggregator[completion_output.index] = completion_output
            outputs = [] if self.child_requests else self.output_aggregator

        finished = not self.child_requests
        return self.request_id, outputs, finished

    def observe_num_generation_tokens(self, num_generation_tokens: int):
        """Track the running max of generated tokens across children."""
        self.max_num_generation_tokens = max(num_generation_tokens,
                                             self.max_num_generation_tokens)
        return self.max_num_generation_tokens

    @staticmethod
    def observe_finished_request(parent_req: Optional['ParentRequest'],
                                 iteration_stats: IterationStats,
                                 num_generation_tokens: int):
        """Record per-request generation stats once a (parent or plain)
        request fully finishes."""
        # n == 1 for plain (non-parallel-sampling) requests.
        n_param = parent_req.n if parent_req is not None else 1

        if parent_req is not None:
            num_generation_tokens = parent_req.observe_num_generation_tokens(
                num_generation_tokens)

        # Child requests finished, we can now record to iteration stats
        if parent_req is None or not parent_req.child_requests:
            iteration_stats.max_num_generation_tokens_iter.append(
                num_generation_tokens)
            iteration_stats.n_params_iter.append(n_param)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/processor.py
ADDED
|
@@ -0,0 +1,420 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import time
|
| 5 |
+
from collections.abc import Mapping
|
| 6 |
+
from typing import Any, Literal, Optional, Union
|
| 7 |
+
|
| 8 |
+
from vllm.config import VllmConfig
|
| 9 |
+
from vllm.inputs import ProcessorInputs, PromptType, SingletonInputs
|
| 10 |
+
from vllm.inputs.parse import split_enc_dec_inputs
|
| 11 |
+
from vllm.inputs.preprocess import InputPreprocessor
|
| 12 |
+
from vllm.lora.request import LoRARequest
|
| 13 |
+
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
|
| 14 |
+
from vllm.multimodal.inputs import MultiModalKwargsItem, PlaceholderRange
|
| 15 |
+
from vllm.multimodal.processing import EncDecMultiModalProcessor
|
| 16 |
+
from vllm.multimodal.utils import argsort_mm_positions
|
| 17 |
+
from vllm.pooling_params import PoolingParams
|
| 18 |
+
from vllm.sampling_params import SamplingParams
|
| 19 |
+
from vllm.transformers_utils.tokenizer_group import TokenizerGroup
|
| 20 |
+
from vllm.utils import is_list_of
|
| 21 |
+
from vllm.v1.engine import EngineCoreRequest
|
| 22 |
+
from vllm.v1.engine.mm_input_cache import MultiModalInputCacheClient
|
| 23 |
+
from vllm.v1.structured_output.backend_guidance import (
|
| 24 |
+
validate_guidance_grammar)
|
| 25 |
+
from vllm.v1.structured_output.backend_outlines import (
|
| 26 |
+
validate_structured_output_request_outlines)
|
| 27 |
+
from vllm.v1.structured_output.backend_xgrammar import (
|
| 28 |
+
validate_xgrammar_grammar)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class Processor:
    """Front-end input processor: validates prompts/params and builds
    EngineCoreRequests (see ``process_inputs``)."""

    def __init__(
        self,
        vllm_config: "VllmConfig",
        tokenizer: "TokenizerGroup",
        mm_registry: "MultiModalRegistry" = MULTIMODAL_REGISTRY,
    ):
        self.vllm_config = vllm_config
        # Cache frequently-used sub-configs as attributes.
        self.model_config = vllm_config.model_config
        self.cache_config = vllm_config.cache_config
        self.lora_config = vllm_config.lora_config
        self.decoding_config = vllm_config.decoding_config
        self.tokenizer = tokenizer

        self.generation_config_fields = (
            self.model_config.try_get_generation_config())
        self.input_preprocessor = InputPreprocessor(self.model_config,
                                                    self.tokenizer,
                                                    mm_registry)
        # Client-side cache for preprocessed multimodal inputs.
        self.mm_input_cache_client = MultiModalInputCacheClient(
            self.model_config, mm_registry)

    @property
    def mm_registry(self):
        """The multimodal registry used by the input preprocessor."""
        return self.input_preprocessor.mm_registry
|
| 59 |
+
|
| 60 |
+
def _validate_logprobs(
|
| 61 |
+
self,
|
| 62 |
+
params: SamplingParams,
|
| 63 |
+
) -> None:
|
| 64 |
+
max_logprobs = self.model_config.max_logprobs
|
| 65 |
+
if max_logprobs == -1:
|
| 66 |
+
return
|
| 67 |
+
# Validate sample logprobs.
|
| 68 |
+
if params.logprobs and (params.logprobs == -1
|
| 69 |
+
or params.logprobs > max_logprobs):
|
| 70 |
+
raise ValueError(
|
| 71 |
+
f"Requested sample logprobs of {params.logprobs}, "
|
| 72 |
+
f"which is greater than max allowed: {max_logprobs}")
|
| 73 |
+
|
| 74 |
+
# Validate prompt logprobs.
|
| 75 |
+
if params.prompt_logprobs and params.prompt_logprobs > max_logprobs:
|
| 76 |
+
raise ValueError(
|
| 77 |
+
f"Requested prompt logprobs of {params.prompt_logprobs}, "
|
| 78 |
+
f"which is greater than max allowed: {max_logprobs}")
|
| 79 |
+
|
| 80 |
+
def _validate_sampling_params(
|
| 81 |
+
self,
|
| 82 |
+
params: SamplingParams,
|
| 83 |
+
lora_request: Optional[LoRARequest],
|
| 84 |
+
) -> None:
|
| 85 |
+
self._validate_structured_output(params)
|
| 86 |
+
self._validate_logit_bias(params)
|
| 87 |
+
|
| 88 |
+
if params.allowed_token_ids is None:
|
| 89 |
+
return
|
| 90 |
+
if not params.allowed_token_ids:
|
| 91 |
+
raise ValueError("allowed_token_ids is not None and empty!")
|
| 92 |
+
if self.tokenizer is None:
|
| 93 |
+
# When skip_tokenizer_init=True, we can't validate token IDs
|
| 94 |
+
# Skip validation and let the model handle invalid tokens
|
| 95 |
+
return
|
| 96 |
+
tokenizer = self.tokenizer.get_lora_tokenizer(lora_request)
|
| 97 |
+
vocab_size = len(tokenizer)
|
| 98 |
+
if not all(0 <= tid < vocab_size for tid in params.allowed_token_ids):
|
| 99 |
+
raise ValueError(
|
| 100 |
+
"allowed_token_ids contains out-of-vocab token id!")
|
| 101 |
+
|
| 102 |
+
def _validate_logit_bias(
|
| 103 |
+
self,
|
| 104 |
+
params: SamplingParams,
|
| 105 |
+
) -> None:
|
| 106 |
+
"""Validate logit_bias token IDs are within vocabulary range."""
|
| 107 |
+
if not params.logit_bias:
|
| 108 |
+
return
|
| 109 |
+
|
| 110 |
+
vocab_size = self.model_config.get_vocab_size()
|
| 111 |
+
invalid_token_ids = []
|
| 112 |
+
|
| 113 |
+
for token_id in params.logit_bias:
|
| 114 |
+
if token_id < 0 or token_id >= vocab_size:
|
| 115 |
+
invalid_token_ids.append(token_id)
|
| 116 |
+
|
| 117 |
+
if invalid_token_ids:
|
| 118 |
+
raise ValueError(
|
| 119 |
+
f"token_id(s) {invalid_token_ids} in logit_bias contain "
|
| 120 |
+
f"out-of-vocab token ids. Vocabulary size: {vocab_size}")
|
| 121 |
+
|
| 122 |
+
def _validate_supported_sampling_params(
|
| 123 |
+
self,
|
| 124 |
+
params: SamplingParams,
|
| 125 |
+
) -> None:
|
| 126 |
+
# Best of not yet supported.
|
| 127 |
+
if params.best_of is not None and params.best_of > 1:
|
| 128 |
+
raise ValueError("vLLM V1 does not yet support best_of.")
|
| 129 |
+
# Logits processors not supported.
|
| 130 |
+
if params.logits_processors:
|
| 131 |
+
raise ValueError("vLLM V1 does not support per request "
|
| 132 |
+
"user provided logits processors.")
|
| 133 |
+
|
| 134 |
+
def _validate_params(
    self,
    params: "Union[SamplingParams, PoolingParams]",
    lora_request: "Optional[LoRARequest]",
):
    """
    Validate supported SamplingParams.
    Should raise ValueError if unsupported for API Server.
    """
    # Pooling requests have no sampling-specific fields to validate.
    if isinstance(params, PoolingParams):
        return

    self._validate_logprobs(params)
    self._validate_sampling_params(params, lora_request)
    self._validate_supported_sampling_params(params)
|
| 150 |
+
|
| 151 |
+
def _validate_lora(self, lora_request: Optional[LoRARequest]) -> None:
|
| 152 |
+
if lora_request is not None and not self.lora_config:
|
| 153 |
+
raise ValueError(f"Got lora_request {lora_request} but LoRA is "
|
| 154 |
+
"not enabled!")
|
| 155 |
+
|
| 156 |
+
def _validate_structured_output(self, params: "SamplingParams") -> None:
    """Validate guided-decoding settings and resolve the backend to use."""
    gd = params.guided_decoding
    if not gd or not self.decoding_config:
        return

    # Structured output needs a tokenizer.
    if self.model_config.skip_tokenizer_init and gd:
        raise ValueError(
            "Structured outputs requires a tokenizer so it can't be used with 'skip_tokenizer_init'"  # noqa: E501
        )

    engine_backend = self.decoding_config.backend
    if gd.backend:
        # Request-level backend selection is not supported in V1. The only
        # tolerated mismatch is a backend previously resolved from "auto"
        # (marked via backend_was_auto) on a reused params object.
        if (gd.backend != engine_backend
                and not (engine_backend == "auto" and gd.backend_was_auto)):
            raise ValueError(
                "Request-level structured output backend selection is no "
                "longer supported. The request specified "
                f"'{gd.backend}', but vLLM was "
                f"initialised with '{engine_backend}'. This error "
                "can be resolved by removing backend selection from the "
                "request.")
    else:
        gd.backend = engine_backend

    # An empty choice list can never be satisfied.
    if isinstance(gd.choice, list) and not gd.choice:
        raise ValueError(f"Choice '{gd.choice}' "
                         "cannot be an empty list")

    if engine_backend.startswith("xgrammar"):
        # xgrammar with no fallback.
        validate_xgrammar_grammar(params)
    elif engine_backend.startswith("guidance"):
        # No tokenizer available here, so grammars using special tokens
        # (Lark <|...|> syntax) are disallowed by the validator.
        validate_guidance_grammar(params, tokenizer=None)
    elif engine_backend == "outlines":
        validate_structured_output_request_outlines(params)
    else:
        # "auto": opt-in opinionated behavior — try xgrammar first, fall
        # back to guidance for requests xgrammar cannot express.
        try:
            validate_xgrammar_grammar(params)
            gd.backend = "xgrammar"
        except ValueError:
            validate_guidance_grammar(params, tokenizer=None)
            gd.backend = "guidance"
        # Remember that this backend was chosen automatically.
        gd.backend_was_auto = True
|
| 222 |
+
|
| 223 |
+
def process_inputs(
    self,
    request_id: str,
    prompt: "PromptType",
    params: "Union[SamplingParams, PoolingParams]",
    arrival_time: "Optional[float]" = None,
    lora_request: "Optional[LoRARequest]" = None,
    tokenization_kwargs: "Optional[dict[str, Any]]" = None,
    trace_headers: "Optional[Mapping[str, str]]" = None,
    priority: int = 0,
    data_parallel_rank: "Optional[int]" = None,
) -> "tuple[Optional[str], EngineCoreRequest]":
    """Validate and preprocess one request into an EngineCoreRequest.

    Returns ``(prompt_text_or_None, EngineCoreRequest)``.
    Raises ValueError for unsupported or invalid request parameters.
    """
    # TODO(woosuk): Support pooling models.
    # TODO(woosuk): Support encoder-decoder models.
    self._validate_lora(lora_request)
    self._validate_params(params, lora_request)
    if trace_headers is not None:
        raise ValueError("V1 does not support tracing yet.")

    dp_size = self.vllm_config.parallel_config.data_parallel_size
    if data_parallel_rank is not None and not (
            0 <= data_parallel_rank < dp_size):
        raise ValueError(f"data_parallel_rank {data_parallel_rank} "
                         f"is out of range [0, {dp_size}).")

    if arrival_time is None:
        arrival_time = time.time()

    # Preprocess inputs:
    # 1. Tokenize text prompt (with LoRA request if one exists).
    # 2. For multimodal models with a merged preprocessor, preprocess
    #    multimodal data and expand prompt token ids accordingly.
    return_mm_hashes = (self.model_config.processor_return_mm_hashes
                        or bool(self.cache_config.enable_prefix_caching))
    processed_inputs: "ProcessorInputs" = (
        self.input_preprocessor.preprocess(
            prompt,
            tokenization_kwargs=tokenization_kwargs,
            lora_request=lora_request,
            return_mm_hashes=return_mm_hashes,
        ))

    # Platform-specific request validation (local import to avoid a
    # module-level dependency cycle).
    from vllm.platforms import current_platform
    current_platform.validate_request(
        prompt=prompt,
        params=params,
        processed_inputs=processed_inputs,
    )
    eos_token_id = self.input_preprocessor.get_eos_token_id(lora_request)

    self._validate_model_inputs(processed_inputs, lora_request)

    encoder_inputs, decoder_inputs = split_enc_dec_inputs(processed_inputs)

    # TODO: Impl encoder-decoder
    if encoder_inputs is not None:
        raise NotImplementedError

    sampling_params = None
    pooling_params = None
    if isinstance(params, SamplingParams):
        # Clone so engine-side mutations never leak back to the caller.
        # TODO: can we avoid cloning here in multiproc case?
        sampling_params = params.clone()
        # Unset max_tokens: generate up to the model length budget.
        if sampling_params.max_tokens is None:
            sampling_params.max_tokens = (
                self.model_config.max_model_len -
                len(decoder_inputs["prompt_token_ids"]))
        sampling_params.update_from_generation_config(
            self.generation_config_fields, eos_token_id)
        if self.tokenizer is not None:
            sampling_params.update_from_tokenizer(
                self.tokenizer.get_lora_tokenizer(lora_request))
    else:
        pooling_params = params.clone()

    # Multimodal related.
    sorted_mm_inputs: "Optional[list[Optional[MultiModalKwargsItem]]]" = None
    sorted_mm_positions: "Optional[list[PlaceholderRange]]" = None
    sorted_mm_hashes: "Optional[list[str]]" = None
    if decoder_inputs["type"] == "multimodal":
        mm_inputs = decoder_inputs["mm_kwargs"]
        mm_positions = decoder_inputs["mm_placeholders"]
        mm_hashes = decoder_inputs.get("mm_hashes")

        # Merge and flatten placeholders, hashes and inputs from dicts to
        # lists, sorted by each item's position in the input sequence.
        sorted_mm_idxs = argsort_mm_positions(mm_positions)

        orig_sorted_mm_inputs = [
            mm_inputs.get_item(modality, idx)
            for modality, idx in sorted_mm_idxs
        ]
        sorted_mm_positions = [
            mm_positions[modality][idx]
            for modality, idx in sorted_mm_idxs
        ]
        sorted_mm_hashes = None if mm_hashes is None else [
            mm_hashes[modality][idx] for modality, idx in sorted_mm_idxs
        ]

        if sorted_mm_hashes is not None:
            # Hashes available: deduplicate via the mm input cache.
            sorted_mm_inputs = self.mm_input_cache_client.get_and_update(
                orig_sorted_mm_inputs,
                sorted_mm_hashes,
            )
        else:
            assert is_list_of(orig_sorted_mm_inputs, MultiModalKwargsItem)
            sorted_mm_inputs = orig_sorted_mm_inputs

    return decoder_inputs.get("prompt"), EngineCoreRequest(
        request_id=request_id,
        prompt_token_ids=decoder_inputs["prompt_token_ids"],
        mm_kwargs=sorted_mm_inputs,
        mm_hashes=sorted_mm_hashes,
        mm_placeholders=sorted_mm_positions,
        sampling_params=sampling_params,
        pooling_params=pooling_params,
        eos_token_id=eos_token_id,
        arrival_time=arrival_time,
        lora_request=lora_request,
        cache_salt=decoder_inputs.get("cache_salt"),
        priority=priority,
        data_parallel_rank=data_parallel_rank,
    )
|
| 349 |
+
|
| 350 |
+
def _validate_model_inputs(self,
                           inputs: "ProcessorInputs",
                           lora_request: "Optional[LoRARequest]" = None):
    """Validate encoder inputs (when present) and decoder inputs."""
    encoder_inputs, decoder_inputs = split_enc_dec_inputs(inputs)

    if encoder_inputs is not None:
        self._validate_model_input(encoder_inputs,
                                   lora_request,
                                   prompt_type="encoder")

    self._validate_model_input(decoder_inputs,
                               lora_request,
                               prompt_type="decoder")
|
| 363 |
+
|
| 364 |
+
def _validate_model_input(
|
| 365 |
+
self,
|
| 366 |
+
prompt_inputs: SingletonInputs,
|
| 367 |
+
lora_request: Optional[LoRARequest],
|
| 368 |
+
*,
|
| 369 |
+
prompt_type: Literal["encoder", "decoder"],
|
| 370 |
+
):
|
| 371 |
+
model_config = self.model_config
|
| 372 |
+
|
| 373 |
+
prompt_ids = prompt_inputs["prompt_token_ids"]
|
| 374 |
+
if not prompt_ids:
|
| 375 |
+
if prompt_type == "encoder" and model_config.is_multimodal_model:
|
| 376 |
+
pass # Mllama may have empty encoder inputs for text-only data
|
| 377 |
+
else:
|
| 378 |
+
raise ValueError(f"The {prompt_type} prompt cannot be empty")
|
| 379 |
+
|
| 380 |
+
if self.model_config.skip_tokenizer_init:
|
| 381 |
+
tokenizer = None
|
| 382 |
+
else:
|
| 383 |
+
tokenizer = self.tokenizer.get_lora_tokenizer(lora_request)
|
| 384 |
+
max_input_id = max(prompt_ids, default=0)
|
| 385 |
+
if max_input_id > tokenizer.max_token_id:
|
| 386 |
+
raise ValueError(
|
| 387 |
+
f"Token id {max_input_id} is out of vocabulary")
|
| 388 |
+
|
| 389 |
+
max_prompt_len = self.model_config.max_model_len
|
| 390 |
+
if len(prompt_ids) > max_prompt_len:
|
| 391 |
+
if prompt_type == "encoder" and model_config.is_multimodal_model:
|
| 392 |
+
mm_registry = self.input_preprocessor.mm_registry
|
| 393 |
+
mm_processor = mm_registry.create_processor(
|
| 394 |
+
model_config,
|
| 395 |
+
tokenizer=tokenizer,
|
| 396 |
+
)
|
| 397 |
+
assert isinstance(mm_processor, EncDecMultiModalProcessor)
|
| 398 |
+
|
| 399 |
+
if mm_processor.pad_dummy_encoder_prompt:
|
| 400 |
+
return # Skip encoder length check for Whisper
|
| 401 |
+
|
| 402 |
+
if model_config.is_multimodal_model:
|
| 403 |
+
suggestion = (
|
| 404 |
+
"Make sure that `max_model_len` is no smaller than the "
|
| 405 |
+
"number of text tokens plus multimodal tokens. For image "
|
| 406 |
+
"inputs, the number of image tokens depends on the number "
|
| 407 |
+
"of images, and possibly their aspect ratios as well.")
|
| 408 |
+
else:
|
| 409 |
+
suggestion = (
|
| 410 |
+
"Make sure that `max_model_len` is no smaller than the "
|
| 411 |
+
"number of text tokens.")
|
| 412 |
+
|
| 413 |
+
raise ValueError(
|
| 414 |
+
f"The {prompt_type} prompt (length {len(prompt_ids)}) is "
|
| 415 |
+
f"longer than the maximum model length of {max_prompt_len}. "
|
| 416 |
+
f"{suggestion}")
|
| 417 |
+
|
| 418 |
+
# TODO: Find out how many placeholder tokens are there so we can
|
| 419 |
+
# check that chunked prefill does not truncate them
|
| 420 |
+
# max_batch_len = self.scheduler_config.max_num_batched_tokens
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/utils.py
ADDED
|
@@ -0,0 +1,832 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import contextlib
|
| 5 |
+
import os
|
| 6 |
+
import weakref
|
| 7 |
+
from collections.abc import Iterator
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from enum import Enum, auto
|
| 10 |
+
from multiprocessing import Process, connection
|
| 11 |
+
from multiprocessing.process import BaseProcess
|
| 12 |
+
from typing import TYPE_CHECKING, Callable, Optional, Union
|
| 13 |
+
from unittest.mock import patch
|
| 14 |
+
|
| 15 |
+
import msgspec
|
| 16 |
+
import zmq
|
| 17 |
+
|
| 18 |
+
from vllm.config import CacheConfig, ParallelConfig, VllmConfig
|
| 19 |
+
from vllm.logger import init_logger
|
| 20 |
+
from vllm.platforms import current_platform
|
| 21 |
+
from vllm.ray.ray_env import get_env_vars_to_copy
|
| 22 |
+
from vllm.utils import get_mp_context, get_open_zmq_ipc_path, zmq_socket_ctx
|
| 23 |
+
from vllm.v1.engine.coordinator import DPCoordinator
|
| 24 |
+
from vllm.v1.executor.abstract import Executor
|
| 25 |
+
from vllm.v1.utils import get_engine_client_zmq_addr, shutdown
|
| 26 |
+
|
| 27 |
+
if TYPE_CHECKING:
|
| 28 |
+
from ray.util.placement_group import PlacementGroup
|
| 29 |
+
|
| 30 |
+
logger = init_logger(__name__)
|
| 31 |
+
|
| 32 |
+
STARTUP_POLL_PERIOD_MS = 10000
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class CoreEngineState(Enum):
    """Lifecycle of a core engine during the startup handshake."""
    NEW = auto()
    CONNECTED = auto()
    READY = auto()
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class CoreEngine:
    """One per data parallel rank, used to track state during handshaking."""

    def __init__(self, index: int = 0, local: bool = True):
        # Whether this engine runs on the local node.
        self.local = local
        # Fixed-width (2-byte, little-endian) identity derived from the rank.
        self.identity = index.to_bytes(2, "little")
        # All engines start in the NEW handshake state.
        self.state = CoreEngineState.NEW
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@dataclass
class EngineZmqAddresses:
    """ZMQ socket addresses exchanged during the engine startup handshake."""
    # ZMQ input socket addresses for each front-end client (requests)
    inputs: list[str]
    # ZMQ output socket addresses for each front-end client (responses)
    outputs: list[str]
    # ZMQ input socket address of DP coordinator if applicable
    coordinator_input: Optional[str] = None
    # ZMQ output socket address of DP coordinator if applicable
    coordinator_output: Optional[str] = None
    # ZMQ socket for front-end to connect to DP coordinator.
    # Not used by engine, just relayed to front-end in handshake response.
    # Only required for external DP LB case.
    frontend_stats_publish_address: Optional[str] = None
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@dataclass
class EngineHandshakeMetadata:
    """Metadata sent to each engine process during startup handshake,
    including addresses of the front-end ZMQ queues that they should
    connect to.
    """
    addresses: "EngineZmqAddresses"
    parallel_config: dict[str, Union[int, str]]
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class CoreEngineProcManager:
    """
    Utility class to handle creation, readiness, and shutdown
    of background processes used by the AsyncLLM and LLMEngine.
    """

    def __init__(
        self,
        target_fn: Callable,
        local_engine_count: int,
        start_index: int,
        local_start_index: int,
        vllm_config: "VllmConfig",
        local_client: bool,
        handshake_address: str,
        executor_class: "type[Executor]",
        log_stats: bool,
        client_handshake_address: Optional[str] = None,
    ):
        context = get_mp_context()
        # Kwargs shared by every engine subprocess.
        common_kwargs = {
            "vllm_config": vllm_config,
            "local_client": local_client,
            "handshake_address": handshake_address,
            "executor_class": executor_class,
            "log_stats": log_stats,
        }
        if client_handshake_address:
            common_kwargs[
                "client_handshake_address"] = client_handshake_address

        self.processes: list[BaseProcess] = []
        local_dp_ranks: list[int] = []
        for offset in range(local_engine_count):
            local_rank = local_start_index + offset
            global_rank = start_index + offset

            # One EngineCore background process per local DP rank.
            local_dp_ranks.append(local_rank)
            self.processes.append(
                context.Process(target=target_fn,
                                name=f"EngineCore_{global_rank}",
                                kwargs=common_kwargs | {
                                    "dp_rank": global_rank,
                                    "local_dp_rank": local_rank,
                                }))

        # Ensure child processes are shut down if this object is GC'd.
        self._finalizer = weakref.finalize(self, shutdown, self.processes)

        data_parallel = vllm_config.parallel_config.data_parallel_size > 1
        try:
            for proc, local_dp_rank in zip(self.processes, local_dp_ranks):
                # With data parallel, scope device visibility per DP rank.
                device_ctx = (set_device_control_env_var(
                    vllm_config, local_dp_rank)
                              if data_parallel else contextlib.nullcontext())
                with device_ctx:
                    proc.start()
        finally:
            # Kill other procs if not all are running.
            if self.finished_procs():
                self.close()

    def close(self):
        """Shutdown all procs."""
        self._finalizer()

    def join_first(self):
        """Wait for any process to exit."""
        connection.wait(proc.sentinel for proc in self.processes)

    def sentinels(self) -> list:
        """Sentinels usable with multiprocessing.connection.wait()."""
        return [proc.sentinel for proc in self.processes]

    def finished_procs(self) -> dict[str, int]:
        """Returns dict of proc name -> exit code for any finished procs."""
        return {
            proc.name: proc.exitcode
            for proc in self.processes if proc.exitcode is not None
        }
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@contextlib.contextmanager
def set_device_control_env_var(vllm_config: VllmConfig,
                               local_dp_rank: int) -> Iterator[None]:
    """
    Temporarily set CUDA_VISIBLE_DEVICES or equivalent
    for engine subprocess.

    The engine with data-parallel rank ``local_dp_rank`` is given the
    contiguous slice of ``world_size`` physical devices starting at
    ``local_dp_rank * world_size``. The variable is restored on exit
    via patch.dict.
    """
    world_size = vllm_config.parallel_config.world_size
    evar = current_platform.device_control_env_var
    first = local_dp_rank * world_size
    last = (local_dp_rank + 1) * world_size
    try:
        device_ids = [
            str(current_platform.device_id_to_physical_device_id(dev))
            for dev in range(first, last)
        ]
    except IndexError as e:
        # Not enough visible devices to cover this rank's slice.
        raise Exception(f"Error setting {evar}: "
                        f"local range: [{first}, "
                        f"{last}) "
                        "base value: "
                        f"\"{os.getenv(evar)}\"") from e
    value = ",".join(device_ids)
    with patch.dict(os.environ, values=((evar, value), )):
        yield
|
| 182 |
+
class CoreEngineActorManager:
    """
    Utility class to handle creation, readiness, and shutdown
    of core engine Ray actors used by the AsyncLLM and LLMEngine.

    Different from CoreEngineProcManager, this class manages
    core engines for both local and remote nodes.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        addresses: EngineZmqAddresses,
        executor_class: type[Executor],
        log_stats: bool,
        placement_groups: Optional[list["PlacementGroup"]] = None,
        local_dp_ranks: Optional[list[int]] = None,
    ):
        """Create one DPEngineCoreActor per data-parallel rank and start it.

        If placement_groups/local_dp_ranks are not supplied, they are
        created via create_dp_placement_groups and owned (and later torn
        down) by this manager.
        """
        import copy

        import ray
        from ray.runtime_env import RuntimeEnv
        from ray.util.scheduling_strategies import (
            PlacementGroupSchedulingStrategy)

        from vllm.v1.engine.core import DPEngineCoreActor

        self.local_engine_actors: list[ray.ActorHandle] = []
        self.remote_engine_actors: list[ray.ActorHandle] = []

        # Propagate a selected subset of the driver's environment to the
        # actor processes.
        env_vars_list = get_env_vars_to_copy(destination="DPEngineCoreActor")
        self.env_vars_dict = {
            name: os.environ[name]
            for name in env_vars_list if name in os.environ
        }
        runtime_env = RuntimeEnv(env_vars=self.env_vars_dict)

        self.addresses = addresses
        self.executor_class = executor_class
        self.log_stats = log_stats
        dp_size = vllm_config.parallel_config.data_parallel_size
        local_engine_count = \
            vllm_config.parallel_config.data_parallel_size_local
        world_size = vllm_config.parallel_config.world_size

        if ray.is_initialized():
            logger.info(
                "Ray is already initialized. Skipping Ray initialization.")
        else:
            ray.init()

        if placement_groups is not None:
            assert local_dp_ranks is not None, (
                "local_dp_ranks must be provided if "
                "placement_groups is provided")
            assert len(placement_groups) == len(local_dp_ranks), (
                "placement_groups and local_dp_ranks must "
                "have the same length")
            logger.info("Using provided placement groups")
            # TODO(rui): validate passed-in placement groups
            # Externally provided groups are not owned by this manager,
            # so they are not registered for removal in close().
            self.created_placement_groups = []
        else:
            placement_groups, local_dp_ranks = \
                CoreEngineActorManager.create_dp_placement_groups(vllm_config)
            self.created_placement_groups = placement_groups
        assert len(placement_groups) == dp_size, (
            "Number of placement groups must match data parallel size")

        self.placement_group_is_local = []
        refs = []
        for index, local_index, pg in zip(range(dp_size), local_dp_ranks,
                                          placement_groups):
            dp_vllm_config = copy.deepcopy(vllm_config)
            dp_vllm_config.parallel_config.placement_group = pg
            # The first data_parallel_size_local ranks are "local" (i.e.
            # colocated with this front-end process).
            local_client = index < local_engine_count
            # Bundle index world_size is the trailing CPU-only bundle of
            # the placement group; the actor itself needs no GPU.
            actor = ray.remote(DPEngineCoreActor).options(
                scheduling_strategy=PlacementGroupSchedulingStrategy(
                    placement_group=pg,
                    placement_group_bundle_index=world_size,
                ),
                runtime_env=runtime_env).remote(vllm_config=dp_vllm_config,
                                                executor_class=executor_class,
                                                log_stats=log_stats,
                                                local_client=local_client,
                                                addresses=addresses,
                                                dp_rank=index,
                                                local_dp_rank=local_index)
            if local_client:
                self.local_engine_actors.append(actor)
            else:
                self.remote_engine_actors.append(actor)
            self.placement_group_is_local.append(local_client)
            refs.append(actor.wait_for_init.remote())

        # Block until every actor has finished initialization, then kick
        # off all of their run loops.
        ray.get(refs)
        self.run_refs = []
        for actor in self.local_engine_actors + self.remote_engine_actors:
            self.run_refs.append(actor.run.remote())

    @staticmethod
    def create_dp_placement_groups(
        vllm_config: VllmConfig
    ) -> tuple[list["PlacementGroup"], list[int]]:
        """
        Create placement groups for data parallel.

        Returns (placement_groups, local_dp_ranks) where local_dp_ranks[i]
        is the rank of placement_groups[i] within its node.
        """

        import ray
        from ray._private.state import available_resources_per_node
        from ray.util.state import list_nodes

        logger.info("Creating placement groups for data parallel")
        dp_master_ip = \
            vllm_config.parallel_config.data_parallel_master_ip
        num_pg_to_create = vllm_config.parallel_config.data_parallel_size
        local_engine_count = \
            vllm_config.parallel_config.data_parallel_size_local

        # Sort so the DP master (head) node comes first.
        nodes = sorted(list_nodes(filters=[("state", "=", "ALIVE")]),
                       key=lambda node: node.node_ip != dp_master_ip)
        assert nodes[0].node_ip == dp_master_ip, (
            "The head node is missing or dead")
        assert len(nodes) == 1 or nodes[1].node_ip != dp_master_ip, (
            "There can only be one head node")

        available_resources = available_resources_per_node()
        world_size = vllm_config.parallel_config.world_size
        placement_groups: list[PlacementGroup] = []
        local_dp_ranks: list[int] = []

        for node in nodes:
            node_ip = node.node_ip
            node_resources = available_resources[node.node_id]
            if "GPU" not in node_resources:
                continue
            # For now, each DP rank can only be assigned to one node
            # TODO(rui): support allocating a single DP rank
            # to multiple nodes
            available_engine_count = int(node_resources["GPU"]) // world_size
            if node_ip == dp_master_ip:
                assert available_engine_count >= local_engine_count, (
                    "Not enough resources to allocate DP ranks "
                    f"on DP master node {node_ip}")
                for i in range(local_engine_count):
                    # Tiny "node:<ip>" resource pins the bundle to the
                    # master node.
                    bundles = [{
                        "GPU": 1.0,
                        "node:" + dp_master_ip: 0.001
                    }] * world_size + [{
                        "CPU": 1.0
                    }]
                    pg = ray.util.placement_group(
                        name=f"dp_rank_{len(placement_groups)}",
                        strategy="STRICT_PACK",
                        bundles=bundles,
                    )
                    placement_groups.append(pg)
                    local_dp_ranks.append(i)
            else:
                for i in range(available_engine_count):
                    if len(placement_groups) == num_pg_to_create:
                        break
                    bundles = [{"GPU": 1.0}] * world_size + [{"CPU": 1.0}]
                    pg = ray.util.placement_group(
                        name=f"dp_rank_{len(placement_groups)}",
                        strategy="STRICT_PACK",
                        bundles=bundles,
                    )
                    placement_groups.append(pg)
                    local_dp_ranks.append(i)
        if len(placement_groups) < num_pg_to_create:
            raise ValueError(
                f"Not enough resources to allocate {num_pg_to_create} "
                "placement groups, only created "
                f"{len(placement_groups)} placement groups. "
                "Available resources: "
                f"{available_resources}")
        return placement_groups, local_dp_ranks

    @staticmethod
    def add_dp_placement_groups(
        old_vllm_config: VllmConfig, new_data_parallel_size: int
    ) -> tuple[list["PlacementGroup"], list[int]]:
        """
        Add placement groups for new data parallel size.

        Creates one group per DP rank being added beyond the old size,
        packing them onto nodes with free GPU capacity (head node first).
        """
        import ray
        from ray._private.state import (available_resources_per_node,
                                        total_resources_per_node)
        from ray.util.state import list_nodes

        old_dp_size = old_vllm_config.parallel_config.data_parallel_size
        num_pg_to_create = new_data_parallel_size - old_dp_size

        if num_pg_to_create <= 0:
            return [], []

        dp_master_ip = old_vllm_config.parallel_config.data_parallel_master_ip
        world_size = old_vllm_config.parallel_config.world_size

        nodes = list_nodes()
        nodes = sorted(nodes, key=lambda node: node.node_ip != dp_master_ip)
        assert nodes[0].node_ip == dp_master_ip, (
            "The first node must be the head node")
        assert len(nodes) == 1 or nodes[1].node_ip != dp_master_ip, (
            "There can only be one head node")

        available_resources = available_resources_per_node()
        total_resources = total_resources_per_node()

        placement_groups = []
        local_dp_ranks = []
        num_pg_created = 0

        for node in nodes:
            if num_pg_created >= num_pg_to_create:
                break

            node_ip = node.node_ip
            node_id = node.node_id
            available_gpus = int(available_resources[node_id]["GPU"])

            # Get total GPUs on this node from the node's resources
            # Ray stores node resources with node ID as key
            total_gpus = int(total_resources[node_id]["GPU"])

            # Calculate used GPUs and used engines on this node
            used_gpus = max(0, total_gpus - available_gpus)
            used_engines_on_node = used_gpus // world_size

            # Calculate how many new engines this node can accommodate
            available_engine_count = available_gpus // world_size

            # Create placement groups for new engines on this node
            for i in range(available_engine_count):
                if num_pg_created >= num_pg_to_create:
                    break

                rank = old_dp_size + num_pg_created

                # Create bundles with node constraint for master node
                if node_ip == dp_master_ip:
                    bundles = [{
                        "GPU": 1.0,
                        "node:" + dp_master_ip: 0.001
                    }] * world_size + [{
                        "CPU": 1.0
                    }]
                else:
                    bundles = [{"GPU": 1.0}] * world_size + [{"CPU": 1.0}]

                pg = ray.util.placement_group(
                    name=f"dp_rank_{rank}",
                    strategy="STRICT_PACK",
                    bundles=bundles,
                )
                placement_groups.append(pg)

                # Local rank starts from the number of engines already used
                # on this node
                local_rank = used_engines_on_node + i
                local_dp_ranks.append(local_rank)
                num_pg_created += 1

        return placement_groups, local_dp_ranks

    def scale_up_elastic_ep(self, cur_vllm_config: VllmConfig,
                            new_data_parallel_size: int) -> None:
        """Add engine actors to grow data parallelism to the new size.

        Mutates cur_vllm_config's data_parallel_size (and
        data_parallel_size_local if any added engine landed on the head
        node) to reflect the new topology.
        """
        import copy

        import ray
        from ray.runtime_env import RuntimeEnv
        from ray.util.scheduling_strategies import (
            PlacementGroupSchedulingStrategy)

        from vllm.v1.engine.core import DPEngineCoreActor

        cur_data_parallel_size = len(self.local_engine_actors) + \
            len(self.remote_engine_actors)

        assert new_data_parallel_size > cur_data_parallel_size, (
            f"New data parallel size {new_data_parallel_size} must be greater "
            f"than current data parallel size {cur_data_parallel_size} "
            "for scale up")

        placement_groups, local_dp_ranks = \
            self.add_dp_placement_groups(
                cur_vllm_config, new_data_parallel_size)

        world_size = cur_vllm_config.parallel_config.world_size
        dp_master_ip = cur_vllm_config.parallel_config.data_parallel_master_ip
        new_local_engines = 0

        # Flag lets the new actors know they were launched by a scale-up.
        runtime_env = RuntimeEnv(env_vars=self.env_vars_dict
                                 | {"VLLM_ELASTIC_EP_SCALE_UP_LAUNCH": "1"})
        for i, (pg,
                local_rank) in enumerate(zip(placement_groups,
                                             local_dp_ranks)):
            rank = cur_data_parallel_size + i
            dp_vllm_config = copy.deepcopy(cur_vllm_config)
            dp_vllm_config.parallel_config.data_parallel_size = \
                new_data_parallel_size
            dp_vllm_config.parallel_config.placement_group = pg

            # Check if this placement group is on the head node
            local_client = any(
                bundle.get("node:" + dp_master_ip, 0) > 0
                for bundle in pg.bundle_specs)

            if local_client:
                new_local_engines += 1
                # Update data_parallel_size_local
                dp_vllm_config.parallel_config.data_parallel_size_local = (
                    cur_vllm_config.parallel_config.data_parallel_size_local +
                    new_local_engines)

            actor = ray.remote(DPEngineCoreActor).options(
                scheduling_strategy=PlacementGroupSchedulingStrategy(
                    placement_group=pg,
                    placement_group_bundle_index=world_size,
                ),
                runtime_env=runtime_env).remote(
                    vllm_config=dp_vllm_config,
                    executor_class=self.executor_class,
                    log_stats=self.log_stats,
                    local_client=local_client,
                    addresses=self.addresses,
                    dp_rank=rank,
                    local_dp_rank=local_rank)

            if local_client:
                self.local_engine_actors.append(actor)
            else:
                self.remote_engine_actors.append(actor)
            self.created_placement_groups.append(pg)
            self.placement_group_is_local.append(local_client)

        # NOTE(review): if every newly added engine is local,
        # len(placement_groups) - new_local_engines == 0, so the negative
        # slice below becomes [-0:] == the entire remote actor list —
        # verify this is intended before relying on scale-up with
        # local-only additions.
        ray.get([
            actor.wait_for_init.remote()
            for actor in (self.local_engine_actors[-new_local_engines:]
                          if new_local_engines > 0 else []) +
            self.remote_engine_actors[-(len(placement_groups) -
                                        new_local_engines):]
        ])

        actors = (self.local_engine_actors[-new_local_engines:]
                  if new_local_engines > 0 else []) + \
            self.remote_engine_actors[-(len(placement_groups) -
                                        new_local_engines):]

        for actor in actors:
            self.run_refs.append(actor.run.remote())

        cur_vllm_config.parallel_config.data_parallel_size = \
            new_data_parallel_size
        # Update old_vllm_config with new data_parallel_size_local if any new
        # local engines were added
        if new_local_engines > 0:
            cur_vllm_config.parallel_config.data_parallel_size_local += \
                new_local_engines

    def scale_down_elastic_ep(self, cur_data_parallel_size: int,
                              new_data_parallel_size: int) -> None:
        """Remove the most recently added engines down to the new size.

        Pops actors and their placement groups in LIFO order; the popped
        placement groups are removed from the Ray cluster. Note the actor
        handles themselves are only dropped, not explicitly killed here.
        """
        import ray
        assert cur_data_parallel_size > new_data_parallel_size, (
            f"cur_data_parallel_size {cur_data_parallel_size} must be greater "
            f"than new_data_parallel_size {new_data_parallel_size} "
            "for scale down")
        for _ in range(cur_data_parallel_size - new_data_parallel_size):
            pg = self.created_placement_groups.pop()
            is_local = self.placement_group_is_local.pop()
            if is_local:
                self.local_engine_actors.pop()
            else:
                self.remote_engine_actors.pop()
            ray.util.remove_placement_group(pg)

    def get_run_refs(self):
        """Return the Ray object refs of every actor's run() loop."""
        return self.run_refs

    def close(self):
        """Kill all engine actors and remove owned placement groups."""
        import ray
        for actor in self.local_engine_actors + self.remote_engine_actors:
            ray.kill(actor)
        for pg in self.created_placement_groups:
            ray.util.remove_placement_group(pg)
+
|
| 569 |
+
@contextlib.contextmanager
def launch_core_engines(
    vllm_config: VllmConfig,
    executor_class: type[Executor],
    log_stats: bool,
    num_api_servers: int = 1,
) -> Iterator[tuple[
        Optional[Union[CoreEngineProcManager, CoreEngineActorManager]],
        Optional[DPCoordinator],
        EngineZmqAddresses,
]]:
    """Launch engine and DP coordinator processes as needed.

    Yields (engine_manager, coordinator, addresses). After the caller's
    body returns, waits for all launched engines to complete their
    startup handshake (except in the Ray backend case, which returns
    immediately after yielding).
    """

    parallel_config = vllm_config.parallel_config
    dp_size = parallel_config.data_parallel_size
    local_engine_count = parallel_config.data_parallel_size_local
    local_start_index = parallel_config.data_parallel_rank_local
    dp_rank = parallel_config.data_parallel_rank
    host = parallel_config.data_parallel_master_ip
    local_engines_only = (parallel_config.data_parallel_hybrid_lb
                          or parallel_config.data_parallel_external_lb)

    # In offline mode there is an LLM instance per DP rank and
    # one core engine per LLM, see
    # examples/offline_inference/data_parallel.py.
    offline_mode = local_start_index is not None

    # client_local_only = True for cases where this front-end
    # sends requests only to colocated engines.
    client_local_only = (offline_mode or local_engines_only
                         or (local_engine_count == dp_size))

    # Set up input and output addresses.
    addresses = EngineZmqAddresses(
        inputs=[
            get_engine_client_zmq_addr(client_local_only, host)
            for _ in range(num_api_servers)
        ],
        outputs=[
            get_engine_client_zmq_addr(client_local_only, host)
            for _ in range(num_api_servers)
        ],
    )

    # Run the DP Coordinator process with rank 0 when in
    # online DP mode.
    run_coordinator = dp_size > 1 and not offline_mode and dp_rank == 0

    if run_coordinator:
        coordinator = DPCoordinator(parallel_config)

        addresses.coordinator_input, addresses.coordinator_output = (
            coordinator.get_engine_socket_addresses())
        addresses.frontend_stats_publish_address = (
            coordinator.get_stats_publish_address())

        logger.info("Started DP Coordinator process (PID: %d)",
                    coordinator.proc.pid)
    else:
        coordinator = None

    if parallel_config.data_parallel_backend == "ray":
        logger.info("Starting ray-based data parallel backend")

        engine_actor_manager = CoreEngineActorManager(
            vllm_config=vllm_config,
            addresses=addresses,
            executor_class=executor_class,
            log_stats=log_stats,
        )

        # Ray actors handshake internally; no wait_for_engine_startup here.
        yield engine_actor_manager, coordinator, addresses
        return

    # Determine which engines this process must handshake with.
    if offline_mode:
        assert local_engine_count == 1
        engines_to_handshake = [CoreEngine(index=dp_rank, local=True)]
    elif dp_rank == 0:
        # Rank 0 holds Coordinator, so it handshakes with all Cores
        # in both external dplb and internal dplb mode.
        # Note this also covers the case where we have zero local engines
        # and rank 0 is headless.
        engines_to_handshake = [
            CoreEngine(index=i, local=(i < local_engine_count))
            for i in range(dp_size)
        ]
    else:
        # Rank > 0 handshakes with just the local cores it is managing.
        assert local_engines_only, (
            "Attempting to launch core_engines from dp_rank > 0, but "
            "found internal DPLB, which is incompatible.")
        engines_to_handshake = [
            CoreEngine(index=i, local=True)
            for i in range(dp_rank, dp_rank + local_engine_count)
        ]

    # Whether the started engines will handshake only with co-located
    # front-end processes. In external_dp_lb mode, ranks > 0 handshake with
    # their co-located frontend and also the rank 0 front-end, and hence this
    # will be False.
    handshake_local_only = offline_mode or local_engine_count == dp_size

    handshake_address = get_engine_client_zmq_addr(
        handshake_local_only, host, parallel_config.data_parallel_rpc_port)

    if local_engines_only and dp_rank > 0:
        assert not handshake_local_only
        # Engines handshake with rank 0 over handshake_address and with
        # this local front-end over a separate IPC path.
        local_handshake_address = get_open_zmq_ipc_path()
        client_handshake_address = local_handshake_address
    else:
        local_handshake_address = handshake_address
        client_handshake_address = None

    with zmq_socket_ctx(local_handshake_address, zmq.ROUTER,
                        bind=True) as handshake_socket:

        from vllm.v1.engine.core import EngineCoreProc

        # Start local engines.
        if local_engine_count:
            local_engine_manager = CoreEngineProcManager(
                EngineCoreProc.run_engine_core,
                vllm_config=vllm_config,
                executor_class=executor_class,
                log_stats=log_stats,
                handshake_address=handshake_address,
                client_handshake_address=client_handshake_address,
                local_client=True,
                local_engine_count=local_engine_count,
                start_index=dp_rank,
                local_start_index=local_start_index or 0)
        else:
            local_engine_manager = None

        yield local_engine_manager, coordinator, addresses

        # Now wait for engines to start.
        wait_for_engine_startup(
            handshake_socket,
            addresses,
            engines_to_handshake,
            parallel_config,
            vllm_config.cache_config,
            local_engine_manager,
            coordinator.proc if coordinator else None,
        )
+
|
| 717 |
+
def wait_for_engine_startup(
    handshake_socket: zmq.Socket,
    addresses: EngineZmqAddresses,
    core_engines: list[CoreEngine],
    parallel_config: ParallelConfig,
    cache_config: CacheConfig,
    proc_manager: Optional[CoreEngineProcManager],
    coord_process: Optional[Process],
):
    """Drive the two-phase (HELLO then READY) startup handshake.

    Each engine sends HELLO (answered with handshake metadata) and then
    READY. Raises RuntimeError if any monitored engine/coordinator process
    exits first, or on any out-of-protocol message.
    """
    # Wait for engine core process(es) to send ready messages.
    local_count = parallel_config.data_parallel_size_local
    remote_count = len(core_engines) - local_count
    # [local, remote] counts
    conn_pending, start_pending = [local_count, remote_count], [0, 0]
    poller = zmq.Poller()
    poller.register(handshake_socket, zmq.POLLIN)

    remote_should_be_headless = not parallel_config.data_parallel_hybrid_lb \
        and not parallel_config.data_parallel_external_lb

    # Also poll process sentinels so we fail fast if a child dies.
    if proc_manager is not None:
        for sentinel in proc_manager.sentinels():
            poller.register(sentinel, zmq.POLLIN)
    if coord_process is not None:
        poller.register(coord_process.sentinel, zmq.POLLIN)
    while any(conn_pending) or any(start_pending):
        events = poller.poll(STARTUP_POLL_PERIOD_MS)
        if not events:
            # Timeout: just log progress and keep waiting.
            if any(conn_pending):
                logger.debug(
                    "Waiting for %d local, %d remote core engine proc(s) "
                    "to connect.", *conn_pending)
            if any(start_pending):
                logger.debug(
                    "Waiting for %d local, %d remote core engine proc(s) "
                    "to start.", *start_pending)
            continue
        if len(events) > 1 or events[0][0] != handshake_socket:
            # One of the local core processes exited.
            finished = proc_manager.finished_procs() if proc_manager else {}
            if coord_process is not None and coord_process.exitcode is not None:
                finished[coord_process.name] = coord_process.exitcode
            raise RuntimeError("Engine core initialization failed. "
                               "See root cause above. "
                               f"Failed core proc(s): {finished}")

        # Receive HELLO and READY messages from the input socket.
        eng_identity, ready_msg_bytes = handshake_socket.recv_multipart()
        # ROUTER identity is the engine's DP rank encoded little-endian.
        eng_index = int.from_bytes(eng_identity, "little")
        engine = next((e for e in core_engines if e.identity == eng_identity),
                      None)
        if engine is None:
            raise RuntimeError(f"Message from engine with unexpected data "
                               f"parallel rank: {eng_index}")
        msg = msgspec.msgpack.decode(ready_msg_bytes)
        status, local, headless = msg["status"], msg["local"], msg["headless"]
        if local != engine.local:
            raise RuntimeError(f"{status} message from "
                               f"{'local' if local else 'remote'} "
                               f"engine {eng_index}, expected it to be "
                               f"{'local' if engine.local else 'remote'}")

        # Remote engines must be headless iff we aren't in hybrid dp lb mode.
        if not local and headless != remote_should_be_headless:
            if headless:
                raise RuntimeError(f"Remote engine {eng_index} must not use "
                                   f"--headless in external or hybrid dp lb "
                                   f"mode")
            else:
                raise RuntimeError(f"Remote engine {eng_index} must use "
                                   f"--headless unless in external or hybrid "
                                   f"dp lb mode")

        if status == "HELLO" and engine.state == CoreEngineState.NEW:

            # Send init message with DP config info.
            init_message = msgspec.msgpack.encode(
                EngineHandshakeMetadata(
                    addresses=addresses,
                    parallel_config={
                        "data_parallel_master_ip":
                        parallel_config.data_parallel_master_ip,
                        "data_parallel_master_port":
                        parallel_config.data_parallel_master_port,
                        "data_parallel_size":
                        parallel_config.data_parallel_size,
                    }))
            handshake_socket.send_multipart((eng_identity, init_message),
                                            copy=False)
            conn_pending[0 if local else 1] -= 1
            start_pending[0 if local else 1] += 1
            engine.state = CoreEngineState.CONNECTED
        elif status == "READY" and engine.state == CoreEngineState.CONNECTED:
            # Setup KV cache config with initialization state from
            # engine core process. Sum values from all engines in DP case.
            num_gpu_blocks = cache_config.num_gpu_blocks or 0
            num_gpu_blocks += msg["num_gpu_blocks"]
            cache_config.num_gpu_blocks = num_gpu_blocks

            # In external DP LB mode, the coordinator address that the
            # front-end procs connect to is obtained from rank 0 via
            # one of the engine handshakes, and passed to the local
            # front-end process in the response from the other.
            if addresses.frontend_stats_publish_address is None:
                addresses.frontend_stats_publish_address = msg.get(
                    "dp_stats_address")

            start_pending[0 if local else 1] -= 1
            engine.state = CoreEngineState.READY
        else:
            raise RuntimeError(f"Unexpected {status} message for "
                               f"{'local' if local else 'remote'} engine "
                               f"{eng_index} in {engine.state} state.")

        logger.debug("%s from %s core engine process %s.", status,
                     "local" if local else "remote", eng_index)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/abstract.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from concurrent.futures import Future
|
| 5 |
+
from typing import Callable, Union
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.distributed as dist
|
| 9 |
+
|
| 10 |
+
from vllm.config import VllmConfig
|
| 11 |
+
from vllm.executor.executor_base import ExecutorBase
|
| 12 |
+
from vllm.executor.uniproc_executor import ( # noqa
|
| 13 |
+
ExecutorWithExternalLauncher as ExecutorWithExternalLauncherV0)
|
| 14 |
+
from vllm.executor.uniproc_executor import ( # noqa
|
| 15 |
+
UniProcExecutor as UniProcExecutorV0)
|
| 16 |
+
from vllm.v1.kv_cache_interface import KVCacheConfig, KVCacheSpec
|
| 17 |
+
from vllm.v1.outputs import ModelRunnerOutput
|
| 18 |
+
|
| 19 |
+
# Signature of the no-argument callback invoked when an executor failure
# is detected (see register_failure_callback below).
FailureCallback = Callable[[], None]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class Executor(ExecutorBase):
    """
    Abstract class for v1 executors, mainly define some methods for v1.
    For methods shared by v0 and v1, define them in ExecutorBase"""

    @staticmethod
    def get_class(vllm_config: VllmConfig) -> type["Executor"]:
        """Resolve the concrete Executor subclass from the configured
        distributed executor backend ("ray", "mp", "uni",
        "external_launcher", or a user-supplied class)."""
        executor_class: type[Executor]
        parallel_config = vllm_config.parallel_config
        distributed_executor_backend = (
            parallel_config.distributed_executor_backend)
        # distributed_executor_backend must be set in VllmConfig.__post_init__
        if isinstance(distributed_executor_backend, type):
            # Caller passed a class directly; validate it implements the
            # executor interface before using it.
            if not issubclass(distributed_executor_backend, ExecutorBase):
                raise TypeError(
                    "distributed_executor_backend must be a subclass of "
                    f"ExecutorBase. Got {distributed_executor_backend}.")
            executor_class = distributed_executor_backend
        elif distributed_executor_backend == "ray":
            # Imported lazily so Ray is only required when selected.
            from vllm.v1.executor.ray_distributed_executor import (  # noqa
                RayDistributedExecutor)
            executor_class = RayDistributedExecutor
        elif distributed_executor_backend == "mp":
            from vllm.v1.executor.multiproc_executor import MultiprocExecutor
            executor_class = MultiprocExecutor
        elif distributed_executor_backend == "uni":
            executor_class = UniProcExecutor
        elif distributed_executor_backend == "external_launcher":
            # TODO: make v1 scheduling deterministic
            # to support external launcher
            executor_class = ExecutorWithExternalLauncher
        else:
            raise ValueError("Unknown distributed executor backend: "
                             f"{distributed_executor_backend}")
        return executor_class

    def initialize_from_config(self,
                               kv_cache_configs: list[KVCacheConfig]) -> None:
        """
        Initialize the KV caches and begin the model execution loop of the
        underlying workers.
        """
        self.collective_rpc("initialize_from_config",
                            args=(kv_cache_configs, ))
        self.collective_rpc("compile_or_warm_up_model")

    def register_failure_callback(self, callback: FailureCallback):
        """
        Register a function to be called if the executor enters a permanent
        failed state.
        """
        # Default implementation is a no-op; subclasses that can detect
        # worker failure (e.g. MultiprocExecutor) override this.
        pass

    def determine_available_memory(self) -> list[int]:  # in bytes
        # One entry per worker, as returned by the collective RPC.
        output = self.collective_rpc("determine_available_memory")
        return output

    def get_kv_cache_specs(self) -> list[dict[str, KVCacheSpec]]:
        # One spec mapping per worker.
        output = self.collective_rpc("get_kv_cache_spec")
        return output

    def execute_model(
        self,
        scheduler_output,
    ) -> Union[ModelRunnerOutput, Future[ModelRunnerOutput]]:
        """Run one model step on all workers and return the output of the
        first worker (all workers produce equivalent output here)."""
        output = self.collective_rpc("execute_model",
                                     args=(scheduler_output, ))
        return output[0]

    @property
    def max_concurrent_batches(self) -> int:
        # Base executors are synchronous: one in-flight batch at a time.
        return 1

    def profile(self, is_start: bool = True):
        # Toggle profiling on all workers (is_start=True starts,
        # False stops).
        self.collective_rpc("profile", args=(is_start, ))
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class UniProcExecutor(UniProcExecutorV0, Executor):
    """Single-process v1 executor.

    All behavior comes from the v0 uniproc executor combined with the v1
    ``Executor`` interface; no overrides are needed.
    """
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class ExecutorWithExternalLauncher(ExecutorWithExternalLauncherV0, Executor):
    """v1 executor for externally launched (e.g. torchrun-style)
    multi-rank deployments."""

    def determine_available_memory(self) -> list[int]:  # in bytes
        # same as determine_num_available_blocks in v0,
        # we need to get the min across all ranks.
        # NOTE(review): assumes super() returns a value usable as a single
        # tensor element (the local rank's available bytes) — confirm
        # against ExecutorWithExternalLauncherV0.
        memory = super().determine_available_memory()
        from vllm.distributed.parallel_state import get_world_group
        # Use the CPU (gloo) group so this works regardless of device
        # backend.
        cpu_group = get_world_group().cpu_group
        memory_tensor = torch.tensor([memory], device="cpu", dtype=torch.int64)
        dist.all_reduce(memory_tensor, group=cpu_group, op=dist.ReduceOp.MIN)
        return [memory_tensor.item()]
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/multiproc_executor.py
ADDED
|
@@ -0,0 +1,606 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
import multiprocessing
|
| 4 |
+
import os
|
| 5 |
+
import pickle
|
| 6 |
+
import signal
|
| 7 |
+
import threading
|
| 8 |
+
import time
|
| 9 |
+
import traceback
|
| 10 |
+
import weakref
|
| 11 |
+
from concurrent.futures import Future, ThreadPoolExecutor
|
| 12 |
+
from dataclasses import dataclass
|
| 13 |
+
from enum import Enum, auto
|
| 14 |
+
from functools import partial
|
| 15 |
+
from multiprocessing.connection import Connection
|
| 16 |
+
from multiprocessing.process import BaseProcess
|
| 17 |
+
from threading import Thread
|
| 18 |
+
from typing import Any, Callable, Optional, Union, cast
|
| 19 |
+
|
| 20 |
+
import cloudpickle
|
| 21 |
+
|
| 22 |
+
import vllm.envs as envs
|
| 23 |
+
from vllm.config import VllmConfig
|
| 24 |
+
from vllm.distributed import (destroy_distributed_environment,
|
| 25 |
+
destroy_model_parallel)
|
| 26 |
+
from vllm.distributed.device_communicators.shm_broadcast import (Handle,
|
| 27 |
+
MessageQueue)
|
| 28 |
+
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
|
| 29 |
+
from vllm.executor.multiproc_worker_utils import (
|
| 30 |
+
set_multiprocessing_worker_envs)
|
| 31 |
+
from vllm.logger import init_logger
|
| 32 |
+
from vllm.utils import (decorate_logs, get_distributed_init_method,
|
| 33 |
+
get_loopback_ip, get_mp_context, get_open_port,
|
| 34 |
+
set_process_title)
|
| 35 |
+
from vllm.v1.executor.abstract import Executor, FailureCallback
|
| 36 |
+
from vllm.v1.outputs import ModelRunnerOutput
|
| 37 |
+
from vllm.worker.worker_base import WorkerWrapperBase
|
| 38 |
+
|
| 39 |
+
logger = init_logger(__name__)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class MultiprocExecutor(Executor):
    """Single-node executor that runs each worker rank in its own
    subprocess, communicating via shared-memory message queues."""

    supports_pp: bool = True

    def _init_executor(self) -> None:
        """Spawn one worker process per rank, wire up the broadcast /
        response message queues, and start the liveness monitor."""
        # Call self.shutdown at exit to clean up
        # and ensure workers will be terminated.
        self._finalizer = weakref.finalize(self, self.shutdown)
        self.is_failed = False
        self.shutdown_event = threading.Event()
        self.failure_callback: Optional[FailureCallback] = None
        self.io_thread_pool: Optional[ThreadPoolExecutor] = None

        self.world_size = self.parallel_config.world_size
        tensor_parallel_size = self.parallel_config.tensor_parallel_size
        pp_parallel_size = self.parallel_config.pipeline_parallel_size
        assert self.world_size == tensor_parallel_size * pp_parallel_size, (
            f"world_size ({self.world_size}) must be equal to the "
            f"tensor_parallel_size ({tensor_parallel_size}) x pipeline"
            f"_parallel_size ({pp_parallel_size}). ")

        # Set multiprocessing envs that are common to V0 and V1
        set_multiprocessing_worker_envs(self.parallel_config)

        # Multiprocessing-based executor does not support multi-node setting.
        # Since it only works for single node, we can use the loopback address
        # get_loopback_ip() for communication.
        distributed_init_method = get_distributed_init_method(
            get_loopback_ip(), get_open_port())

        # Initialize worker and set up message queues for SchedulerOutputs
        # and ModelRunnerOutputs
        max_chunk_bytes = envs.VLLM_MQ_MAX_CHUNK_BYTES_MB * 1024 * 1024
        self.rpc_broadcast_mq = MessageQueue(self.world_size,
                                             self.world_size,
                                             max_chunk_bytes=max_chunk_bytes)
        scheduler_output_handle = self.rpc_broadcast_mq.export_handle()

        # Create workers
        unready_workers: list[UnreadyWorkerProcHandle] = []
        success = False
        try:
            for rank in range(self.world_size):
                unready_workers.append(
                    WorkerProc.make_worker_process(
                        vllm_config=self.vllm_config,
                        local_rank=rank,
                        rank=rank,
                        distributed_init_method=distributed_init_method,
                        input_shm_handle=scheduler_output_handle,
                    ))

            # Workers must be created before wait_for_ready to avoid
            # deadlock, since worker.init_device() does a device sync.
            self.workers = WorkerProc.wait_for_ready(unready_workers)

            # Ensure message queues are ready. Will deadlock if re-ordered
            # Must be kept consistent with the WorkerProc.
            self.rpc_broadcast_mq.wait_until_ready()
            for w in self.workers:
                w.worker_response_mq.wait_until_ready()

            self.start_worker_monitor()
            success = True
        finally:
            if not success:
                # Clean up the worker procs if there was a failure.
                # Close death_writers first to signal workers to exit
                for uw in unready_workers:
                    if uw.death_writer is not None:
                        uw.death_writer.close()
                self._ensure_worker_termination(
                    [uw.proc for uw in unready_workers])

        # For pipeline parallel, we use a thread pool for asynchronous
        # execute_model.
        if self.max_concurrent_batches > 1:
            # Note: must use only 1 IO thread to keep dequeue sequence
            # from the response queue
            # _async_aggregate_workers_output also assumes a single IO thread
            self.io_thread_pool = ThreadPoolExecutor(
                max_workers=1, thread_name_prefix="mp_exec_io")

        self.output_rank = self._get_output_rank()
        self.has_connector = self.vllm_config.kv_transfer_config is not None
        self.kv_output_aggregator = KVOutputAggregator(
            self.parallel_config.world_size)

    def start_worker_monitor(self):
        """Start a daemon thread that watches worker process sentinels and
        triggers shutdown + the failure callback if any worker dies."""
        workers = self.workers
        # Hold only a weak reference so the monitor thread does not keep
        # the executor alive.
        self_ref = weakref.ref(self)

        # Monitors worker process liveness. If any die unexpectedly,
        # logs an error, shuts down the executor and invokes the failure
        # callback to inform the engine.
        def monitor_workers():
            sentinels = [h.proc.sentinel for h in workers]
            died = multiprocessing.connection.wait(sentinels)
            _self = self_ref()
            if not _self or getattr(_self, 'shutting_down', False):
                # Executor already gone or shutting down deliberately.
                return
            _self.is_failed = True
            proc_name = next(h.proc.name for h in workers
                             if h.proc.sentinel == died[0])
            logger.error(
                "Worker proc %s died unexpectedly, "
                "shutting down executor.", proc_name)
            _self.shutdown()
            callback = _self.failure_callback
            if callback is not None:
                # Clear before invoking so the callback fires at most once.
                _self.failure_callback = None
                callback()

        Thread(target=monitor_workers,
               daemon=True,
               name="MultiprocWorkerMonitor").start()

    def register_failure_callback(self, callback: FailureCallback):
        # If failure already happened, invoke immediately instead of
        # storing (the monitor thread has already fired).
        if self.is_failed:
            callback()
        else:
            self.failure_callback = callback

    def execute_model(
        self,
        scheduler_output,
    ) -> Union[ModelRunnerOutput, Future[ModelRunnerOutput]]:
        """Broadcast one model step to the workers.

        Returns a Future when more than one batch may be in flight
        (async scheduling / pipeline parallel), else the output directly.
        """
        non_block = self.max_concurrent_batches > 1

        if not self.has_connector:
            # get output only from a single worker (output_rank)
            (output, ) = self.collective_rpc(
                "execute_model",
                args=(scheduler_output, ),
                unique_reply_rank=self.output_rank,
                non_block=non_block,
                timeout=envs.VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS)
            return output

        # get output from all workers
        outputs = self.collective_rpc(
            "execute_model",
            args=(scheduler_output, ),
            non_block=non_block,
            timeout=envs.VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS)

        # aggregate all workers output to a single output
        if non_block:
            return self.kv_output_aggregator.async_aggregate(
                outputs, self.output_rank)
        return self.kv_output_aggregator.aggregate(outputs, self.output_rank)

    def collective_rpc(self,
                       method: Union[str, Callable],
                       timeout: Optional[float] = None,
                       args: tuple = (),
                       kwargs: Optional[dict] = None,
                       non_block: bool = False,
                       unique_reply_rank: Optional[int] = None) -> list[Any]:
        """Broadcast `method` to all workers over the shared-memory queue
        and collect replies.

        If `unique_reply_rank` is set, only that rank replies. With
        `non_block=True`, replies are Futures dequeued on the single IO
        thread. Raises TimeoutError if `timeout` elapses and RuntimeError
        if a worker reports failure.
        """
        if self.is_failed:
            raise RuntimeError("Executor failed.")

        # Use a monotonic deadline so per-worker dequeues share one budget.
        deadline = None if timeout is None else time.monotonic() + timeout
        kwargs = kwargs or {}

        # NOTE: If the args are heterogeneous, then we pack them into a list,
        # and unpack them in the method of every worker, because every worker
        # knows their own rank.
        try:
            if isinstance(method, str):
                send_method = method
            else:
                # Callables are cloudpickled so lambdas/closures survive
                # the process boundary.
                send_method = cloudpickle.dumps(
                    method, protocol=pickle.HIGHEST_PROTOCOL)
            self.rpc_broadcast_mq.enqueue(
                (send_method, args, kwargs, unique_reply_rank))

            workers = (self.workers[unique_reply_rank],
                       ) if unique_reply_rank is not None else self.workers
            responses = []

            def get_response(w: WorkerProcHandle,
                             dequeue_timeout: Optional[float] = None,
                             cancel_event: Optional[threading.Event] = None):
                status, result = w.worker_response_mq.dequeue(
                    timeout=dequeue_timeout, cancel=cancel_event)

                if status != WorkerProc.ResponseStatus.SUCCESS:
                    raise RuntimeError(
                        f"Worker failed with error '{result}', please check the"
                        " stack trace above for the root cause")
                return result

            for w in workers:
                # Remaining time until the shared deadline.
                dequeue_timeout = None if deadline is None else (
                    deadline - time.monotonic())

                if non_block:
                    result = self.io_thread_pool.submit(  # type: ignore
                        get_response, w, dequeue_timeout, self.shutdown_event)
                else:
                    result = get_response(w, dequeue_timeout)

                responses.append(result)

            return responses
        except TimeoutError as e:
            raise TimeoutError(f"RPC call to {method} timed out.") from e

    @staticmethod
    def _ensure_worker_termination(worker_procs: list[BaseProcess]):
        """Ensure that all worker processes are terminated. Assumes workers have
        received termination requests. Waits for processing, then sends
        termination and kill signals if needed."""

        def wait_for_termination(procs, timeout):
            if not time:
                # If we are in late stage shutdown, the interpreter may replace
                # `time` with `None`.
                return all(not proc.is_alive() for proc in procs)
            start_time = time.time()
            while time.time() - start_time < timeout:
                if all(not proc.is_alive() for proc in procs):
                    return True
                time.sleep(0.1)
            return False

        # Send SIGTERM if still running
        active_procs = [proc for proc in worker_procs if proc.is_alive()]
        for p in active_procs:
            p.terminate()
        if not wait_for_termination(active_procs, 4):
            # Send SIGKILL if still running
            active_procs = [p for p in active_procs if p.is_alive()]
            for p in active_procs:
                p.kill()

    def shutdown(self):
        """Properly shut down the executor and its workers"""
        # Idempotent: guarded by the shutting_down flag (also checked by
        # the monitor thread).
        if not getattr(self, 'shutting_down', False):
            self.shutting_down = True
            self.shutdown_event.set()

            if self.io_thread_pool is not None:
                self.io_thread_pool.shutdown(wait=False, cancel_futures=True)
                self.io_thread_pool = None

            if workers := getattr(self, 'workers', None):
                for w in workers:
                    # Close death_writer to signal child processes to exit
                    if w.death_writer is not None:
                        w.death_writer.close()
                        w.death_writer = None
                    w.worker_response_mq = None
                self._ensure_worker_termination([w.proc for w in workers])

        self.rpc_broadcast_mq = None

    def check_health(self) -> None:
        # Raises (via collective_rpc) if any worker is unhealthy or
        # unresponsive within 10s.
        self.collective_rpc("check_health", timeout=10)
        return

    @property
    def max_concurrent_batches(self) -> int:
        if self.scheduler_config.async_scheduling:
            # Async scheduling overlaps scheduling with execution:
            # at most two batches in flight.
            return 2
        return self.parallel_config.pipeline_parallel_size

    def _get_output_rank(self) -> int:
        # Only returns ModelRunnerOutput from TP rank=0 and PP rank=-1
        # (the first TP worker of the last PP stage).
        # Example:
        # Assuming TP=8, PP=4, then the world_size=32
        # 0-7, PP rank 0
        # 8-15, PP rank 1
        # 16-23, PP rank 2
        # 24-31, PP rank 3
        # so world_size - tp_size = 32 - 8 = 24 should be PP rank = -1 (i.e. 3)
        return self.world_size - self.parallel_config.tensor_parallel_size
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
@dataclass
class UnreadyWorkerProcHandle:
    """WorkerProcess handle before READY."""
    # The spawned worker subprocess.
    proc: BaseProcess
    # Global rank of this worker.
    rank: int
    # Parent end of the pipe on which the worker sends its READY message.
    ready_pipe: Connection
    # Parent-held write end of the death pipe; closing it signals the
    # child to exit.
    death_writer: Optional[Connection] = None
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
@dataclass
class WorkerProcHandle:
    """Handle to a worker process that has completed startup."""
    # The worker subprocess.
    proc: BaseProcess
    # Global rank of this worker.
    rank: int
    worker_response_mq: MessageQueue  # The worker process writes to this MQ
    # Parent-held write end of the death pipe; closing it signals the
    # child to exit.
    death_writer: Optional[Connection] = None

    @classmethod
    def from_unready_handle(
            cls, unready_handle: UnreadyWorkerProcHandle,
            worker_response_mq: MessageQueue) -> "WorkerProcHandle":
        """Promote an unready handle to a ready one, attaching the
        response queue received in the worker's READY message."""
        return cls(
            proc=unready_handle.proc,
            rank=unready_handle.rank,
            worker_response_mq=worker_response_mq,
            death_writer=unready_handle.death_writer,
        )
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
class WorkerProc:
    """Wrapper that runs one Worker in a separate process."""

    READY_STR = "READY"

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_rank: int,
        rank: int,
        distributed_init_method: str,
        input_shm_handle: Handle,
    ):
        """Runs in the child process: construct the wrapped worker, set up
        message queues, initialize the device, and load model weights."""
        self.rank = rank
        wrapper = WorkerWrapperBase(vllm_config=vllm_config, rpc_rank=rank)
        # TODO: move `init_worker` to executor level as a collective rpc call
        all_kwargs: list[dict] = [
            {} for _ in range(vllm_config.parallel_config.world_size)
        ]
        # The first TP rank of each PP stage acts as driver worker.
        is_driver_worker = (
            rank % vllm_config.parallel_config.tensor_parallel_size == 0)
        all_kwargs[rank] = {
            "vllm_config": vllm_config,
            "local_rank": local_rank,
            "rank": rank,
            "distributed_init_method": distributed_init_method,
            "is_driver_worker": is_driver_worker,
        }
        wrapper.init_worker(all_kwargs)
        self.worker = wrapper

        # Build a human-readable process title/log prefix like
        # "VllmWorker PP1_TP0" (parts omitted when that dimension is 1).
        pp_size = vllm_config.parallel_config.pipeline_parallel_size
        tp_size = vllm_config.parallel_config.tensor_parallel_size
        pp_str = f"PP{rank // tp_size}" if pp_size > 1 else ""
        tp_str = f"TP{rank % tp_size}" if tp_size > 1 else ""
        suffix = f"{pp_str}{'_' if pp_str and tp_str else ''}{tp_str}"
        process_name = "VllmWorker"
        if suffix:
            set_process_title(suffix, append=True)
            process_name = f"{process_name} {suffix}"
        decorate_logs(process_name)

        # Initialize MessageQueue for receiving SchedulerOutput
        self.rpc_broadcast_mq = MessageQueue.create_from_handle(
            input_shm_handle, self.worker.rank)

        # Initializes a message queue for sending the model output
        self.worker_response_mq = MessageQueue(1, 1)

        # Initialize device and loads weights
        self.worker.init_device()
        self.worker.load_model()

    @staticmethod
    def make_worker_process(
            vllm_config: VllmConfig,
            local_rank: int,
            rank: int,
            distributed_init_method: str,
            input_shm_handle,  # Receive SchedulerOutput
    ) -> UnreadyWorkerProcHandle:
        """Runs in the parent: spawn one worker subprocess and return an
        unready handle (readiness is confirmed later via wait_for_ready)."""
        context = get_mp_context()
        # (reader, writer)
        reader, writer = context.Pipe(duplex=False)

        # Create death pipe to detect parent process exit
        death_reader, death_writer = context.Pipe(duplex=False)

        process_kwargs = {
            "vllm_config": vllm_config,
            "local_rank": local_rank,
            "rank": rank,
            "distributed_init_method": distributed_init_method,
            "input_shm_handle": input_shm_handle,
            "ready_pipe": (reader, writer),
            "death_pipe": death_reader,
        }
        # Run EngineCore busy loop in background process.
        proc = context.Process(target=WorkerProc.worker_main,
                               kwargs=process_kwargs,
                               name=f"VllmWorker-{rank}",
                               daemon=True)

        proc.start()
        # Parent no longer needs the write end of the ready pipe.
        writer.close()
        # Keep death_writer open in parent - when parent exits,
        # death_reader in child will get EOFError
        return UnreadyWorkerProcHandle(proc, rank, reader, death_writer)

    @staticmethod
    def wait_for_ready(
        unready_proc_handles: list[UnreadyWorkerProcHandle]
    ) -> list[WorkerProcHandle]:
        """Block until every worker reports READY on its pipe, returning
        ready handles ordered by rank. Raises if any worker fails or
        exits before reporting."""

        e = Exception("WorkerProc initialization failed due to "
                      "an exception in a background process. "
                      "See stack trace for root cause.")

        pipes = {handle.ready_pipe: handle for handle in unready_proc_handles}
        ready_proc_handles: list[Optional[WorkerProcHandle]] = (
            [None] * len(unready_proc_handles))
        while pipes:
            # Wait on whichever pipes become readable first.
            ready = multiprocessing.connection.wait(pipes.keys())
            for pipe in ready:
                assert isinstance(pipe, Connection)
                try:
                    # Wait until the WorkerProc is ready.
                    unready_proc_handle = pipes.pop(pipe)
                    response: dict[str, Any] = pipe.recv()
                    if response["status"] != "READY":
                        raise e

                    # Extract the message queue handle.
                    worker_response_mq = MessageQueue.create_from_handle(
                        response["handle"], 0)
                    # Place by rank so the returned list is rank-ordered.
                    ready_proc_handles[unready_proc_handle.rank] = (
                        WorkerProcHandle.from_unready_handle(
                            unready_proc_handle, worker_response_mq))

                except EOFError:
                    # Worker died before sending READY; its own traceback
                    # was already logged, so suppress the chained context.
                    e.__suppress_context__ = True
                    raise e from None

                finally:
                    # Close connection.
                    pipe.close()

        return cast(list[WorkerProcHandle], ready_proc_handles)

    def shutdown(self):
        # Drop queue references and tear down the distributed state.
        self.rpc_broadcast_mq = None
        self.worker_response_mq = None
        destroy_model_parallel()
        destroy_distributed_environment()

    @staticmethod
    def worker_main(*args, **kwargs):
        """ Worker initialization and execution loops.
        This runs a background process """

        # Signal handler used for graceful termination.
        # SystemExit exception is only raised once to allow this and worker
        # processes to terminate without error
        shutdown_requested = False

        def signal_handler(signum, frame):
            nonlocal shutdown_requested
            if not shutdown_requested:
                shutdown_requested = True
                raise SystemExit()

        # Either SIGTERM or SIGINT will terminate the worker
        signal.signal(signal.SIGTERM, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)

        worker = None
        # tuple[Connection, Connection]
        reader, ready_writer = kwargs.pop("ready_pipe")
        death_pipe = kwargs.pop("death_pipe", None)

        # Start death monitoring thread if death_pipe is provided
        if death_pipe is not None:

            def monitor_parent_death():
                try:
                    # This will block until parent process exits (pipe closes)
                    death_pipe.recv()
                except EOFError:
                    # Parent process has exited, terminate this worker
                    logger.info("Parent process exited, terminating worker")
                    # Send signal to self to trigger clean shutdown
                    os.kill(os.getpid(), signal.SIGTERM)
                except Exception as e:
                    logger.warning("Death monitoring error: %s", e)

            death_monitor = Thread(target=monitor_parent_death,
                                   daemon=True,
                                   name="WorkerDeathMonitor")
            death_monitor.start()

        try:
            # Child does not use the read end of the ready pipe.
            reader.close()
            worker = WorkerProc(*args, **kwargs)

            # Send READY once we know everything is loaded
            ready_writer.send({
                "status":
                WorkerProc.READY_STR,
                "handle":
                worker.worker_response_mq.export_handle(),
            })

            # Ensure message queues are ready. Will deadlock if re-ordered.
            # Must be kept consistent with the Executor
            worker.rpc_broadcast_mq.wait_until_ready()
            worker.worker_response_mq.wait_until_ready()
            ready_writer.close()
            # Sentinel: None marks startup complete for the except branch.
            ready_writer = None

            worker.worker_busy_loop()

        except Exception:
            # NOTE: if an Exception arises in busy_loop, we send
            # a FAILURE message over the MQ RPC to notify the Executor,
            # which triggers system shutdown.
            # TODO(rob): handle case where the MQ itself breaks.

            if ready_writer is not None:
                logger.exception("WorkerProc failed to start.")
            else:
                logger.exception("WorkerProc failed.")

            # The parent sends a SIGTERM to all worker processes if
            # any worker dies. Set this value so we don't re-throw
            # SystemExit() to avoid zmq exceptions in __del__.
            shutdown_requested = True

        finally:
            if ready_writer is not None:
                ready_writer.close()
            if death_pipe is not None:
                death_pipe.close()
            # Clean up once worker exits busy loop
            if worker is not None:
                worker.shutdown()

    class ResponseStatus(Enum):
        # Outcome tag prepended to every response on the worker MQ.
        SUCCESS = auto()
        FAILURE = auto()

    def worker_busy_loop(self):
        """Main busy loop for Multiprocessing Workers"""
        while True:
            method, args, kwargs, output_rank = self.rpc_broadcast_mq.dequeue()

            try:
                if isinstance(method, str):
                    func = getattr(self.worker, method)
                elif isinstance(method, bytes):
                    # Callable was cloudpickled by the executor; bind it
                    # to this worker.
                    func = partial(cloudpickle.loads(method), self.worker)
                output = func(*args, **kwargs)
            except Exception as e:
                # Notes have been introduced in python 3.11
                if hasattr(e, "add_note"):
                    e.add_note(traceback.format_exc())
                logger.exception("WorkerProc hit an exception.")
                # exception might not be serializable, so we convert it to
                # string, only for logging purpose.
                if output_rank is None or self.rank == output_rank:
                    self.worker_response_mq.enqueue(
                        (WorkerProc.ResponseStatus.FAILURE, str(e)))
                continue

            # Only the designated rank (or all ranks when output_rank is
            # None) replies on the response queue.
            if output_rank is None or self.rank == output_rank:
                self.worker_response_mq.enqueue(
                    (WorkerProc.ResponseStatus.SUCCESS, output))
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/ray_distributed_executor.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from concurrent.futures import Future
|
| 5 |
+
from typing import Optional, Union
|
| 6 |
+
|
| 7 |
+
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
|
| 8 |
+
from vllm.executor.ray_distributed_executor import ( # noqa
|
| 9 |
+
RayDistributedExecutor as RayDistributedExecutorV0)
|
| 10 |
+
from vllm.logger import init_logger
|
| 11 |
+
from vllm.v1.engine import ReconfigureDistributedRequest, ReconfigureRankType
|
| 12 |
+
from vllm.v1.executor.abstract import Executor
|
| 13 |
+
from vllm.v1.outputs import ModelRunnerOutput
|
| 14 |
+
|
| 15 |
+
logger = init_logger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class FutureWrapper(Future):
    """Adapts a list of Ray output refs to the Future interface that the
    top level (core busy loop) expects from .execute_model(): a .result()
    call that blocks and returns a single output.

    When an aggregator is supplied, result() gathers and aggregates the
    outputs of all workers; otherwise only the first worker's output is
    returned.
    """

    def __init__(self, refs, aggregator: Optional[KVOutputAggregator] = None):
        super().__init__()
        self.refs = refs
        self.aggregator = aggregator

    def result(self, timeout=None):
        if timeout is not None:
            raise NotImplementedError("timeout is not supported")

        if self.aggregator is not None:
            gathered = [ref.get() for ref in self.refs]
            return self.aggregator.aggregate(gathered, output_rank=0)

        return self.refs[0].get()
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class RayDistributedExecutor(RayDistributedExecutorV0, Executor):
    """Ray distributed executor using Ray Compiled Graphs."""

    supports_pp: bool = True

    def _init_executor(self) -> None:
        super()._init_executor()

        # KV connector setup: an aggregator is always built, but it is
        # only consulted when a kv_transfer_config is configured.
        self.has_connector = self.vllm_config.kv_transfer_config is not None
        self.kv_output_aggregator = KVOutputAggregator(
            self.parallel_config.world_size)

    @property
    def max_concurrent_batches(self) -> int:
        """Number of batches that may execute concurrently: PP size, or 2
        when async scheduling is enabled."""
        return (2 if self.scheduler_config.async_scheduling else
                self.parallel_config.pipeline_parallel_size)

    def execute_model(
        self,
        scheduler_output,
    ) -> Union[ModelRunnerOutput, Future[ModelRunnerOutput]]:
        """Execute the model on the Ray workers.

        Args:
            scheduler_output: The scheduler output to execute.

        Returns:
            The model runner output, or a Future of it when multiple
            batches may be in flight (pipeline parallelism).
        """
        # Lazily build the compiled DAG on first use.
        if self.forward_dag is None:  # type: ignore
            self.forward_dag = self._compiled_ray_dag(enable_asyncio=False)

        refs = self.forward_dag.execute(scheduler_output)  # type: ignore

        # With a KV connector, every worker's output must be gathered and
        # aggregated; without one, a single worker's output suffices.
        aggregator = self.kv_output_aggregator if self.has_connector else None

        if self.max_concurrent_batches == 1:
            # No PP in flight: block here until the result is available.
            if aggregator is None:
                return refs[0].get()
            return aggregator.aggregate([ref.get() for ref in refs])

        # With PP, return a future immediately so that the scheduler can
        # yield to the next batch.
        return FutureWrapper(refs, aggregator)

    def reinitialize_distributed(
            self, reconfig_request: ReconfigureDistributedRequest) -> None:
        self._run_workers("reinitialize_distributed", reconfig_request)
        if (reconfig_request.new_data_parallel_rank ==
                ReconfigureRankType.SHUTDOWN_CURRENT_RANK):
            self.shutdown()
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/loggers.py
ADDED
|
@@ -0,0 +1,695 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import logging
|
| 5 |
+
import time
|
| 6 |
+
from abc import ABC, abstractmethod
|
| 7 |
+
from typing import Callable, Optional, Union
|
| 8 |
+
|
| 9 |
+
import prometheus_client
|
| 10 |
+
|
| 11 |
+
from vllm.config import SupportsMetricsInfo, VllmConfig
|
| 12 |
+
from vllm.logger import init_logger
|
| 13 |
+
from vllm.v1.core.kv_cache_utils import PrefixCachingMetrics
|
| 14 |
+
from vllm.v1.engine import FinishReason
|
| 15 |
+
from vllm.v1.metrics.prometheus import unregister_vllm_metrics
|
| 16 |
+
from vllm.v1.metrics.stats import IterationStats, SchedulerStats
|
| 17 |
+
from vllm.v1.spec_decode.metrics import SpecDecodingLogging, SpecDecodingProm
|
| 18 |
+
|
| 19 |
+
logger = init_logger(__name__)
|
| 20 |
+
|
| 21 |
+
# Factory signature for user-supplied stat loggers:
# (vllm_config, engine_index) -> StatLoggerBase instance.
StatLoggerFactory = Callable[[VllmConfig, int], "StatLoggerBase"]
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class StatLoggerBase(ABC):
    """Interface for logging metrics.

    API users may define custom loggers that implement this interface.
    However, note that the `SchedulerStats` and `IterationStats` classes
    are not considered stable interfaces and may change in future versions.
    """

    @abstractmethod
    def __init__(self, vllm_config: VllmConfig, engine_index: int = 0):
        """Build a logger bound to a single engine index."""

    @abstractmethod
    def record(self,
               scheduler_stats: Optional[SchedulerStats],
               iteration_stats: Optional[IterationStats],
               engine_idx: int = 0):
        """Consume one step's scheduler and/or iteration stats."""

    @abstractmethod
    def log_engine_initialized(self):
        """Hook invoked once engine initialization has completed."""

    def log(self):  # noqa
        """Optional periodic hook; the default implementation is a no-op."""
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class LoggingStatLogger(StatLoggerBase):
    """Logs engine throughput/usage summaries via the Python logger.

    `record()` accumulates per-iteration token counts and keeps the most
    recent scheduler snapshot; `log()` (invoked periodically elsewhere)
    computes throughputs over the elapsed interval, emits one summary
    line, and resets the interval counters.
    """

    def __init__(self, vllm_config: VllmConfig, engine_index: int = 0):
        self.engine_index = engine_index
        self.vllm_config = vllm_config
        # Start the first logging interval now (monotonic clock).
        self._reset(time.monotonic())
        # Most recent scheduler snapshot; reported by log().
        self.last_scheduler_stats = SchedulerStats()
        # Prefix cache metrics. This cannot be reset.
        # TODO: Make the interval configurable.
        self.prefix_caching_metrics = PrefixCachingMetrics()
        self.spec_decoding_logging = SpecDecodingLogging()
        # Throughputs from the previous interval, used by log() to
        # suppress output when the system is idle across two intervals.
        self.last_prompt_throughput: float = 0.0
        self.last_generation_throughput: float = 0.0

    def _reset(self, now: float) -> None:
        """Start a new logging interval at monotonic time `now`."""
        self.last_log_time = now

        # Tracked stats over current local logging interval.
        self.num_prompt_tokens: int = 0
        self.num_generation_tokens: int = 0

    def _track_iteration_stats(self, iteration_stats: IterationStats):
        # Save tracked stats for token counters.
        self.num_prompt_tokens += iteration_stats.num_prompt_tokens
        self.num_generation_tokens += iteration_stats.num_generation_tokens

    def _get_throughput(self, tracked_stats: int, now: float) -> float:
        """Return tokens/s over the current interval (0.0 if no time passed)."""
        # Compute summary metrics for tracked stats
        delta_time = now - self.last_log_time
        if delta_time <= 0.0:
            return 0.0
        return float(tracked_stats / delta_time)

    def record(self,
               scheduler_stats: Optional[SchedulerStats],
               iteration_stats: Optional[IterationStats],
               engine_idx: int = 0):
        """Accumulate one step's stats; actual printing happens in log()."""

        if iteration_stats:
            self._track_iteration_stats(iteration_stats)

        if scheduler_stats is not None:
            self.prefix_caching_metrics.observe(
                scheduler_stats.prefix_cache_stats)

            if scheduler_stats.spec_decoding_stats is not None:
                self.spec_decoding_logging.observe(
                    scheduler_stats.spec_decoding_stats)

            self.last_scheduler_stats = scheduler_stats

    def log(self) -> None:
        """Emit one interval summary line and reset the interval counters."""
        now = time.monotonic()
        # Throughputs must be computed before _reset() clears the counters.
        prompt_throughput = self._get_throughput(self.num_prompt_tokens, now)
        generation_throughput = self._get_throughput(
            self.num_generation_tokens, now)

        self._reset(now)

        scheduler_stats = self.last_scheduler_stats

        log_fn = logger.info
        if not any(
            (prompt_throughput, generation_throughput,
             self.last_prompt_throughput, self.last_generation_throughput)):
            # Avoid log noise on an idle production system
            log_fn = logger.debug
        self.last_generation_throughput = generation_throughput
        self.last_prompt_throughput = prompt_throughput

        # Format and print output.
        log_fn(
            "Engine %03d: "
            "Avg prompt throughput: %.1f tokens/s, "
            "Avg generation throughput: %.1f tokens/s, "
            "Running: %d reqs, Waiting: %d reqs, "
            "GPU KV cache usage: %.1f%%, "
            "Prefix cache hit rate: %.1f%%",
            self.engine_index,
            prompt_throughput,
            generation_throughput,
            scheduler_stats.num_running_reqs,
            scheduler_stats.num_waiting_reqs,
            scheduler_stats.kv_cache_usage * 100,
            self.prefix_caching_metrics.hit_rate * 100,
        )
        self.spec_decoding_logging.log(log_fn=log_fn)

    def log_engine_initialized(self):
        """Log the GPU block count once the engine has been initialized."""
        if self.vllm_config.cache_config.num_gpu_blocks:
            logger.info(
                "Engine %03d: vllm cache_config_info with initialization "
                "after num_gpu_blocks is: %d", self.engine_index,
                self.vllm_config.cache_config.num_gpu_blocks)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class PrometheusStatLogger(StatLoggerBase):
|
| 149 |
+
_gauge_cls = prometheus_client.Gauge
|
| 150 |
+
_counter_cls = prometheus_client.Counter
|
| 151 |
+
_histogram_cls = prometheus_client.Histogram
|
| 152 |
+
_spec_decoding_cls = SpecDecodingProm
|
| 153 |
+
|
| 154 |
+
def __init__(self,
|
| 155 |
+
vllm_config: VllmConfig,
|
| 156 |
+
engine_indexes: Optional[list[int]] = None):
|
| 157 |
+
if engine_indexes is None:
|
| 158 |
+
engine_indexes = [0]
|
| 159 |
+
self.engine_indexes = engine_indexes
|
| 160 |
+
|
| 161 |
+
unregister_vllm_metrics()
|
| 162 |
+
self.vllm_config = vllm_config
|
| 163 |
+
# Use this flag to hide metrics that were deprecated in
|
| 164 |
+
# a previous release and which will be removed future
|
| 165 |
+
self.show_hidden_metrics = \
|
| 166 |
+
vllm_config.observability_config.show_hidden_metrics
|
| 167 |
+
|
| 168 |
+
labelnames = ["model_name", "engine"]
|
| 169 |
+
model_name = vllm_config.model_config.served_model_name
|
| 170 |
+
max_model_len = vllm_config.model_config.max_model_len
|
| 171 |
+
|
| 172 |
+
if (len(self.engine_indexes) > 1
|
| 173 |
+
and vllm_config.speculative_config is not None):
|
| 174 |
+
raise NotImplementedError("Prometheus metrics with Spec Decoding "
|
| 175 |
+
"with >1 EngineCore per AsyncLLM is not "
|
| 176 |
+
"supported yet.")
|
| 177 |
+
spec_decode_labelvalues = [
|
| 178 |
+
vllm_config.model_config.served_model_name,
|
| 179 |
+
str(self.engine_indexes[0])
|
| 180 |
+
]
|
| 181 |
+
self.spec_decoding_prom = self._spec_decoding_cls(
|
| 182 |
+
vllm_config.speculative_config, labelnames,
|
| 183 |
+
spec_decode_labelvalues)
|
| 184 |
+
|
| 185 |
+
#
|
| 186 |
+
# Scheduler state
|
| 187 |
+
#
|
| 188 |
+
gauge_scheduler_running = self._gauge_cls(
|
| 189 |
+
name="vllm:num_requests_running",
|
| 190 |
+
documentation="Number of requests in model execution batches.",
|
| 191 |
+
multiprocess_mode="mostrecent",
|
| 192 |
+
labelnames=labelnames)
|
| 193 |
+
self.gauge_scheduler_running = make_per_engine(gauge_scheduler_running,
|
| 194 |
+
engine_indexes,
|
| 195 |
+
model_name)
|
| 196 |
+
|
| 197 |
+
gauge_scheduler_waiting = self._gauge_cls(
|
| 198 |
+
name="vllm:num_requests_waiting",
|
| 199 |
+
documentation="Number of requests waiting to be processed.",
|
| 200 |
+
multiprocess_mode="mostrecent",
|
| 201 |
+
labelnames=labelnames)
|
| 202 |
+
self.gauge_scheduler_waiting = make_per_engine(gauge_scheduler_waiting,
|
| 203 |
+
engine_indexes,
|
| 204 |
+
model_name)
|
| 205 |
+
|
| 206 |
+
#
|
| 207 |
+
# GPU cache
|
| 208 |
+
#
|
| 209 |
+
# Deprecated in 0.9 - Renamed as vllm:kv_cache_usage_perc
|
| 210 |
+
# TODO: in 0.10, only enable if show_hidden_metrics=True
|
| 211 |
+
gauge_gpu_cache_usage = self._gauge_cls(
|
| 212 |
+
name="vllm:gpu_cache_usage_perc",
|
| 213 |
+
documentation=(
|
| 214 |
+
"GPU KV-cache usage. 1 means 100 percent usage."
|
| 215 |
+
"DEPRECATED: Use vllm:kv_cache_usage_perc instead."),
|
| 216 |
+
multiprocess_mode="mostrecent",
|
| 217 |
+
labelnames=labelnames)
|
| 218 |
+
self.gauge_gpu_cache_usage = make_per_engine(gauge_gpu_cache_usage,
|
| 219 |
+
engine_indexes,
|
| 220 |
+
model_name)
|
| 221 |
+
|
| 222 |
+
# Deprecated in 0.9 - Renamed as vllm:prefix_cache_queries
|
| 223 |
+
# TODO: in 0.10, only enable if show_hidden_metrics=True
|
| 224 |
+
counter_gpu_prefix_cache_queries = self._counter_cls(
|
| 225 |
+
name="vllm:gpu_prefix_cache_queries",
|
| 226 |
+
documentation=(
|
| 227 |
+
"GPU prefix cache queries, in terms of number of queried"
|
| 228 |
+
"tokens. DEPRECATED: Use vllm:prefix_cache_queries instead."),
|
| 229 |
+
labelnames=labelnames)
|
| 230 |
+
self.counter_gpu_prefix_cache_queries = make_per_engine(
|
| 231 |
+
counter_gpu_prefix_cache_queries, engine_indexes, model_name)
|
| 232 |
+
|
| 233 |
+
# Deprecated in 0.9 - Renamed as vllm:prefix_cache_hits
|
| 234 |
+
# TODO: in 0.10, only enable if show_hidden_metrics=True
|
| 235 |
+
counter_gpu_prefix_cache_hits = self._counter_cls(
|
| 236 |
+
name="vllm:gpu_prefix_cache_hits",
|
| 237 |
+
documentation=(
|
| 238 |
+
"GPU prefix cache hits, in terms of number of cached "
|
| 239 |
+
"tokens. DEPRECATED: Use vllm:prefix_cache_hits instead."),
|
| 240 |
+
labelnames=labelnames)
|
| 241 |
+
self.counter_gpu_prefix_cache_hits = make_per_engine(
|
| 242 |
+
counter_gpu_prefix_cache_hits, engine_indexes, model_name)
|
| 243 |
+
|
| 244 |
+
gauge_kv_cache_usage = self._gauge_cls(
|
| 245 |
+
name="vllm:kv_cache_usage_perc",
|
| 246 |
+
documentation="KV-cache usage. 1 means 100 percent usage.",
|
| 247 |
+
labelnames=labelnames)
|
| 248 |
+
self.gauge_kv_cache_usage = make_per_engine(gauge_kv_cache_usage,
|
| 249 |
+
engine_indexes, model_name)
|
| 250 |
+
|
| 251 |
+
counter_prefix_cache_queries = self._counter_cls(
|
| 252 |
+
name="vllm:prefix_cache_queries",
|
| 253 |
+
documentation=(
|
| 254 |
+
"Prefix cache queries, in terms of number of queried tokens."),
|
| 255 |
+
labelnames=labelnames)
|
| 256 |
+
self.counter_prefix_cache_queries = make_per_engine(
|
| 257 |
+
counter_prefix_cache_queries, engine_indexes, model_name)
|
| 258 |
+
|
| 259 |
+
counter_prefix_cache_hits = self._counter_cls(
|
| 260 |
+
name="vllm:prefix_cache_hits",
|
| 261 |
+
documentation=(
|
| 262 |
+
"Prefix cache hits, in terms of number of cached tokens."),
|
| 263 |
+
labelnames=labelnames)
|
| 264 |
+
self.counter_prefix_cache_hits = make_per_engine(
|
| 265 |
+
counter_prefix_cache_hits, engine_indexes, model_name)
|
| 266 |
+
|
| 267 |
+
#
|
| 268 |
+
# Counters
|
| 269 |
+
#
|
| 270 |
+
counter_num_preempted_reqs = self._counter_cls(
|
| 271 |
+
name="vllm:num_preemptions",
|
| 272 |
+
documentation="Cumulative number of preemption from the engine.",
|
| 273 |
+
labelnames=labelnames)
|
| 274 |
+
self.counter_num_preempted_reqs = make_per_engine(
|
| 275 |
+
counter_num_preempted_reqs, engine_indexes, model_name)
|
| 276 |
+
|
| 277 |
+
counter_prompt_tokens = self._counter_cls(
|
| 278 |
+
name="vllm:prompt_tokens",
|
| 279 |
+
documentation="Number of prefill tokens processed.",
|
| 280 |
+
labelnames=labelnames)
|
| 281 |
+
self.counter_prompt_tokens = make_per_engine(counter_prompt_tokens,
|
| 282 |
+
engine_indexes,
|
| 283 |
+
model_name)
|
| 284 |
+
|
| 285 |
+
counter_generation_tokens = self._counter_cls(
|
| 286 |
+
name="vllm:generation_tokens",
|
| 287 |
+
documentation="Number of generation tokens processed.",
|
| 288 |
+
labelnames=labelnames)
|
| 289 |
+
self.counter_generation_tokens = make_per_engine(
|
| 290 |
+
counter_generation_tokens, engine_indexes, model_name)
|
| 291 |
+
|
| 292 |
+
self.counter_request_success: dict[FinishReason, dict[
|
| 293 |
+
int, prometheus_client.Counter]] = {}
|
| 294 |
+
counter_request_success_base = self._counter_cls(
|
| 295 |
+
name="vllm:request_success",
|
| 296 |
+
documentation="Count of successfully processed requests.",
|
| 297 |
+
labelnames=labelnames + ["finished_reason"])
|
| 298 |
+
for reason in FinishReason:
|
| 299 |
+
self.counter_request_success[reason] = {
|
| 300 |
+
idx:
|
| 301 |
+
counter_request_success_base.labels(model_name, str(idx),
|
| 302 |
+
str(reason))
|
| 303 |
+
for idx in engine_indexes
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
#
|
| 307 |
+
# Histograms of counts
|
| 308 |
+
#
|
| 309 |
+
histogram_num_prompt_tokens_request = self._histogram_cls(
|
| 310 |
+
name="vllm:request_prompt_tokens",
|
| 311 |
+
documentation="Number of prefill tokens processed.",
|
| 312 |
+
buckets=build_1_2_5_buckets(max_model_len),
|
| 313 |
+
labelnames=labelnames)
|
| 314 |
+
self.histogram_num_prompt_tokens_request = make_per_engine(
|
| 315 |
+
histogram_num_prompt_tokens_request, engine_indexes, model_name)
|
| 316 |
+
|
| 317 |
+
histogram_num_generation_tokens_request = self._histogram_cls(
|
| 318 |
+
name="vllm:request_generation_tokens",
|
| 319 |
+
documentation="Number of generation tokens processed.",
|
| 320 |
+
buckets=build_1_2_5_buckets(max_model_len),
|
| 321 |
+
labelnames=labelnames)
|
| 322 |
+
self.histogram_num_generation_tokens_request = make_per_engine(
|
| 323 |
+
histogram_num_generation_tokens_request, engine_indexes,
|
| 324 |
+
model_name)
|
| 325 |
+
|
| 326 |
+
# TODO: This metric might be incorrect in case of using multiple
|
| 327 |
+
# api_server counts which uses prometheus mp.
|
| 328 |
+
# See: https://github.com/vllm-project/vllm/pull/18053
|
| 329 |
+
histogram_iteration_tokens = self._histogram_cls(
|
| 330 |
+
name="vllm:iteration_tokens_total",
|
| 331 |
+
documentation="Histogram of number of tokens per engine_step.",
|
| 332 |
+
buckets=[
|
| 333 |
+
1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384
|
| 334 |
+
],
|
| 335 |
+
labelnames=labelnames)
|
| 336 |
+
self.histogram_iteration_tokens = make_per_engine(
|
| 337 |
+
histogram_iteration_tokens, engine_indexes, model_name)
|
| 338 |
+
|
| 339 |
+
histogram_max_num_generation_tokens_request = self._histogram_cls(
|
| 340 |
+
name="vllm:request_max_num_generation_tokens",
|
| 341 |
+
documentation=
|
| 342 |
+
"Histogram of maximum number of requested generation tokens.",
|
| 343 |
+
buckets=build_1_2_5_buckets(max_model_len),
|
| 344 |
+
labelnames=labelnames)
|
| 345 |
+
self.histogram_max_num_generation_tokens_request = make_per_engine(
|
| 346 |
+
histogram_max_num_generation_tokens_request, engine_indexes,
|
| 347 |
+
model_name)
|
| 348 |
+
|
| 349 |
+
histogram_n_request = self._histogram_cls(
|
| 350 |
+
name="vllm:request_params_n",
|
| 351 |
+
documentation="Histogram of the n request parameter.",
|
| 352 |
+
buckets=[1, 2, 5, 10, 20],
|
| 353 |
+
labelnames=labelnames)
|
| 354 |
+
self.histogram_n_request = make_per_engine(histogram_n_request,
|
| 355 |
+
engine_indexes, model_name)
|
| 356 |
+
|
| 357 |
+
histogram_max_tokens_request = self._histogram_cls(
|
| 358 |
+
name="vllm:request_params_max_tokens",
|
| 359 |
+
documentation="Histogram of the max_tokens request parameter.",
|
| 360 |
+
buckets=build_1_2_5_buckets(max_model_len),
|
| 361 |
+
labelnames=labelnames)
|
| 362 |
+
self.histogram_max_tokens_request = make_per_engine(
|
| 363 |
+
histogram_max_tokens_request, engine_indexes, model_name)
|
| 364 |
+
|
| 365 |
+
#
|
| 366 |
+
# Histogram of timing intervals
|
| 367 |
+
#
|
| 368 |
+
histogram_time_to_first_token = self._histogram_cls(
|
| 369 |
+
name="vllm:time_to_first_token_seconds",
|
| 370 |
+
documentation="Histogram of time to first token in seconds.",
|
| 371 |
+
buckets=[
|
| 372 |
+
0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25, 0.5,
|
| 373 |
+
0.75, 1.0, 2.5, 5.0, 7.5, 10.0, 20.0, 40.0, 80.0, 160.0, 640.0,
|
| 374 |
+
2560.0
|
| 375 |
+
],
|
| 376 |
+
labelnames=labelnames)
|
| 377 |
+
self.histogram_time_to_first_token = make_per_engine(
|
| 378 |
+
histogram_time_to_first_token, engine_indexes, model_name)
|
| 379 |
+
|
| 380 |
+
histogram_time_per_output_token = self._histogram_cls(
|
| 381 |
+
name="vllm:time_per_output_token_seconds",
|
| 382 |
+
documentation="Histogram of time per output token in seconds.",
|
| 383 |
+
buckets=[
|
| 384 |
+
0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75,
|
| 385 |
+
1.0, 2.5, 5.0, 7.5, 10.0, 20.0, 40.0, 80.0
|
| 386 |
+
],
|
| 387 |
+
labelnames=labelnames)
|
| 388 |
+
self.histogram_time_per_output_token = make_per_engine(
|
| 389 |
+
histogram_time_per_output_token, engine_indexes, model_name)
|
| 390 |
+
|
| 391 |
+
request_latency_buckets = [
|
| 392 |
+
0.3, 0.5, 0.8, 1.0, 1.5, 2.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0,
|
| 393 |
+
40.0, 50.0, 60.0, 120.0, 240.0, 480.0, 960.0, 1920.0, 7680.0
|
| 394 |
+
]
|
| 395 |
+
histogram_e2e_time_request = self._histogram_cls(
|
| 396 |
+
name="vllm:e2e_request_latency_seconds",
|
| 397 |
+
documentation="Histogram of e2e request latency in seconds.",
|
| 398 |
+
buckets=request_latency_buckets,
|
| 399 |
+
labelnames=labelnames)
|
| 400 |
+
self.histogram_e2e_time_request = make_per_engine(
|
| 401 |
+
histogram_e2e_time_request, engine_indexes, model_name)
|
| 402 |
+
|
| 403 |
+
histogram_queue_time_request = self._histogram_cls(
|
| 404 |
+
name="vllm:request_queue_time_seconds",
|
| 405 |
+
documentation=
|
| 406 |
+
"Histogram of time spent in WAITING phase for request.",
|
| 407 |
+
buckets=request_latency_buckets,
|
| 408 |
+
labelnames=labelnames)
|
| 409 |
+
self.histogram_queue_time_request = make_per_engine(
|
| 410 |
+
histogram_queue_time_request, engine_indexes, model_name)
|
| 411 |
+
|
| 412 |
+
histogram_inference_time_request = self._histogram_cls(
|
| 413 |
+
name="vllm:request_inference_time_seconds",
|
| 414 |
+
documentation=
|
| 415 |
+
"Histogram of time spent in RUNNING phase for request.",
|
| 416 |
+
buckets=request_latency_buckets,
|
| 417 |
+
labelnames=labelnames)
|
| 418 |
+
self.histogram_inference_time_request = make_per_engine(
|
| 419 |
+
histogram_inference_time_request, engine_indexes, model_name)
|
| 420 |
+
|
| 421 |
+
histogram_prefill_time_request = self._histogram_cls(
|
| 422 |
+
name="vllm:request_prefill_time_seconds",
|
| 423 |
+
documentation=
|
| 424 |
+
"Histogram of time spent in PREFILL phase for request.",
|
| 425 |
+
buckets=request_latency_buckets,
|
| 426 |
+
labelnames=labelnames)
|
| 427 |
+
self.histogram_prefill_time_request = make_per_engine(
|
| 428 |
+
histogram_prefill_time_request, engine_indexes, model_name)
|
| 429 |
+
|
| 430 |
+
histogram_decode_time_request = self._histogram_cls(
|
| 431 |
+
name="vllm:request_decode_time_seconds",
|
| 432 |
+
documentation=
|
| 433 |
+
"Histogram of time spent in DECODE phase for request.",
|
| 434 |
+
buckets=request_latency_buckets,
|
| 435 |
+
labelnames=labelnames)
|
| 436 |
+
self.histogram_decode_time_request = make_per_engine(
|
| 437 |
+
histogram_decode_time_request, engine_indexes, model_name)
|
| 438 |
+
|
| 439 |
+
#
|
| 440 |
+
# LoRA metrics
|
| 441 |
+
#
|
| 442 |
+
|
| 443 |
+
# TODO: This metric might be incorrect in case of using multiple
|
| 444 |
+
# api_server counts which uses prometheus mp.
|
| 445 |
+
self.gauge_lora_info: Optional[prometheus_client.Gauge] = None
|
| 446 |
+
if vllm_config.lora_config is not None:
|
| 447 |
+
if len(self.engine_indexes) > 1:
|
| 448 |
+
raise NotImplementedError(
|
| 449 |
+
"LoRA in DP mode is not supported yet.")
|
| 450 |
+
self.labelname_max_lora = "max_lora"
|
| 451 |
+
self.labelname_waiting_lora_adapters = "waiting_lora_adapters"
|
| 452 |
+
self.labelname_running_lora_adapters = "running_lora_adapters"
|
| 453 |
+
self.max_lora = vllm_config.lora_config.max_loras
|
| 454 |
+
self.gauge_lora_info = \
|
| 455 |
+
self._gauge_cls(
|
| 456 |
+
name="vllm:lora_requests_info",
|
| 457 |
+
documentation="Running stats on lora requests.",
|
| 458 |
+
multiprocess_mode="sum",
|
| 459 |
+
labelnames=[
|
| 460 |
+
self.labelname_max_lora,
|
| 461 |
+
self.labelname_waiting_lora_adapters,
|
| 462 |
+
self.labelname_running_lora_adapters,
|
| 463 |
+
],
|
| 464 |
+
)
|
| 465 |
+
|
| 466 |
+
def log_metrics_info(self, type: str, config_obj: SupportsMetricsInfo):
    """Expose a config object's metadata as a Prometheus "info" metric.

    Args:
        type: Which config family is being logged; only "cache_config"
            is currently recognized (any other value trips the assert).
        config_obj: Object providing metrics_info() -> dict of
            label-name -> label-value pairs.
    """
    metrics_info = config_obj.metrics_info()
    # Placeholder so "engine" is part of the gauge's labelnames; the
    # real per-engine value is filled in below.
    metrics_info["engine"] = ""

    name, documentation = None, None
    if type == "cache_config":
        name = "vllm:cache_config_info"
        documentation = "Information of the LLMEngine CacheConfig"
    assert name is not None, f"Unknown metrics info type {type}"

    # Info type metrics are syntactic sugar for a gauge permanently set to 1
    # Since prometheus multiprocessing mode does not support Info, emulate
    # info here with a gauge.
    info_gauge = self._gauge_cls(
        name=name,
        documentation=documentation,
        multiprocess_mode="mostrecent",
        labelnames=metrics_info.keys(),
    )
    for engine_index in self.engine_indexes:
        # Re-fetch so each engine gets its own unshared label dict.
        metrics_info = config_obj.metrics_info()
        metrics_info["engine"] = str(engine_index)
        info_gauge.labels(**metrics_info).set(1)
def record(self,
           scheduler_stats: Optional[SchedulerStats],
           iteration_stats: Optional[IterationStats],
           engine_idx: int = 0):
    """Log to prometheus.

    Args:
        scheduler_stats: Point-in-time scheduler state (queue depths,
            KV-cache usage, prefix-cache stats); skipped when None.
        iteration_stats: Per-iteration token/latency stats; when None
            only the scheduler gauges/counters above are updated.
        engine_idx: Which engine's labelled metric child to update.
    """
    if scheduler_stats is not None:
        # Gauges: current queue depths for this engine.
        self.gauge_scheduler_running[engine_idx].set(
            scheduler_stats.num_running_reqs)
        self.gauge_scheduler_waiting[engine_idx].set(
            scheduler_stats.num_waiting_reqs)

        # Same value published under both names (old and new gauge;
        # presumably kept for backwards compatibility - see metric
        # definitions earlier in this file).
        self.gauge_gpu_cache_usage[engine_idx].set(
            scheduler_stats.kv_cache_usage)
        self.gauge_kv_cache_usage[engine_idx].set(
            scheduler_stats.kv_cache_usage)

        # Prefix-cache hit/query counters, again under two names.
        self.counter_gpu_prefix_cache_queries[engine_idx].inc(
            scheduler_stats.prefix_cache_stats.queries)
        self.counter_gpu_prefix_cache_hits[engine_idx].inc(
            scheduler_stats.prefix_cache_stats.hits)

        self.counter_prefix_cache_queries[engine_idx].inc(
            scheduler_stats.prefix_cache_stats.queries)
        self.counter_prefix_cache_hits[engine_idx].inc(
            scheduler_stats.prefix_cache_stats.hits)

        if scheduler_stats.spec_decoding_stats is not None:
            self.spec_decoding_prom.observe(
                scheduler_stats.spec_decoding_stats)

    if iteration_stats is None:
        return

    # Per-iteration token counters.
    self.counter_num_preempted_reqs[engine_idx].inc(
        iteration_stats.num_preempted_reqs)
    self.counter_prompt_tokens[engine_idx].inc(
        iteration_stats.num_prompt_tokens)
    self.counter_generation_tokens[engine_idx].inc(
        iteration_stats.num_generation_tokens)
    self.histogram_iteration_tokens[engine_idx].observe(
        iteration_stats.num_prompt_tokens + \
        iteration_stats.num_generation_tokens)

    # Per-request/iteration latency and sampling-parameter histograms.
    for max_gen_tokens in iteration_stats.max_num_generation_tokens_iter:
        self.histogram_max_num_generation_tokens_request[
            engine_idx].observe(max_gen_tokens)
    for n_param in iteration_stats.n_params_iter:
        self.histogram_n_request[engine_idx].observe(n_param)
    for ttft in iteration_stats.time_to_first_tokens_iter:
        self.histogram_time_to_first_token[engine_idx].observe(ttft)
    for tpot in iteration_stats.time_per_output_tokens_iter:
        self.histogram_time_per_output_token[engine_idx].observe(tpot)

    # One observation per request that finished this iteration.
    for finished_request in iteration_stats.finished_requests:
        self.counter_request_success[
            finished_request.finish_reason][engine_idx].inc()
        self.histogram_e2e_time_request[engine_idx].observe(
            finished_request.e2e_latency)
        self.histogram_queue_time_request[engine_idx].observe(
            finished_request.queued_time)
        self.histogram_prefill_time_request[engine_idx].observe(
            finished_request.prefill_time)
        self.histogram_inference_time_request[engine_idx].observe(
            finished_request.inference_time)
        self.histogram_decode_time_request[engine_idx].observe(
            finished_request.decode_time)
        self.histogram_num_prompt_tokens_request[engine_idx].observe(
            finished_request.num_prompt_tokens)
        self.histogram_num_generation_tokens_request[engine_idx].observe(
            finished_request.num_generation_tokens)
        # Truthiness check: 0/None max_tokens is not observed.
        if finished_request.max_tokens_param:
            self.histogram_max_tokens_request[engine_idx].observe(
                finished_request.max_tokens_param)

    if self.gauge_lora_info is not None:
        # LoRA adapter names are packed into comma-separated label
        # values; the gauge value itself is just a timestamp.
        running_lora_adapters = \
            ",".join(iteration_stats.running_lora_adapters.keys())
        waiting_lora_adapters = \
            ",".join(iteration_stats.waiting_lora_adapters.keys())
        lora_info_labels = {
            self.labelname_running_lora_adapters: running_lora_adapters,
            self.labelname_waiting_lora_adapters: waiting_lora_adapters,
            self.labelname_max_lora: self.max_lora,
        }
        self.gauge_lora_info.labels(**lora_info_labels)\
            .set_to_current_time()
def log_engine_initialized(self):
    # Emit the one-off config "info" metrics once the engine is up.
    self.log_metrics_info("cache_config", self.vllm_config.cache_config)
# Union of the concrete prometheus_client metric types that
# make_per_engine() can fan out into per-engine labelled children.
PromMetric = Union[
    prometheus_client.Gauge,
    prometheus_client.Counter,
    prometheus_client.Histogram,
]
def make_per_engine(metric: PromMetric, engine_idxs: list[int],
                    model_name: str) -> dict[int, PromMetric]:
    """Create one labelled child of ``metric`` per engine index.

    Every child carries the (model_name, engine_index) label pair, so
    all engines report into the same metric family under distinct labels.
    """
    per_engine: dict[int, PromMetric] = {}
    for idx in engine_idxs:
        per_engine[idx] = metric.labels(model_name, str(idx))
    return per_engine
def build_buckets(mantissa_lst: list[int], max_value: int) -> list[int]:
    """Generate histogram buckets as mantissa * 10**k series.

    Walks increasing powers of ten, multiplying each by every mantissa
    in order, and stops (returning what has been collected so far) at
    the first product that exceeds ``max_value``.
    """
    buckets: list[int] = []
    scale = 1
    while True:
        for mantissa in mantissa_lst:
            candidate = mantissa * scale
            if candidate > max_value:
                return buckets
            buckets.append(candidate)
        scale *= 10
def build_1_2_5_buckets(max_value: int) -> list[int]:
    """Return the 1-2-5 series of histogram buckets up to max_value.

    Example:
    >>> build_1_2_5_buckets(100)
    [1, 2, 5, 10, 20, 50, 100]
    """
    return build_buckets([1, 2, 5], max_value)
class StatLoggerManager:
    """
    StatLoggerManager:
        Logging happens at the level of the EngineCore (per scheduler).
         * DP: >1 EngineCore per AsyncLLM - loggers for each EngineCore.
         * With Local Logger, just make N copies for N EngineCores.
         * With Prometheus, we need a single logger with N "labels"

    This class abstracts away this implementation detail from
    the AsyncLLM, allowing the AsyncLLM to just call .record()
    and .log() to a simple interface.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        engine_idxs: Optional[list[int]] = None,
        custom_stat_loggers: Optional[list[StatLoggerFactory]] = None,
    ):
        # Default to a single engine (index 0) when none are given.
        self.engine_idxs = engine_idxs if engine_idxs else [0]

        factories: list[StatLoggerFactory]
        if custom_stat_loggers is not None:
            factories = custom_stat_loggers
        else:
            factories = []
            # Only pay for per-iteration local logging at INFO level.
            if logger.isEnabledFor(logging.INFO):
                factories.append(LoggingStatLogger)

        # engine_idx: StatLogger
        self.per_engine_logger_dict: dict[int, list[StatLoggerBase]] = {}
        prometheus_factory = PrometheusStatLogger
        for engine_idx in self.engine_idxs:
            loggers: list[StatLoggerBase] = []
            for logger_factory in factories:
                # If we get a custom prometheus logger, use that
                # instead. This is typically used for the ray case.
                if (isinstance(logger_factory, type)
                        and issubclass(logger_factory, PrometheusStatLogger)):
                    prometheus_factory = logger_factory
                    continue
                loggers.append(logger_factory(vllm_config,
                                              engine_idx))  # type: ignore
            self.per_engine_logger_dict[engine_idx] = loggers

        # For Prometheus, need to share the metrics between EngineCores.
        # Each EngineCore's metrics are expressed as a unique label.
        # NOTE(review): this passes the raw engine_idxs argument
        # (possibly None), not self.engine_idxs - confirm intended.
        self.prometheus_logger = prometheus_factory(vllm_config, engine_idxs)

    def record(
        self,
        scheduler_stats: Optional[SchedulerStats],
        iteration_stats: Optional[IterationStats],
        engine_idx: Optional[int] = None,
    ):
        # Fan stats out to this engine's local loggers and the shared
        # prometheus logger.
        if engine_idx is None:
            engine_idx = 0

        per_engine_loggers = self.per_engine_logger_dict[engine_idx]
        # NOTE(review): the loop variable shadows the module-level
        # `logger`; harmless within this method, but easy to trip over.
        for logger in per_engine_loggers:
            logger.record(scheduler_stats, iteration_stats, engine_idx)

        self.prometheus_logger.record(scheduler_stats, iteration_stats,
                                      engine_idx)

    def log(self):
        # Periodic log tick: only the local (non-prometheus) loggers
        # have anything to flush.
        for per_engine_loggers in self.per_engine_logger_dict.values():
            for logger in per_engine_loggers:
                logger.log()

    def log_engine_initialized(self):
        # One-off startup logging across all managed loggers.
        self.prometheus_logger.log_engine_initialized()

        for per_engine_loggers in self.per_engine_logger_dict.values():
            for logger in per_engine_loggers:
                logger.log_engine_initialized()
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/prometheus.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import tempfile
|
| 6 |
+
from typing import Optional
|
| 7 |
+
|
| 8 |
+
from prometheus_client import REGISTRY, CollectorRegistry, multiprocess
|
| 9 |
+
|
| 10 |
+
from vllm.logger import init_logger
|
| 11 |
+
|
| 12 |
+
logger = init_logger(__name__)
|
| 13 |
+
|
| 14 |
+
# Global temporary directory for prometheus multiprocessing
|
| 15 |
+
_prometheus_multiproc_dir: Optional[tempfile.TemporaryDirectory] = None
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def setup_multiprocess_prometheus():
    """Set up prometheus multiprocessing directory if not already configured.

    """
    global _prometheus_multiproc_dir

    if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
        # Respect a user-provided directory, but warn: stale metric
        # files from earlier runs will skew the numbers.
        logger.warning("Found PROMETHEUS_MULTIPROC_DIR was set by user. "
                       "This directory must be wiped between vLLM runs or "
                       "you will find inaccurate metrics. Unset the variable "
                       "and vLLM will properly handle cleanup.")
        return

    # Make TemporaryDirectory for prometheus multiprocessing
    # Note: global TemporaryDirectory will be automatically
    # cleaned up upon exit.
    _prometheus_multiproc_dir = tempfile.TemporaryDirectory()
    os.environ["PROMETHEUS_MULTIPROC_DIR"] = _prometheus_multiproc_dir.name
    logger.debug("Created PROMETHEUS_MULTIPROC_DIR at %s",
                 _prometheus_multiproc_dir.name)
def get_prometheus_registry():
    """Get the appropriate prometheus registry based on multiprocessing
    configuration.

    Returns:
        Registry: A prometheus registry
    """
    if os.getenv("PROMETHEUS_MULTIPROC_DIR") is None:
        # Single-process mode: use the default global registry.
        return REGISTRY

    logger.debug("Using multiprocess registry for prometheus metrics")
    # Aggregate the per-process metric files written under
    # PROMETHEUS_MULTIPROC_DIR into a fresh registry.
    registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(registry)
    return registry
def unregister_vllm_metrics():
    """Unregister any existing vLLM collectors from the prometheus registry.

    This is useful for testing and CI/CD where metrics may be registered
    multiple times across test runs.

    Also, in case of multiprocess, we need to unregister the metrics from the
    global registry.
    """
    # NOTE: relies on prometheus_client's private _collector_to_names
    # mapping to enumerate registered collectors.
    registry = REGISTRY
    vllm_collectors = [
        collector for collector in list(registry._collector_to_names)
        if hasattr(collector, "_name") and "vllm" in collector._name
    ]
    for collector in vllm_collectors:
        registry.unregister(collector)
def shutdown_prometheus():
    """Shutdown prometheus metrics.

    Marks this process's multiprocess metric files as dead so the
    aggregating collector stops reporting live gauges for it. No-op
    when vLLM did not create its own PROMETHEUS_MULTIPROC_DIR.
    """
    tmp_dir = _prometheus_multiproc_dir
    if tmp_dir is None:
        return
    try:
        pid = os.getpid()
        # FIX: mark_process_dead() expects the directory *path* string;
        # _prometheus_multiproc_dir is a TemporaryDirectory object, which
        # is not os.PathLike, so pass its .name instead of the object.
        multiprocess.mark_process_dead(pid, tmp_dir.name)
        logger.debug("Marked Prometheus metrics for process %d as dead", pid)
    except Exception as e:
        # Best-effort cleanup: never let metric teardown crash shutdown.
        logger.error("Error during metrics cleanup: %s", str(e))
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/ray_wrappers.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
import time
|
| 4 |
+
from typing import Optional, Union
|
| 5 |
+
|
| 6 |
+
from vllm.v1.metrics.loggers import PrometheusStatLogger
|
| 7 |
+
from vllm.v1.spec_decode.metrics import SpecDecodingProm
|
| 8 |
+
|
| 9 |
+
try:
|
| 10 |
+
from ray.util import metrics as ray_metrics
|
| 11 |
+
from ray.util.metrics import Metric
|
| 12 |
+
except ImportError:
|
| 13 |
+
ray_metrics = None
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class RayPrometheusMetric:
    """Common base for the Ray-backed metric wrappers below.

    Subclasses assign ``self.metric`` to a concrete ray.util.metrics
    metric; labels() mimics prometheus_client's chained ``.labels(...)``
    API by setting Ray default tags and returning self.
    """

    def __init__(self):
        if ray_metrics is None:
            raise ImportError(
                "RayPrometheusMetric requires Ray to be installed.")

        # Set by subclasses to the underlying Ray metric instance.
        self.metric: Optional[Metric] = None

    def labels(self, *labels, **labelskwargs):
        """Bind label values as Ray default tags; returns self for chaining."""
        if labelskwargs:
            # Ray tag values must be strings; coerce everything else.
            for k, v in labelskwargs.items():
                if not isinstance(v, str):
                    labelskwargs[k] = str(v)

            self.metric.set_default_tags(labelskwargs)

        if labels:
            # Positional labels map onto the metric's tag keys in order,
            # mirroring prometheus_client's positional .labels(...) call.
            # NOTE: reads Ray's private _tag_keys attribute.
            if len(labels) != len(self.metric._tag_keys):
                raise ValueError(
                    "Number of labels must match the number of tag keys. "
                    f"Expected {len(self.metric._tag_keys)}, got {len(labels)}"
                )

            self.metric.set_default_tags(
                dict(zip(self.metric._tag_keys, labels)))

        return self
class RayGaugeWrapper(RayPrometheusMetric):
    """Wraps around ray.util.metrics.Gauge to provide same API as
    prometheus_client.Gauge"""

    def __init__(self,
                 name: str,
                 documentation: Optional[str] = "",
                 labelnames: Optional[list[str]] = None,
                 multiprocess_mode: Optional[str] = ""):
        # FIX: run the base-class Ray availability check, so a missing
        # Ray raises a clear ImportError instead of an AttributeError
        # on ray_metrics being None below.
        super().__init__()
        # All Ray metrics are keyed by WorkerId, so multiprocess modes like
        # "mostrecent", "all", "sum" do not apply. This logic can be manually
        # implemented at the observability layer (Prometheus/Grafana).
        del multiprocess_mode
        labelnames_tuple = tuple(labelnames) if labelnames else None
        self.metric = ray_metrics.Gauge(name=name,
                                        description=documentation,
                                        tag_keys=labelnames_tuple)

    def set(self, value: Union[int, float]):
        return self.metric.set(value)

    def set_to_current_time(self):
        # ray metrics doesn't have set_to_current time, https://docs.ray.io/en/latest/_modules/ray/util/metrics.html
        return self.metric.set(time.time())
class RayCounterWrapper(RayPrometheusMetric):
    """Wraps around ray.util.metrics.Counter to provide same API as
    prometheus_client.Counter"""

    def __init__(self,
                 name: str,
                 documentation: Optional[str] = "",
                 labelnames: Optional[list[str]] = None):
        # FIX: run the base-class Ray availability check (clear
        # ImportError when Ray is absent), consistent with the base.
        super().__init__()
        labelnames_tuple = tuple(labelnames) if labelnames else None
        self.metric = ray_metrics.Counter(name=name,
                                          description=documentation,
                                          tag_keys=labelnames_tuple)

    def inc(self, value: Union[int, float] = 1.0):
        # Skip zero increments: they are a semantic no-op.
        if value == 0:
            return
        return self.metric.inc(value)
class RayHistogramWrapper(RayPrometheusMetric):
    """Wraps around ray.util.metrics.Histogram to provide same API as
    prometheus_client.Histogram"""

    def __init__(self,
                 name: str,
                 documentation: Optional[str] = "",
                 labelnames: Optional[list[str]] = None,
                 buckets: Optional[list[float]] = None):
        # FIX: run the base-class Ray availability check (clear
        # ImportError when Ray is absent), consistent with the base.
        super().__init__()
        labelnames_tuple = tuple(labelnames) if labelnames else None
        # Ray calls the histogram bucket upper limits "boundaries".
        boundaries = buckets if buckets else []
        self.metric = ray_metrics.Histogram(name=name,
                                            description=documentation,
                                            tag_keys=labelnames_tuple,
                                            boundaries=boundaries)

    def observe(self, value: Union[int, float]):
        return self.metric.observe(value)
class RaySpecDecodingProm(SpecDecodingProm):
    """
    RaySpecDecodingProm is used by RayMetrics to log to Ray metrics.
    Provides the same metrics as SpecDecodingProm but uses Ray's
    util.metrics library.
    """

    # Swap in the Ray-backed counter so the base class builds Ray metrics.
    _counter_cls = RayCounterWrapper
class RayPrometheusStatLogger(PrometheusStatLogger):
    """RayPrometheusStatLogger uses Ray metrics instead."""

    # Substitute every prometheus_client factory with its Ray wrapper.
    _gauge_cls = RayGaugeWrapper
    _counter_cls = RayCounterWrapper
    _histogram_cls = RayHistogramWrapper
    _spec_decoding_cls = RaySpecDecodingProm

    @staticmethod
    def _unregister_vllm_metrics():
        # No-op on purpose
        pass
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/reader.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
from prometheus_client import REGISTRY
|
| 8 |
+
from prometheus_client import Metric as PromMetric
|
| 9 |
+
from prometheus_client.samples import Sample
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@dataclass
class Metric:
    """A base class for prometheus metrics.

    Each metric may be associated with key=value labels, and
    in some cases a single vLLM instance may have multiple
    metrics with the same name but different sets of labels.
    """
    # Full prometheus metric name (always a "vllm:" family here).
    name: str
    # Label key -> label value pairs attached to this sample.
    labels: dict[str, str]
|
| 24 |
+
@dataclass
class Counter(Metric):
    """A monotonically increasing integer counter."""
    # Current cumulative count.
    value: int
|
| 30 |
+
@dataclass
class Vector(Metric):
    """An ordered array of integer counters.

    This type - which doesn't exist in Prometheus - models one very
    specific metric, vllm:spec_decode_num_accepted_tokens_per_pos.
    """
    # values[i] is the counter for spec-decode token position i.
    values: list[int]
|
| 40 |
+
@dataclass
class Gauge(Metric):
    """A numerical value that can go up or down."""
    # Most recently observed value.
    value: float
|
| 46 |
+
@dataclass
class Histogram(Metric):
    """Observations recorded in configurable buckets.

    Buckets are represented by a dictionary. The key is
    the upper limit of the bucket, and the value is the
    observed count in that bucket. A '+Inf' key always
    exists.

    The count property is the total count across all
    buckets, identical to the count of the '+Inf' bucket.

    The sum property is the total sum of all observed
    values.
    """
    # Total number of observations (equals the '+Inf' bucket).
    count: int
    # Sum of all observed values.
    sum: float
    # Bucket upper-limit (as string, e.g. "+Inf") -> cumulative count.
    buckets: dict[str, int]
|
| 66 |
+
def get_metrics_snapshot() -> list[Metric]:
    """An API for accessing in-memory Prometheus metrics.

    Example:
    >>> for metric in llm.get_metrics():
    ...     if isinstance(metric, Counter):
    ...         print(f"{metric} = {metric.value}")
    ...     elif isinstance(metric, Gauge):
    ...         print(f"{metric} = {metric.value}")
    ...     elif isinstance(metric, Histogram):
    ...         print(f"{metric}")
    ...         print(f"    sum = {metric.sum}")
    ...         print(f"    count = {metric.count}")
    ...         for bucket_le, value in metrics.buckets.items():
    ...             print(f"    {bucket_le} = {value}")
    """
    collected: list[Metric] = []
    for metric in REGISTRY.collect():
        # Only vLLM's own metric families are exposed.
        if not metric.name.startswith("vllm:"):
            continue
        if metric.type == "gauge":
            samples = _get_samples(metric)
            for s in samples:
                collected.append(
                    Gauge(name=metric.name, labels=s.labels, value=s.value))
        elif metric.type == "counter":
            # Counter samples carry a "_total" suffix on the sample name.
            samples = _get_samples(metric, "_total")
            if metric.name == "vllm:spec_decode_num_accepted_tokens_per_pos":
                #
                # Ugly vllm:num_accepted_tokens_per_pos special case.
                #
                # This metric is a vector of counters - for each spec
                # decoding token position, we observe the number of
                # accepted tokens using a Counter labeled with 'position'.
                # We convert these into a vector of integer values.
                #
                for labels, values in _digest_num_accepted_by_pos_samples(
                        samples):
                    collected.append(
                        Vector(name=metric.name, labels=labels, values=values))
            else:
                for s in samples:
                    collected.append(
                        Counter(name=metric.name,
                                labels=s.labels,
                                value=int(s.value)))

        elif metric.type == "histogram":
            #
            # A histogram has a number of '_bucket' samples where
            # the 'le' label represents the upper limit of the bucket.
            # We convert these bucketized values into a dict of values
            # indexed by the value of the 'le' label. The 'le=+Inf'
            # label is a special case, catching all values observed.
            #
            bucket_samples = _get_samples(metric, "_bucket")
            count_samples = _get_samples(metric, "_count")
            sum_samples = _get_samples(metric, "_sum")
            for labels, buckets, count_value, sum_value in _digest_histogram(
                    bucket_samples, count_samples, sum_samples):
                collected.append(
                    Histogram(name=metric.name,
                              labels=labels,
                              buckets=buckets,
                              count=count_value,
                              sum=sum_value))
        else:
            raise AssertionError(f"Unknown metric type {metric.type}")

    return collected
+
def _get_samples(metric: PromMetric,
                 suffix: Optional[str] = None) -> list[Sample]:
    """Return metric samples whose name equals metric.name + suffix."""
    target = metric.name if suffix is None else metric.name + suffix
    return [sample for sample in metric.samples if sample.name == target]
+
def _strip_label(labels: dict[str, str], key_to_remove: str) -> dict[str, str]:
|
| 145 |
+
labels_copy = labels.copy()
|
| 146 |
+
labels_copy.pop(key_to_remove)
|
| 147 |
+
return labels_copy
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def _digest_histogram(
    bucket_samples: list[Sample], count_samples: list[Sample],
    sum_samples: list[Sample]
) -> list[tuple[dict[str, str], dict[str, int], int, float]]:
    """Regroup histogram samples by label set into (labels, buckets, count, sum)."""
    #
    # In the case of DP, we have an indigestable
    # per-bucket-per-engine count as a list of labelled
    # samples, along with total and sum samples
    #
    # bucket_samples (in):
    #   labels = {bucket: 100, idx: 0}, value = 2
    #   labels = {bucket: 200, idx: 0}, value = 4
    #   labels = {bucket: Inf, idx: 0}, value = 10
    #   labels = {bucket: 100, idx: 1}, value = 1
    #   labels = {bucket: 200, idx: 2}, value = 5
    #   labels = {bucket: Inf, idx: 3}, value = 7
    # count_samples (in):
    #   labels = {idx: 0}, value = 10
    #   labels = {idx: 1}, value = 7
    # sum_samples (in):
    #   labels = {idx: 0}, value = 2000
    #   labels = {idx: 1}, value = 1200
    #
    # output: [
    #   {idx: 0}, {"100": 2, "200": 4, "Inf": 10}, 10, 2000
    #   {idx: 1}, {"100": 1, "200": 5, "Inf": 7}, 7, 1200
    # ]
    # Index bucket counts by their label set minus the "le" bucket label.
    buckets_by_labels: dict[frozenset[tuple[str, str]], dict[str, int]] = {}
    for s in bucket_samples:
        bucket = s.labels["le"]
        labels_key = frozenset(_strip_label(s.labels, "le").items())
        if labels_key not in buckets_by_labels:
            buckets_by_labels[labels_key] = {}
        buckets_by_labels[labels_key][bucket] = int(s.value)

    counts_by_labels: dict[frozenset[tuple[str, str]], int] = {}
    for s in count_samples:
        labels_key = frozenset(s.labels.items())
        counts_by_labels[labels_key] = int(s.value)

    sums_by_labels: dict[frozenset[tuple[str, str]], float] = {}
    for s in sum_samples:
        labels_key = frozenset(s.labels.items())
        sums_by_labels[labels_key] = s.value

    # Every label set must be present in all three sample families.
    assert set(buckets_by_labels.keys()) == set(
        counts_by_labels.keys()) == set(sums_by_labels.keys())

    output = []
    label_keys = list(buckets_by_labels.keys())
    for k in label_keys:
        labels = dict(k)
        output.append((labels, buckets_by_labels[k], counts_by_labels[k],
                       sums_by_labels[k]))
    return output
+
def _digest_num_accepted_by_pos_samples(
        samples: list[Sample]) -> list[tuple[dict[str, str], list[int]]]:
    """Fold per-position counter samples into per-label-set integer vectors."""
    #
    # In the case of DP, we have an indigestable
    # per-position-per-engine count as a list of
    # labelled samples
    #
    # samples (in):
    #   labels = {pos: 0, idx: 0}, value = 10
    #   labels = {pos: 1, idx: 0}, value = 7
    #   labels = {pos: 2, idx: 0}, value = 2
    #   labels = {pos: 0, idx: 1}, value = 5
    #   labels = {pos: 1, idx: 1}, value = 3
    #   labels = {pos: 2, idx: 1}, value = 1
    #
    # output: [
    #   {idx: 0}, [10, 7, 2]
    #   {idx: 1}, [5, 3, 1]
    # ]
    #
    max_pos = 0
    values_by_labels: dict[frozenset[tuple[str, str]], dict[int, int]] = {}

    for s in samples:
        position = int(s.labels["position"])
        max_pos = max(max_pos, position)

        # Group by the remaining labels once "position" is stripped.
        labels_key = frozenset(_strip_label(s.labels, "position").items())
        if labels_key not in values_by_labels:
            values_by_labels[labels_key] = {}
        values_by_labels[labels_key][position] = int(s.value)

    output = []
    for labels_key, values_by_position in values_by_labels.items():
        labels = dict(labels_key)
        # Dense vector up to the global max position; missing positions
        # default to 0.
        values = [0] * (max_pos + 1)
        for pos, val in values_by_position.items():
            values[pos] = val
        output.append((labels, values))
    return output
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/stats.py
ADDED
|
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import time
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from typing import TYPE_CHECKING, Optional
|
| 7 |
+
|
| 8 |
+
from vllm.v1.spec_decode.metrics import SpecDecodingStats
|
| 9 |
+
|
| 10 |
+
if TYPE_CHECKING:
|
| 11 |
+
from vllm.v1.engine import EngineCoreEvent, EngineCoreOutput, FinishReason
|
| 12 |
+
from vllm.v1.engine.output_processor import RequestState
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@dataclass
class PrefixCacheStats:
    """Stores prefix cache hit statistics."""
    # Whether reset_prefix_cache was invoked since the last update.
    reset: bool = False
    # The number of requests in this update.
    requests: int = 0
    # The number of queries in these requests. Note that "queries" here
    # means the number of tokens that were queried from the cache.
    queries: int = 0
    # The number of hits in these requests (tokens served from the cache).
    hits: int = 0
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
class SchedulerStats:
    """Stats associated with the scheduler."""

    # Number of requests currently running on the engine.
    num_running_reqs: int = 0
    # Number of requests queued but not yet running.
    num_waiting_reqs: int = 0

    # These are used for internal DP load-balancing.
    step_counter: int = 0
    current_wave: int = 0

    # KV cache usage reported by the engine core; presumably a 0.0-1.0
    # fraction of blocks in use — TODO confirm against the scheduler.
    kv_cache_usage: float = 0.0

    # Prefix cache hit statistics for this update.
    prefix_cache_stats: PrefixCacheStats = field(
        default_factory=PrefixCacheStats)

    # Speculative decoding statistics, when spec decoding is enabled.
    spec_decoding_stats: Optional[SpecDecodingStats] = None

    # NOTE(review): "corrupted" semantics are not visible from this file;
    # confirm against the scheduler that produces this value.
    num_corrupted_reqs: int = 0
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
@dataclass
class LoRAStats:
    """Request ids currently waiting/running for one LoRA adapter."""
    # Request ids queued (QUEUED event seen, not yet scheduled).
    waiting_requests: set[str] = field(default_factory=set)
    # Request ids currently scheduled/running.
    running_requests: set[str] = field(default_factory=set)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@dataclass
class RequestStateStats:
    """Stats that need to be tracked across delta updates."""

    # Total number of generation tokens produced for this request so far.
    num_generation_tokens: int = 0

    # This is an engine frontend timestamp (wall-clock)
    arrival_time: float = 0.0

    # These are engine core timestamps (monotonic)
    queued_ts: float = 0.0
    scheduled_ts: float = 0.0
    first_token_ts: float = 0.0
    last_token_ts: float = 0.0
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@dataclass
class FinishedRequestStats:
    """Stats associated with a finished request."""

    # Why the request finished.
    finish_reason: "FinishReason"
    # Wall-clock time from request arrival to the iteration that finished it.
    e2e_latency: float = 0.0
    num_prompt_tokens: int = 0
    num_generation_tokens: int = 0
    # Presumably the request's max_tokens sampling parameter, when one was
    # set — confirm against the caller that populates it.
    max_tokens_param: Optional[int] = None
    # Interval from first QUEUED event to first SCHEDULED event.
    queued_time: float = 0.0
    # Interval from first SCHEDULED event to first NEW_TOKEN.
    prefill_time: float = 0.0
    # Interval from first SCHEDULED event to last NEW_TOKEN.
    inference_time: float = 0.0
    # Interval from first NEW_TOKEN to last NEW_TOKEN.
    decode_time: float = 0.0
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class IterationStats:
    """Aggregated statistics for a single set of EngineCoreOutputs."""

    def __init__(self):
        # Wall-clock timestamp of this iteration; wall-clock intervals
        # (TTFT, e2e latency) are measured relative to it.
        self.iteration_timestamp = time.time()
        self.num_generation_tokens = 0
        self.num_prompt_tokens = 0
        self.num_preempted_reqs = 0
        self.finished_requests: list[FinishedRequestStats] = []
        self.max_num_generation_tokens_iter: list[int] = []
        self.n_params_iter: list[int] = []
        self.time_to_first_tokens_iter: list[float] = []
        self.time_per_output_tokens_iter: list[float] = []
        self.waiting_lora_adapters: dict[str, int] = {}
        self.running_lora_adapters: dict[str, int] = {}

    def _time_since(self, start: float) -> float:
        """Return the elapsed time from *start* to this iteration's timestamp."""
        return self.iteration_timestamp - start

    def update_from_output(self, output: "EngineCoreOutput",
                           engine_core_timestamp: float, is_prefilling: bool,
                           prompt_len: int, req_stats: RequestStateStats,
                           lora_stats: Optional[LoRAStats]):
        """Fold one request's engine-core output into this iteration's stats.

        Also updates the per-request ``req_stats`` timestamps/counters and,
        when the request uses a LoRA adapter, its ``lora_stats``.
        """
        new_tokens = len(output.new_token_ids)

        self.num_generation_tokens += new_tokens
        if is_prefilling:
            # First output for this request: count its prompt tokens and
            # record the (wall-clock) time-to-first-token latency.
            self.num_prompt_tokens += prompt_len
            self.time_to_first_tokens_iter.append(
                self._time_since(req_stats.arrival_time))

        req_stats.num_generation_tokens += new_tokens

        # Process request-level engine core events, if any.
        if output.events is not None:
            self.update_from_events(output.request_id, output.events,
                                    is_prefilling, req_stats, lora_stats)

        # Process the batch-level "new tokens" engine core event.
        if is_prefilling:
            req_stats.first_token_ts = engine_core_timestamp
        else:
            # Time-per-output-token for decode steps (monotonic clock).
            self.time_per_output_tokens_iter.append(engine_core_timestamp -
                                                    req_stats.last_token_ts)

        req_stats.last_token_ts = engine_core_timestamp

    def update_from_events(self, req_id: str, events: list["EngineCoreEvent"],
                           is_prefilling: bool, req_stats: RequestStateStats,
                           lora_stats: Optional[LoRAStats]):
        """Apply request lifecycle events to request-level and LoRA stats."""
        # Imported here to avoid a circular dependency.
        from vllm.v1.engine import EngineCoreEventType
        for event in events:
            if event.type == EngineCoreEventType.QUEUED:
                req_stats.queued_ts = event.timestamp
                if lora_stats is not None:
                    lora_stats.waiting_requests.add(req_id)
            elif event.type == EngineCoreEventType.SCHEDULED:
                # Only the first SCHEDULED counts; re-scheduling after a
                # preemption must not overwrite the original timestamp.
                if req_stats.scheduled_ts == 0.0:
                    req_stats.scheduled_ts = event.timestamp
                LoRARequestStates.scheduled_request(lora_stats, req_id)
            elif event.type == EngineCoreEventType.PREEMPTED:
                self.num_preempted_reqs += 1
                LoRARequestStates.preempted_request(lora_stats, req_id)

    def update_from_finished_request(self, finish_reason: "FinishReason",
                                     num_prompt_tokens: int,
                                     max_tokens_param: Optional[int],
                                     req_stats: RequestStateStats):
        """Record the latency breakdown of a request that just finished."""
        self.finished_requests.append(
            FinishedRequestStats(
                finish_reason=finish_reason,
                # End-to-end latency is wall-clock, measured from arrival.
                e2e_latency=self._time_since(req_stats.arrival_time),
                num_prompt_tokens=num_prompt_tokens,
                num_generation_tokens=req_stats.num_generation_tokens,
                max_tokens_param=max_tokens_param,
                # Queued: first QUEUED event to first SCHEDULED.
                queued_time=req_stats.scheduled_ts - req_stats.queued_ts,
                # Prefill: first SCHEDULED to first NEW_TOKEN; preemptions
                # during prefill are included in the interval.
                prefill_time=req_stats.first_token_ts - req_stats.scheduled_ts,
                # Inference: first SCHEDULED to last NEW_TOKEN; preemptions
                # during prefill or decode are included.
                inference_time=req_stats.last_token_ts -
                req_stats.scheduled_ts,
                # Decode: first NEW_TOKEN to last NEW_TOKEN; preemptions
                # during decode are included.
                decode_time=req_stats.last_token_ts -
                req_stats.first_token_ts))
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class LoRARequestStates:
|
| 189 |
+
"""Per-LoRA request state stats."""
|
| 190 |
+
|
| 191 |
+
def __init__(self):
|
| 192 |
+
self.lora_name_to_stats: dict[str, LoRAStats] = {}
|
| 193 |
+
|
| 194 |
+
def get_stats(self, req_state: 'RequestState') -> Optional[LoRAStats]:
|
| 195 |
+
if req_state.lora_name is None:
|
| 196 |
+
return None
|
| 197 |
+
if req_state.lora_name not in self.lora_name_to_stats:
|
| 198 |
+
self.lora_name_to_stats[req_state.lora_name] = LoRAStats()
|
| 199 |
+
return self.lora_name_to_stats[req_state.lora_name]
|
| 200 |
+
|
| 201 |
+
def add_request(self, req_state: 'RequestState'):
|
| 202 |
+
if (lora_stats := self.get_stats(req_state)) is not None:
|
| 203 |
+
lora_stats.waiting_requests.add(req_state.request_id)
|
| 204 |
+
|
| 205 |
+
def finish_request(self, req_state: 'RequestState'):
|
| 206 |
+
if req_state.lora_name is None:
|
| 207 |
+
return
|
| 208 |
+
lora_stats = self.lora_name_to_stats[req_state.lora_name]
|
| 209 |
+
lora_stats.running_requests.remove(req_state.request_id)
|
| 210 |
+
|
| 211 |
+
def abort_request(self, req_state: 'RequestState'):
|
| 212 |
+
if req_state.lora_name is None:
|
| 213 |
+
return
|
| 214 |
+
lora_stats = self.lora_name_to_stats[req_state.lora_name]
|
| 215 |
+
lora_stats.waiting_requests.discard(req_state.request_id)
|
| 216 |
+
lora_stats.running_requests.discard(req_state.request_id)
|
| 217 |
+
|
| 218 |
+
# Break the pattern for this lifecycle methods so we can
|
| 219 |
+
# call this from IterationStats.update_from_events()
|
| 220 |
+
@staticmethod
|
| 221 |
+
def scheduled_request(lora_stats: Optional[LoRAStats], request_id: str):
|
| 222 |
+
if lora_stats is None:
|
| 223 |
+
return
|
| 224 |
+
lora_stats.waiting_requests.remove(request_id)
|
| 225 |
+
lora_stats.running_requests.add(request_id)
|
| 226 |
+
|
| 227 |
+
@staticmethod
|
| 228 |
+
def preempted_request(lora_stats: Optional[LoRAStats], request_id: str):
|
| 229 |
+
if lora_stats is None:
|
| 230 |
+
return
|
| 231 |
+
lora_stats.running_requests.remove(request_id)
|
| 232 |
+
lora_stats.waiting_requests.add(request_id)
|
| 233 |
+
|
| 234 |
+
def update_iteration_stats(self,
|
| 235 |
+
iteration_stats: Optional[IterationStats]):
|
| 236 |
+
if iteration_stats is None:
|
| 237 |
+
return
|
| 238 |
+
for lora_name, stats in self.lora_name_to_stats.items():
|
| 239 |
+
if stats.waiting_requests:
|
| 240 |
+
iteration_stats.waiting_lora_adapters[lora_name] = \
|
| 241 |
+
len(stats.waiting_requests)
|
| 242 |
+
if stats.running_requests:
|
| 243 |
+
iteration_stats.running_lora_adapters[lora_name] = \
|
| 244 |
+
len(stats.running_requests)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/pool/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/pool/metadata.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from vllm.pooling_params import PoolingParams
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass
class PoolingMetadata:
    """Tensors needed to perform pooling over a batch of prompts."""

    prompt_lens: torch.Tensor
    prompt_token_ids: Optional[torch.Tensor]
    pooling_params: list[PoolingParams]

    def __getitem__(self, indices: slice):
        """Return a new PoolingMetadata restricted to the given slice."""
        token_ids = self.prompt_token_ids
        if token_ids is not None:
            token_ids = token_ids[indices]
        return PoolingMetadata(
            prompt_lens=self.prompt_lens[indices],
            prompt_token_ids=token_ids,
            pooling_params=self.pooling_params[indices],
        )
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__init__.py
ADDED
|
File without changes
|