repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/qk_norm_rope_fusion.py
vllm/compilation/qk_norm_rope_fusion.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inductor pass that fuses per-head Q/K RMSNorm + rotary embedding into the
single custom op ``torch.ops._C.fused_qk_norm_rope``."""

from collections.abc import Callable

import torch
import torch._inductor.pattern_matcher as pm
from torch import fx
from torch._higher_order_ops.auto_functionalize import auto_functionalized
from torch._inductor.pattern_matcher import PatternMatcherPass

from vllm.attention.layer import Attention
from vllm.config import VllmConfig, get_layers_from_vllm_config
from vllm.logger import init_logger
from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding

from .fusion import empty_bf16, empty_fp32, empty_i64
from .inductor_pass import enable_fake_mode
from .matcher_utils import MatcherRMSNorm, MatcherRotaryEmbedding
from .vllm_inductor_pass import VllmInductorPass, VllmPatternMatcherPass

logger = init_logger(__name__)

# The fused custom op this pass rewrites the unfused sequence into.
FUSED_QK_ROPE_OP = torch.ops._C.fused_qk_norm_rope.default


class QkNormRopePattern:
    """
    Match the unfused sequence in attention blocks and replace with the fused op.

    Unfused (conceptually):
        q, k, v = split(qkv, [qsz, kvsz, kvsz], -1)
        qh = reshape(q, [-1, num_heads, head_dim])
        kh = reshape(k, [-1, num_kv_heads, head_dim])
        qn = rms_norm(qh, q_weight, eps)
        kn = rms_norm(kh, k_weight, eps)
        qf = reshape(qn, [-1, num_heads * head_dim])
        kf = reshape(kn, [-1, num_kv_heads * head_dim])
        qf, kf = rotary_embedding(positions, qf, kf, head_dim,
                                  cos_sin_cache, is_neox)
        return qf, kf, v

    Fused replacement:
        fused_qk_norm_rope(qkv, num_heads, num_kv_heads, num_kv_heads,
                           head_dim, eps, q_weight, k_weight, cos_sin_cache,
                           is_neox, positions.view(-1))
        return split(qkv, [qsz, kvsz, kvsz], -1)
    """

    def __init__(
        self,
        head_dim: int,
        num_heads: int,
        num_kv_heads: int,
        eps: float,
        is_neox: bool,
        rope_flashinfer: bool = False,
    ) -> None:
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.head_dim = head_dim
        # Flattened widths of the q and k/v sections inside the packed qkv.
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.eps = eps
        self.rmsnorm_matcher = MatcherRMSNorm(eps)
        self.is_neox = is_neox
        self.rope_flashinfer = rope_flashinfer
        self.rope_matcher = MatcherRotaryEmbedding(
            is_neox=is_neox,
            head_size=self.head_dim,
            num_heads=self.num_heads,
            num_kv_heads=self.num_kv_heads,
            use_flashinfer=self.rope_flashinfer,
        )

    def get_inputs(self) -> list[torch.Tensor]:
        # Sample inputs to help pattern tracing
        T = 5
        qkv = empty_bf16(T, self.q_size + 2 * self.kv_size)
        positions = empty_i64(T)
        q_weight = empty_bf16(1, self.head_dim)
        k_weight = empty_bf16(1, self.head_dim)
        # The flashinfer rope path traces against an fp32 cos/sin cache;
        # the default path uses bf16.
        if self.rope_flashinfer:
            cos_sin_cache = empty_fp32(4096, self.head_dim)
        else:
            cos_sin_cache = empty_bf16(4096, self.head_dim)
        return [
            qkv,
            positions,
            q_weight,
            k_weight,
            cos_sin_cache,
        ]

    @staticmethod
    def wrap_trace_fn(trace_fn, *process_fx_fns: Callable[[fx.GraphModule], None]):
        # Wrap a pattern-tracing function so that each post-processing
        # function runs on the traced GraphModule before it is returned.
        def wrapped(*args, **kwargs):
            gm = trace_fn(*args, **kwargs)
            for process_fx in process_fx_fns:
                process_fx(gm)
            return gm

        return wrapped

    @staticmethod
    def fx_view_to_reshape(gm: torch.fx.GraphModule):
        # Imported lazily here; rewrites aten.view nodes into aten.reshape.
        from torch._inductor.fx_passes.post_grad import view_to_reshape

        view_to_reshape(gm)

    def register(self, pm_pass: PatternMatcherPass) -> None:
        def pattern(
            qkv: torch.Tensor,
            positions: torch.Tensor,
            q_weight: torch.Tensor,
            k_weight: torch.Tensor,
            cos_sin_cache: torch.Tensor,
        ):
            # split qkv -> q,k,v
            q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
            # Q path: view -> RMS -> view back to q.shape
            q_by_head = q.view(
                *q.shape[:-1], q.shape[-1] // self.head_dim, self.head_dim
            )
            q_normed_by_head = self.rmsnorm_matcher(q_by_head, q_weight)
            q_flat = q_normed_by_head.view(q.shape)
            # K path: view -> RMS -> view back to k.shape
            k_by_head = k.view(
                *k.shape[:-1], k.shape[-1] // self.head_dim, self.head_dim
            )
            k_normed_by_head = self.rmsnorm_matcher(k_by_head, k_weight)
            k_flat = k_normed_by_head.view(k.shape)

            # RoPE: apply to flattened q/k
            q_rope, k_rope = self.rope_matcher(positions, q_flat, k_flat, cos_sin_cache)
            return q_rope, k_rope, v

        def replacement(
            qkv: torch.Tensor,
            positions: torch.Tensor,
            q_weight: torch.Tensor,
            k_weight: torch.Tensor,
            cos_sin_cache: torch.Tensor,
        ):
            # Run fused qk_norm_rope op
            result = auto_functionalized(
                FUSED_QK_ROPE_OP,
                qkv=qkv,
                num_heads_q=self.num_heads,
                num_heads_k=self.num_kv_heads,
                num_heads_v=self.num_kv_heads,
                head_dim=self.head_dim,
                eps=self.eps,
                q_weight=q_weight,
                k_weight=k_weight,
                cos_sin_cache=cos_sin_cache,
                is_neox=self.is_neox,
                position_ids=positions.view(-1),
            )
            # auto_functionalized returns a tuple; index 1 is the mutated qkv.
            result_qkv = result[1]
            # Split back to q,k,v and return
            return result_qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)

        # NOTE: use fx_view_to_reshape to unify view/reshape to simplify
        # pattern and increase matching opportunities
        pm.register_replacement(
            pattern,
            replacement,
            self.get_inputs(),
            QkNormRopePattern.wrap_trace_fn(
                pm.fwd_only,
                QkNormRopePattern.fx_view_to_reshape,
            ),
            pm_pass,
        )


class QKNormRoPEFusionPass(VllmPatternMatcherPass):
    """Fuse Q/K RMSNorm + RoPE into fused_qk_norm_rope when the custom op exists."""

    @enable_fake_mode
    def __init__(self, config: VllmConfig):
        super().__init__(config)
        self.patterns: PatternMatcherPass = PatternMatcherPass(
            pass_name="qk_norm_rope_fusion_pass"
        )
        # The fused op only supports half-precision activations; bail out
        # (leaving self.patterns empty) for any other model dtype.
        dtype = config.model_config.dtype
        if dtype not in (torch.bfloat16, torch.float16):
            logger.warning_once(
                "QK Norm+RoPE fusion not enabled: unsupported dtype %s", dtype
            )
            return
        # use one attn layer to get meta (such as head_dim) for QkNormRopePattern
        attn_layers: dict[str, Attention] = get_layers_from_vllm_config(
            config, Attention
        )
        if len(attn_layers) == 0:
            logger.warning_once(
                "QK Norm+RoPE fusion enabled, but no Attention layers were discovered."
            )
            return
        layer = next(iter(attn_layers.values()))

        # Register one pattern per (eps, neox, [flashinfer]) combination so a
        # graph using any of these variants still matches.
        for epsilon in [1e-5, 1e-6]:
            for neox in [True, False]:
                if RotaryEmbedding.enabled():
                    for rope_flashinfer in [False, True]:
                        QkNormRopePattern(
                            head_dim=layer.head_size,
                            num_heads=layer.num_heads,
                            num_kv_heads=layer.num_kv_heads,
                            eps=epsilon,
                            is_neox=neox,
                            rope_flashinfer=rope_flashinfer,
                        ).register(self.patterns)
                else:
                    QkNormRopePattern(
                        head_dim=layer.head_size,
                        num_heads=layer.num_heads,
                        num_kv_heads=layer.num_kv_heads,
                        eps=epsilon,
                        is_neox=neox,
                    ).register(self.patterns)

        self.dump_patterns(config, self.patterns)

    @VllmInductorPass.time_and_log
    def __call__(self, graph: fx.Graph) -> None:
        self.matched_count = self.patterns.apply(graph)
        logger.debug("Fused QK Norm+RoPE on %s sites", self.matched_count)

    def uuid(self):
        # Hash both this pass and the pattern class so edits to either
        # invalidate the Inductor code cache.
        return VllmInductorPass.hash_source(self, QkNormRopePattern)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/vllm_inductor_pass.py
vllm/compilation/vllm_inductor_pass.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Base classes for vLLM's Inductor passes: timing/logging/dump utilities and
pattern-matcher debugging support."""

import functools
import operator
import time
from dataclasses import dataclass
from typing import ClassVar

import regex as re
import torch
from torch._dynamo.utils import lazy_format_graph_code
from torch._inductor.pattern_matcher import PatternMatcherPass, PatternPrettyPrinter

from vllm.config import VllmConfig
from vllm.logger import init_logger

from .inductor_pass import InductorPass

logger = init_logger(__name__)


@dataclass
class InductorCompilationConfig:
    # Minimal snapshot of CompilationConfig fields needed by passes.
    splitting_ops: list[str] | None = None
    use_inductor_graph_partition: bool = False


class VllmInductorPass(InductorPass):
    """
    An inductor pass with access to vLLM PassConfig.
    It provides timing, logging, and dumping utilities.
    """

    dump_prefix: ClassVar[int | None] = None
    """Keep track of pass index for debug dump ordering."""

    def __init__(self, config: VllmConfig):
        # Get only the necessary CompilationConfig for the inductor pass, since
        # full `CompilationConfig` contains pointer to model which is unsafe.
        self.compilation_config = InductorCompilationConfig(
            splitting_ops=config.compilation_config.splitting_ops,
            use_inductor_graph_partition=config.compilation_config.use_inductor_graph_partition,
        )
        self.pass_config = config.compilation_config.pass_config
        self.model_dtype = config.model_config.dtype if config.model_config else None
        self.device = config.device_config.device if config.device_config else None
        self.pass_name = self.__class__.__name__

    @staticmethod
    def time_and_log(call_fn):
        """Decorator for a pass's __call__: dumps the graph before/after and
        logs the elapsed time."""

        @functools.wraps(call_fn)
        def wrapped(self: VllmInductorPass, graph: torch.fx.Graph):
            self.begin()
            self.dump_graph(graph, "before")
            call_fn(self, graph)
            self.dump_graph(graph, "after")
            self.end_and_log()

        return wrapped

    def dump_graph(self, graph: torch.fx.Graph, stage: str):
        # dump_prefix (if set) keeps the per-pass dumps ordered on disk.
        i = VllmInductorPass.dump_prefix
        i_str = "" if i is None else f".{i}"
        lazy_format_graph_code(
            f"post_grad{i_str}.{self.pass_name}.{stage}", graph.owning_module
        )

    def begin(self):
        # Start of the timed region; paired with end_and_log().
        self._start_time = time.perf_counter_ns()

    def end_and_log(self):
        self._end_time = time.perf_counter_ns()
        duration_ms = float(self._end_time - self._start_time) / 1.0e6
        logger.debug("%s completed in %.1f ms", self.pass_name, duration_ms)


class VllmPatternMatcherPass(VllmInductorPass):
    """
    A VllmInductorPass that uses the Inductor pattern matcher.
    Its main use is providing the dump_patterns utility that dumps the
    Inductor pattern matcher patterns into a file, which greatly aids debugging.

    TODO(luka) move more utilities to this pass.
    """

    matched_count: int = 0
    """The number of matched patterns in the pass."""

    # Matches reprs like <OpOverload(op='aten.add', overload='default')>.
    _OP_OVERLOAD_PATTERN: ClassVar[re.Pattern] = re.compile(
        r"<OpOverload\(op='([^']*)', overload='([^']*)'\)>"
    )

    def _replace_op_overloads(self, string: str) -> str:
        """Replace <OpOverload(..., ...)> with nicer formulations"""
        return self._OP_OVERLOAD_PATTERN.sub(
            lambda m: f"torch.ops.{m.group(1)}.{m.group(2)}",
            string,
        )

    def dump_patterns(self, config: VllmConfig, pm_pass: PatternMatcherPass):
        """
        If debug dumping is enabled, dump the Inductor pattern-matcher patterns
        into the debug_dump_path folder next to the dumped fx graphs.

        This method does its best to print something that looks like Python code
        for easier debugging and potentially navigation. If any errors appear in
        the output, please add to this method.

        TODO(luka): use pattern object to manually produce pattern graph
        """
        debug_dump_path = config.compile_debug_dump_path()
        if not debug_dump_path:
            return

        debug_dump_path.mkdir(parents=True, exist_ok=True)

        from vllm.utils.system_utils import unique_filepath

        file_path = unique_filepath(
            lambda i: debug_dump_path / f"patterns.{self.pass_name}.{i}.py"
        )

        with file_path.open("w") as f:
            # File header: imports that make the dumped code look runnable.
            print(
                f"# This file was produced by VllmPatternMatcherPass."
                f"dump_patterns for {self.pass_name}.\n"
                f"# It does its best to produce valid-Python-looking code but"
                f" please add to dump_patterns if there are any errors.\n\n"
                f"from torch._higher_order_ops.auto_functionalize import "
                f"auto_functionalized as auto_functionalized\n"
                f"from torch._inductor.pattern_matcher import *\n"
                f"vllm = torch.ops.vllm",
                file=f,
            )

            for node, patterns in pm_pass.patterns.items():
                # fix the operator.getitem repr
                if node[1] == operator.getitem:
                    node_repr = f"({repr(node[0])}, operator.getitem)"
                else:
                    node_repr = repr(node)

                node_repr = self._replace_op_overloads(node_repr)

                print(f"\n\n# Patterns for op: {node_repr}", file=f)
                for i, pattern in enumerate(patterns):
                    # reserve auto_functionalized ahead of time
                    pp = PatternPrettyPrinter()
                    pp.namespace.create_name("auto_functionalized", None)

                    # Assemble pattern
                    out_node = pp.pretty_print(pattern.pattern)
                    pattern_repr = "\n".join(
                        [f"def pattern_{i}():"]
                        + [
                            f"{pp.memoized_objs_names[key]} = "
                            f"{pp.memoized_objs_pp[key]}"
                            for key in pp.memoized_objs_names
                        ]
                        + [f"return {out_node}"]
                    ).replace("\n", "\n ")

                    pattern_repr = self._replace_op_overloads(pattern_repr)
                    print(f"{pattern_repr}\n", file=f)


class PrinterInductorPass(VllmInductorPass):
    """A pass that only dumps the graph under a given name; useful for
    inspecting the graph between other passes."""

    def __init__(self, name: str, config: VllmConfig):
        super().__init__(config)
        self.name = name

    def __call__(self, graph: torch.fx.Graph):
        self.dump_graph(graph, self.name)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/wrapper.py
vllm/compilation/wrapper.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""torch.compile wrapper that drops Dynamo guards so a model is compiled once
and dispatched without re-tracing."""

import os
import sys
from abc import abstractmethod
from contextlib import contextmanager, nullcontext
from types import CodeType
from typing import Any

import torch
import torch._C._dynamo.guards

import vllm.envs as envs
from vllm.config import CompilationMode, CUDAGraphMode, get_current_vllm_config
from vllm.config.compilation import DynamicShapesType
from vllm.logger import init_logger
from vllm.utils.nvtx_pytorch_hooks import layerwise_nvtx_marker_context

logger = init_logger(__name__)


def _noop_add_global_state_guard(self, *args, **kwargs):
    """No-op to skip the GLOBAL_STATE guard entirely"""
    pass


def _noop_add_torch_function_mode_stack_guard(self, *args, **kwargs):
    """No-op to skip the TORCH_FUNCTION_MODE_STACK guard entirely"""
    pass


@contextmanager
def _compilation_context():
    """Context manager for compilation settings and patches.

    This manager:
    1. Sets higher dynamo cache limits for compilation. (Needed for qwen2_5_vl
       see test_qwen2_5_vl_evs_functionality). Generally a recompilation can
       happen whenever we use a new backend instance in torch.compile.
    2. Patches out add_global_state_guard to skip GLOBAL_STATE guards
    3. Patches out add_torch_function_mode_stack_guard to skip
       TORCH_FUNCTION_MODE_STACK guards.
    4. Restores everything when compilation completes
    """
    # Save original values
    original_global_state_guard = (
        torch._C._dynamo.guards.GuardManager.add_global_state_guard
    )
    original_torch_function_mode_stack_guard = (
        torch._C._dynamo.guards.GuardManager.add_torch_function_mode_stack_guard
    )
    original_cache_size = torch._dynamo.config.cache_size_limit
    original_accumulated_cache = torch._dynamo.config.accumulated_cache_size_limit
    try:
        # Set higher cache limits for compilation
        torch._dynamo.config.cache_size_limit = 2048
        torch._dynamo.config.accumulated_cache_size_limit = 8192
        # Patch guard manager
        torch._C._dynamo.guards.GuardManager.add_global_state_guard = (
            _noop_add_global_state_guard
        )
        torch._C._dynamo.guards.GuardManager.add_torch_function_mode_stack_guard = (
            _noop_add_torch_function_mode_stack_guard
        )
        yield
    finally:
        # Restore original values
        torch._C._dynamo.guards.GuardManager.add_global_state_guard = (
            original_global_state_guard
        )
        torch._C._dynamo.guards.GuardManager.add_torch_function_mode_stack_guard = (
            original_torch_function_mode_stack_guard
        )
        torch._dynamo.config.cache_size_limit = original_cache_size
        torch._dynamo.config.accumulated_cache_size_limit = original_accumulated_cache


class TorchCompileWithNoGuardsWrapper:
    """
    A wrapper class for torch.compile, it ensures that all guards are dropped
    when CompilationMode is not CompilationMode.STOCK_TORCH_COMPILE.

    When guards are dropped, the first time __call__ is invoked, a single
    compilation is triggered. Dynamo should never be traced again after that
    since we drop all guards.
    """

    def check_invariants_and_forward(self, *args, **kwargs):
        # Used as the compile entry point for UNBACKED dynamic shapes:
        # runs the shape-invariant check before the real forward.
        # NOTE(review): _check_shape_invariants is expected to be attached by
        # a subclass or external setup; not defined in this file.
        assert hasattr(self, "_check_shape_invariants")
        self._check_shape_invariants(*args, **kwargs)
        return self.forward(*args, **kwargs)

    def _call_with_optional_nvtx_range(self, callable_fn, *args, **kwargs):
        # Wraps the call in an NVTX marker context when layerwise tracing is
        # enabled; otherwise a plain call.
        if self.layerwise_nvtx_tracing_enabled:
            args_list = list(args)
            kwargs_dict = dict(kwargs)
            with layerwise_nvtx_marker_context(
                "Torch Compiled Module (input):{}".format(self.__class__.__name__),
                self,
                in_tensor=args_list,
                kwargs=kwargs_dict,
            ) as ctx:
                ctx.result = callable_fn(*args, **kwargs)
            return ctx.result
        return callable_fn(*args, **kwargs)

    def __init__(self):
        self.compiled = False
        vllm_config = get_current_vllm_config()
        self.vllm_config = vllm_config
        mode = vllm_config.compilation_config.mode
        self.layerwise_nvtx_tracing_enabled = (
            vllm_config.observability_config.enable_layerwise_nvtx_tracing
        )
        if mode is None:
            raise RuntimeError("Compilation mode cannot be NO_COMPILATION")
        backend = vllm_config.compilation_config.init_backend(vllm_config)
        options = {}
        if isinstance(backend, str) and backend == "inductor":
            options = vllm_config.compilation_config.inductor_compile_config
        self.first_compile = True
        self.evaluate_guards = (
            vllm_config.compilation_config.dynamic_shapes_config.evaluate_guards
        )
        ds_type = vllm_config.compilation_config.dynamic_shapes_config.type
        if mode != CompilationMode.STOCK_TORCH_COMPILE:
            # Drop all the guards.
            if self.evaluate_guards:
                assert not envs.VLLM_USE_BYTECODE_HOOK, (
                    "compilation_config.dynamic_shapes_config.evaluate_guards "
                    "requires VLLM_USE_BYTECODE_HOOK=0. "
                )
                if envs.VLLM_USE_AOT_COMPILE:
                    # disabled until https://github.com/pytorch/pytorch/pull/169239
                    # is picked up.
                    assert ds_type != DynamicShapesType.BACKED, (
                        "evaluate_guards for backed shapes requires "
                        "VLLM_USE_AOT_COMPILE=False. "
                    )
                # Keep only shape-env guards so recompiles can be detected.
                options["guard_filter_fn"] = lambda x: [
                    entry.guard_type == "SHAPE_ENV" for entry in x
                ]
            else:
                # Drop every guard.
                options["guard_filter_fn"] = lambda x: [False for _ in x]

        compiled_ptr: Any = self.forward

        # Validate that unbacked dynamic shapes require VLLM_USE_BYTECODE_HOOK=False
        if ds_type == DynamicShapesType.UNBACKED:
            # reason is that bytecode does torch._dynamo.eval_frame.
            # remove_from_cache(self.original_code_object()) to force a new
            # re-compilation. And if we use
            # compiled_ptr = self.check_invariants_and_forward
            # it will reset all entries.
            assert not envs.VLLM_USE_BYTECODE_HOOK, (
                "UNBACKED dynamic shapes requires VLLM_USE_BYTECODE_HOOK=0. "
            )
            assert not self.evaluate_guards, "UNBACKED dynamic shapes do not add guards"
            compiled_ptr = self.check_invariants_and_forward

        aot_context = nullcontext()
        if envs.VLLM_USE_AOT_COMPILE:
            if hasattr(torch._dynamo.config, "enable_aot_compile"):
                aot_context = torch._dynamo.config.patch(enable_aot_compile=True)
            else:
                msg = "torch._dynamo.config.enable_aot_compile is not "
                msg += "available. AOT compile is disabled and please "
                msg += "upgrade PyTorch version to use AOT compile."
                logger.warning(msg)

        with aot_context:
            self._compiled_callable = torch.compile(
                compiled_ptr,
                fullgraph=True,
                dynamic=False,
                backend=backend,
                options=options,
            )

        if envs.VLLM_USE_BYTECODE_HOOK and mode != CompilationMode.STOCK_TORCH_COMPILE:
            torch._dynamo.convert_frame.register_bytecode_hook(self.bytecode_hook)
            self._compiled_bytecode = None

    def aot_compile(self, *args, **kwargs):
        # Delegates to Dynamo's aot_compile when the installed torch supports it.
        if not hasattr(self._compiled_callable, "aot_compile"):
            raise RuntimeError(
                "aot_compile is not supported by the current configuration. "
                + "Please make sure torch.compile is enabled with the latest "
                + f"version of PyTorch (current using torch: {torch.__version__})"
            )
        return self._compiled_callable.aot_compile((args, kwargs))

    def __call__(self, *args, **kwargs):
        if envs.VLLM_USE_BYTECODE_HOOK:
            if (
                self.vllm_config.compilation_config.mode
                == CompilationMode.STOCK_TORCH_COMPILE
            ):
                return self._compiled_callable(*args, **kwargs)
            if not self._compiled_bytecode:
                # Make sure a compilation is triggered by clearing dynamo
                # cache.
                torch._dynamo.eval_frame.remove_from_cache(self.original_code_object())
                return self._call_with_optional_nvtx_range(
                    self._compiled_callable, *args, **kwargs
                )
            else:
                # Bytecode captured: run forward directly with its code object
                # swapped to the compiled bytecode.
                with self._dispatch_to_compiled_code():
                    return self._call_with_optional_nvtx_range(
                        self.forward, *args, **kwargs
                    )
        else:
            # After the first compile, optionally fail loudly on recompiles
            # when guard evaluation is enabled.
            ctx = (
                nullcontext()
                if self.first_compile or not self.evaluate_guards
                else torch.compiler.set_stance("fail_on_recompile")
            )
            self.first_compile = False
            with _compilation_context(), ctx:
                return self._call_with_optional_nvtx_range(
                    self._compiled_callable, *args, **kwargs
                )

    @abstractmethod
    def forward(self, *args, **kwargs): ...

    def original_code_object(self) -> CodeType:
        """Return the original code object of the forward method."""
        return self.__class__.forward.__code__

    def bytecode_hook(self, old_code: CodeType, new_code: CodeType):
        """Hook to save the compiled bytecode for direct execution."""
        if old_code is not self.original_code_object():
            return
        # code borrowed from https://github.com/thuml/depyf/blob/f4ad79fadee27ea113b4c75202db1eb1a11c0dbc/depyf/explain/enable_debugging.py#L25
        frame = sys._getframe()
        while frame and frame.f_back:
            frame = frame.f_back
            code_name = frame.f_code.co_name
            file_name = frame.f_code.co_filename.split(os.path.sep)[-1]
            if code_name == "_compile" and file_name == "convert_frame.py":
                break
        frame = frame.f_locals["frame"]
        assert frame.f_code == old_code
        if frame.f_locals["self"] is not self:
            return
        self._compiled_bytecode = new_code
        path = self.vllm_config.compile_debug_dump_path()
        if path:
            decompiled_file = path / "transformed_code.py"
            if not decompiled_file.exists():
                try:
                    # usually the decompilation will succeed for most models,
                    # as we guarantee a full-graph compilation in Dynamo.
                    # but there's no 100% guarantee, since decompliation is
                    # not a reversible process.
                    import depyf

                    src = depyf.decompile(new_code)
                    with open(decompiled_file, "w") as f:
                        f.write(src)
                    logger.debug("Dynamo transformed code saved to %s", decompiled_file)
                except Exception:
                    pass
        if (
            self.vllm_config.compilation_config.cudagraph_mode != CUDAGraphMode.NONE
            and "update" in new_code.co_names
        ):
            import depyf

            src = depyf.decompile(new_code)
            msg = (
                "Assigning / modifying buffers of nn.Module during forward pass is not "
                "allowed when using cudagraph inside the compiler because it will "
                "cause silent errors. Please use eager mode or fix the code. The "
                "following code contains clues about which buffer is being modified "
                f"(please search for the usage of the function `update`):\n{src}"
            )
            raise RuntimeError(msg)

    @contextmanager
    def _dispatch_to_compiled_code(self):  # noqa: E501
        """
        Context manager to dispatch to internally compiled code for torch<2.8.

        Why does this work? Because Dynamo guarantees that the compiled bytecode
        has exactly the same arguments, cell variables, and free variables as
        the original code. Therefore we can directly switch the code object in
        the function and call it.

        See https://dev-discuss.pytorch.org/t/what-is-the-relationship-requirement-among-original-bytecode-transformed-bytecode-and-bytecode-returned-by-hooks-in-dynamo/1693/7
        for more details.
        """  # noqa: E501 line too long
        original = self.original_code_object()
        assert self._compiled_bytecode is not None
        self.__class__.forward.__code__ = self._compiled_bytecode
        try:
            yield
        finally:
            self.__class__.forward.__code__ = original
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/__init__.py
vllm/compilation/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/torch25_custom_graph_pass.py
vllm/compilation/torch25_custom_graph_pass.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from abc import ABC, abstractmethod from typing import Any import torch class Torch25CustomGraphPass(ABC): # noqa (redefinition) """ This class replaces CustomGraphPass from torch==2.6 when using torch<2.6. It conforms to the 2.6 interface but also supports pickling, as that's what the inductor code cache uses to determine the cache key before 2.6. (in 2.6 and above, uuid() is used.) Subclasses can just "pretend" that uuid is used. """ @abstractmethod def __call__(self, graph: torch.fx.graph.Graph) -> None: """ Implementation of the custom pass. """ @abstractmethod def uuid(self) -> Any | None: """ Return an ID to uniquely identify your custom pass implementation. Return None to skip inductor code caching entirely. """ def __getstate__(self): """ Pickling is used instead of uuid() in torch<2.6. Just return uuid() to enable subclasses to only have to implement uuid. """ return self.uuid() def __setstate__(self, state): raise ValueError( "Cannot unpickle CustomGraphPass because pickling" " is used for cache key uuid. Use torch>=2.6 with" " native uuid support for custom passes." )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/inductor_pass.py
vllm/compilation/inductor_pass.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Base InductorPass machinery: pass-context plumbing, source-hash UUIDs for
the Inductor code cache, and a fake-mode decorator."""

from __future__ import annotations

import functools
import hashlib
import inspect
import json
import types
from collections.abc import Callable
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any

import torch
from torch import fx
from torch._subclasses.fake_tensor import FakeTensorMode, unset_fake_temporarily

from vllm.utils.torch_utils import is_torch_equal_or_newer

if TYPE_CHECKING:
    from vllm.config.utils import Range

if is_torch_equal_or_newer("2.6"):
    from torch._inductor.custom_graph_pass import CustomGraphPass
else:
    # CustomGraphPass is not present in 2.5 or lower, import our version
    from .torch25_custom_graph_pass import (
        Torch25CustomGraphPass as CustomGraphPass,
    )

# Module-global current pass context; set only via the pass_context() manager.
_pass_context = None


class PassContext:
    # Carries the compile range for the currently-running pass.
    def __init__(self, compile_range: Range):
        self.compile_range: Range = compile_range


def get_pass_context() -> PassContext:
    """Get the current pass context."""
    # Must be called inside a pass_context() block.
    assert _pass_context is not None
    return _pass_context


@contextmanager
def pass_context(compile_range: Range):
    """A context manager that stores the current pass context,
    usually it is a list of sizes to specialize.
    """
    global _pass_context
    # Save and restore the previous context so nesting is safe.
    prev_context = _pass_context
    _pass_context = PassContext(compile_range)
    try:
        yield
    finally:
        _pass_context = prev_context


class InductorPass(CustomGraphPass):
    """
    A custom graph pass that uses a hash of its source as the UUID.
    This is defined as a convenience and should work in most cases.
    """

    def uuid(self) -> Any:
        """
        Provide a unique identifier for the pass, used in Inductor code cache.
        This should depend on the pass implementation, so that changes to the
        pass result in recompilation.
        By default, the object source is hashed.
        """
        return InductorPass.hash_source(self)

    @staticmethod
    def hash_source(*srcs: str | Any) -> str:
        """
        Utility method to hash the sources of functions or objects.
        :param srcs: strings or objects to add to the hash.
            Objects and functions have their source inspected.
        :return: hex digest of the combined sha256 hash.
        """
        hasher = hashlib.sha256()
        for src in srcs:
            if isinstance(src, str):
                src_str = src
            elif isinstance(src, (types.FunctionType, type)):
                src_str = inspect.getsource(src)
            else:
                # object instance
                src_str = inspect.getsource(src.__class__)
            hasher.update(src_str.encode("utf-8"))
        return hasher.hexdigest()

    @staticmethod
    def hash_dict(dict_: dict[Any, Any]) -> str:
        """
        Utility method to hash a dictionary, can alternatively be used for uuid.
        :return: A sha256 hash of the json rep of the dictionary.
        """
        encoded = json.dumps(dict_, sort_keys=True).encode("utf-8")
        return hashlib.sha256(encoded).hexdigest()

    def is_applicable_for_range(self, compile_range: Range) -> bool:
        # Default: a pass applies to every compile range; subclasses override.
        return True


class CallableInductorPass(InductorPass):
    """
    This class is a wrapper for a callable that automatically provides an
    implementation of the UUID.
    """

    def __init__(self, callable: Callable[[fx.Graph], None], uuid: Any | None = None):
        self.callable = callable
        # Fall back to hashing the callable's source when no uuid is given.
        self._uuid = self.hash_source(callable) if uuid is None else uuid

    def __call__(self, graph: torch.fx.Graph):
        self.callable(graph)

    def uuid(self) -> Any:
        return self._uuid


def enable_fake_mode(fn: Callable[..., Any]) -> Callable[..., Any]:
    """
    Applies a FakeTensorMode context.
    This is useful when you don't want to create or run things with real
    tensors.
    """

    @functools.wraps(fn)
    def fn_new(*args, **kwargs) -> Any:
        with torch._guards.tracing(None), unset_fake_temporarily(), FakeTensorMode():
            result = fn(*args, **kwargs)

        return result

    return fn_new
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/noop_elimination.py
vllm/compilation/noop_elimination.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inductor pass removing redundant reshape/slice/slice_scatter nodes."""

from collections.abc import Iterable

import torch.fx
from torch import SymInt
from torch.fx.experimental.symbolic_shapes import statically_known_true

from vllm.logger import init_logger

from .fx_utils import is_func
from .vllm_inductor_pass import VllmInductorPass

logger = init_logger(__name__)


class NoOpEliminationPass(VllmInductorPass):
    """
    This is an inductor pass that removes redundant reshape/slice operations.
    It is required for RMSNorm-quant fusion to work properly.
    That's because apply_fp8_linear adds a reshape, which is redundant
    in the 2D-case. Additionally, torch internal no-op elimination pass does
    not handle certain slice variants.

    Cases handled:
    1. A chain of reshapes is equivalent to the last reshape called on the
       base tensor (input of the first reshape).
    2. A reshape that produces the shape of the input is redundant
    3. A slice that produces the shape of the input is redundant

    Example graph 1:
    mul_1: "f16[s0, 4096]" = ...
    view_1: "f16[s0, 128, 32]" = torch.reshape(mul_1, [-1, 128, 32])
    view_2: "f16[s0, 4096]" = torch.reshape(view_1, [-1, 4096])
    view_3: "f16[s0, 128, 32]" = torch.reshape(view_2, [-1, 128, 32])

    Can be replaced with:
    mul_1: "f16[s0, 4096]" = ...
    view_3: "f16[s0, 128, 32]" = ...

    Example graph 2:
    getitem_1: "f16[s0, 4096]" = ...
    view_1: "f16[s0, 4096]" = torch.reshape(getitem_1, [-1, 4096])
    at = auto_functionalized(static_scaled_fp8_quant, input = view_1, ...)
    out: "f8e4m3fn[s0, 4096]" = at[1]

    Can be replaced with:
    getitem_1: "f16[s0, 4096]" = ...
    at = auto_functionalized(static_scaled_fp8_quant, input = getitem_1, ...)
    out: "f8e4m3fn[s0, 4096]" = at[1]

    Example graph 3:
    arg0: "s0" = SymInt(s0)
    scaled_mm: "f16[s0, 4096]" = ...
    slice_1: "f16[s0, 4096]" = torch.slice(scaled_mm, -1, 0, arg0)
    at = auto_functionalized(fused_add_rms_norm, input = slice_1, ...)
    out: "f16[s0, 4096]" = torch.slice_scatter(scaled_mm, at[1], 0, 0, arg0)

    Can be replaced with:
    arg0: "s0" = SymInt(s0)
    scaled_mm: "f16[s0, 4096]" = ...
    at = auto_functionalized(fused_add_rms_norm, input = scaled_mm, ...)
    out: "f16[s0, 4096]" = at[1]
    """

    @VllmInductorPass.time_and_log
    def __call__(self, graph: torch.fx.Graph):
        count = 0
        # Remove no-op reshapes/views:
        for node in graph.nodes:
            if is_func(node, torch.ops.aten.reshape.default):
                # Case 1: rewrite reshape chains to reshapes on the base tensor
                input = node.args[0]
                # If the input is a reshape, rebind to that node
                if is_func(input, torch.ops.aten.reshape.default):
                    # The new input is guaranteed not to be a reshape,
                    # because we process nodes in order
                    node.update_arg(0, input.args[0])
                    if len(input.users) == 0:
                        graph.erase_node(input)
                        count += 1

            # remove reshape/slice if it produces the original shape
            if is_func(node, torch.ops.aten.reshape.default) or is_func(
                node, torch.ops.aten.slice.Tensor
            ):
                input = node.args[0]
                input_shape = input.meta["val"].shape
                output_shape = node.meta["val"].shape
                if self.all_dims_equivalent(input_shape, output_shape):
                    node.replace_all_uses_with(input)
                    graph.erase_node(node)
                    count += 1
            elif is_func(node, torch.ops.aten.slice_scatter.default):
                # Scattering a full-size view back into its base is a no-op;
                # the result is just the view.
                base, view, dim_index, start, end = node.args[:5]
                base_shape = base.meta["val"].shape
                view_shape = view.meta["val"].shape
                if self.all_dims_equivalent(base_shape, view_shape):
                    node.replace_all_uses_with(view)
                    graph.erase_node(node)
                    count += 1

        logger.debug("Removed %s no-op reshapes and slices", count)

    # ---------------------- Shape comparison helpers ----------------------
    def dims_equivalent(self, dim: int | SymInt, i_dim: int | SymInt) -> bool:
        """
        This function checks if two dimensions are equivalent.
        :param dim: The dimension arg to reshape/slice
        :param i_dim: The corresponding dimension in the input tensor
        :return: Are the dimensions equivalent?

        There are two cases in which the dimensions are equivalent:
        1. The dimensions are equal (both integers)
        2. The dimensions both correspond to the same SymInt

        statically_known_true covers both: it returns True only when the
        equality can be proven at compile time (never guards at runtime).
        """
        return statically_known_true(dim == i_dim)

    def all_dims_equivalent(
        self, dims: Iterable[int | SymInt], i_dims: Iterable[int | SymInt]
    ) -> bool:
        """Return True iff both shapes have the same rank and every pair of
        dimensions is statically known to be equal."""
        # Materialize once: the inputs are Iterables, and a one-shot iterator
        # would already be exhausted by the length check below.
        dims_ = list(dims)
        i_dims_ = list(i_dims)
        if len(dims_) != len(i_dims_):
            # Different ranks can't be equivalent
            return False
        # Compare the materialized lists (not the original iterables, which
        # may not be re-iterable).
        return all(self.dims_equivalent(s, i_s) for s, i_s in zip(dims_, i_dims_))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/pass_manager.py
vllm/compilation/pass_manager.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import functools

from torch import fx as fx

from vllm import envs
from vllm._aiter_ops import rocm_aiter_ops
from vllm.config import VllmConfig, set_current_vllm_config
from vllm.logger import init_logger
from vllm.platforms import current_platform
from vllm.utils.system_utils import set_env_var

from .post_cleanup import PostCleanupPass
from .vllm_inductor_pass import VllmInductorPass

# ROCm AITER fusion passes are only imported when AITER is enabled.
if rocm_aiter_ops.is_enabled():
    from vllm.compilation.rocm_aiter_fusion import (
        RocmAiterRMSNormFusionPass,
        RocmAiterSiluMulFp8GroupQuantFusionPass,
    )

# CUDA/ROCm-only passes, guarded so this module imports on other platforms.
if current_platform.is_cuda_alike():
    from .activation_quant_fusion import ActivationQuantFusionPass
    from .fusion import RMSNormQuantFusionPass
    from .fusion_attn import AttnFusionPass
    from .qk_norm_rope_fusion import QKNormRoPEFusionPass
    from .sequence_parallelism import SequenceParallelismPass

    # Collective (allreduce/async-TP) fusions are CUDA-only.
    if current_platform.is_cuda():
        from .collective_fusion import AllReduceFusionPass, AsyncTPPass

from .fix_functionalization import FixFunctionalizationPass
from .inductor_pass import (
    CustomGraphPass,
    InductorPass,
    get_pass_context,
)
from .noop_elimination import NoOpEliminationPass

logger = init_logger(__name__)


def with_pattern_match_debug(fn):
    """
    Function decorator that turns on inductor pattern match debug
    for the duration of the call.
    Used to avoid logging builtin Inductor pattern matching.
    """

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # Only set the env var when the vLLM-level debug knob is present;
        # set_env_var restores the previous value when the call returns.
        if (debug_val := envs.VLLM_PATTERN_MATCH_DEBUG) is not None:
            # optionally check rank here
            with set_env_var("TORCHINDUCTOR_PATTERN_MATCH_DEBUG", debug_val):
                return fn(*args, **kwargs)
        return fn(*args, **kwargs)

    return wrapper


class PostGradPassManager(CustomGraphPass):
    """
    The pass manager for post-grad passes.
    It handles configuration, adding custom passes, and running passes.
    It supports uuid for the Inductor code cache.
    That includes torch<2.6 support using pickling
    (in .inductor_pass.CustomGraphPass).

    The order of the post-grad post-passes is:
    1. passes (constructor parameter)
    2. default passes (NoopEliminationPass, FusionPass)
    3. config["post_grad_custom_post_pass"] (if it exists)
    4. fix_functionalization
    This way, all passes operate on a functionalized graph.
    """

    def __init__(self):
        # Configurable passes; populated by configure() and add().
        # post_cleanup / fix_functionalization are set in configure() and
        # always run after these, in that order.
        self.passes: list[InductorPass] = []

    @with_pattern_match_debug
    def __call__(self, graph: fx.Graph) -> None:
        """Run all applicable passes on `graph`, then post-cleanup and
        fix_functionalization. Requires configure() to have been called."""
        VllmInductorPass.dump_prefix = 0  # reset dump index
        compile_range = get_pass_context().compile_range
        for pass_ in self.passes:
            if pass_.is_applicable_for_range(compile_range):
                pass_(graph)
                # dump_prefix numbers the per-pass graph dumps in order,
                # counting skipped passes is intentionally avoided here.
                VllmInductorPass.dump_prefix += 1
            else:
                logger.debug("Skipping %s with compile range %s", pass_, compile_range)

        # post-cleanup goes before fix_functionalization
        # because it requires a functional graph
        self.post_cleanup(graph)
        VllmInductorPass.dump_prefix += 1

        # always run fix_functionalization last
        self.fix_functionalization(graph)
        VllmInductorPass.dump_prefix = None  # Cleanup index

    def configure(self, config: VllmConfig) -> None:
        """Build the pass list from the compilation pass config.

        Each `pass_config` flag appends its pass; relative order here fixes
        the execution order in __call__().
        """
        self.pass_config = config.compilation_config.pass_config
        # Set the current vllm config to allow tracing CustomOp instances
        with set_current_vllm_config(config, check_compile=False):
            if self.pass_config.eliminate_noops:
                self.passes += [NoOpEliminationPass(config)]

            if self.pass_config.enable_sp:
                self.passes += [SequenceParallelismPass(config)]
            if self.pass_config.fuse_gemm_comms:
                self.passes += [AsyncTPPass(config)]
            if self.pass_config.fuse_allreduce_rms:
                self.passes += [AllReduceFusionPass(config)]

            if self.pass_config.fuse_norm_quant:
                self.passes += [RMSNormQuantFusionPass(config)]
                # ROCm AITER variant of norm+quant fusion, added alongside.
                if rocm_aiter_ops.is_enabled():
                    self.passes += [
                        RocmAiterRMSNormFusionPass(config),
                    ]

            if self.pass_config.fuse_act_quant:
                self.passes += [ActivationQuantFusionPass(config)]
                if rocm_aiter_ops.is_enabled():
                    self.passes += [RocmAiterSiluMulFp8GroupQuantFusionPass(config)]

            if self.pass_config.fuse_attn_quant:
                self.passes += [AttnFusionPass(config)]

            if self.pass_config.enable_qk_norm_rope_fusion:
                self.passes += [QKNormRoPEFusionPass(config)]

        # needs a functional graph
        self.post_cleanup = PostCleanupPass(config)
        self.fix_functionalization = FixFunctionalizationPass(config)

    def add(self, pass_: InductorPass) -> None:
        """Append a custom pass; it runs before the default passes added
        later by configure() only if add() is called first."""
        assert isinstance(pass_, InductorPass)
        self.passes.append(pass_)

    def uuid(self):
        """
        The PostGradPassManager is set as a custom pass in the Inductor and
        affects compilation caching. Its uuid depends on the UUIDs of all
        dependent passes and the pass config. See InductorPass for more info.
        """
        state = {"pass_config": self.pass_config.compute_hash(), "passes": []}
        for pass_ in self.passes:
            state["passes"].append(pass_.uuid())
        # NOTE(review): post_cleanup's uuid is not folded in here, only
        # fix_functionalization's — confirm that is intentional.
        state["passes"].append(self.fix_functionalization.uuid())
        # Include the compile range in the uuid to ensure that inductor
        # recompiles the graph for the new dynamic compile range.
        state["compile_range"] = str(get_pass_context().compile_range)
        return InductorPass.hash_dict(state)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/collective_fusion.py
vllm/compilation/collective_fusion.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from importlib.util import find_spec import torch import torch._inductor.pattern_matcher as pm import torch.fx as fx from torch._higher_order_ops.auto_functionalize import auto_functionalized from torch._inductor.pattern_matcher import PatternMatcherPass from torch.distributed._symmetric_memory import enable_symm_mem_for_group from vllm.config import VllmConfig from vllm.config.utils import Range from vllm.distributed import get_tp_group, tensor_model_parallel_all_reduce from vllm.distributed.parallel_state import ( get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, ) from vllm.logger import init_logger from vllm.model_executor.layers.quantization.utils.quant_utils import ( kFp8StaticTensorSym, ) from vllm.platforms import current_platform from vllm.utils.torch_utils import direct_register_custom_op from .inductor_pass import enable_fake_mode from .matcher_utils import MatcherFusedAddRMSNorm, MatcherQuantFP8, MatcherRMSNorm from .vllm_inductor_pass import VllmInductorPass, VllmPatternMatcherPass FP8_DTYPE = current_platform.fp8_dtype() if find_spec("flashinfer"): try: import flashinfer.comm as flashinfer_comm flashinfer_comm = ( flashinfer_comm if hasattr(flashinfer_comm, "trtllm_allreduce_fusion") else None ) except ImportError: flashinfer_comm = None else: flashinfer_comm = None logger = init_logger(__name__) if hasattr(torch.ops._C, "scaled_fp4_quant"): STATIC_FP4_QUANT_OP = torch.ops._C.scaled_fp4_quant.default class BasePattern: def __init__(self, dtype: torch.dtype, device: str): self.dtype = dtype self.device = device self.tp = get_tp_group() self.tp_size = get_tensor_model_parallel_world_size() class GEMMReduceScatterPattern(BasePattern): def get_inputs(self): mul = torch.empty([16, 4], device=self.device, dtype=self.dtype) mm_weight = torch.empty([4, 4], device=self.device, dtype=self.dtype) return [mul, mm_weight] def 
register(self, pm_pass: PatternMatcherPass): def pattern(mul: torch.Tensor, mm_weight: torch.Tensor): mm = torch.ops.aten.mm.default(mul, mm_weight) reduce_scatter = torch.ops.vllm.reduce_scatter.default( mm, dim=0, world_size=self.tp_size, group_name=self.tp.unique_name, ) return reduce_scatter def replacement(mul: torch.Tensor, mm_weight: torch.Tensor): gemm_rs = torch.ops.symm_mem.fused_matmul_reduce_scatter( mul, mm_weight, "avg", scatter_dim=0, group_name=self.tp.device_group.group_name, ) return gemm_rs pm.register_replacement( pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass ) class AllGatherGEMMPattern(BasePattern): def get_inputs(self): x = torch.empty([4, 4], device=self.device, dtype=self.dtype) weight = torch.empty([4, 4], device=self.device, dtype=self.dtype) return [x, weight] def register(self, pm_pass: PatternMatcherPass): def pattern( x: torch.Tensor, weight: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: all_gather = torch.ops.vllm.all_gather.default( x, dim=0, world_size=self.tp_size, group_name=self.tp.unique_name, ) return torch.ops.aten.mm.default(all_gather, weight) def replacement( x: torch.Tensor, weight: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor]: ag_output, mm_outputs = torch.ops.symm_mem.fused_all_gather_matmul( x, [weight], gather_dim=0, group_name=self.tp.device_group.group_name, ) return mm_outputs pm.register_replacement( pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass ) class ScaledMMReduceScatterPattern(BasePattern): def get_inputs(self): input = torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE) mm_weight = ( torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE) .contiguous() .transpose(0, 1) ) scale_a = torch.empty([16, 1], device=self.device, dtype=torch.float32) scale_b = torch.empty([1, 16], device=self.device, dtype=torch.float32) return [input, mm_weight, scale_a, scale_b] def register(self, pm_pass: PatternMatcherPass): def pattern( input: torch.Tensor, mat2: 
torch.Tensor, scale_a: torch.Tensor, scale_b: torch.Tensor, ) -> torch.Tensor: scaled_mm = torch.ops.aten._scaled_mm.default( input, mat2=mat2, scale_a=scale_a, scale_b=scale_b, bias=None, scale_result=None, out_dtype=self.dtype, ) reduce_scatter = torch.ops.vllm.reduce_scatter.default( scaled_mm, dim=0, world_size=self.tp_size, group_name=self.tp.unique_name, ) return reduce_scatter def replacement( input: torch.Tensor, mat2: torch.Tensor, scale_a: torch.Tensor, scale_b: torch.Tensor, ) -> torch.Tensor: # Calculate output shape: input @ mat2 with scatter_dim reduced output_shape = [*input.shape[:-1], mat2.shape[1]] scatter_dim = 0 gemm_rs = torch.ops.vllm.patched_fused_scaled_matmul_reduce_scatter( input, mat2, scale_a, scale_b, "avg", scatter_dim, # orig_scatter_dim scatter_dim, # scatter_dim_after_maybe_reshape self.tp.device_group.group_name, output_shape, None, # bias None, # result_scale self.dtype, # out_dtype False, # use_fast_accum ) return gemm_rs pm.register_replacement( pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass ) class AllGatherScaledMMPattern(BasePattern): def get_inputs(self): x = torch.empty([8, 16], device=self.device, dtype=FP8_DTYPE) weight = ( torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE) .contiguous() .transpose(0, 1) ) s1 = x.shape[0] * self.tp_size scale_a = torch.empty([s1, 1], device=self.device, dtype=torch.float32) scale_b = torch.empty([1, 16], device=self.device, dtype=torch.float32) return [x, weight, scale_a, scale_b] def register(self, pm_pass: PatternMatcherPass): def pattern( x: torch.Tensor, weight: torch.Tensor, scale_a: torch.Tensor, scale_b: torch.Tensor, ) -> torch.Tensor: all_gather = torch.ops.vllm.all_gather.default( x, dim=0, world_size=self.tp_size, group_name=self.tp.unique_name ) return torch.ops.aten._scaled_mm.default( all_gather, mat2=weight, scale_a=scale_a, scale_b=scale_b, bias=None, scale_result=None, out_dtype=self.dtype, ) def replacement( x: torch.Tensor, weight: torch.Tensor, 
scale_a: torch.Tensor, scale_b: torch.Tensor, ) -> torch.Tensor: ag_output, mm_outputs = torch.ops.symm_mem.fused_all_gather_scaled_matmul( # noqa x, [weight], scale_a, [scale_b], gather_dim=0, biases=[None], result_scales=[None], out_dtypes=[self.dtype], use_fast_accum=[False], group_name=self.tp.device_group.group_name, ) return mm_outputs pm.register_replacement( pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass ) class CutlassScaledMMReduceScatterPattern(BasePattern): def get_inputs(self): input = torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE) mm_weight = ( torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE) .contiguous() .transpose(0, 1) ) scale_a = torch.empty([16, 1], device=self.device, dtype=torch.float32) scale_b = torch.empty([1, 16], device=self.device, dtype=torch.float32) cutlass_mm_output = torch.empty([16, 16], device=self.device, dtype=self.dtype) return [input, mm_weight, scale_a, scale_b, cutlass_mm_output] def register(self, pm_pass: PatternMatcherPass): def pattern( input: torch.Tensor, weight: torch.Tensor, scale_a: torch.Tensor, scale_b: torch.Tensor, cutlass_mm_output: torch.Tensor, ) -> torch.Tensor: cutlass_scaled_mm = torch.ops.higher_order.auto_functionalized( torch.ops._C.cutlass_scaled_mm.default, out=cutlass_mm_output, a=input, b=weight, a_scales=scale_a, b_scales=scale_b, bias=None, ) reduce_scatter = torch.ops.vllm.reduce_scatter.default( cutlass_scaled_mm[1], dim=0, world_size=self.tp_size, group_name=self.tp.unique_name, ) return reduce_scatter def replacement( input: torch.Tensor, mat2: torch.Tensor, scale_a: torch.Tensor, scale_b: torch.Tensor, cutlass_mm_output: torch.Tensor, ) -> torch.Tensor: # Calculate output shape: input @ mat2 with scatter_dim reduced output_shape = [*input.shape[:-1], mat2.shape[1]] scatter_dim = 0 gemm_rs = torch.ops.vllm.patched_fused_scaled_matmul_reduce_scatter( input, mat2, scale_a, scale_b, "avg", scatter_dim, # orig_scatter_dim scatter_dim, # 
scatter_dim_after_maybe_reshape self.tp.device_group.group_name, output_shape, None, # bias None, # result_scale self.dtype, # out_dtype False, # use_fast_accum ) return gemm_rs pm.register_replacement( pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass ) class AllGatherCutlassScaledMMPattern(BasePattern): def get_inputs(self): x = torch.empty([8, 16], device=self.device, dtype=FP8_DTYPE) weight = ( torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE) .contiguous() .transpose(0, 1) ) s1 = x.shape[0] * self.tp_size scale_a = torch.empty([s1, 1], device=self.device, dtype=torch.float32) scale_b = torch.empty([1, 16], device=self.device, dtype=torch.float32) s2 = weight.shape[1] output = torch.empty([s1, s2], device=self.device, dtype=self.dtype) return [x, weight, scale_a, scale_b, output] def register(self, pm_pass: PatternMatcherPass): def pattern( x: torch.Tensor, weight: torch.Tensor, scale_a: torch.Tensor, scale_b: torch.Tensor, output: torch.Tensor, ) -> torch.Tensor: all_gather = torch.ops.vllm.all_gather.default( x, dim=0, world_size=self.tp_size, group_name=self.tp.unique_name ) cutlass_scaled_mm = torch.ops.higher_order.auto_functionalized( torch.ops._C.cutlass_scaled_mm.default, out=output, a=all_gather, b=weight, a_scales=scale_a, b_scales=scale_b, bias=None, ) return cutlass_scaled_mm[1] def replacement( x: torch.Tensor, weight: torch.Tensor, scale_a: torch.Tensor, scale_b: torch.Tensor, output: torch.Tensor, ) -> torch.Tensor: ag_output, mm_outputs = torch.ops.symm_mem.fused_all_gather_scaled_matmul( # noqa x, [weight], scale_a, [scale_b], gather_dim=0, biases=[None], result_scales=[None], out_dtypes=[self.dtype], use_fast_accum=[False], group_name=self.tp.device_group.group_name, ) return mm_outputs pm.register_replacement( pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass ) class AsyncTPPass(VllmPatternMatcherPass): @enable_fake_mode def __init__(self, config: VllmConfig): super().__init__(config) # Enable symmetric memory 
for the TP process group enable_symm_mem_for_group(get_tp_group().device_group.group_name) self.patterns: PatternMatcherPass = PatternMatcherPass( pass_name="async_tp_pass" ) GEMMReduceScatterPattern(self.model_dtype, self.device).register(self.patterns) AllGatherGEMMPattern(self.model_dtype, self.device).register(self.patterns) # These fusions are enabled only for bfloat16 models because # `scaled_mm` or `cutlass_scaled_mm` with per-token (row-wise) scaling # only supports bfloat16 as the output dtype. if self.model_dtype == torch.bfloat16: ScaledMMReduceScatterPattern(self.model_dtype, self.device).register( self.patterns ) AllGatherScaledMMPattern(self.model_dtype, self.device).register( self.patterns ) CutlassScaledMMReduceScatterPattern(self.model_dtype, self.device).register( self.patterns ) AllGatherCutlassScaledMMPattern(self.model_dtype, self.device).register( self.patterns ) self.dump_patterns(config, self.patterns) def is_applicable_for_range(self, compile_range: Range) -> bool: # This pass is applied on top of the sequence parallelism pass. # It inherits the same applicability condition as `SequenceParallelismPass`. # See `SequenceParallelismPass.is_applicable` for more details. 
if ( not self.compilation_config.splitting_ops or self.compilation_config.use_inductor_graph_partition ): return True tp_size = get_tensor_model_parallel_world_size() return compile_range.is_single_size() and compile_range.end % tp_size == 0 @VllmInductorPass.time_and_log def __call__(self, graph: fx.Graph): self.matched_count = self.patterns.apply(graph) logger.debug("Replaced %s patterns", self.matched_count) # Max size of the input tensor per world size per device capability # to use flashinfer fused allreduce FI_ALLREDUCE_FUSION_MAX_SIZE_MB: dict[int, dict[int, float]] = { 90: { 2: 64, # 64MB 4: 2, # 2MB 8: 0.5, # 0.5MB }, 100: { 2: 64, # 64MB 4: 32, # 32MB 8: 1, # 1MB }, } # Max size of the input tensor per world size per device capability # to use flashinfer one shot fused allreduce # OneShot max size is at most 64MB / world size (FlashInfer restriction) _FI_ALLREDUCE_ONE_SHOT_MAX_SIZES_MB: dict[int, dict[int, float]] = { 90: { 2: 32, # 32MB 4: 2, # 2MB 8: 0.5, # 0.5MB }, 100: { 2: 32, # 32MB 4: 4, # 4MB 8: 1, # 1MB }, } if flashinfer_comm is not None: _FI_WORKSPACE_TENSOR = None MiB = 1024 * 1024 def call_trtllm_fused_allreduce_norm( allreduce_in: torch.Tensor, residual: torch.Tensor, rms_gamma: torch.Tensor, rms_eps: float, world_rank: int, world_size: int, launch_with_pdl: bool, trigger_completion_at_end: bool, fp32_acc: bool, max_token_num: int, pattern_code: int, norm_out: torch.Tensor | None = None, quant_out: torch.Tensor | None = None, scale_out: torch.Tensor | None = None, scale_factor: torch.Tensor | None = None, ) -> None: num_tokens, hidden_size = allreduce_in.shape element_size = allreduce_in.element_size() current_tensor_size = num_tokens * hidden_size * element_size max_tensor_size = max_token_num * hidden_size * element_size assert current_tensor_size <= max_tensor_size, ( f"Current tensor size {current_tensor_size} is larger than " f"max token num {max_token_num} * hidden size {hidden_size} * " f"element size {element_size}" ) 
device_capability = current_platform.get_device_capability().to_int() # Get one shot input size limit for the current world size # for the current device capability max_one_shot_size = _FI_ALLREDUCE_ONE_SHOT_MAX_SIZES_MB.get( device_capability, {} ).get(world_size, None) # Use one shot if no max size is specified use_oneshot = ( max_one_shot_size is None or current_tensor_size <= max_one_shot_size * MiB ) assert _FI_WORKSPACE_TENSOR is not None, ( "Flashinfer must be enabled when using flashinfer" ) if norm_out is None: norm_out = allreduce_in residual_out = residual else: # return residual_out as allreduce_out with zeroed residual_in # as flashinfer does not support rms_norm # and allreduce_out together residual_out = allreduce_in # For the sizes that are smaller than the max size, # we only use flashinfer one shot allreduce flashinfer_comm.trtllm_allreduce_fusion( allreduce_in=allreduce_in, token_num=allreduce_in.shape[0], residual_in=residual, residual_out=residual_out, norm_out=norm_out, rms_gamma=rms_gamma, rms_eps=rms_eps, world_rank=world_rank, world_size=world_size, hidden_dim=allreduce_in.shape[-1], workspace_ptrs=_FI_WORKSPACE_TENSOR, launch_with_pdl=launch_with_pdl, use_oneshot=use_oneshot, trigger_completion_at_end=trigger_completion_at_end, fp32_acc=fp32_acc, pattern_code=pattern_code, allreduce_out=None, quant_out=quant_out, scale_out=scale_out, # in vllm we only support swizzled layout layout_code=flashinfer_comm.QuantizationSFLayout.SWIZZLED_128x4, scale_factor=scale_factor, ) def call_trtllm_fused_allreduce_norm_fake( allreduce_in: torch.Tensor, residual: torch.Tensor, rms_gamma: torch.Tensor, rms_eps: float, world_rank: int, world_size: int, launch_with_pdl: bool, trigger_completion_at_end: bool, fp32_acc: bool, max_token_num: int, pattern_code: int, norm_out: torch.Tensor | None = None, quant_out: torch.Tensor | None = None, scale_out: torch.Tensor | None = None, scale_factor: torch.Tensor | None = None, ) -> None: pass direct_register_custom_op( 
op_name="flashinfer_trtllm_fused_allreduce_norm", op_func=call_trtllm_fused_allreduce_norm, mutates_args=[ "allreduce_in", "residual", "norm_out", "quant_out", "scale_out", ], fake_impl=call_trtllm_fused_allreduce_norm_fake, ) flashinfer_trtllm_fused_allreduce_norm = ( torch.ops.vllm.flashinfer_trtllm_fused_allreduce_norm.default ) class FlashInferFusedAllReduceParams: """Parameters for FlashInfer fused allreduce operations.""" def __init__( self, rank: int, world_size: int, use_fp32_lamport: bool = False, max_token_num: int = 1024, ): self.rank = rank self.world_size = world_size self.use_fp32_lamport = use_fp32_lamport self.trigger_completion_at_end = True self.launch_with_pdl = True self.fp32_acc = True self.max_token_num = max_token_num def get_trtllm_fused_allreduce_kwargs(self): return { "world_rank": self.rank, "world_size": self.world_size, "launch_with_pdl": self.launch_with_pdl, "trigger_completion_at_end": self.trigger_completion_at_end, "fp32_acc": self.fp32_acc, "max_token_num": self.max_token_num, } class AllReduceRMSNormPattern(BasePattern): """ This pattern replaces the allreduce + rms norm (without residual) with fused flashinfer implementation. Applies to allreduce + rmsnorm before attn in the first Transformer block. 
""" def __init__( self, epsilon: float, dtype: torch.dtype, device: str, allreduce_params: FlashInferFusedAllReduceParams, ): super().__init__(dtype, device) self.epsilon = epsilon self.allreduce_params = allreduce_params self.rmsnorm_matcher = MatcherRMSNorm(epsilon) def get_inputs(self): input, weight = self.rmsnorm_matcher.inputs() # input goes through allreduce first, always 16-bit return [input.to(self.dtype), weight] def register(self, pm_pass: PatternMatcherPass): def pattern(input: torch.Tensor, weight: torch.Tensor): allreduce_output = tensor_model_parallel_all_reduce(input) rms = self.rmsnorm_matcher(allreduce_output, weight) return rms, allreduce_output def replacement(input: torch.Tensor, weight: torch.Tensor): residual = torch.zeros_like(input) rms_result = torch.empty_like(input) allreduce = auto_functionalized( flashinfer_trtllm_fused_allreduce_norm, allreduce_in=input, residual=residual, norm_out=rms_result, quant_out=None, scale_out=None, rms_gamma=weight, rms_eps=self.epsilon, pattern_code=flashinfer_comm.AllReduceFusionPattern.kARResidualRMSNorm, **self.allreduce_params.get_trtllm_fused_allreduce_kwargs(), ) # rms_result, allreduce_in return allreduce[3], allreduce[1] pm.register_replacement( pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass ) class AllReduceFusedAddRMSNormPattern(BasePattern): """ This pattern replaces the allreduce + rms norm (with residual) with fused flashinfer implementation. Applies to o_proj + rmsnorm after attn and mlp + rmsnorm before attn. 
""" def __init__( self, epsilon: float, dtype: torch.dtype, device: str, allreduce_params: FlashInferFusedAllReduceParams, ): super().__init__(dtype, device) self.epsilon = epsilon self.allreduce_params = allreduce_params self.rmsnorm_matcher = MatcherFusedAddRMSNorm(epsilon) def get_inputs(self): input, residual, weight = self.rmsnorm_matcher.inputs() # input goes through allreduce first, always 16-bit return [residual, input.to(self.dtype), weight] def register(self, pm_pass: PatternMatcherPass): def pattern(residual: torch.Tensor, input: torch.Tensor, weight: torch.Tensor): allreduce_output = tensor_model_parallel_all_reduce(input) rms, residual = self.rmsnorm_matcher(allreduce_output, weight, residual) return rms, residual def replacement( residual: torch.Tensor, input: torch.Tensor, weight: torch.Tensor ): allreduce = auto_functionalized( flashinfer_trtllm_fused_allreduce_norm, allreduce_in=input, residual=residual, norm_out=None, quant_out=None, scale_out=None, rms_gamma=weight, rms_eps=self.epsilon, pattern_code=flashinfer_comm.AllReduceFusionPattern.kARResidualRMSNorm, **self.allreduce_params.get_trtllm_fused_allreduce_kwargs(), ) # allreduce_in, residual return allreduce[1], allreduce[2] pm.register_replacement( pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass ) # Same pattern, but only return the output and not residual # (helpful for end of graph where residual is not used again) first_return_only = lambda fn: lambda a, b, c: fn(a, b, c)[0] pm.register_replacement( first_return_only(pattern), first_return_only(replacement), self.get_inputs(), pm.fwd_only, pm_pass, ) class AllReduceFusedRMSNormStaticQuantFP8Pattern(BasePattern): """ This pattern replaces the allreduce + rms norm (without residual) + static fp8 quant with fused flashinfer implementation. Applies to allreduce + rmsnorm + quant before attn in the first Transformer block. 
""" def __init__( self, epsilon: float, dtype: torch.dtype, device: str, allreduce_params: FlashInferFusedAllReduceParams, ): super().__init__(dtype, device) self.epsilon = epsilon self.allreduce_params = allreduce_params self.quant_dtype = torch.float8_e4m3fn self.rmsnorm_matcher = MatcherRMSNorm(epsilon) self.quant_matcher = MatcherQuantFP8(kFp8StaticTensorSym) def register(self, pm_pass: PatternMatcherPass): def get_inputs(): input, weight = self.rmsnorm_matcher.inputs() _, scale = self.quant_matcher.inputs() # input goes through allreduce first, always 16-bit return [input.to(self.dtype), weight, scale] def pattern( input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor, ): all_reduce = tensor_model_parallel_all_reduce(input) rms = self.rmsnorm_matcher(all_reduce, weight) quant, _ = self.quant_matcher(rms, scale) return quant, all_reduce def replacement(input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor): residual = torch.zeros_like(input) result_rms = torch.empty_like(input) result_quant = torch.empty_like(input, dtype=self.quant_dtype) allreduce = auto_functionalized( flashinfer_trtllm_fused_allreduce_norm, allreduce_in=input, residual=residual, norm_out=result_rms, quant_out=result_quant, scale_out=None, rms_gamma=weight, rms_eps=self.epsilon, # We don't use norm_out afterwards pattern_code=( flashinfer_comm.AllReduceFusionPattern.kARResidualRMSNormFP8Quant ), scale_factor=scale, **self.allreduce_params.get_trtllm_fused_allreduce_kwargs(), ) # quant_out, allreduce_output return allreduce[4], allreduce[1] pm.register_replacement( pattern, replacement, get_inputs(), pm.fwd_only, pm_pass ) class AllReduceFusedAddRMSNormStaticQuantFP8Pattern(BasePattern): """ This pattern replaces the allreduce + rms norm (with residual) + static fp8 quant with fused flashinfer implementation. Applies to o_proj + rmsnorm after attn + quant and mlp + rmsnorm + quant before attn. 
""" def __init__( self, epsilon: float, dtype: torch.dtype, device: str, allreduce_params: FlashInferFusedAllReduceParams, ): super().__init__(dtype, device) self.epsilon = epsilon self.allreduce_params = allreduce_params self.quant_dtype = torch.float8_e4m3fn self.rmsnorm_matcher = MatcherFusedAddRMSNorm(epsilon) self.quant_matcher = MatcherQuantFP8(kFp8StaticTensorSym) def register(self, pm_pass: PatternMatcherPass): def get_inputs(): input, residual, weight = self.rmsnorm_matcher.inputs() _, scale = self.quant_matcher.inputs() # input goes through allreduce first, always 16-bit return [residual, input.to(self.dtype), weight, scale] def pattern( residual: torch.Tensor, input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor, ): allreduce_output = tensor_model_parallel_all_reduce(input) rms, res = self.rmsnorm_matcher(allreduce_output, weight, residual) quant, _ = self.quant_matcher(rms, scale) return quant, res def replacement( residual: torch.Tensor, input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor, ): result_quant = torch.empty_like(input, dtype=self.quant_dtype) allreduce = auto_functionalized( flashinfer_trtllm_fused_allreduce_norm, allreduce_in=input, residual=residual, norm_out=None, quant_out=result_quant, scale_out=None, rms_gamma=weight, rms_eps=self.epsilon, # We don't use norm_out afterwards pattern_code=( flashinfer_comm.AllReduceFusionPattern.kARResidualRMSNormFP8Quant ), scale_factor=scale, **self.allreduce_params.get_trtllm_fused_allreduce_kwargs(), ) # quant_out, rms_norm_residual return allreduce[4], allreduce[2] pm.register_replacement( pattern, replacement, get_inputs(), pm.fwd_only, pm_pass ) class AllReduceFusedRMSNormStaticQuantNVFP4Pattern(BasePattern): """ This pattern replaces the allreduce + rms norm (without residual) + static nvfp4 quant with fused flashinfer implementation. Applies to allreduce + rmsnorm + quant before attn in the first Transformer block. 
""" def __init__( self, epsilon: float, dtype: torch.dtype, device: str, allreduce_params: FlashInferFusedAllReduceParams, ): super().__init__(dtype, device) self.epsilon = epsilon self.allreduce_params = allreduce_params self.rmsnorm_matcher = MatcherRMSNorm(epsilon) def register(self, pm_pass: PatternMatcherPass): def get_inputs(): input = torch.empty([1, 16, 16], device=self.device, dtype=self.dtype) quant_result = torch.empty((16, 8), device=self.device, dtype=torch.uint8) input_global_scale = torch.empty( [1, 1], device=self.device, dtype=torch.float32 ) weight = torch.empty([16], device=self.device, dtype=self.dtype) output_scale = torch.empty([128, 4], device=self.device, dtype=torch.int32) return [input, quant_result, weight, input_global_scale, output_scale] def pattern( input: torch.Tensor, quant_result: torch.Tensor, weight: torch.Tensor, input_global_scale: torch.Tensor, output_scale: torch.Tensor, ): all_reduce = tensor_model_parallel_all_reduce(input)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/monitor.py
vllm/compilation/monitor.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import time

from vllm.config import CompilationConfig, CompilationMode, VllmConfig
from vllm.logger import init_logger

logger = init_logger(__name__)

# depyf debug context entered by start_monitoring_torch_compile (when
# dumping is active) and exited by end_monitoring_torch_compile.
context_manager = None
# Wall-clock timestamp of when compile monitoring started.
torch_compile_start_time: float = 0.0


def start_monitoring_torch_compile(vllm_config: VllmConfig):
    """Record the compile start time; in VLLM_COMPILE mode with a debug
    dump path configured, also enter a depyf debug-dump context."""
    global torch_compile_start_time
    torch_compile_start_time = time.time()

    compilation_config: CompilationConfig = vllm_config.compilation_config
    dump_path = vllm_config.compile_debug_dump_path()
    if compilation_config.mode != CompilationMode.VLLM_COMPILE or not dump_path:
        return

    import depyf

    dump_path.mkdir(parents=True, exist_ok=True)
    logger.debug("Dumping depyf output to %s", dump_path)
    global context_manager
    context_manager = depyf.prepare_debug(dump_path.as_posix())
    context_manager.__enter__()


def end_monitoring_torch_compile(vllm_config: VllmConfig):
    """Log the total torch.compile time and close the depyf context, if any."""
    compilation_config: CompilationConfig = vllm_config.compilation_config
    if compilation_config.mode != CompilationMode.VLLM_COMPILE:
        return

    logger.info_once(
        "torch.compile takes %.2f s in total",
        compilation_config.compilation_time,
        scope="local",
    )
    global context_manager
    if context_manager is not None:
        context_manager.__exit__(None, None, None)
        context_manager = None


# Runtime switch checked before any cudagraph capture attempt.
cudagraph_capturing_enabled: bool = True


def validate_cudagraph_capturing_enabled():
    """Raise RuntimeError if cudagraph capturing is currently disabled.

    Should be called before any cudagraph capture so that an illegal
    capture surfaces as an error instead of happening silently.
    """
    if cudagraph_capturing_enabled:
        return
    raise RuntimeError(
        "CUDA graph capturing detected at an inappropriate "
        "time. This operation is currently disabled."
    )


def set_cudagraph_capturing_enabled(enabled: bool):
    """Globally enable or disable cudagraph capturing."""
    global cudagraph_capturing_enabled
    cudagraph_capturing_enabled = enabled
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/caching.py
vllm/compilation/caching.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import inspect import os import pickle from unittest.mock import patch import torch from torch.utils import _pytree as pytree import vllm.envs as envs from vllm.config import VllmConfig, get_current_vllm_config from vllm.config.utils import hash_factors from vllm.logger import init_logger from vllm.utils.hashing import safe_hash try: from torch._dynamo.aot_compile import SerializableCallable except ImportError: SerializableCallable = object assert isinstance(SerializableCallable, type) logger = init_logger(__name__) class VllmSerializableFunction(SerializableCallable): """ A wrapper around a compiled function by vllm. It will forward the tensor inputs to the compiled function and return the result. It also implements a serialization interface to support PyTorch's precompile with custom backend, so that we can save and load the compiled function on disk. There's no need to wrap around the compiled function if we don't want to serialize them in particular cases. Right now serialization for the custom backend is done via serializing the Dynamo fx graph plus example inputs. 
""" def __init__( self, graph_module, example_inputs, prefix, optimized_call, is_encoder=False ): assert isinstance(graph_module, torch.fx.GraphModule) self.graph_module = graph_module self.example_inputs = example_inputs self.prefix = prefix self.optimized_call = optimized_call self.is_encoder = is_encoder self.shape_env = None sym_input = next( (i for i in self.example_inputs if isinstance(i, torch.SymInt)), None ) if sym_input is not None: self.shape_env = sym_input.node.shape_env def __call__(self, *args, **kwargs): return self.optimized_call(*args, **kwargs) @classmethod def serialize_compile_artifacts( cls, compiled_fn: "VllmSerializableFunction" ) -> bytes: import sympy from torch._subclasses import FakeTensorMode from torch.fx._graph_pickler import GraphPickler, Options state = compiled_fn.__dict__.copy() state.pop("optimized_call") state.pop("shape_env") for node in state["graph_module"].graph.nodes: node.meta.pop("source_fn_stack", None) node.meta.pop("nn_module_stack", None) graph_reducer_override = GraphPickler.reducer_override def _graph_reducer_override(self, obj): if ( inspect.isclass(obj) and issubclass(obj, sympy.Function) and hasattr(obj, "_torch_unpickler") ): return obj._torch_unpickler, (obj._torch_handler_name,) if isinstance(obj, FakeTensorMode): return type(None), () return graph_reducer_override(self, obj) # Mask off tensor inputs since they are large and not needed. 
state["example_inputs"] = pytree.tree_map_only( torch.Tensor, lambda _: None, state["example_inputs"] ) with patch.object(GraphPickler, "reducer_override", _graph_reducer_override): state["graph_module"] = GraphPickler.dumps( state["graph_module"], Options(ops_filter=None) ) state["example_inputs"] = GraphPickler.dumps(state["example_inputs"]) return pickle.dumps(state) @classmethod def deserialize_compile_artifacts(cls, data: bytes) -> "VllmSerializableFunction": from torch._guards import TracingContext, tracing from torch._subclasses import FakeTensorMode from torch.fx._graph_pickler import GraphPickler from torch.fx.experimental.symbolic_shapes import ShapeEnv from vllm.compilation.backends import VllmBackend state = pickle.loads(data) fake_mode = FakeTensorMode(shape_env=ShapeEnv()) state["graph_module"] = GraphPickler.loads(state["graph_module"], fake_mode) state["graph_module"].recompile() state["example_inputs"] = GraphPickler.loads(state["example_inputs"], fake_mode) is_encoder = state.get("is_encoder", False) vllm_backend = VllmBackend( get_current_vllm_config(), state["prefix"], is_encoder ) def optimized_call(*example_inputs): """ On the first run of the optimized call, we rerun the compiler backend which should result in a cache hit. After the backend call returns, we just do a one-time replacement of the optimized call with the compiled function, so that subsequent calls are on the AOT compiled path. """ compile_inputs = [ inp if inp is not None else example_inputs[i] for i, inp in enumerate(fn.example_inputs) ] with tracing(TracingContext(fake_mode)): fn.optimized_call = vllm_backend( state["graph_module"], compile_inputs ).optimized_call return fn.optimized_call(*example_inputs) fn = cls(**state, optimized_call=optimized_call) return fn @property def co_name(self): """ Used for depyf debugging. """ return "VllmSerializableFunction" def compilation_config_hash_factors(vllm_config: VllmConfig) -> list[str]: factors = [] # 0. 
factors come from the env, for example, The values of # VLLM_PP_LAYER_PARTITION will affect the computation graph. env_hash = hash_factors(envs.compile_factors()) factors.append(env_hash) # 1. factors come from the vllm_config (it mainly summarizes how the # model is created) config_hash = vllm_config.compute_hash() factors.append(config_hash) return factors def _compute_code_hash_with_content(file_contents: dict[str, str]) -> str: items = list(sorted(file_contents.items(), key=lambda x: x[0])) hash_content = [] for filepath, content in items: hash_content.append(filepath) if filepath == "<string>": # This means the function was dynamically generated, with # e.g. exec(). We can't actually check these. continue hash_content.append(content) return safe_hash( "\n".join(hash_content).encode(), usedforsecurity=False ).hexdigest() def _compute_code_hash(files: set[str]) -> str: logger.debug( "Traced files (to be considered for compilation cache):\n%s", "\n".join(files) ) file_contents = {} for filepath in files: # Skip files that don't exist (e.g., <string>, <frozen modules>, etc.) if not os.path.isfile(filepath): file_contents[filepath] = "" else: with open(filepath) as f: file_contents[filepath] = f.read() return _compute_code_hash_with_content(file_contents)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/compilation/piecewise_backend.py
vllm/compilation/piecewise_backend.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import dataclasses from collections.abc import Callable from typing import Any import torch.fx as fx from vllm.compilation.backends import VllmBackend from vllm.compilation.monitor import end_monitoring_torch_compile from vllm.config import VllmConfig from vllm.config.compilation import Range from vllm.logger import init_logger logger = init_logger(__name__) @dataclasses.dataclass class RangeEntry: compile_range: Range compiled: bool = False runnable: Callable = None # type: ignore class PiecewiseBackend: def __init__( self, graph: fx.GraphModule, vllm_config: VllmConfig, piecewise_compile_index: int, total_piecewise_compiles: int, sym_shape_indices: list[int], vllm_backend: VllmBackend, ): """ The backend for piecewise compilation. It mainly handles the compilation of static shapes and dispatching based on runtime shape. We will compile `self.graph` once for the general shape, and then compile for different shapes specified in `compilation_config.compile_sizes`. 
""" self.graph = graph self.vllm_config = vllm_config self.compilation_config = vllm_config.compilation_config self.piecewise_compile_index = piecewise_compile_index self.total_piecewise_compiles = total_piecewise_compiles self.vllm_backend = vllm_backend self.is_first_graph = piecewise_compile_index == 0 self.is_last_graph = piecewise_compile_index == total_piecewise_compiles - 1 self.is_full_graph = total_piecewise_compiles == 1 self.is_encoder_compilation = vllm_backend.is_encoder self.compile_ranges = self.compilation_config.get_compile_ranges() if self.is_encoder_compilation: # For encoder compilation we use the max int32 value # to set the upper bound of the compile ranges max_int32 = 2**31 - 1 last_compile_range = self.compile_ranges[-1] assert ( last_compile_range.end == vllm_config.scheduler_config.max_num_batched_tokens ) self.compile_ranges[-1] = Range( start=last_compile_range.start, end=max_int32 ) log_string = f"PiecewiseBackend: compile_ranges: {self.compile_ranges}" logger.debug_once(log_string) self.compile_sizes = self.compilation_config.compile_sizes log_string = f"PiecewiseBackend: compile_sizes: {self.compile_sizes}" logger.debug_once(log_string) self.sym_shape_indices = sym_shape_indices # the entries for ranges that we need to either self.range_entries: dict[Range, RangeEntry] = {} # to_be_compiled_ranges tracks the remaining ranges to compile, # and updates during the compilation process, so we need to copy it self.to_be_compiled_ranges: set[Range] = set(self.compile_ranges) # We only keep compilation management inside this class directly. 
for size in self.compile_sizes: range = Range(start=size, end=size) if range not in self.compile_ranges: self.range_entries[range] = RangeEntry( compile_range=range, ) self.to_be_compiled_ranges.add(range) for range in self.compile_ranges: self.range_entries[range] = RangeEntry( compile_range=range, ) def check_for_ending_compilation(self): if self.is_last_graph and not self.to_be_compiled_ranges: # no specific sizes to compile # save the hash of the inductor graph for the next run self.vllm_backend.compiler_manager.save_to_file() end_monitoring_torch_compile(self.vllm_config) def _fakify_args(self, args: list[Any]) -> list[Any]: # We need to pass fake example_inputs, otherwise torch.compile # will fakify the example_inputs potentially causing some non dynamic # dimension to be be duck shaped to other existing shapes that have hints # matching their values. # This is problem because it can lead to unintended specializations! # if the new wrongly dynamic dim is specialized # it will force specializing the whole shape # torch.compile probably should not accept # non fake tensors as example inputs! # See issue https://github.com/vllm-project/vllm/issues/27899 fake_example_inputs = [] for node in self.graph.graph.nodes: # All place holders come first if node.op == "placeholder": fake_example_inputs.append(node.meta["example_value"]) else: break assert len(fake_example_inputs) == len(args) return fake_example_inputs def _maybe_compile_for_range_entry(self, range_entry: RangeEntry, args) -> Any: if not range_entry.compiled: range_entry.compiled = True self.to_be_compiled_ranges.remove(range_entry.compile_range) # args are real arguments # fakify for range, real args for concrete size. # For concrete size, we clear the shape env in # compiler_manager.compile() so no need to fakify. 
args = ( self._fakify_args(args) if not range_entry.compile_range.is_single_size() else args ) range_entry.runnable = self.vllm_backend.compiler_manager.compile( self.graph, args, self.vllm_backend.inductor_config, self.compilation_config, compile_range=range_entry.compile_range, graph_index=self.piecewise_compile_index, num_graphs=self.total_piecewise_compiles, ) self.check_for_ending_compilation() def _find_range_for_shape(self, runtime_shape: int) -> Range | None: # First we try to find the range entry for the concrete compile size # If not found, we search for the range entry # that contains the runtime shape. if runtime_shape in self.compile_sizes: return self.range_entries[Range(start=runtime_shape, end=runtime_shape)] else: for range in self.compile_ranges: if runtime_shape in range: return self.range_entries[range] return None def __call__(self, *args) -> Any: runtime_shape = args[self.sym_shape_indices[0]] range_entry = self._find_range_for_shape(runtime_shape) assert range_entry is not None, ( f"Shape: {runtime_shape} out of considered ranges: {self.compile_ranges}" ) self._maybe_compile_for_range_entry(range_entry, args) return range_entry.runnable(*args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/triton_utils/__init__.py
vllm/triton_utils/__init__.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from typing import TYPE_CHECKING from vllm.triton_utils.importing import ( HAS_TRITON, TritonLanguagePlaceholder, TritonPlaceholder, ) if TYPE_CHECKING or HAS_TRITON: import triton import triton.language as tl import triton.language.extra.libdevice as tldevice else: triton = TritonPlaceholder() tl = TritonLanguagePlaceholder() tldevice = TritonLanguagePlaceholder() __all__ = ["HAS_TRITON", "triton", "tl", "tldevice"]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/triton_utils/importing.py
vllm/triton_utils/importing.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os import types from importlib.util import find_spec from vllm.logger import init_logger logger = init_logger(__name__) HAS_TRITON = ( find_spec("triton") is not None or find_spec("pytorch-triton-xpu") is not None # Not compatible ) if HAS_TRITON: try: from triton.backends import backends # It's generally expected that x.driver exists and has # an is_active method. # The `x.driver and` check adds a small layer of safety. active_drivers = [ x.driver for x in backends.values() if x.driver and x.driver.is_active() ] # Check if we're in a distributed environment where CUDA_VISIBLE_DEVICES # might be temporarily empty (e.g., Ray sets it to "" during actor init) cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES") is_distributed_env = ( cuda_visible_devices is not None and len(cuda_visible_devices.strip()) == 0 ) # Apply lenient driver check for distributed environments if is_distributed_env and len(active_drivers) == 0: # Allow 0 drivers in distributed environments - they may become # active later when CUDA context is properly initialized logger.debug( "Triton found 0 active drivers in distributed environment. " "This is expected during initialization." ) elif not is_distributed_env and len(active_drivers) != 1: # Strict check for non-distributed environments logger.info( "Triton is installed but %d active driver(s) found " "(expected 1). Disabling Triton to prevent runtime errors.", len(active_drivers), ) HAS_TRITON = False except ImportError: # This can occur if Triton is partially installed or triton.backends # is missing. logger.warning( "Triton is installed, but `triton.backends` could not be imported. " "Disabling Triton." ) HAS_TRITON = False except Exception as e: # Catch any other unexpected errors during the check. logger.warning( "An unexpected error occurred while checking Triton active drivers:" " %s. 
Disabling Triton.", e, ) HAS_TRITON = False if not HAS_TRITON: logger.info( "Triton not installed or not compatible; certain GPU-related" " functions will not be available." ) class TritonPlaceholder(types.ModuleType): def __init__(self): super().__init__("triton") self.__version__ = "3.4.0" self.jit = self._dummy_decorator("jit") self.autotune = self._dummy_decorator("autotune") self.heuristics = self._dummy_decorator("heuristics") self.Config = self._dummy_decorator("Config") self.language = TritonLanguagePlaceholder() def _dummy_decorator(self, name): def decorator(*args, **kwargs): if args and callable(args[0]): return args[0] return lambda f: f return decorator class TritonLanguagePlaceholder(types.ModuleType): def __init__(self): super().__init__("triton.language") self.constexpr = None self.dtype = None self.int64 = None self.int32 = None self.tensor = None self.exp = None self.log = None self.log2 = None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/responses_utils.py
vllm/entrypoints/responses_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from typing import Any from openai.types.chat import ( ChatCompletionAssistantMessageParam, ChatCompletionMessageToolCallParam, ChatCompletionToolMessageParam, ) from openai.types.chat.chat_completion_message_tool_call_param import ( Function as FunctionCallTool, ) from openai.types.responses import ResponseFunctionToolCall, ResponseOutputItem from openai.types.responses.response import ToolChoice from openai.types.responses.response_function_tool_call_output_item import ( ResponseFunctionToolCallOutputItem, ) from openai.types.responses.response_output_message import ResponseOutputMessage from openai.types.responses.response_reasoning_item import ResponseReasoningItem from openai.types.responses.tool import Tool from vllm import envs from vllm.entrypoints.constants import MCP_PREFIX from vllm.entrypoints.openai.protocol import ( ChatCompletionMessageParam, ResponseInputOutputItem, ) def construct_input_messages( *, request_instructions: str | None = None, request_input: str | list[ResponseInputOutputItem], prev_msg: list[ChatCompletionMessageParam] | None = None, prev_response_output: list[ResponseOutputItem] | None = None, ): messages: list[ChatCompletionMessageParam] = [] if request_instructions: messages.append( { "role": "system", "content": request_instructions, } ) # Prepend the conversation history. if prev_msg is not None: # Add the previous messages. messages.extend(prev_msg) if prev_response_output is not None: # Add the previous output. for output_item in prev_response_output: # NOTE: We skip the reasoning output. if isinstance(output_item, ResponseOutputMessage): for content in output_item.content: messages.append( { "role": "assistant", "content": content.text, } ) # Append the new input. # Responses API supports simple text inputs without chat format. 
if isinstance(request_input, str): messages.append({"role": "user", "content": request_input}) else: input_messages = construct_chat_messages_with_tool_call(request_input) messages.extend(input_messages) return messages def _maybe_combine_reasoning_and_tool_call( item: ResponseInputOutputItem, messages: list[ChatCompletionMessageParam] ) -> ChatCompletionMessageParam | None: """Many models treat MCP calls and reasoning as a single message. This function checks if the last message is a reasoning message and the current message is a tool call""" if not ( isinstance(item, ResponseFunctionToolCall) and item.id.startswith(MCP_PREFIX) ): return None if len(messages) == 0: return None last_message = messages[-1] if not ( last_message.get("role") == "assistant" and last_message.get("reasoning") is not None ): return None last_message["tool_calls"] = [ ChatCompletionMessageToolCallParam( id=item.call_id, function=FunctionCallTool( name=item.name, arguments=item.arguments, ), type="function", ) ] return last_message def construct_chat_messages_with_tool_call( input_messages: list[ResponseInputOutputItem], ) -> list[ChatCompletionMessageParam]: """This function wraps _construct_single_message_from_response_item Because some chatMessages come from multiple response items for example a reasoning item and a MCP tool call are two response items but are one chat message """ messages: list[ChatCompletionMessageParam] = [] for item in input_messages: maybe_combined_message = _maybe_combine_reasoning_and_tool_call(item, messages) if maybe_combined_message is not None: messages[-1] = maybe_combined_message else: messages.append(_construct_single_message_from_response_item(item)) return messages def _construct_single_message_from_response_item( item: ResponseInputOutputItem, ) -> ChatCompletionMessageParam: if isinstance(item, ResponseFunctionToolCall): # Append the function call as a tool call. 
return ChatCompletionAssistantMessageParam( role="assistant", tool_calls=[ ChatCompletionMessageToolCallParam( id=item.call_id, function=FunctionCallTool( name=item.name, arguments=item.arguments, ), type="function", ) ], ) elif isinstance(item, ResponseReasoningItem): reasoning_content = "" if item.encrypted_content: raise ValueError("Encrypted content is not supported.") if len(item.summary) == 1: reasoning_content = item.summary[0].text elif item.content and len(item.content) == 1: reasoning_content = item.content[0].text return { "role": "assistant", "reasoning": reasoning_content, } elif isinstance(item, ResponseOutputMessage): return { "role": "assistant", "content": item.content[0].text, } elif isinstance(item, ResponseFunctionToolCallOutputItem): return ChatCompletionToolMessageParam( role="tool", content=item.output, tool_call_id=item.call_id, ) elif isinstance(item, dict) and item.get("type") == "function_call_output": # Append the function call output as a tool message. return ChatCompletionToolMessageParam( role="tool", content=item.get("output"), tool_call_id=item.get("call_id"), ) return item # type: ignore def extract_tool_types(tools: list[Tool]) -> set[str]: """ Extracts the tool types from the given tools. 
""" tool_types: set[str] = set() for tool in tools: if tool.type == "mcp": # Allow the MCP Tool type to enable built in tools if the # server_label is allowlisted in # envs.VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS if tool.server_label in envs.VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS: tool_types.add(tool.server_label) else: tool_types.add(tool.type) return tool_types def convert_tool_responses_to_completions_format(tool: dict) -> dict: """ Convert a flat tool schema: {"type": "function", "name": "...", "description": "...", "parameters": {...}} into: {"type": "function", "function": {...}} """ return { "type": "function", "function": tool, } def construct_tool_dicts( tools: list[Tool], tool_choice: ToolChoice ) -> list[dict[str, Any]] | None: if tools is None or (tool_choice == "none"): tool_dicts = None else: tool_dicts = [ convert_tool_responses_to_completions_format(tool.model_dump()) for tool in tools ] return tool_dicts
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/ssl.py
vllm/entrypoints/ssl.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio from collections.abc import Callable from ssl import SSLContext from watchfiles import Change, awatch from vllm.logger import init_logger logger = init_logger(__name__) class SSLCertRefresher: """A class that monitors SSL certificate files and reloads them when they change. """ def __init__( self, ssl_context: SSLContext, key_path: str | None = None, cert_path: str | None = None, ca_path: str | None = None, ) -> None: self.ssl = ssl_context self.key_path = key_path self.cert_path = cert_path self.ca_path = ca_path # Setup certification chain watcher def update_ssl_cert_chain(change: Change, file_path: str) -> None: logger.info("Reloading SSL certificate chain") assert self.key_path and self.cert_path self.ssl.load_cert_chain(self.cert_path, self.key_path) self.watch_ssl_cert_task = None if self.key_path and self.cert_path: self.watch_ssl_cert_task = asyncio.create_task( self._watch_files( [self.key_path, self.cert_path], update_ssl_cert_chain ) ) # Setup CA files watcher def update_ssl_ca(change: Change, file_path: str) -> None: logger.info("Reloading SSL CA certificates") assert self.ca_path self.ssl.load_verify_locations(self.ca_path) self.watch_ssl_ca_task = None if self.ca_path: self.watch_ssl_ca_task = asyncio.create_task( self._watch_files([self.ca_path], update_ssl_ca) ) async def _watch_files(self, paths, fun: Callable[[Change, str], None]) -> None: """Watch multiple file paths asynchronously.""" logger.info("SSLCertRefresher monitors files: %s", paths) async for changes in awatch(*paths): try: for change, file_path in changes: logger.info("File change detected: %s - %s", change.name, file_path) fun(change, file_path) except Exception as e: logger.error( "SSLCertRefresher failed taking action on file change. 
Error: %s", e ) def stop(self) -> None: """Stop watching files.""" if self.watch_ssl_cert_task: self.watch_ssl_cert_task.cancel() self.watch_ssl_cert_task = None if self.watch_ssl_ca_task: self.watch_ssl_ca_task.cancel() self.watch_ssl_ca_task = None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/tool_server.py
vllm/entrypoints/tool_server.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from abc import ABC, abstractmethod from contextlib import AbstractAsyncContextManager, asynccontextmanager from typing import TYPE_CHECKING, Any from openai_harmony import ToolDescription, ToolNamespaceConfig from vllm.entrypoints.tool import HarmonyBrowserTool, HarmonyPythonTool, Tool from vllm.logger import init_logger logger = init_logger(__name__) if TYPE_CHECKING: from mcp.types import ListToolsResult async def list_server_and_tools(server_url: str): from mcp import ClientSession from mcp.client.sse import sse_client async with ( sse_client(url=server_url) as streams, ClientSession(*streams) as session, ): initialize_response = await session.initialize() list_tools_response = await session.list_tools() return initialize_response, list_tools_response def trim_schema(schema: dict) -> dict: # Turn JSON Schema from MCP generated into Harmony's variant. if "title" in schema: del schema["title"] if "default" in schema and schema["default"] is None: del schema["default"] if "anyOf" in schema: # Turn "anyOf": [{"type": "type-1"}, {"type": "type-2"}] # into "type": ["type-1", "type-2"] # if there's more than 1 types, also remove "null" type as Harmony will # just ignore it types = [ type_dict["type"] for type_dict in schema["anyOf"] if type_dict["type"] != "null" ] schema["type"] = types del schema["anyOf"] if "properties" in schema: schema["properties"] = { k: trim_schema(v) for k, v in schema["properties"].items() } return schema def post_process_tools_description( list_tools_result: "ListToolsResult", ) -> "ListToolsResult": # Adapt the MCP tool result for Harmony for tool in list_tools_result.tools: tool.inputSchema = trim_schema(tool.inputSchema) # Some tools schema don't need to be part of the prompt (e.g. 
simple text # in text out for Python) list_tools_result.tools = [ tool for tool in list_tools_result.tools if getattr(tool.annotations, "include_in_prompt", True) ] return list_tools_result class ToolServer(ABC): @abstractmethod def has_tool(self, tool_name: str) -> bool: """ Return True if the tool is supported, False otherwise. """ pass @abstractmethod def get_tool_description( self, tool_name: str, allowed_tools: list[str] | None = None ) -> ToolNamespaceConfig | None: """ Return the tool description for the given tool name. If the tool is not supported, return None. """ pass @abstractmethod def new_session( self, tool_name: str, session_id: str, headers: dict[str, str] | None = None ) -> AbstractAsyncContextManager[Any]: """ Create a session for the tool. """ ... class MCPToolServer(ToolServer): def __init__(self): try: import mcp # noqa: F401 except ImportError: raise ImportError( "mcp is not installed. Please run `pip install mcp` to use " "MCPToolServer." ) from None self.harmony_tool_descriptions = {} async def add_tool_server(self, server_url: str): tool_urls = server_url.split(",") self.harmony_tool_descriptions = {} self.urls: dict[str, str] = {} for url in tool_urls: url = f"http://{url}/sse" initialize_response, list_tools_response = await list_server_and_tools(url) list_tools_response = post_process_tools_description(list_tools_response) tool_from_mcp = ToolNamespaceConfig( name=initialize_response.serverInfo.name, description=initialize_response.instructions, tools=[ ToolDescription.new( name=tool.name, description=tool.description, parameters=tool.inputSchema, ) for tool in list_tools_response.tools ], ) self.harmony_tool_descriptions[tool_from_mcp.name] = tool_from_mcp if tool_from_mcp.name not in self.urls: self.urls[tool_from_mcp.name] = url else: logger.warning( "Tool %s already exists. 
Ignoring duplicate tool server %s", tool_from_mcp.name, url, ) logger.info( "MCPToolServer initialized with tools: %s", list(self.harmony_tool_descriptions.keys()), ) def has_tool(self, tool_name: str): return tool_name in self.harmony_tool_descriptions def get_tool_description( self, server_label: str, allowed_tools: list[str] | None = None, ) -> ToolNamespaceConfig | None: cfg = self.harmony_tool_descriptions.get(server_label) if cfg is None: return None # No restrictions: all tools from this MCP server if allowed_tools is None: return cfg filtered = [t for t in cfg.tools if t.name in allowed_tools] if not filtered: return None return ToolNamespaceConfig( name=cfg.name, description=cfg.description, tools=filtered, ) @asynccontextmanager async def new_session( self, tool_name: str, session_id: str, headers: dict[str, str] | None = None ): from mcp import ClientSession from mcp.client.sse import sse_client url = self.urls.get(tool_name) request_headers = {"x-session-id": session_id} if headers is not None: request_headers.update(headers) if not url: raise KeyError(f"Tool '{tool_name}' is not supported") async with ( sse_client(url=url, headers=request_headers) as streams, ClientSession(*streams) as session, ): await session.initialize() yield session class DemoToolServer(ToolServer): def __init__(self): self.tools: dict[str, Tool] = {} async def init_and_validate(self): browser_tool = HarmonyBrowserTool() python_tool = HarmonyPythonTool() await python_tool.validate() if browser_tool.enabled: self.tools["browser"] = browser_tool if python_tool.enabled: self.tools["python"] = python_tool logger.info( "DemoToolServer initialized with tools: %s", list(self.tools.keys()) ) def has_tool(self, tool_name: str) -> bool: return tool_name in self.tools def get_tool_description( self, tool_name: str, allowed_tools: list[str] | None = None ) -> ToolNamespaceConfig | None: if tool_name not in self.tools: return None if tool_name == "browser": return ToolNamespaceConfig.browser() 
elif tool_name == "python": return ToolNamespaceConfig.python() else: raise ValueError(f"Unknown tool {tool_name}") @asynccontextmanager async def new_session( self, tool_name: str, session_id: str, headers: dict[str, str] | None = None ): if tool_name not in self.tools: raise KeyError(f"Tool '{tool_name}' is not supported") yield self.tools[tool_name]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/constants.py
vllm/entrypoints/constants.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Shared constants for vLLM entrypoints. """ # HTTP header limits for h11 parser # These constants help mitigate header abuse attacks H11_MAX_INCOMPLETE_EVENT_SIZE_DEFAULT = 4194304 # 4 MB H11_MAX_HEADER_COUNT_DEFAULT = 256 MCP_PREFIX = "mcp_"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/score_utils.py
vllm/entrypoints/score_utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any, TypeAlias, cast

from torch.nn import CosineSimilarity
from typing_extensions import Required, TypedDict

from vllm.config import ModelConfig
from vllm.entrypoints.chat_utils import (
    BaseMultiModalItemTracker,
    ChatCompletionContentPartImageEmbedsParam,
    ChatCompletionContentPartImageParam,
    ChatCompletionContentPartTextParam,
    ChatTemplateResolutionError,
    MultiModalItemTracker,
    _ContentPart,
    _parse_chat_message_content_part,
    apply_hf_chat_template,
)
from vllm.inputs import TokensPrompt
from vllm.model_executor.models.interfaces import supports_score_template
from vllm.multimodal.inputs import MultiModalDataDict
from vllm.outputs import PoolingRequestOutput
from vllm.tokenizers import TokenizerLike

# A single scoring input item: an image part, an image-embeds part, or a
# plain-text part.
ScoreContentPartParam: TypeAlias = (
    ChatCompletionContentPartImageParam
    | ChatCompletionContentPartImageEmbedsParam
    | ChatCompletionContentPartTextParam
)


class ScoreMultiModalParam(TypedDict, total=False):
    """
    A specialized parameter type for scoring multimodal content

    The reasons why we don't reuse `CustomChatCompletionMessageParam` directly:
    1. Score tasks don't need the 'role' field (user/assistant/system) that's required in chat completions
    2. Including chat-specific fields would confuse users about their purpose in scoring
    3. This is a more focused interface that only exposes what's needed for scoring
    """  # noqa: E501

    content: Required[list[ScoreContentPartParam]]
    """The multimodal contents"""


def _cosine_similarity(
    tokenizer: TokenizerLike,
    embed_1: list[PoolingRequestOutput],
    embed_2: list[PoolingRequestOutput],
) -> list[PoolingRequestOutput]:
    """Score each pair of embedding outputs by cosine similarity.

    Pairs ``embed_1[i]`` with ``embed_2[i]`` (extra items on the longer list
    are dropped by ``zip``) and emits one ``PoolingRequestOutput`` per pair
    whose ``outputs`` is the cosine similarity of the two embedding tensors.
    """
    # Similarity is computed along dim 0 of the pooled embedding vectors.
    scorer = CosineSimilarity(0)
    scores: list[PoolingRequestOutput] = []

    for emb_1, emb_2 in zip(embed_1, embed_2):
        pair_score = scorer(emb_1.outputs.data, emb_2.outputs.data)

        # Join the two prompts' token ids, separated by the tokenizer's pad
        # token when one is defined.
        padding: list[int] = []
        if (pad_token_id := tokenizer.pad_token_id) is not None:
            padding = [pad_token_id]

        tokens = emb_1.prompt_token_ids + padding + emb_2.prompt_token_ids

        scores.append(
            PoolingRequestOutput(
                request_id=f"{emb_1.request_id}_{emb_2.request_id}",
                outputs=pair_score,
                prompt_token_ids=tokens,
                num_cached_tokens=emb_1.num_cached_tokens + emb_2.num_cached_tokens,
                finished=True,
            )
        )
    return scores


def _validate_score_input_lens(
    data_1: list[str] | list[ScoreContentPartParam],
    data_2: list[str] | list[ScoreContentPartParam],
):
    """Validate that the inputs are non-empty and pair up 1:1, 1:N, or N:N.

    Raises:
        ValueError: if either list is empty, or if ``data_1`` has more than
            one element and its length differs from ``data_2``'s.
    """
    len_1 = len(data_1)
    len_2 = len(data_2)
    if len_1 > 1 and len_1 != len_2:
        raise ValueError("Input lengths must be either 1:1, 1:N or N:N")
    if len_1 == 0:
        raise ValueError("At least one text element must be given")
    if len_2 == 0:
        raise ValueError("At least one text_pair element must be given")


def parse_score_data(
    data_1: str | ScoreContentPartParam,
    data_2: str | ScoreContentPartParam,
    model_config: ModelConfig,
) -> tuple[str, str, MultiModalDataDict | None]:
    """Parse one scoring pair into two text prompts plus any multi-modal data.

    Both items must resolve to plain strings; multi-modal items collected by
    the tracker are returned separately (or None if there were none).

    Raises:
        ValueError: if either parsed item is not a string.
    """
    mm_tracker = MultiModalItemTracker(model_config)

    content_1 = _parse_score_content(data_1, mm_tracker)
    content_2 = _parse_score_content(data_2, mm_tracker)

    def ensure_str(content: _ContentPart | None) -> str:
        # Only plain-string content can be used as a score prompt.
        if content is not None and isinstance(content, str):
            return cast(str, content)
        else:
            raise ValueError(f"Only string content is supported, but got {content}.")

    prompt_1 = ensure_str(content_1)
    prompt_2 = ensure_str(content_2)

    return prompt_1, prompt_2, mm_tracker.all_mm_data()


def _parse_score_content(
    data: str | ScoreContentPartParam,
    mm_tracker: BaseMultiModalItemTracker,
) -> _ContentPart | None:
    """Parse a single scoring item through the chat content-part machinery.

    Returns the parsed part directly when the parser yields one; otherwise
    falls back to the single multi-modal placeholder recorded by the parser.

    Raises:
        ValueError: if the item produced more than one multi-modal item.
    """
    # Plain strings are wrapped as text parts so one code path handles both.
    if isinstance(data, str):
        part = ChatCompletionContentPartTextParam(type="text", text=data)
    else:
        part = data

    mm_parser = mm_tracker.create_parser()

    parse_res = _parse_chat_message_content_part(
        part,
        mm_parser,
        wrap_dicts=False,
        interleave_strings=False,
    )
    if parse_res:
        return parse_res

    # No direct result: expect exactly one recorded multi-modal placeholder
    # (one modality, one item) and return it.
    mm_placeholder_storage = mm_parser.mm_placeholder_storage()
    if (
        len(mm_placeholder_storage) != 1
        or len(next(iter(mm_placeholder_storage.values()))) != 1
    ):
        raise ValueError("Only one multi-modal item is supported")

    return next(iter(mm_placeholder_storage.values()))[0]


def _apply_model_score_template(
    model_config: ModelConfig, prompt_1: str, prompt_2: str
) -> str:
    """Combine the two prompts using the model class's score template.

    Raises:
        ValueError: if the model does not support score templates, or the
            template it returns is empty.
    """
    # NOTE(Simon): lazy import to avoid bringing in all dependencies (e.g. gguf)
    from vllm.model_executor.model_loader import get_model_cls

    model = get_model_cls(model_config)
    if supports_score_template(model):
        full_prompt = model.get_score_template(prompt_1, prompt_2)
        if full_prompt is None:
            raise ValueError("Get empty score template from model")
        return full_prompt
    raise ValueError(f"Unsupported model architecture: {model_config.architecture}")


def post_process_tokens(
    model_config: ModelConfig,
    prompt: TokensPrompt,
) -> None:
    """
    Perform architecture-specific manipulations on the input tokens.

    Note:
        This is an in-place operation.
    """
    # NOTE(Simon): lazy import to avoid bringing in all dependencies (e.g. gguf)
    from vllm.model_executor.model_loader import get_model_cls

    model = get_model_cls(model_config)
    # Only models that declare score-template support define this hook.
    if supports_score_template(model):
        model.post_process_tokens(prompt)


def get_score_prompt(
    model_config: ModelConfig,
    tokenizer: TokenizerLike,
    tokenization_kwargs: dict[str, Any],
    data_1: str | ScoreContentPartParam,
    data_2: str | ScoreContentPartParam,
    score_template: str | None = None,
) -> tuple[str, TokensPrompt]:
    """Build the full scoring prompt and its tokenized engine prompt.

    Encoding strategy, in order: an explicitly provided ``score_template``
    (rendered via the HF chat template machinery), the model's own score
    template, a cross-encoder text/text_pair encoding (when the model uses a
    pad token), or simple concatenation. Token type ids and multi-modal data
    are attached to the engine prompt when present.

    Returns:
        (full_prompt, engine_prompt) where ``full_prompt`` is the final text
        and ``engine_prompt`` is the ``TokensPrompt`` to send to the engine.
    """
    prompt_1, prompt_2, mm_data = parse_score_data(
        data_1,
        data_2,
        model_config,
    )

    from vllm.model_executor.model_loader import get_model_cls

    model = get_model_cls(model_config)

    def default_tokenizer_encode():
        # Fallback encoding when no explicit template applies.
        if supports_score_template(model):
            full_prompt = _apply_model_score_template(model_config, prompt_1, prompt_2)
            prompt_inputs = tokenizer(full_prompt, **tokenization_kwargs)
        else:
            if model_config.use_pad_token:
                # cross_encoder models defaults to using pad_token.
                prompt_inputs = tokenizer(
                    text=prompt_1, text_pair=prompt_2, **tokenization_kwargs
                )
                full_prompt = tokenizer.decode(prompt_inputs["input_ids"])
            else:
                # `llm as reranker` models defaults to not using pad_token.
                full_prompt = prompt_1 + prompt_2
                prompt_inputs = tokenizer(text=full_prompt, **tokenization_kwargs)
        return full_prompt, prompt_inputs

    # FIXME: For now, we only apply a template when one is explicitly provided.
    # We cannot rely on the tokenizer's chat template because many models
    # inherit junk templates from their base LLM, which breaks both the models
    # and the tests that use them.
    if score_template is None:
        full_prompt, prompt_inputs = default_tokenizer_encode()
    else:
        # FIXME: Try applying a score template from the CLI arg or tokenizer_config.json
        # If that fails because there is no such template,
        # fall back to the default implementation.
        try:
            full_prompt = apply_hf_chat_template(
                tokenizer,
                [
                    {"role": "query", "content": prompt_1},
                    {"role": "document", "content": prompt_2},
                ],
                score_template,
                tools=None,
                model_config=model_config,
            )
            prompt_inputs = tokenizer(full_prompt, **tokenization_kwargs)
        except ChatTemplateResolutionError:
            full_prompt, prompt_inputs = default_tokenizer_encode()

    engine_prompt = TokensPrompt(prompt_token_ids=prompt_inputs["input_ids"])

    # Preserve token type ids (e.g. from text/text_pair encoding) if emitted.
    if (token_type_ids := prompt_inputs.get("token_type_ids")) is not None:
        engine_prompt["token_type_ids"] = token_type_ids

    post_process_tokens(model_config, engine_prompt)

    if mm_data is not None:
        engine_prompt["multi_modal_data"] = mm_data

    return full_prompt, engine_prompt


def compress_token_type_ids(token_type_ids: list[int]) -> int:
    """
    Return position of the first 1 or the length of the list if not found.
    """
    first_one = len(token_type_ids)
    err_msg = (
        "Token type ids are expected to be a sequence"
        " of zeros followed by a sequence of ones"
    )
    for i, type_id in enumerate(token_type_ids):
        # A zero after the first one, or any id > 1, breaks the expected
        # [0...0, 1...1] shape.
        if type_id == 0 and first_one < i:
            raise ValueError(err_msg)
        elif type_id == 1 and first_one > i:
            first_one = i
        elif type_id > 1:
            raise ValueError(err_msg)

    return first_one
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/llm.py
vllm/entrypoints/llm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import itertools from collections.abc import Callable, Sequence from typing import TYPE_CHECKING, Any, cast import cloudpickle import torch.nn as nn from pydantic import ValidationError from tqdm.auto import tqdm from typing_extensions import TypeVar from vllm.beam_search import ( BeamSearchInstance, BeamSearchOutput, BeamSearchSequence, create_sort_beams_key_function, ) from vllm.config import ( AttentionConfig, CompilationConfig, PoolerConfig, ProfilerConfig, StructuredOutputsConfig, is_init_field, ) from vllm.config.compilation import CompilationMode from vllm.config.model import ( ConvertOption, HfOverrides, ModelDType, RunnerOption, TokenizerMode, ) from vllm.engine.arg_utils import EngineArgs from vllm.entrypoints.chat_utils import ( ChatCompletionMessageParam, ChatTemplateContentFormatOption, apply_hf_chat_template, apply_mistral_chat_template, parse_chat_messages, resolve_chat_template_content_format, ) from vllm.entrypoints.score_utils import ( ScoreContentPartParam, ScoreMultiModalParam, _cosine_similarity, _validate_score_input_lens, compress_token_type_ids, get_score_prompt, ) from vllm.entrypoints.utils import _validate_truncation_size, log_non_default_args from vllm.inputs import ( DataPrompt, PromptType, SingletonPrompt, TextPrompt, TokensPrompt, ) from vllm.inputs.parse import get_prompt_components from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.model_executor.layers.quantization import QuantizationMethods from vllm.outputs import ( ClassificationRequestOutput, EmbeddingRequestOutput, PoolingRequestOutput, RequestOutput, ScoringRequestOutput, ) from vllm.platforms import current_platform from vllm.pooling_params import PoolingParams from vllm.sampling_params import BeamSearchParams, RequestOutputKind, SamplingParams from vllm.tasks import PoolingTask from vllm.tokenizers import TokenizerLike from 
vllm.tokenizers.mistral import MistralTokenizer from vllm.usage.usage_lib import UsageContext from vllm.utils.collection_utils import as_iter, is_list_of from vllm.utils.counter import Counter from vllm.v1.engine import EngineCoreRequest from vllm.v1.engine.llm_engine import LLMEngine from vllm.v1.sample.logits_processor import LogitsProcessor if TYPE_CHECKING: from vllm.v1.metrics.reader import Metric logger = init_logger(__name__) _R = TypeVar("_R", default=Any) class LLM: """An LLM for generating texts from given prompts and sampling parameters. This class includes a tokenizer, a language model (possibly distributed across multiple GPUs), and GPU memory space allocated for intermediate states (aka KV cache). Given a batch of prompts and sampling parameters, this class generates texts from the model, using an intelligent batching mechanism and efficient memory management. Args: model: The name or path of a HuggingFace Transformers model. tokenizer: The name or path of a HuggingFace Transformers tokenizer. tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer if available, and "slow" will always use the slow tokenizer. skip_tokenizer_init: If true, skip initialization of tokenizer and detokenizer. Expect valid prompt_token_ids and None for prompt from the input. trust_remote_code: Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer. allowed_local_media_path: Allowing API requests to read local images or videos from directories specified by the server file system. This is a security risk. Should only be enabled in trusted environments. allowed_media_domains: If set, only media URLs that belong to this domain can be used for multi-modal inputs. tensor_parallel_size: The number of GPUs to use for distributed execution with tensor parallelism. dtype: The data type for the model weights and activations. Currently, we support `float32`, `float16`, and `bfloat16`. 
If `auto`, we use the `dtype` attribute of the Transformers model's config. However, if the `dtype` in the config is `float32`, we will use `float16` instead. quantization: The method used to quantize the model weights. Currently, we support "awq", "gptq", and "fp8" (experimental). If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights. revision: The specific model version to use. It can be a branch name, a tag name, or a commit id. tokenizer_revision: The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. seed: The seed to initialize the random number generator for sampling. gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to reserve for the model weights, activations, and KV cache. Higher values will increase the KV cache size and thus improve the model's throughput. However, if the value is too high, it may cause out-of- memory (OOM) errors. kv_cache_memory_bytes: Size of KV Cache per GPU in bytes. By default, this is set to None and vllm can automatically infer the kv cache size based on gpu_memory_utilization. However, users may want to manually specify the kv cache memory size. kv_cache_memory_bytes allows more fine-grain control of how much memory gets used when compared with using gpu_memory_utilization. Note that kv_cache_memory_bytes (when not-None) ignores gpu_memory_utilization swap_space: The size (GiB) of CPU memory per GPU to use as swap space. This can be used for temporarily storing the states of the requests when their `best_of` sampling parameters are larger than 1. If all requests will have `best_of=1`, you can safely set this to 0. Noting that `best_of` is only supported in V0. Otherwise, too small values may cause out-of-memory (OOM) errors. cpu_offload_gb: The size (GiB) of CPU memory to use for offloading the model weights. 
This virtually increases the GPU memory space you can use to hold the model weights, at the cost of CPU-GPU data transfer for every forward pass. enforce_eager: Whether to enforce eager execution. If True, we will disable CUDA graph and always execute the model in eager mode. If False, we will use CUDA graph and eager execution in hybrid. disable_custom_all_reduce: See [ParallelConfig][vllm.config.ParallelConfig]. hf_token: The token to use as HTTP bearer authorization for remote files . If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). hf_overrides: If a dictionary, contains arguments to be forwarded to the HuggingFace config. If a callable, it is called to update the HuggingFace config. mm_processor_kwargs: Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. Overrides for the multi-modal processor obtained from `AutoProcessor.from_pretrained`. The available overrides depend on the model that is being run. For example, for Phi-3-Vision: `{"num_crops": 4}`. pooler_config: Initialize non-default pooling config for the pooling model. e.g. `PoolerConfig(pooling_type="mean", normalize=False)`. compilation_config: Either an integer or a dictionary. If it is an integer, it is used as the mode of compilation optimization. If it is a dictionary, it can specify the full compilation configuration. attention_config: Configuration for attention mechanisms. Can be a dictionary or an AttentionConfig instance. If a dictionary, it will be converted to an AttentionConfig. Allows specifying the attention backend and other attention-related settings. **kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs]. Note: This class is intended to be used for offline inference. For online serving, use the [AsyncLLMEngine][vllm.AsyncLLMEngine] class instead. 
""" def __init__( self, model: str, *, runner: RunnerOption = "auto", convert: ConvertOption = "auto", tokenizer: str | None = None, tokenizer_mode: TokenizerMode | str = "auto", skip_tokenizer_init: bool = False, trust_remote_code: bool = False, allowed_local_media_path: str = "", allowed_media_domains: list[str] | None = None, tensor_parallel_size: int = 1, dtype: ModelDType = "auto", quantization: QuantizationMethods | None = None, revision: str | None = None, tokenizer_revision: str | None = None, seed: int = 0, gpu_memory_utilization: float = 0.9, swap_space: float = 4, cpu_offload_gb: float = 0, enforce_eager: bool = False, disable_custom_all_reduce: bool = False, hf_token: bool | str | None = None, hf_overrides: HfOverrides | None = None, mm_processor_kwargs: dict[str, Any] | None = None, pooler_config: PoolerConfig | None = None, structured_outputs_config: dict[str, Any] | StructuredOutputsConfig | None = None, profiler_config: dict[str, Any] | ProfilerConfig | None = None, attention_config: dict[str, Any] | AttentionConfig | None = None, kv_cache_memory_bytes: int | None = None, compilation_config: int | dict[str, Any] | CompilationConfig | None = None, logits_processors: list[str | type[LogitsProcessor]] | None = None, **kwargs: Any, ) -> None: """LLM constructor.""" if "disable_log_stats" not in kwargs: kwargs["disable_log_stats"] = True if "worker_cls" in kwargs: worker_cls = kwargs["worker_cls"] # if the worker_cls is not qualified string name, # we serialize it using cloudpickle to avoid pickling issues if isinstance(worker_cls, type): kwargs["worker_cls"] = cloudpickle.dumps(worker_cls) if "kv_transfer_config" in kwargs and isinstance( kwargs["kv_transfer_config"], dict ): from vllm.config.kv_transfer import KVTransferConfig raw_config_dict = kwargs["kv_transfer_config"] try: kwargs["kv_transfer_config"] = KVTransferConfig(**raw_config_dict) except ValidationError as e: logger.error( "Failed to convert 'kv_transfer_config' dict to " "KVTransferConfig 
object. Dict: %s. Error: %s", raw_config_dict, e, ) # Consider re-raising a more specific vLLM error or ValueError # to provide better context to the user. raise ValueError(f"Invalid 'kv_transfer_config' provided: {e}") from e if hf_overrides is None: hf_overrides = {} def _make_config(value: Any, cls: type[_R]) -> _R: """Convert dict/None/instance to a config instance.""" if value is None: return cls() if isinstance(value, dict): return cls(**{k: v for k, v in value.items() if is_init_field(cls, k)}) # type: ignore[arg-type] return value if isinstance(compilation_config, int): compilation_config_instance = CompilationConfig( mode=CompilationMode(compilation_config) ) else: compilation_config_instance = _make_config( compilation_config, CompilationConfig ) structured_outputs_instance = _make_config( structured_outputs_config, StructuredOutputsConfig ) profiler_config_instance = _make_config(profiler_config, ProfilerConfig) attention_config_instance = _make_config(attention_config, AttentionConfig) # warn about single-process data parallel usage. _dp_size = int(kwargs.get("data_parallel_size", 1)) _distributed_executor_backend = kwargs.get("distributed_executor_backend") if ( _dp_size > 1 and not _distributed_executor_backend == "external_launcher" and not current_platform.is_tpu() ): raise ValueError( f"LLM(data_parallel_size={_dp_size}) is not supported for single-" "process usage and may hang. Please use " "the explicit multi-process data-parallel example at " "'examples/offline_inference/data_parallel.py'." 
) engine_args = EngineArgs( model=model, runner=runner, convert=convert, tokenizer=tokenizer, tokenizer_mode=tokenizer_mode, skip_tokenizer_init=skip_tokenizer_init, trust_remote_code=trust_remote_code, allowed_local_media_path=allowed_local_media_path, allowed_media_domains=allowed_media_domains, tensor_parallel_size=tensor_parallel_size, dtype=dtype, quantization=quantization, revision=revision, tokenizer_revision=tokenizer_revision, seed=seed, gpu_memory_utilization=gpu_memory_utilization, kv_cache_memory_bytes=kv_cache_memory_bytes, swap_space=swap_space, cpu_offload_gb=cpu_offload_gb, enforce_eager=enforce_eager, disable_custom_all_reduce=disable_custom_all_reduce, hf_token=hf_token, hf_overrides=hf_overrides, mm_processor_kwargs=mm_processor_kwargs, pooler_config=pooler_config, structured_outputs_config=structured_outputs_instance, profiler_config=profiler_config_instance, attention_config=attention_config_instance, compilation_config=compilation_config_instance, logits_processors=logits_processors, **kwargs, ) log_non_default_args(engine_args) self.llm_engine = LLMEngine.from_engine_args( engine_args=engine_args, usage_context=UsageContext.LLM_CLASS ) self.engine_class = type(self.llm_engine) self.request_counter = Counter() self.default_sampling_params: dict[str, Any] | None = None supported_tasks = self.llm_engine.get_supported_tasks() logger.info("Supported tasks: %s", supported_tasks) self.supported_tasks = supported_tasks self.model_config = self.llm_engine.model_config self.input_processor = self.llm_engine.input_processor self.io_processor = self.llm_engine.io_processor def get_tokenizer(self) -> TokenizerLike: return self.llm_engine.get_tokenizer() def reset_mm_cache(self) -> None: self.input_processor.clear_mm_cache() self.llm_engine.reset_mm_cache() def get_default_sampling_params(self) -> SamplingParams: if self.default_sampling_params is None: self.default_sampling_params = self.model_config.get_diff_sampling_param() if 
self.default_sampling_params: return SamplingParams.from_optional(**self.default_sampling_params) return SamplingParams() def generate( self, prompts: PromptType | Sequence[PromptType], sampling_params: SamplingParams | Sequence[SamplingParams] | None = None, *, use_tqdm: bool | Callable[..., tqdm] = True, lora_request: list[LoRARequest] | LoRARequest | None = None, priority: list[int] | None = None, ) -> list[RequestOutput]: """Generates the completions for the input prompts. This class automatically batches the given prompts, considering the memory constraint. For the best performance, put all of your prompts into a single list and pass it to this method. Args: prompts: The prompts to the LLM. You may pass a sequence of prompts for batch inference. See [PromptType][vllm.inputs.PromptType] for more details about the format of each prompt. sampling_params: The sampling parameters for text generation. If None, we use the default sampling parameters. When it is a single value, it is applied to every prompt. When it is a list, the list must have the same length as the prompts and it is paired one by one with the prompt. use_tqdm: If `True`, shows a tqdm progress bar. If a callable (e.g., `functools.partial(tqdm, leave=False)`), it is used to create the progress bar. If `False`, no progress bar is created. lora_request: LoRA request to use for generation, if any. priority: The priority of the requests, if any. Only applicable when priority scheduling policy is enabled. If provided, must be a list of integers matching the length of `prompts`, where each priority value corresponds to the prompt at the same index. Returns: A list of `RequestOutput` objects containing the generated completions in the same order as the input prompts. Note: Using `prompts` and `prompt_token_ids` as keyword parameters is considered legacy and may be deprecated in the future. You should instead pass them via the `inputs` parameter. 
""" model_config = self.model_config runner_type = model_config.runner_type if runner_type != "generate": raise ValueError( "LLM.generate() is only supported for generative models. " "Try passing `--runner generate` to use the model as a " "generative model." ) if sampling_params is None: # Use default sampling params. sampling_params = self.get_default_sampling_params() # Add any modality specific loras to the corresponding prompts lora_request = self._get_modality_specific_lora_reqs(prompts, lora_request) self._validate_and_add_requests( prompts=prompts, params=sampling_params, use_tqdm=use_tqdm, lora_request=lora_request, priority=priority, ) outputs = self._run_engine(use_tqdm=use_tqdm) return self.engine_class.validate_outputs(outputs, RequestOutput) def _get_modality_specific_lora_reqs( self, prompts: PromptType | Sequence[PromptType], lora_request: list[LoRARequest] | LoRARequest | None, ): # Grab the lora config off the vllm config on the engine, # since this is the same for both v0 & v1. lora_config = self.llm_engine.vllm_config.lora_config # If there's no lora config / default_mm_loras, or the model # isn't multimodal, leave the lora as is. 
if ( lora_config is None or not self.model_config.is_multimodal_model or (lora_config and lora_config.default_mm_loras is None) ): return lora_request if not isinstance(prompts, Sequence) or isinstance(prompts, str): prompts = [prompts] optional_loras = ( [lora_request] * len(prompts) if not isinstance(lora_request, Sequence) else lora_request ) return [ self._resolve_single_prompt_mm_lora( prompt, opt_lora_req, lora_config.default_mm_loras, ) for prompt, opt_lora_req in zip(prompts, optional_loras) ] def _resolve_single_prompt_mm_lora( self, prompt: PromptType, lora_request: LoRARequest | None, default_mm_loras: dict[str, str] | None, ): if ( not default_mm_loras or not isinstance(prompt, dict) or not (mm_data := prompt.get("multi_modal_data") or {}) ): return lora_request intersection = set( mm_data.keys() # type: ignore ).intersection(default_mm_loras.keys()) if not intersection: return lora_request if len(intersection) > 1: # TODO: Would be nice to be able to have multiple loras per prompt logger.warning( "Multiple modality specific loras were registered and would be" " used by a single prompt consuming several modalities; " " currently we only support one lora per request; as such," " lora(s) registered with modalities: %s" " will be skipped", intersection, ) return lora_request # Build the LoRA request; the ID of the default mm lora is the # index of the modality name sorted alphabetically + 1. modality_name = intersection.pop() modality_lora_path = default_mm_loras[modality_name] modality_lora_id = sorted(default_mm_loras).index(modality_name) + 1 # If we have a collision, warn if there is a collision, # but always send the explicitly provided request. 
if lora_request: if lora_request.lora_int_id != modality_lora_id: logger.warning( "A modality with a registered lora and a lora_request " "with a different ID were provided; falling back to the " "lora_request as we only apply one LoRARequest per prompt" ) return lora_request return LoRARequest( modality_name, modality_lora_id, modality_lora_path, ) def collective_rpc( self, method: str | Callable[..., _R], timeout: float | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, ) -> list[_R]: """ Execute an RPC call on all workers. Args: method: Name of the worker method to execute, or a callable that is serialized and sent to all workers to execute. If the method is a callable, it should accept an additional `self` argument, in addition to the arguments passed in `args` and `kwargs`. The `self` argument will be the worker object. timeout: Maximum time in seconds to wait for execution. Raises a [`TimeoutError`][] on timeout. `None` means wait indefinitely. args: Positional arguments to pass to the worker method. kwargs: Keyword arguments to pass to the worker method. Returns: A list containing the results from each worker. Note: It is recommended to use this API to only pass control messages, and set up data-plane communication to pass data. """ return self.llm_engine.collective_rpc(method, timeout, args, kwargs) def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]: """ Run a function directly on the model inside each worker, returning the result for each of them. !!! warning To reduce the overhead of data transfer, avoid returning large arrays or tensors from this method. If you must return them, make sure you move them to CPU first to avoid taking up additional VRAM! 
""" return self.llm_engine.apply_model(func) def _get_beam_search_lora_requests( self, lora_request: list[LoRARequest] | LoRARequest | None, prompts: list[TokensPrompt | TextPrompt], ) -> list[LoRARequest | None]: """Get the optional lora request corresponding to each prompt.""" if isinstance(lora_request, Sequence) and len(lora_request) != len(prompts): raise ValueError( "Lora request list should be the same length as the prompts" ) if lora_request is None or isinstance(lora_request, LoRARequest): return [lora_request] * len(prompts) raise TypeError(f"Invalid lora_request type {type(lora_request)}") def beam_search( self, prompts: list[TokensPrompt | TextPrompt], params: BeamSearchParams, lora_request: list[LoRARequest] | LoRARequest | None = None, use_tqdm: bool = False, concurrency_limit: int | None = None, ) -> list[BeamSearchOutput]: """ Generate sequences using beam search. Args: prompts: A list of prompts. Each prompt can be a string or a list of token IDs. params: The beam search parameters. lora_request: LoRA request to use for generation, if any. use_tqdm: Whether to use tqdm to display the progress bar. concurrency_limit: The maximum number of concurrent requests. If None, the number of concurrent requests is unlimited. """ # TODO: how does beam search work together with length penalty, # frequency, penalty, and stopping criteria, etc.? beam_width = params.beam_width max_tokens = params.max_tokens temperature = params.temperature ignore_eos = params.ignore_eos length_penalty = params.length_penalty lora_requests = self._get_beam_search_lora_requests(lora_request, prompts) tokenizer = self.get_tokenizer() sort_beams_key = create_sort_beams_key_function( tokenizer.eos_token_id, length_penalty, ) if use_tqdm and concurrency_limit is not None: logger.warning( "Progress bar is not supported when using concurrency_limit. " "Disabling progress bar." 
) use_tqdm = False if concurrency_limit is None: concurrency_limit = len(prompts) def create_tokens_prompt_from_beam(beam: BeamSearchSequence) -> TokensPrompt: token_prompt_kwargs: TokensPrompt = {"prompt_token_ids": beam.tokens} if beam.multi_modal_data is not None: token_prompt_kwargs["multi_modal_data"] = beam.multi_modal_data if beam.mm_processor_kwargs is not None: token_prompt_kwargs["mm_processor_kwargs"] = beam.mm_processor_kwargs return TokensPrompt(**token_prompt_kwargs) # generate 2 * beam_width candidates at each step # following the huggingface transformers implementation # at https://github.com/huggingface/transformers/blob/e15687fffe5c9d20598a19aeab721ae0a7580f8a/src/transformers/generation/beam_search.py#L534 # noqa beam_search_params = SamplingParams( logprobs=2 * beam_width, max_tokens=1, temperature=temperature, skip_clone=True, # Internal beam search, safe to skip clone ) instances: list[BeamSearchInstance] = [] for lora_req, prompt in zip(lora_requests, prompts): # Add multimodal processor kwargs & data mm_kwargs = {} if "multi_modal_data" in prompt: mm_kwargs["multi_modal_data"] = prompt["multi_modal_data"] if "mm_processor_kwargs" in prompt: mm_kwargs["mm_processor_kwargs"] = prompt["mm_processor_kwargs"] if "prompt_token_ids" in prompt: prompt = cast(TokensPrompt, prompt) # Needed for mypy prompt_tokens = prompt["prompt_token_ids"] else: prompt_tokens = tokenizer.encode(prompt["prompt"]) instances.append( BeamSearchInstance( prompt_tokens, lora_request=lora_req, logprobs=None, **mm_kwargs, ), ) for prompt_start in range(0, len(prompts), concurrency_limit): instances_batch = instances[prompt_start : prompt_start + concurrency_limit] token_iter = range(max_tokens) if use_tqdm: token_iter = tqdm( token_iter, desc="Beam search", unit="token", unit_scale=False ) logger.warning( "The progress bar shows the upper bound on token steps and " "may finish early due to stopping conditions. It does not " "reflect instance-level progress." 
) for _ in token_iter: all_beams: list[BeamSearchSequence] = list( sum((instance.beams for instance in instances_batch), []) ) pos = [0] + list( itertools.accumulate( len(instance.beams) for instance in instances_batch ) ) instance_start_and_end: list[tuple[int, int]] = list( zip(pos[:-1], pos[1:]) ) if len(all_beams) == 0: break # create corresponding batch entries for prompt & optional lora prompts_batch, lora_req_batch = zip( *[ (create_tokens_prompt_from_beam(beam), beam.lora_request) for beam in all_beams ] ) # only runs for one step # we don't need to use tqdm here output = self.generate( prompts_batch, sampling_params=beam_search_params, use_tqdm=False, lora_request=lora_req_batch, ) for (start, end), instance in zip( instance_start_and_end, instances_batch ): instance_new_beams = [] for i in range(start, end): current_beam = all_beams[i] result = output[i] if result.outputs[0].logprobs is not None: # if `result.outputs[0].logprobs` is None, it means # the sequence is completed because of the # max-model-len or abortion. we don't need to add # it to the new beams. logprobs = result.outputs[0].logprobs[0] for token_id, logprob_obj in logprobs.items(): new_beam = BeamSearchSequence( tokens=current_beam.tokens + [token_id], logprobs=current_beam.logprobs + [logprobs], lora_request=current_beam.lora_request, cum_logprob=current_beam.cum_logprob + logprob_obj.logprob, multi_modal_data=current_beam.multi_modal_data, mm_processor_kwargs=current_beam.mm_processor_kwargs, ) if ( token_id == tokenizer.eos_token_id and not ignore_eos ): instance.completed.append(new_beam) else: instance_new_beams.append(new_beam) sorted_beams = sorted( instance_new_beams, key=sort_beams_key, reverse=True )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/launcher.py
vllm/entrypoints/launcher.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import signal import socket from http import HTTPStatus from typing import Any import uvicorn from fastapi import FastAPI, Request, Response from vllm import envs from vllm.engine.protocol import EngineClient from vllm.entrypoints.constants import ( H11_MAX_HEADER_COUNT_DEFAULT, H11_MAX_INCOMPLETE_EVENT_SIZE_DEFAULT, ) from vllm.entrypoints.ssl import SSLCertRefresher from vllm.logger import init_logger from vllm.utils.network_utils import find_process_using_port from vllm.v1.engine.exceptions import EngineDeadError, EngineGenerateError logger = init_logger(__name__) async def serve_http( app: FastAPI, sock: socket.socket | None, enable_ssl_refresh: bool = False, **uvicorn_kwargs: Any, ): """ Start a FastAPI app using Uvicorn, with support for custom Uvicorn config options. Supports http header limits via h11_max_incomplete_event_size and h11_max_header_count. """ logger.info("Available routes are:") for route in app.routes: methods = getattr(route, "methods", None) path = getattr(route, "path", None) if methods is None or path is None: continue logger.info("Route: %s, Methods: %s", path, ", ".join(methods)) # Extract header limit options if present h11_max_incomplete_event_size = uvicorn_kwargs.pop( "h11_max_incomplete_event_size", None ) h11_max_header_count = uvicorn_kwargs.pop("h11_max_header_count", None) # Set safe defaults if not provided if h11_max_incomplete_event_size is None: h11_max_incomplete_event_size = H11_MAX_INCOMPLETE_EVENT_SIZE_DEFAULT if h11_max_header_count is None: h11_max_header_count = H11_MAX_HEADER_COUNT_DEFAULT config = uvicorn.Config(app, **uvicorn_kwargs) # Set header limits config.h11_max_incomplete_event_size = h11_max_incomplete_event_size config.h11_max_header_count = h11_max_header_count config.load() server = uvicorn.Server(config) _add_shutdown_handlers(app, server) loop = asyncio.get_running_loop() 
watchdog_task = loop.create_task(watchdog_loop(server, app.state.engine_client)) server_task = loop.create_task(server.serve(sockets=[sock] if sock else None)) ssl_cert_refresher = ( None if not enable_ssl_refresh else SSLCertRefresher( ssl_context=config.ssl, key_path=config.ssl_keyfile, cert_path=config.ssl_certfile, ca_path=config.ssl_ca_certs, ) ) def signal_handler() -> None: # prevents the uvicorn signal handler to exit early server_task.cancel() watchdog_task.cancel() if ssl_cert_refresher: ssl_cert_refresher.stop() async def dummy_shutdown() -> None: pass loop.add_signal_handler(signal.SIGINT, signal_handler) loop.add_signal_handler(signal.SIGTERM, signal_handler) try: await server_task return dummy_shutdown() except asyncio.CancelledError: port = uvicorn_kwargs["port"] process = find_process_using_port(port) if process is not None: logger.warning( "port %s is used by process %s launched with command:\n%s", port, process, " ".join(process.cmdline()), ) logger.info("Shutting down FastAPI HTTP server.") return server.shutdown() finally: watchdog_task.cancel() async def watchdog_loop(server: uvicorn.Server, engine: EngineClient): """ # Watchdog task that runs in the background, checking # for error state in the engine. Needed to trigger shutdown # if an exception arises is StreamingResponse() generator. """ VLLM_WATCHDOG_TIME_S = 5.0 while True: await asyncio.sleep(VLLM_WATCHDOG_TIME_S) terminate_if_errored(server, engine) def terminate_if_errored(server: uvicorn.Server, engine: EngineClient): """ See discussions here on shutting down a uvicorn server https://github.com/encode/uvicorn/discussions/1103 In this case we cannot await the server shutdown here because handler must first return to close the connection for this request. 
""" engine_errored = engine.errored and not engine.is_running if not envs.VLLM_KEEP_ALIVE_ON_ENGINE_DEATH and engine_errored: server.should_exit = True def _add_shutdown_handlers(app: FastAPI, server: uvicorn.Server) -> None: """ VLLM V1 AsyncLLM catches exceptions and returns only two types: EngineGenerateError and EngineDeadError. EngineGenerateError is raised by the per request generate() method. This error could be request specific (and therefore recoverable - e.g. if there is an error in input processing). EngineDeadError is raised by the background output_handler method. This error is global and therefore not recoverable. We register these @app.exception_handlers to return nice responses to the end user if they occur and shut down if needed. See https://fastapi.tiangolo.com/tutorial/handling-errors/ for more details on how exception handlers work. If an exception is encountered in a StreamingResponse generator, the exception is not raised, since we already sent a 200 status. Rather, we send an error message as the next chunk. Since the exception is not raised, this means that the server will not automatically shut down. Instead, we use the watchdog background task for check for errored state. """ @app.exception_handler(RuntimeError) @app.exception_handler(EngineDeadError) @app.exception_handler(EngineGenerateError) async def runtime_exception_handler(request: Request, __): terminate_if_errored( server=server, engine=request.app.state.engine_client, ) return Response(status_code=HTTPStatus.INTERNAL_SERVER_ERROR)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/logger.py
vllm/entrypoints/logger.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections.abc import Sequence import torch from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.pooling_params import PoolingParams from vllm.sampling_params import BeamSearchParams, SamplingParams logger = init_logger(__name__) class RequestLogger: def __init__(self, *, max_log_len: int | None) -> None: self.max_log_len = max_log_len def log_inputs( self, request_id: str, prompt: str | None, prompt_token_ids: list[int] | None, prompt_embeds: torch.Tensor | None, params: SamplingParams | PoolingParams | BeamSearchParams | None, lora_request: LoRARequest | None, ) -> None: max_log_len = self.max_log_len if max_log_len is not None: if prompt is not None: prompt = prompt[:max_log_len] if prompt_token_ids is not None: prompt_token_ids = prompt_token_ids[:max_log_len] logger.debug( "Request %s details: prompt: %r, " "prompt_token_ids: %s, " "prompt_embeds shape: %s.", request_id, prompt, prompt_token_ids, prompt_embeds.shape if prompt_embeds is not None else None, ) logger.info( "Received request %s: params: %s, lora_request: %s.", request_id, params, lora_request, ) def log_outputs( self, request_id: str, outputs: str, output_token_ids: Sequence[int] | None, finish_reason: str | None = None, is_streaming: bool = False, delta: bool = False, ) -> None: max_log_len = self.max_log_len if max_log_len is not None: if outputs is not None: outputs = outputs[:max_log_len] if output_token_ids is not None: # Convert to list and apply truncation output_token_ids = list(output_token_ids)[:max_log_len] stream_info = "" if is_streaming: stream_info = " (streaming delta)" if delta else " (streaming complete)" logger.info( "Generated response %s%s: output: %r, " "output_token_ids: %s, finish_reason: %s", request_id, stream_info, outputs, output_token_ids, finish_reason, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/utils.py
vllm/entrypoints/utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import dataclasses import functools import os from argparse import Namespace from pathlib import Path from typing import Any from fastapi import Request from fastapi.responses import JSONResponse, StreamingResponse from starlette.background import BackgroundTask, BackgroundTasks from vllm.config import ModelConfig from vllm.engine.arg_utils import EngineArgs from vllm.engine.protocol import EngineClient from vllm.entrypoints.chat_utils import ( load_chat_template, resolve_hf_chat_template, resolve_mistral_chat_template, ) from vllm.entrypoints.openai.cli_args import make_arg_parser from vllm.entrypoints.openai.protocol import ( ChatCompletionRequest, CompletionRequest, StreamOptions, ) from vllm.entrypoints.openai.serving_models import LoRAModulePath from vllm.logger import init_logger from vllm.platforms import current_platform from vllm.tokenizers.mistral import MistralTokenizer from vllm.utils.argparse_utils import FlexibleArgumentParser logger = init_logger(__name__) VLLM_SUBCMD_PARSER_EPILOG = ( "For full list: vllm {subcmd} --help=all\n" "For a section: vllm {subcmd} --help=ModelConfig (case-insensitive)\n" # noqa: E501 "For a flag: vllm {subcmd} --help=max-model-len (_ or - accepted)\n" # noqa: E501 "Documentation: https://docs.vllm.ai\n" ) async def listen_for_disconnect(request: Request) -> None: """Returns if a disconnect message is received""" while True: message = await request.receive() if message["type"] == "http.disconnect": # If load tracking is enabled *and* the counter exists, decrement # it. Combines the previous nested checks into a single condition # to satisfy the linter rule. 
if getattr( request.app.state, "enable_server_load_tracking", False ) and hasattr(request.app.state, "server_load_metrics"): request.app.state.server_load_metrics -= 1 break def with_cancellation(handler_func): """Decorator that allows a route handler to be cancelled by client disconnections. This does _not_ use request.is_disconnected, which does not work with middleware. Instead this follows the pattern from starlette.StreamingResponse, which simultaneously awaits on two tasks- one to wait for an http disconnect message, and the other to do the work that we want done. When the first task finishes, the other is cancelled. A core assumption of this method is that the body of the request has already been read. This is a safe assumption to make for fastapi handlers that have already parsed the body of the request into a pydantic model for us. This decorator is unsafe to use elsewhere, as it will consume and throw away all incoming messages for the request while it looks for a disconnect message. In the case where a `StreamingResponse` is returned by the handler, this wrapper will stop listening for disconnects and instead the response object will start listening for disconnects. """ # Functools.wraps is required for this wrapper to appear to fastapi as a # normal route handler, with the correct request type hinting. 
@functools.wraps(handler_func) async def wrapper(*args, **kwargs): # The request is either the second positional arg or `raw_request` request = args[1] if len(args) > 1 else kwargs["raw_request"] handler_task = asyncio.create_task(handler_func(*args, **kwargs)) cancellation_task = asyncio.create_task(listen_for_disconnect(request)) done, pending = await asyncio.wait( [handler_task, cancellation_task], return_when=asyncio.FIRST_COMPLETED ) for task in pending: task.cancel() if handler_task in done: return handler_task.result() return None return wrapper def decrement_server_load(request: Request): request.app.state.server_load_metrics -= 1 def load_aware_call(func): @functools.wraps(func) async def wrapper(*args, **kwargs): raw_request = kwargs.get("raw_request", args[1] if len(args) > 1 else None) if raw_request is None: raise ValueError( "raw_request required when server load tracking is enabled" ) if not getattr(raw_request.app.state, "enable_server_load_tracking", False): return await func(*args, **kwargs) # ensure the counter exists if not hasattr(raw_request.app.state, "server_load_metrics"): raw_request.app.state.server_load_metrics = 0 raw_request.app.state.server_load_metrics += 1 try: response = await func(*args, **kwargs) except Exception: raw_request.app.state.server_load_metrics -= 1 raise if isinstance(response, (JSONResponse, StreamingResponse)): if response.background is None: response.background = BackgroundTask(decrement_server_load, raw_request) elif isinstance(response.background, BackgroundTasks): response.background.add_task(decrement_server_load, raw_request) elif isinstance(response.background, BackgroundTask): # Convert the single BackgroundTask to BackgroundTasks # and chain the decrement_server_load task to it tasks = BackgroundTasks() tasks.add_task( response.background.func, *response.background.args, **response.background.kwargs, ) tasks.add_task(decrement_server_load, raw_request) response.background = tasks else: 
raw_request.app.state.server_load_metrics -= 1 return response return wrapper def cli_env_setup(): # The safest multiprocessing method is `spawn`, as the default `fork` method # is not compatible with some accelerators. The default method will be # changing in future versions of Python, so we should use it explicitly when # possible. # # We only set it here in the CLI entrypoint, because changing to `spawn` # could break some existing code using vLLM as a library. `spawn` will cause # unexpected behavior if the code is not protected by # `if __name__ == "__main__":`. # # References: # - https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods # - https://pytorch.org/docs/stable/notes/multiprocessing.html#cuda-in-multiprocessing # - https://pytorch.org/docs/stable/multiprocessing.html#sharing-cuda-tensors # - https://docs.habana.ai/en/latest/PyTorch/Getting_Started_with_PyTorch_and_Gaudi/Getting_Started_with_PyTorch.html?highlight=multiprocessing#torch-multiprocessing-for-dataloaders if "VLLM_WORKER_MULTIPROC_METHOD" not in os.environ: logger.debug("Setting VLLM_WORKER_MULTIPROC_METHOD to 'spawn'") os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" def _validate_truncation_size( max_model_len: int, truncate_prompt_tokens: int | None, tokenization_kwargs: dict[str, Any] | None = None, ) -> int | None: if truncate_prompt_tokens is not None: if truncate_prompt_tokens <= -1: truncate_prompt_tokens = max_model_len if truncate_prompt_tokens > max_model_len: raise ValueError( f"truncate_prompt_tokens value ({truncate_prompt_tokens}) " f"is greater than max_model_len ({max_model_len})." f" Please, select a smaller truncation size." 
) if tokenization_kwargs is not None: tokenization_kwargs["truncation"] = True tokenization_kwargs["max_length"] = truncate_prompt_tokens else: if tokenization_kwargs is not None: tokenization_kwargs["truncation"] = False return truncate_prompt_tokens def get_max_tokens( max_model_len: int, request: ChatCompletionRequest | CompletionRequest, input_length: int, default_sampling_params: dict, ) -> int: max_tokens = getattr(request, "max_completion_tokens", None) or request.max_tokens default_max_tokens = max_model_len - input_length max_output_tokens = current_platform.get_max_output_tokens(input_length) return min( val for val in ( default_max_tokens, max_tokens, max_output_tokens, default_sampling_params.get("max_tokens"), ) if val is not None ) def log_non_default_args(args: Namespace | EngineArgs): non_default_args = {} # Handle Namespace if isinstance(args, Namespace): parser = make_arg_parser(FlexibleArgumentParser()) for arg, default in vars(parser.parse_args([])).items(): if default != getattr(args, arg): non_default_args[arg] = getattr(args, arg) # Handle EngineArgs instance elif isinstance(args, EngineArgs): default_args = EngineArgs(model=args.model) # Create default instance for field in dataclasses.fields(args): current_val = getattr(args, field.name) default_val = getattr(default_args, field.name) if current_val != default_val: non_default_args[field.name] = current_val if default_args.model != EngineArgs.model: non_default_args["model"] = default_args.model else: raise TypeError( "Unsupported argument type. Must be Namespace or EngineArgs instance." 
) logger.info("non-default args: %s", non_default_args) def should_include_usage( stream_options: StreamOptions | None, enable_force_include_usage: bool ) -> tuple[bool, bool]: if stream_options: include_usage = stream_options.include_usage or enable_force_include_usage include_continuous_usage = include_usage and bool( stream_options.continuous_usage_stats ) else: include_usage, include_continuous_usage = enable_force_include_usage, False return include_usage, include_continuous_usage def process_lora_modules( args_lora_modules: list[LoRAModulePath], default_mm_loras: dict[str, str] | None ) -> list[LoRAModulePath]: lora_modules = args_lora_modules if default_mm_loras: default_mm_lora_paths = [ LoRAModulePath( name=modality, path=lora_path, ) for modality, lora_path in default_mm_loras.items() ] if args_lora_modules is None: lora_modules = default_mm_lora_paths else: lora_modules += default_mm_lora_paths return lora_modules async def process_chat_template( args_chat_template: Path | str | None, engine_client: EngineClient, model_config: ModelConfig, ) -> str | None: resolved_chat_template = load_chat_template(args_chat_template) if resolved_chat_template is not None: # Get the tokenizer to check official template tokenizer = await engine_client.get_tokenizer() if isinstance(tokenizer, MistralTokenizer): # The warning is logged in resolve_mistral_chat_template. resolved_chat_template = resolve_mistral_chat_template( chat_template=resolved_chat_template ) else: hf_chat_template = resolve_hf_chat_template( tokenizer=tokenizer, chat_template=None, tools=None, model_config=model_config, ) if hf_chat_template != resolved_chat_template: logger.warning( "Using supplied chat template: %s\n" "It is different from official chat template '%s'. " "This discrepancy may lead to performance degradation.", resolved_chat_template, model_config.model, ) return resolved_chat_template
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/__init__.py
vllm/entrypoints/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/renderer.py
vllm/entrypoints/renderer.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import io from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Annotated import pybase64 import torch from pydantic import Field from vllm.config import ModelConfig from vllm.entrypoints.openai.protocol import VLLMValidationError from vllm.inputs.data import EmbedsPrompt, TextPrompt, TokensPrompt from vllm.inputs.parse import get_prompt_components, parse_raw_prompts from vllm.tokenizers import TokenizerLike from vllm.utils.async_utils import AsyncMicrobatchTokenizer @dataclass(frozen=True) class RenderConfig: """Configuration to control how prompts are prepared.""" max_length: int | None = None """Maximum allowable total input token length. If provided, token inputs longer than this raise `ValueError`.""" truncate_prompt_tokens: int | None = None """Number of tokens to keep. `None` means no truncation. `0` yields an empty list (and skips embeds). `-1` maps to `model_config.max_model_len`.""" add_special_tokens: bool = True """Whether to add model-specific special tokens during tokenization.""" cache_salt: str | None = None """String to disambiguate prefix cache entries.""" needs_detokenization: bool | None = False """If True, detokenize IDs back to text for inclusion in outputs.""" def verify_truncate_prompt_tokens(self, model_config: ModelConfig) -> int | None: """Validate and normalize `truncate_prompt_tokens` parameter.""" truncate_prompt_tokens = self.truncate_prompt_tokens if truncate_prompt_tokens is None or truncate_prompt_tokens == 0: return truncate_prompt_tokens if truncate_prompt_tokens < 0: truncate_prompt_tokens = model_config.max_model_len max_length = self.max_length if max_length is not None and truncate_prompt_tokens > max_length: # type: ignore[operator] raise ValueError( f"{truncate_prompt_tokens=} cannot be greater than " f"{max_length=}. Please select a smaller truncation size." 
) return truncate_prompt_tokens class BaseRenderer(ABC): """ Base class for unified input processing and rendering. The Renderer serves as a unified input processor that consolidates tokenization, chat template formatting, and multimodal input handling into a single component. It converts high-level API requests (OpenAI-style JSON) into token IDs and multimodal features ready for engine consumption. Key responsibilities: - Convert text prompts to token sequences with proper special tokens - Apply chat templates and format conversations - Handle multimodal inputs (images, audio, etc.) when applicable - Manage prompt truncation and length validation - Provide clean separation between API layer and engine core """ def __init__( self, model_config: ModelConfig, tokenizer: TokenizerLike | None = None, ): super().__init__() self.model_config = model_config self.tokenizer = tokenizer @abstractmethod async def render_prompt( self, *, prompt_or_prompts: str | list[str] | list[int] | list[list[int]], config: RenderConfig, ) -> list[TokensPrompt]: """ Convert text or token inputs into engine-ready TokensPrompt objects. This method accepts text or token inputs and produces a list of [`TokensPrompt`][vllm.inputs.data.TokensPrompt] objects for the engine. Args: prompt_or_prompts: One of: - `str`: Single text prompt. - `list[str]`: Batch of text prompts. - `list[int]`: Single pre-tokenized sequence. - `list[list[int]]`: Batch of pre-tokenized sequences. config: Render configuration controlling how prompts are prepared (e.g., tokenization and length handling). Returns: list[TokensPrompt]: Engine-ready token prompts. Raises: ValueError: If input formats are invalid or length limits exceeded. 
""" raise NotImplementedError @abstractmethod async def render_prompt_and_embeds( self, *, prompt_or_prompts: str | list[str] | list[int] | list[list[int]] | None = None, prompt_embeds: bytes | list[bytes] | None = None, config: RenderConfig, ) -> list[TokensPrompt | EmbedsPrompt]: """ Convert text/token and/or base64-encoded embeddings inputs into engine-ready prompt objects using a unified RenderConfig. At least one of `prompt_or_prompts` or `prompt_embeds` must be provided and non-empty. If both are omitted or empty (e.g., empty string and empty list), a `ValueError` is raised. Args: prompt_or_prompts: Text or token inputs to include. prompt_embeds: Base64-encoded bytes (or list thereof) containing a torch-saved tensor to be used as prompt embeddings. config: Render configuration controlling how prompts are prepared (e.g., tokenization and length handling). Returns: list[Union[TokensPrompt, EmbedsPrompt]]: Engine-ready prompt objects. Raises: ValueError: If both `prompt_or_prompts` and `prompt_embeds` are omitted or empty (decoder prompt cannot be empty), or if length limits are exceeded. 
""" raise NotImplementedError def load_prompt_embeds( self, prompt_embeds: bytes | list[bytes], truncate_prompt_tokens: Annotated[int, Field(ge=0)] | None = None, cache_salt: str | None = None, ) -> list[EmbedsPrompt]: """Load and validate base64-encoded embeddings into prompt objects.""" if not self.model_config.enable_prompt_embeds: raise VLLMValidationError( "You must set `--enable-prompt-embeds` to input `prompt_embeds`.", parameter="prompt_embeds", ) def _load_and_validate_embed(embed: bytes) -> EmbedsPrompt: # Enable sparse tensor integrity checks to prevent out-of-bounds # writes from maliciously crafted tensors with torch.sparse.check_sparse_tensor_invariants(): tensor = torch.load( io.BytesIO(pybase64.b64decode(embed, validate=True)), weights_only=True, map_location=torch.device("cpu"), ) assert isinstance(tensor, torch.Tensor) and tensor.dtype in ( torch.float32, torch.bfloat16, torch.float16, ) tensor = tensor.to_dense() if tensor.dim() > 2: tensor = tensor.squeeze(0) assert tensor.dim() == 2 if truncate_prompt_tokens is not None: tensor = tensor[-truncate_prompt_tokens:] embeds_prompt = EmbedsPrompt(prompt_embeds=tensor) if cache_salt is not None: embeds_prompt["cache_salt"] = cache_salt return embeds_prompt if isinstance(prompt_embeds, list): return [_load_and_validate_embed(embed) for embed in prompt_embeds] return [_load_and_validate_embed(prompt_embeds)] class CompletionRenderer(BaseRenderer): def __init__( self, model_config: ModelConfig, tokenizer: TokenizerLike | None = None, async_tokenizer_pool: dict[TokenizerLike, AsyncMicrobatchTokenizer] | None = None, ): super().__init__(model_config, tokenizer) self.async_tokenizer_pool = async_tokenizer_pool self.async_tokenizer: AsyncMicrobatchTokenizer | None = None async def render_prompt( self, *, prompt_or_prompts: str | list[str] | list[int] | list[list[int]], config: RenderConfig, ) -> list[TokensPrompt]: """Implementation of prompt rendering for completion-style requests. 
Uses async tokenizer pooling for improved performance. See base class for detailed parameter documentation. """ truncate_prompt_tokens = config.verify_truncate_prompt_tokens(self.model_config) if truncate_prompt_tokens == 0: return [] tasks = ( self._create_prompt( prompt_input, config=config, truncate_prompt_tokens=truncate_prompt_tokens, ) for prompt_input in parse_raw_prompts(prompt_or_prompts) ) return await asyncio.gather(*tasks) async def render_prompt_and_embeds( self, *, prompt_or_prompts: str | list[str] | list[int] | list[list[int]] | None = None, prompt_embeds: bytes | list[bytes] | None = None, config: RenderConfig, ) -> list[TokensPrompt | EmbedsPrompt]: """ Render text/token prompts and/or precomputed embedding prompts. At least one of `prompt_or_prompts` or `prompt_embeds` must be provided. """ truncate_prompt_tokens = config.verify_truncate_prompt_tokens(self.model_config) if truncate_prompt_tokens == 0: return [] rendered: list[TokensPrompt | EmbedsPrompt] = [] if prompt_embeds is not None: rendered.extend( self.load_prompt_embeds( prompt_embeds, truncate_prompt_tokens, config.cache_salt ) ) if prompt_or_prompts is None or prompt_or_prompts == "": return rendered token_prompts = await self.render_prompt( prompt_or_prompts=prompt_or_prompts, config=config, ) rendered.extend(token_prompts) return rendered def _maybe_apply_truncation( self, token_ids: list[int], truncate_prompt_tokens: int | None ) -> list[int]: """Apply truncation to token sequence.""" if truncate_prompt_tokens is None: return token_ids if truncate_prompt_tokens >= len(token_ids): return token_ids return token_ids[-truncate_prompt_tokens:] async def _create_prompt( self, prompt_input: TextPrompt | TokensPrompt, config: RenderConfig, truncate_prompt_tokens: int | None, ) -> TokensPrompt: prompt, prompt_token_ids, _ = get_prompt_components(prompt_input) if prompt_token_ids is not None: # NOTE: detokenization is needed when echo is enabled, # where the input token IDs are decoded back 
to text. return await self._create_prompt_from_token_ids( prompt_token_ids, config.max_length, truncate_prompt_tokens, config.cache_salt, config.needs_detokenization, ) if prompt is not None: return await self._create_prompt_from_text( prompt, config.max_length, truncate_prompt_tokens, config.add_special_tokens, config.cache_salt, ) # TODO: Also handle embeds prompt using this method raise NotImplementedError async def _create_prompt_from_text( self, text: str, max_length: int | None, truncate_prompt_tokens: int | None, add_special_tokens: bool, cache_salt: str | None, ) -> TokensPrompt: """Tokenize text input asynchronously.""" async_tokenizer = self._get_async_tokenizer() # Handle encoder-specific preprocessing if ( self.model_config.encoder_config is not None and self.model_config.encoder_config.get("do_lower_case", False) ): text = text.lower() # Tokenize texts if truncate_prompt_tokens is None: encoded = await async_tokenizer(text, add_special_tokens=add_special_tokens) else: encoded = await async_tokenizer( text, add_special_tokens=add_special_tokens, truncation=True, max_length=truncate_prompt_tokens, ) return self._create_tokens_prompt( encoded.input_ids, max_length, cache_salt, text ) async def _create_prompt_from_token_ids( self, token_ids: list[int], max_length: int | None, truncate_prompt_tokens: int | None, cache_salt: str | None, needs_detokenization: bool | None = False, ) -> TokensPrompt: """Optionally detokenize token IDs and build a tokens prompt.""" token_ids = self._maybe_apply_truncation(token_ids, truncate_prompt_tokens) prompt = None if needs_detokenization: async_tokenizer = self._get_async_tokenizer() prompt = await async_tokenizer.decode(token_ids) return self._create_tokens_prompt( token_ids=token_ids, max_length=max_length, cache_salt=cache_salt, prompt=prompt, ) def _get_async_tokenizer(self) -> AsyncMicrobatchTokenizer: """Get or create async tokenizer using shared pool.""" async_tokenizer = self.async_tokenizer if async_tokenizer is 
not None: return async_tokenizer tokenizer = self.tokenizer if tokenizer is None: raise ValueError("No tokenizer available for text input processing") if self.async_tokenizer_pool is None: async_tokenizer = AsyncMicrobatchTokenizer(tokenizer) else: async_tokenizer = self.async_tokenizer_pool.get(tokenizer) if async_tokenizer is None: async_tokenizer = AsyncMicrobatchTokenizer(tokenizer) self.async_tokenizer_pool[tokenizer] = async_tokenizer self.async_tokenizer = async_tokenizer return async_tokenizer def _create_tokens_prompt( self, token_ids: list[int], max_length: int | None = None, cache_salt: str | None = None, prompt: str | None = None, ) -> TokensPrompt: """Create validated TokensPrompt.""" if max_length is not None and len(token_ids) > max_length: raise VLLMValidationError( f"This model's maximum context length is {max_length} tokens. " f"However, your request has {len(token_ids)} input tokens. " "Please reduce the length of the input messages.", parameter="input_tokens", value=len(token_ids), ) tokens_prompt = TokensPrompt(prompt_token_ids=token_ids) if cache_salt is not None: tokens_prompt["cache_salt"] = cache_salt if prompt is not None: tokens_prompt["prompt"] = prompt return tokens_prompt
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/context.py
vllm/entrypoints/context.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import contextlib import copy import json import logging from abc import ABC, abstractmethod from collections.abc import Callable from contextlib import AsyncExitStack from dataclasses import replace from typing import TYPE_CHECKING, Union from openai.types.responses.response_function_tool_call_output_item import ( ResponseFunctionToolCallOutputItem, ) from openai.types.responses.tool import Mcp from openai_harmony import Author, Message, Role, StreamState, TextContent from vllm import envs from vllm.entrypoints.chat_utils import ( ChatTemplateContentFormatOption, ) from vllm.entrypoints.constants import MCP_PREFIX from vllm.entrypoints.openai.parser.harmony_utils import ( get_encoding, get_streamable_parser_for_assistant, render_for_completion, ) from vllm.entrypoints.openai.parser.responses_parser import ( get_responses_parser_for_simple_context, ) from vllm.entrypoints.openai.protocol import ( FunctionCall, ResponseInputOutputItem, ResponseRawMessageAndToken, ResponsesRequest, ) from vllm.entrypoints.responses_utils import construct_tool_dicts from vllm.entrypoints.tool import Tool from vllm.entrypoints.tool_server import ToolServer from vllm.outputs import RequestOutput from vllm.reasoning.abs_reasoning_parsers import ReasoningParser from vllm.tokenizers import TokenizerLike from vllm.tool_parsers.abstract_tool_parser import ToolParser from vllm.utils import random_uuid if TYPE_CHECKING: from mcp.client import ClientSession logger = logging.getLogger(__name__) # This is currently needed as the tool type doesn't 1:1 match the # tool namespace, which is what is used to look up the # connection to the tool server _TOOL_NAME_TO_TYPE_MAP = { "browser": "web_search_preview", "python": "code_interpreter", "container": "container", } def _map_tool_name_to_tool_type(tool_name: str) -> str: if tool_name not in _TOOL_NAME_TO_TYPE_MAP: available_tools = 
", ".join(_TOOL_NAME_TO_TYPE_MAP.keys()) raise ValueError( f"Built-in tool name '{tool_name}' not defined in mapping. " f"Available tools: {available_tools}" ) return _TOOL_NAME_TO_TYPE_MAP[tool_name] class TurnMetrics: """Tracks token and toolcall details for a single conversation turn.""" def __init__( self, input_tokens: int = 0, output_tokens: int = 0, cached_input_tokens: int = 0, tool_output_tokens: int = 0, ) -> None: self.input_tokens = input_tokens self.output_tokens = output_tokens self.cached_input_tokens = cached_input_tokens self.tool_output_tokens = tool_output_tokens def reset(self) -> None: """Reset counters for a new turn.""" self.input_tokens = 0 self.output_tokens = 0 self.cached_input_tokens = 0 self.tool_output_tokens = 0 def copy(self) -> "TurnMetrics": """Create a copy of this turn's token counts.""" return TurnMetrics( self.input_tokens, self.output_tokens, self.cached_input_tokens, self.tool_output_tokens, ) class ConversationContext(ABC): @abstractmethod def append_output(self, output: RequestOutput) -> None: pass @abstractmethod def append_tool_output(self, output) -> None: pass @abstractmethod async def call_tool(self) -> list[Message]: pass @abstractmethod def need_builtin_tool_call(self) -> bool: pass @abstractmethod def render_for_completion(self) -> list[int]: pass @abstractmethod async def init_tool_sessions( self, tool_server: ToolServer | None, exit_stack: AsyncExitStack, request_id: str, mcp_tools: dict[str, Mcp], ) -> None: pass @abstractmethod async def cleanup_session(self) -> None: raise NotImplementedError("Should not be called.") def _create_json_parse_error_messages( last_msg: Message, e: json.JSONDecodeError ) -> list[Message]: """ Creates an error message when json parse failed. """ error_msg = ( f"Error parsing tool arguments as JSON: {str(e)}. " "Please ensure the tool call arguments are valid JSON and try again." 
) content = TextContent(text=error_msg) author = Author(role=Role.TOOL, name=last_msg.recipient) return [ Message( author=author, content=[content], recipient=Role.ASSISTANT, channel=last_msg.channel, ) ] class SimpleContext(ConversationContext): """This is a context that cannot handle MCP tool calls""" def __init__(self): self.last_output = None # Accumulated final output for streaming mode self._accumulated_text: str = "" self._accumulated_token_ids: list[int] = [] self._accumulated_logprobs: list = [] self.num_prompt_tokens = 0 self.num_output_tokens = 0 self.num_cached_tokens = 0 # todo num_reasoning_tokens is not implemented yet. self.num_reasoning_tokens = 0 # not implemented yet for SimpleContext self.all_turn_metrics = [] self.input_messages: list[ResponseRawMessageAndToken] = [] self.output_messages: list[ResponseRawMessageAndToken] = [] def append_output(self, output) -> None: self.last_output = output if not isinstance(output, RequestOutput): raise ValueError("SimpleContext only supports RequestOutput.") self.num_prompt_tokens = len(output.prompt_token_ids or []) self.num_cached_tokens = output.num_cached_tokens or 0 self.num_output_tokens += len(output.outputs[0].token_ids or []) # Accumulate text, token_ids, and logprobs for streaming mode delta_output = output.outputs[0] self._accumulated_text += delta_output.text self._accumulated_token_ids.extend(delta_output.token_ids) if delta_output.logprobs is not None: self._accumulated_logprobs.extend(delta_output.logprobs) if len(self.input_messages) == 0: output_prompt = output.prompt or "" output_prompt_token_ids = output.prompt_token_ids or [] self.input_messages.append( ResponseRawMessageAndToken( message=output_prompt, tokens=output_prompt_token_ids, ) ) self.output_messages.append( ResponseRawMessageAndToken( message=delta_output.text, tokens=delta_output.token_ids, ) ) @property def final_output(self) -> RequestOutput | None: """Return the final output, with complete text/token_ids/logprobs.""" if 
self.last_output is not None and self.last_output.outputs: assert isinstance(self.last_output, RequestOutput) final_output = copy.copy(self.last_output) # copy inner item to avoid modify last_output final_output.outputs = [replace(item) for item in self.last_output.outputs] final_output.outputs[0].text = self._accumulated_text final_output.outputs[0].token_ids = tuple(self._accumulated_token_ids) if self._accumulated_logprobs: final_output.outputs[0].logprobs = self._accumulated_logprobs return final_output return self.last_output def append_tool_output(self, output) -> None: raise NotImplementedError("Should not be called.") def need_builtin_tool_call(self) -> bool: return False async def call_tool(self) -> list[Message]: raise NotImplementedError("Should not be called.") def render_for_completion(self) -> list[int]: raise NotImplementedError("Should not be called.") async def init_tool_sessions( self, tool_server: ToolServer | None, exit_stack: AsyncExitStack, request_id: str, mcp_tools: dict[str, Mcp], ) -> None: pass async def cleanup_session(self) -> None: raise NotImplementedError("Should not be called.") class ParsableContext(ConversationContext): def __init__( self, *, response_messages: list[ResponseInputOutputItem], tokenizer: TokenizerLike, reasoning_parser_cls: Callable[[TokenizerLike], ReasoningParser] | None, request: ResponsesRequest, available_tools: list[str] | None, tool_parser_cls: Callable[[TokenizerLike], ToolParser] | None, chat_template: str | None, chat_template_content_format: ChatTemplateContentFormatOption, ): self.num_prompt_tokens = 0 self.num_output_tokens = 0 self.num_cached_tokens = 0 # TODO: num_reasoning_tokens is not implemented yet. 
self.num_reasoning_tokens = 0 # not implemented yet for ParsableContext self.all_turn_metrics: list[TurnMetrics] = [] if reasoning_parser_cls is None: raise ValueError("reasoning_parser_cls must be provided.") self.parser = get_responses_parser_for_simple_context( tokenizer=tokenizer, reasoning_parser_cls=reasoning_parser_cls, response_messages=response_messages, request=request, tool_parser_cls=tool_parser_cls, ) self.tool_parser_cls = tool_parser_cls self.request = request self.tokenizer = tokenizer self.available_tools = available_tools or [] self._tool_sessions: dict[str, ClientSession | Tool] = {} self.called_tools: set[str] = set() self.tool_dicts = construct_tool_dicts(request.tools, request.tool_choice) self.chat_template = chat_template self.chat_template_content_format = chat_template_content_format self.input_messages: list[ResponseRawMessageAndToken] = [] self.output_messages: list[ResponseRawMessageAndToken] = [] def append_output(self, output: RequestOutput) -> None: self.num_prompt_tokens = len(output.prompt_token_ids or []) self.num_cached_tokens = output.num_cached_tokens or 0 self.num_output_tokens += len(output.outputs[0].token_ids or []) self.parser.process(output.outputs[0]) # only store if enable_response_messages is True, save memory if self.request.enable_response_messages: output_prompt = output.prompt or "" output_prompt_token_ids = output.prompt_token_ids or [] if len(self.input_messages) == 0: self.input_messages.append( ResponseRawMessageAndToken( message=output_prompt, tokens=output_prompt_token_ids, ) ) else: self.output_messages.append( ResponseRawMessageAndToken( message=output_prompt, tokens=output_prompt_token_ids, ) ) self.output_messages.append( ResponseRawMessageAndToken( message=output.outputs[0].text, tokens=output.outputs[0].token_ids, ) ) def append_tool_output(self, output: list[ResponseInputOutputItem]) -> None: self.parser.response_messages.extend(output) def need_builtin_tool_call(self) -> bool: """Return true if the 
last message is a MCP tool call""" last_message = self.parser.response_messages[-1] # TODO(qandrew): figure out which tools are MCP tools if last_message.type == "function_call": # noqa: SIM102 if last_message.name in ( "code_interpreter", "python", "web_search_preview", ) or last_message.name.startswith("container"): return True return False async def call_python_tool( self, tool_session: Union["ClientSession", Tool], last_msg: FunctionCall ) -> list[ResponseInputOutputItem]: self.called_tools.add("python") if isinstance(tool_session, Tool): return await tool_session.get_result_parsable_context(self) args = json.loads(last_msg.arguments) param = { "code": args["code"], } result = await tool_session.call_tool("python", param) result_str = result.content[0].text message = ResponseFunctionToolCallOutputItem( id=f"mcpo_{random_uuid()}", type="function_call_output", call_id=f"call_{random_uuid()}", output=result_str, status="completed", ) return [message] async def call_search_tool( self, tool_session: Union["ClientSession", Tool], last_msg: FunctionCall ) -> list[ResponseInputOutputItem]: self.called_tools.add("browser") if isinstance(tool_session, Tool): return await tool_session.get_result_parsable_context(self) if envs.VLLM_TOOL_JSON_ERROR_AUTOMATIC_RETRY: try: args = json.loads(last_msg.arguments) except json.JSONDecodeError as e: return _create_json_parse_error_messages(last_msg, e) else: args = json.loads(last_msg.arguments) result = await tool_session.call_tool("search", args) result_str = result.content[0].text message = ResponseFunctionToolCallOutputItem( id=f"fco_{random_uuid()}", type="function_call_output", call_id=f"call_{random_uuid()}", output=result_str, status="completed", ) return [message] async def call_container_tool( self, tool_session: Union["ClientSession", Tool], last_msg: Message ) -> list[Message]: """ Call container tool. Expect this to be run in a stateful docker with command line terminal. 
The official container tool would at least expect the following format: - for tool name: exec - args: { "cmd":List[str] "command to execute", "workdir":optional[str] "current working directory", "env":optional[object/dict] "environment variables", "session_name":optional[str] "session name", "timeout":optional[int] "timeout in seconds", "user":optional[str] "user name", } """ self.called_tools.add("container") if isinstance(tool_session, Tool): return await tool_session.get_result_parsable_context(self) # tool_name = last_msg.recipient.split(".")[1].split(" ")[0] if envs.VLLM_TOOL_JSON_ERROR_AUTOMATIC_RETRY: try: args = json.loads(last_msg.arguments) except json.JSONDecodeError as e: return _create_json_parse_error_messages(last_msg, e) else: args = json.loads(last_msg.arguments) result = await tool_session.call_tool("exec", args) result_str = result.content[0].text message = ResponseFunctionToolCallOutputItem( id=f"fco_{random_uuid()}", type="function_call_output", call_id=f"call_{random_uuid()}", output=result_str, status="completed", ) return [message] async def call_tool(self) -> list[ResponseInputOutputItem]: if not self.parser.response_messages: return [] last_msg = self.parser.response_messages[-1] # change this to a mcp_ function call last_msg.id = f"{MCP_PREFIX}{random_uuid()}" self.parser.response_messages[-1] = last_msg if last_msg.name == "code_interpreter": return await self.call_python_tool(self._tool_sessions["python"], last_msg) elif last_msg.name == "web_search_preview": return await self.call_search_tool(self._tool_sessions["browser"], last_msg) elif last_msg.name.startswith("container"): return await self.call_container_tool( self._tool_sessions["container"], last_msg ) return [] def render_for_completion(self): raise NotImplementedError("Should not be called.") async def init_tool_sessions( self, tool_server: ToolServer | None, exit_stack: AsyncExitStack, request_id: str, mcp_tools: dict[str, Mcp], ): if tool_server: for tool_name in 
self.available_tools: if tool_name in self._tool_sessions: continue tool_type = _map_tool_name_to_tool_type(tool_name) headers = ( mcp_tools[tool_type].headers if tool_type in mcp_tools else None ) tool_session = await exit_stack.enter_async_context( tool_server.new_session(tool_name, request_id, headers) ) self._tool_sessions[tool_name] = tool_session exit_stack.push_async_exit(self.cleanup_session) async def cleanup_session(self, *args, **kwargs) -> None: """Can be used as coro to used in __aexit__""" async def cleanup_tool_session(tool_session): if not isinstance(tool_session, Tool): logger.info( "Cleaning up tool session for %s", tool_session._client_info ) with contextlib.suppress(Exception): await tool_session.call_tool("cleanup_session", {}) await asyncio.gather( *( cleanup_tool_session(self._tool_sessions[tool]) for tool in self.called_tools ) ) class HarmonyContext(ConversationContext): def __init__( self, messages: list, available_tools: list[str], ): self._messages = messages self.finish_reason: str | None = None self.available_tools = available_tools self._tool_sessions: dict[str, ClientSession | Tool] = {} self.called_tools: set[str] = set() self.parser = get_streamable_parser_for_assistant() self.num_init_messages = len(messages) self.num_prompt_tokens = 0 self.num_output_tokens = 0 self.num_cached_tokens = 0 self.num_reasoning_tokens = 0 self.num_tool_output_tokens = 0 # Turn tracking - replaces multiple individual tracking variables self.current_turn_metrics = TurnMetrics() # Track metrics for all turns self.all_turn_metrics: list[TurnMetrics] = [] self.is_first_turn = True self.first_tok_of_message = True # For streaming support def _update_num_reasoning_tokens(self): # Count all analysis and commentary channels as reasoning tokens if self.parser.current_channel in {"analysis", "commentary"}: self.num_reasoning_tokens += 1 def append_output(self, output: RequestOutput) -> None: output_token_ids = output.outputs[0].token_ids self.parser = 
get_streamable_parser_for_assistant() for token_id in output_token_ids: self.parser.process(token_id) # Check if the current token is part of reasoning content self._update_num_reasoning_tokens() self._update_prefill_token_usage(output) self._update_decode_token_usage(output) # Append current turn to all turn list for next turn's calculations self.all_turn_metrics.append(self.current_turn_metrics.copy()) self.current_turn_metrics.reset() # append_output is called only once before tool calling # in non-streaming case # so we can append all the parser messages to _messages output_msgs = self.parser.messages # The responses finish reason is set in the last message self.finish_reason = output.outputs[0].finish_reason self._messages.extend(output_msgs) def append_tool_output(self, output: list[Message]) -> None: output_msgs = output self._messages.extend(output_msgs) def _update_prefill_token_usage(self, output: RequestOutput) -> None: """Update token usage statistics for the prefill phase of generation. The prefill phase processes the input prompt tokens. This method: 1. Counts the prompt tokens for this turn 2. Calculates tool output tokens for multi-turn conversations 3. Updates cached token counts 4. Tracks state for next turn calculations Tool output tokens are calculated as: current_prompt_tokens - last_turn_prompt_tokens - last_turn_output_tokens This represents tokens added between turns (typically tool responses). 
Args: output: The RequestOutput containing prompt token information """ if output.prompt_token_ids is not None: this_turn_input_tokens = len(output.prompt_token_ids) else: this_turn_input_tokens = 0 logger.error("RequestOutput appended contains no prompt_token_ids.") # Update current turn input tokens self.current_turn_metrics.input_tokens = this_turn_input_tokens self.num_prompt_tokens += this_turn_input_tokens # Calculate tool tokens (except on first turn) if self.is_first_turn: self.is_first_turn = False else: previous_turn = self.all_turn_metrics[-1] # start counting tool after first turn # tool tokens = this turn prefill - last turn prefill - # last turn decode this_turn_tool_tokens = ( self.current_turn_metrics.input_tokens - previous_turn.input_tokens - previous_turn.output_tokens ) # Handle negative tool token counts (shouldn't happen in normal # cases) if this_turn_tool_tokens < 0: logger.error( "Negative tool output tokens calculated: %d " "(current_input=%d, previous_input=%d, " "previous_output=%d). Setting to 0.", this_turn_tool_tokens, self.current_turn_metrics.input_tokens, previous_turn.input_tokens, previous_turn.output_tokens, ) this_turn_tool_tokens = 0 self.num_tool_output_tokens += this_turn_tool_tokens self.current_turn_metrics.tool_output_tokens = this_turn_tool_tokens # Update cached tokens num_cached_token = output.num_cached_tokens if num_cached_token is not None: self.num_cached_tokens += num_cached_token self.current_turn_metrics.cached_input_tokens = num_cached_token def _update_decode_token_usage(self, output: RequestOutput) -> int: """Update token usage statistics for the decode phase of generation. The decode phase processes the generated output tokens. This method: 1. Counts output tokens from all completion outputs 2. Updates the total output token count 3. Tracks tokens generated in the current turn In streaming mode, this is called for each token generated. In non-streaming mode, this is called once with all output tokens. 
Args: output: The RequestOutput containing generated token information Returns: int: Number of output tokens processed in this call """ updated_output_token_count = 0 if output.outputs: for completion_output in output.outputs: # only keep last round updated_output_token_count += len(completion_output.token_ids) self.num_output_tokens += updated_output_token_count self.current_turn_metrics.output_tokens += updated_output_token_count return updated_output_token_count @property def messages(self) -> list: return self._messages def need_builtin_tool_call(self) -> bool: last_msg = self.messages[-1] recipient = last_msg.recipient return recipient is not None and ( recipient.startswith("browser.") or recipient.startswith("python") or recipient.startswith("container.") ) async def call_tool(self) -> list[Message]: if not self.messages: return [] last_msg = self.messages[-1] recipient = last_msg.recipient if recipient is not None: if recipient.startswith("browser."): return await self.call_search_tool( self._tool_sessions["browser"], last_msg ) elif recipient.startswith("python"): return await self.call_python_tool( self._tool_sessions["python"], last_msg ) elif recipient.startswith("container."): return await self.call_container_tool( self._tool_sessions["container"], last_msg ) raise ValueError("No tool call found") def render_for_completion(self) -> list[int]: return render_for_completion(self.messages) async def call_search_tool( self, tool_session: Union["ClientSession", Tool], last_msg: Message ) -> list[Message]: self.called_tools.add("browser") if isinstance(tool_session, Tool): return await tool_session.get_result(self) tool_name = last_msg.recipient.split(".")[1] if envs.VLLM_TOOL_JSON_ERROR_AUTOMATIC_RETRY: try: args = json.loads(last_msg.content[0].text) except json.JSONDecodeError as e: return _create_json_parse_error_messages(last_msg, e) else: args = json.loads(last_msg.content[0].text) result = await tool_session.call_tool(tool_name, args) result_str = 
result.content[0].text content = TextContent(text=result_str) author = Author(role=Role.TOOL, name=last_msg.recipient) return [ Message( author=author, content=[content], recipient=Role.ASSISTANT, channel=last_msg.channel, ) ] async def call_python_tool( self, tool_session: Union["ClientSession", Tool], last_msg: Message ) -> list[Message]: self.called_tools.add("python") if isinstance(tool_session, Tool): return await tool_session.get_result(self) param = { "code": last_msg.content[0].text, } result = await tool_session.call_tool("python", param) result_str = result.content[0].text content = TextContent(text=result_str) author = Author(role=Role.TOOL, name="python") return [ Message( author=author, content=[content], channel=last_msg.channel, recipient=Role.ASSISTANT, ) ] async def init_tool_sessions( self, tool_server: ToolServer | None, exit_stack: AsyncExitStack, request_id: str, mcp_tools: dict[str, Mcp], ): if tool_server: for tool_name in self.available_tools: if tool_name not in self._tool_sessions: tool_type = _map_tool_name_to_tool_type(tool_name) headers = ( mcp_tools[tool_type].headers if tool_type in mcp_tools else None ) tool_session = await exit_stack.enter_async_context( tool_server.new_session(tool_name, request_id, headers) ) self._tool_sessions[tool_name] = tool_session exit_stack.push_async_exit(self.cleanup_session) async def call_container_tool( self, tool_session: Union["ClientSession", Tool], last_msg: Message ) -> list[Message]: """ Call container tool. Expect this to be run in a stateful docker with command line terminal. 
The official container tool would at least expect the following format: - for tool name: exec - args: { "cmd":List[str] "command to execute", "workdir":optional[str] "current working directory", "env":optional[object/dict] "environment variables", "session_name":optional[str] "session name", "timeout":optional[int] "timeout in seconds", "user":optional[str] "user name", } """ self.called_tools.add("container") if isinstance(tool_session, Tool): return await tool_session.get_result(self) tool_name = last_msg.recipient.split(".")[1].split(" ")[0] if envs.VLLM_TOOL_JSON_ERROR_AUTOMATIC_RETRY: try: args = json.loads(last_msg.content[0].text) except json.JSONDecodeError as e: return _create_json_parse_error_messages(last_msg, e) else: args = json.loads(last_msg.content[0].text) result = await tool_session.call_tool(tool_name, args) result_str = result.content[0].text content = TextContent(text=result_str) author = Author(role=Role.TOOL, name=last_msg.recipient) return [ Message( author=author, content=[content], recipient=Role.ASSISTANT, channel=last_msg.channel, ) ] async def cleanup_session(self, *args, **kwargs) -> None: """Can be used as coro to used in __aexit__""" async def cleanup_tool_session(tool_session): if not isinstance(tool_session, Tool): logger.info( "Cleaning up tool session for %s", tool_session._client_info ) with contextlib.suppress(Exception): await tool_session.call_tool("cleanup_session", {}) await asyncio.gather( *( cleanup_tool_session(self._tool_sessions[tool]) for tool in self.called_tools ) ) class StreamingHarmonyContext(HarmonyContext): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.last_output = None self.parser = get_streamable_parser_for_assistant() self.encoding = get_encoding() self.last_tok = None self.first_tok_of_message = True @property def messages(self) -> list: return self._messages def append_output(self, output: RequestOutput) -> None: # append_output is called for each output token in streaming 
case, # so we only want to add the prompt tokens once for each message. if self.first_tok_of_message: self._update_prefill_token_usage(output) # Reset self.first_tok_of_message if needed: # if the current token is the last one of the current message # (finished=True), then the next token processed will mark the # beginning of a new message self.first_tok_of_message = output.finished for tok in output.outputs[0].token_ids: self.parser.process(tok) self._update_decode_token_usage(output) # For streaming, update previous turn when message is complete if output.finished: self.all_turn_metrics.append(self.current_turn_metrics.copy()) self.current_turn_metrics.reset() # Check if the current token is part of reasoning content self._update_num_reasoning_tokens() self.last_tok = tok if len(self._messages) - self.num_init_messages < len(self.parser.messages): self._messages.extend( self.parser.messages[len(self._messages) - self.num_init_messages :] ) def append_tool_output(self, output: list[Message]) -> None: # Handle the case of tool output in direct message format assert len(output) == 1, "Tool output should be a single message" msg = output[0]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/chat_utils.py
vllm/entrypoints/chat_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import inspect import json from abc import ABC, abstractmethod from collections import Counter, defaultdict, deque from collections.abc import Awaitable, Callable, Iterable from functools import cached_property, lru_cache, partial from pathlib import Path from typing import TYPE_CHECKING, Any, Generic, Literal, TypeAlias, TypeVar, cast import jinja2 import jinja2.ext import jinja2.meta import jinja2.nodes import jinja2.parser import jinja2.sandbox import transformers.utils.chat_template_utils as hf_chat_utils from openai.types.chat import ( ChatCompletionAssistantMessageParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartInputAudioParam, ChatCompletionContentPartRefusalParam, ChatCompletionContentPartTextParam, ChatCompletionFunctionToolParam, ChatCompletionMessageToolCallParam, ChatCompletionToolMessageParam, ) from openai.types.chat import ( ChatCompletionContentPartParam as OpenAIChatCompletionContentPartParam, ) from openai.types.chat import ( ChatCompletionMessageParam as OpenAIChatCompletionMessageParam, ) from openai.types.chat.chat_completion_content_part_input_audio_param import InputAudio from openai.types.responses import ResponseInputImageParam from openai_harmony import Message as OpenAIHarmonyMessage from PIL import Image from pydantic import BaseModel, ConfigDict, TypeAdapter from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast, ProcessorMixin # pydantic needs the TypedDict from typing_extensions from typing_extensions import Required, TypedDict from vllm import envs from vllm.config import ModelConfig from vllm.logger import init_logger from vllm.model_executor.models import SupportsMultiModal from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalDataDict, MultiModalUUIDDict from vllm.multimodal.utils import MEDIA_CONNECTOR_REGISTRY, MediaConnector from vllm.tokenizers import TokenizerLike 
from vllm.transformers_utils.chat_templates import get_chat_template_fallback_path from vllm.transformers_utils.processor import cached_get_processor from vllm.utils import random_uuid from vllm.utils.collection_utils import is_list_of from vllm.utils.func_utils import supports_kw from vllm.utils.import_utils import LazyLoader if TYPE_CHECKING: import torch from vllm.tokenizers.mistral import MistralTokenizer else: torch = LazyLoader("torch", globals(), "torch") logger = init_logger(__name__) class ChatTemplateResolutionError(ValueError): """Raised when chat template resolution fails. This is a subclass of ValueError for backward compatibility with existing exception handlers. """ MODALITY_PLACEHOLDERS_MAP = { "image": "<##IMAGE##>", "audio": "<##AUDIO##>", "video": "<##VIDEO##>", } class AudioURL(TypedDict, total=False): url: Required[str] """ Either a URL of the audio or a data URL with base64 encoded audio data. """ class ChatCompletionContentPartAudioParam(TypedDict, total=False): audio_url: Required[AudioURL] type: Required[Literal["audio_url"]] """The type of the content part.""" class ChatCompletionContentPartImageEmbedsParam(TypedDict, total=False): image_embeds: str | dict[str, str] | None """ The image embeddings. It can be either: - A single base64 string. - A dictionary where each value is a base64 string. """ type: Required[Literal["image_embeds"]] """The type of the content part.""" uuid: str | None """ User-provided UUID of a media. User must guarantee that it is properly generated and unique for different medias. """ class ChatCompletionContentPartAudioEmbedsParam(TypedDict, total=False): audio_embeds: str | dict[str, str] | None """ The audio embeddings. It can be either: - A single base64 string representing a serialized torch tensor. - A dictionary where each value is a base64 string. """ type: Required[Literal["audio_embeds"]] """The type of the content part.""" uuid: str | None """ User-provided UUID of a media. 
User must guarantee that it is properly generated and unique for different medias. """ class VideoURL(TypedDict, total=False): url: Required[str] """ Either a URL of the video or a data URL with base64 encoded video data. """ class ChatCompletionContentPartVideoParam(TypedDict, total=False): video_url: Required[VideoURL] type: Required[Literal["video_url"]] """The type of the content part.""" class PILImage(BaseModel): """ A PIL.Image.Image object. """ image_pil: Image.Image model_config = ConfigDict(arbitrary_types_allowed=True) class CustomChatCompletionContentPILImageParam(TypedDict, total=False): """A simpler version of the param that only accepts a PIL image. Example: { "image_pil": ImageAsset('cherry_blossom').pil_image } """ image_pil: PILImage | None uuid: str | None """ User-provided UUID of a media. User must guarantee that it is properly generated and unique for different medias. """ class CustomChatCompletionContentSimpleImageParam(TypedDict, total=False): """A simpler version of the param that only accepts a plain image_url. This is supported by OpenAI API, although it is not documented. Example: { "image_url": "https://example.com/image.jpg" } """ image_url: str | None uuid: str | None """ User-provided UUID of a media. User must guarantee that it is properly generated and unique for different medias. """ class CustomChatCompletionContentSimpleAudioParam(TypedDict, total=False): """A simpler version of the param that only accepts a plain audio_url. Example: { "audio_url": "https://example.com/audio.mp3" } """ audio_url: str | None class CustomChatCompletionContentSimpleVideoParam(TypedDict, total=False): """A simpler version of the param that only accepts a plain audio_url. Example: { "video_url": "https://example.com/video.mp4" } """ video_url: str | None uuid: str | None """ User-provided UUID of a media. User must guarantee that it is properly generated and unique for different medias. 
""" class CustomThinkCompletionContentParam(TypedDict, total=False): """A Think Completion Content Param that accepts a plain text and a boolean. Example: { "thinking": "I am thinking about the answer", "closed": True, "type": "thinking" } """ thinking: Required[str] """The thinking content.""" closed: bool """Whether the thinking is closed.""" type: Required[Literal["thinking"]] """The thinking type.""" ChatCompletionContentPartParam: TypeAlias = ( OpenAIChatCompletionContentPartParam | ChatCompletionContentPartAudioParam | ChatCompletionContentPartInputAudioParam | ChatCompletionContentPartVideoParam | ChatCompletionContentPartRefusalParam | CustomChatCompletionContentPILImageParam | CustomChatCompletionContentSimpleImageParam | ChatCompletionContentPartImageEmbedsParam | ChatCompletionContentPartAudioEmbedsParam | CustomChatCompletionContentSimpleAudioParam | CustomChatCompletionContentSimpleVideoParam | str | CustomThinkCompletionContentParam ) class CustomChatCompletionMessageParam(TypedDict, total=False): """Enables custom roles in the Chat Completion API.""" role: Required[str] """The role of the message's author.""" content: str | list[ChatCompletionContentPartParam] """The contents of the message.""" name: str """An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
""" tool_call_id: str | None """Tool call that this message is responding to.""" tool_calls: Iterable[ChatCompletionMessageToolCallParam] | None """The tool calls generated by the model, such as function calls.""" reasoning: str | None """The reasoning content for interleaved thinking.""" tools: list[ChatCompletionFunctionToolParam] | None """The tools for developer role.""" ChatCompletionMessageParam: TypeAlias = ( OpenAIChatCompletionMessageParam | CustomChatCompletionMessageParam | OpenAIHarmonyMessage ) # TODO: Make fields ReadOnly once mypy supports it class ConversationMessage(TypedDict, total=False): role: Required[str] """The role of the message's author.""" content: str | None | list[dict[str, str]] """The contents of the message""" tool_call_id: str | None """Tool call that this message is responding to.""" name: str | None """The name of the function to call""" tool_calls: Iterable[ChatCompletionMessageToolCallParam] | None """The tool calls generated by the model, such as function calls.""" reasoning: str | None """The reasoning content for interleaved thinking.""" reasoning_content: str | None """Deprecated: The reasoning content for interleaved thinking.""" tools: list[ChatCompletionFunctionToolParam] | None """The tools for developer role.""" # Passed in by user ChatTemplateContentFormatOption = Literal["auto", "string", "openai"] # Used internally _ChatTemplateContentFormat = Literal["string", "openai"] def _is_var_access(node: jinja2.nodes.Node, varname: str) -> bool: if isinstance(node, jinja2.nodes.Name): return node.ctx == "load" and node.name == varname return False def _is_attr_access(node: jinja2.nodes.Node, varname: str, key: str) -> bool: if isinstance(node, jinja2.nodes.Getitem): return ( _is_var_access(node.node, varname) and isinstance(node.arg, jinja2.nodes.Const) and node.arg.value == key ) if isinstance(node, jinja2.nodes.Getattr): return _is_var_access(node.node, varname) and node.attr == key return False def _is_var_or_elems_access( 
node: jinja2.nodes.Node, varname: str, key: str | None = None, ) -> bool: if isinstance(node, jinja2.nodes.Filter): return node.node is not None and _is_var_or_elems_access( node.node, varname, key ) if isinstance(node, jinja2.nodes.Test): return _is_var_or_elems_access(node.node, varname, key) if isinstance(node, jinja2.nodes.Getitem) and isinstance( node.arg, jinja2.nodes.Slice ): return _is_var_or_elems_access(node.node, varname, key) return _is_attr_access(node, varname, key) if key else _is_var_access(node, varname) def _iter_nodes_assign_var_or_elems(root: jinja2.nodes.Node, varname: str): # Global variable that is implicitly defined at the root yield root, varname # Iterative BFS related_varnames = deque([varname]) while related_varnames: related_varname = related_varnames.popleft() for assign_ast in root.find_all(jinja2.nodes.Assign): lhs = assign_ast.target rhs = assign_ast.node if _is_var_or_elems_access(rhs, related_varname): assert isinstance(lhs, jinja2.nodes.Name) yield assign_ast, lhs.name # Avoid infinite looping for self-assignment if lhs.name != related_varname: related_varnames.append(lhs.name) # NOTE: The proper way to handle this is to build a CFG so that we can handle # the scope in which each variable is defined, but that is too complicated def _iter_nodes_assign_messages_item(root: jinja2.nodes.Node): messages_varnames = [ varname for _, varname in _iter_nodes_assign_var_or_elems(root, "messages") ] # Search for {%- for message in messages -%} loops for loop_ast in root.find_all(jinja2.nodes.For): loop_iter = loop_ast.iter loop_target = loop_ast.target for varname in messages_varnames: if _is_var_or_elems_access(loop_iter, varname): assert isinstance(loop_target, jinja2.nodes.Name) yield loop_ast, loop_target.name break def _iter_nodes_assign_content_item(root: jinja2.nodes.Node): message_varnames = [ varname for _, varname in _iter_nodes_assign_messages_item(root) ] # Search for {%- for content in message['content'] -%} loops for loop_ast 
in root.find_all(jinja2.nodes.For): loop_iter = loop_ast.iter loop_target = loop_ast.target for varname in message_varnames: if _is_var_or_elems_access(loop_iter, varname, "content"): assert isinstance(loop_target, jinja2.nodes.Name) yield loop_ast, loop_target.name break def _try_extract_ast(chat_template: str) -> jinja2.nodes.Template | None: try: jinja_compiled = hf_chat_utils._compile_jinja_template(chat_template) return jinja_compiled.environment.parse(chat_template) except Exception: logger.exception("Error when compiling Jinja template") return None @lru_cache(maxsize=32) def _detect_content_format( chat_template: str, *, default: _ChatTemplateContentFormat, ) -> _ChatTemplateContentFormat: jinja_ast = _try_extract_ast(chat_template) if jinja_ast is None: return default try: next(_iter_nodes_assign_content_item(jinja_ast)) except StopIteration: return "string" except Exception: logger.exception("Error when parsing AST of Jinja template") return default else: return "openai" def resolve_mistral_chat_template( chat_template: str | None, **kwargs: Any, ) -> str | None: if chat_template is not None or kwargs.get("chat_template_kwargs") is not None: raise ValueError( "'chat_template' or 'chat_template_kwargs' cannot be overridden " "for mistral tokenizer." ) return None _PROCESSOR_CHAT_TEMPLATES = dict[tuple[str, bool], str | None]() """ Used in `_try_get_processor_chat_template` to avoid calling `cached_get_processor` again if the processor fails to be loaded. This is needed because `lru_cache` does not cache when an exception happens. 
""" def _try_get_processor_chat_template( tokenizer: PreTrainedTokenizer | PreTrainedTokenizerFast, model_config: ModelConfig, ) -> str | None: cache_key = (tokenizer.name_or_path, model_config.trust_remote_code) if cache_key in _PROCESSOR_CHAT_TEMPLATES: return _PROCESSOR_CHAT_TEMPLATES[cache_key] try: processor = cached_get_processor( tokenizer.name_or_path, processor_cls=( PreTrainedTokenizer, PreTrainedTokenizerFast, ProcessorMixin, ), trust_remote_code=model_config.trust_remote_code, ) if ( isinstance(processor, ProcessorMixin) and hasattr(processor, "chat_template") and (chat_template := processor.chat_template) is not None ): _PROCESSOR_CHAT_TEMPLATES[cache_key] = chat_template return chat_template except Exception: logger.debug( "Failed to load AutoProcessor chat template for %s", tokenizer.name_or_path, exc_info=True, ) _PROCESSOR_CHAT_TEMPLATES[cache_key] = None return None def resolve_hf_chat_template( tokenizer: PreTrainedTokenizer | PreTrainedTokenizerFast, chat_template: str | None, tools: list[dict[str, Any]] | None, *, model_config: ModelConfig, ) -> str | None: # 1st priority: The given chat template if chat_template is not None: return chat_template # 2nd priority: AutoProcessor chat template, unless tool calling is enabled if tools is None: chat_template = _try_get_processor_chat_template(tokenizer, model_config) if chat_template is not None: return chat_template # 3rd priority: AutoTokenizer chat template try: return tokenizer.get_chat_template(chat_template, tools=tools) except Exception: logger.debug( "Failed to load AutoTokenizer chat template for %s", tokenizer.name_or_path, exc_info=True, ) # 4th priority: Predefined fallbacks path = get_chat_template_fallback_path( model_type=model_config.hf_config.model_type, tokenizer_name_or_path=model_config.tokenizer, ) if path is not None: logger.info_once( "Loading chat template fallback for %s as there isn't one " "defined on HF Hub.", tokenizer.name_or_path, ) chat_template = 
load_chat_template(path) else: logger.debug_once( "There is no chat template fallback for %s", tokenizer.name_or_path ) return chat_template def _resolve_chat_template_content_format( chat_template: str | None, tools: list[dict[str, Any]] | None, tokenizer: TokenizerLike | None, *, model_config: ModelConfig, ) -> _ChatTemplateContentFormat: if isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)): hf_chat_template = resolve_hf_chat_template( tokenizer, chat_template=chat_template, tools=tools, model_config=model_config, ) else: hf_chat_template = None jinja_text = ( hf_chat_template if isinstance(hf_chat_template, str) else load_chat_template(chat_template, is_literal=True) ) detected_format = ( "string" if jinja_text is None else _detect_content_format(jinja_text, default="string") ) return detected_format @lru_cache def _log_chat_template_content_format( chat_template: str | None, given_format: ChatTemplateContentFormatOption, detected_format: ChatTemplateContentFormatOption, ): logger.info( "Detected the chat template content format to be '%s'. " "You can set `--chat-template-content-format` to override this.", detected_format, ) if given_format != "auto" and given_format != detected_format: logger.warning( "You specified `--chat-template-content-format %s` " "which is different from the detected format '%s'. 
" "If our automatic detection is incorrect, please consider " "opening a GitHub issue so that we can improve it: " "https://github.com/vllm-project/vllm/issues/new/choose", given_format, detected_format, ) def resolve_chat_template_content_format( chat_template: str | None, tools: list[dict[str, Any]] | None, given_format: ChatTemplateContentFormatOption, tokenizer: TokenizerLike | None, *, model_config: ModelConfig, ) -> _ChatTemplateContentFormat: if given_format != "auto": return given_format detected_format = _resolve_chat_template_content_format( chat_template, tools, tokenizer, model_config=model_config, ) _log_chat_template_content_format( chat_template, given_format=given_format, detected_format=detected_format, ) return detected_format ModalityStr = Literal["image", "audio", "video", "image_embeds", "audio_embeds"] _T = TypeVar("_T") def _extract_embeds(tensors: list[torch.Tensor]): if len(tensors) == 0: return tensors if len(tensors) == 1: tensors[0]._is_single_item = True # type: ignore return tensors[0] # To keep backwards compatibility for single item input first_shape = tensors[0].shape if all(t.shape == first_shape for t in tensors): return torch.stack(tensors) return tensors def _get_embeds_data(items_by_modality: dict[str, list[Any]], modality: str): embeds_key = f"{modality}_embeds" embeds = items_by_modality[embeds_key] if len(embeds) == 0: return embeds if is_list_of(embeds, torch.Tensor): return _extract_embeds(embeds) if is_list_of(embeds, dict): if not embeds: return {} first_keys = set(embeds[0].keys()) if any(set(item.keys()) != first_keys for item in embeds[1:]): raise ValueError( "All dictionaries in the list of embeddings must have the same keys." 
) return {k: _extract_embeds([item[k] for item in embeds]) for k in first_keys} return embeds class BaseMultiModalItemTracker(ABC, Generic[_T]): """ Tracks multi-modal items in a given request and ensures that the number of multi-modal items in a given request does not exceed the configured maximum per prompt. """ def __init__(self, model_config: ModelConfig): super().__init__() self._model_config = model_config self._items_by_modality = defaultdict[str, list[_T | None]](list) self._uuids_by_modality = defaultdict[str, list[str | None]](list) @property def model_config(self) -> ModelConfig: return self._model_config @cached_property def model_cls(self) -> type[SupportsMultiModal]: from vllm.model_executor.model_loader import get_model_cls model_cls = get_model_cls(self.model_config) return cast(type[SupportsMultiModal], model_cls) @property def allowed_local_media_path(self): return self._model_config.allowed_local_media_path @property def allowed_media_domains(self): return self._model_config.allowed_media_domains @property def mm_registry(self): return MULTIMODAL_REGISTRY @cached_property def mm_processor(self): return self.mm_registry.create_processor(self.model_config) def add( self, modality: ModalityStr, item: _T | None, uuid: str | None = None, ) -> str | None: """ Add a multi-modal item to the current prompt and returns the placeholder string to use, if any. An optional uuid can be added which serves as a unique identifier of the media. 
""" input_modality = modality.replace("_embeds", "") num_items = len(self._items_by_modality[modality]) + 1 self.mm_processor.validate_num_items(input_modality, num_items) self._items_by_modality[modality].append(item) self._uuids_by_modality[modality].append(uuid) return self.model_cls.get_placeholder_str(modality, num_items) def all_mm_uuids(self) -> MultiModalUUIDDict | None: if not self._items_by_modality: return None uuids_by_modality = dict(self._uuids_by_modality) if "image" in uuids_by_modality and "image_embeds" in uuids_by_modality: raise ValueError("Mixing raw image and embedding inputs is not allowed") if "audio" in uuids_by_modality and "audio_embeds" in uuids_by_modality: raise ValueError("Mixing raw audio and embedding inputs is not allowed") mm_uuids = {} if "image_embeds" in uuids_by_modality: mm_uuids["image"] = uuids_by_modality["image_embeds"] if "image" in uuids_by_modality: mm_uuids["image"] = uuids_by_modality["image"] # UUIDs of images if "audio_embeds" in uuids_by_modality: mm_uuids["audio"] = uuids_by_modality["audio_embeds"] if "audio" in uuids_by_modality: mm_uuids["audio"] = uuids_by_modality["audio"] # UUIDs of audios if "video" in uuids_by_modality: mm_uuids["video"] = uuids_by_modality["video"] # UUIDs of videos return mm_uuids @abstractmethod def create_parser(self) -> "BaseMultiModalContentParser": raise NotImplementedError class MultiModalItemTracker(BaseMultiModalItemTracker[object]): def all_mm_data(self) -> MultiModalDataDict | None: if not self._items_by_modality: return None items_by_modality = dict(self._items_by_modality) if "image" in items_by_modality and "image_embeds" in items_by_modality: raise ValueError("Mixing raw image and embedding inputs is not allowed") if "audio" in items_by_modality and "audio_embeds" in items_by_modality: raise ValueError("Mixing raw audio and embedding inputs is not allowed") mm_inputs = {} if "image_embeds" in items_by_modality: mm_inputs["image"] = _get_embeds_data(items_by_modality, 
"image") if "image" in items_by_modality: mm_inputs["image"] = items_by_modality["image"] # A list of images if "audio_embeds" in items_by_modality: mm_inputs["audio"] = _get_embeds_data(items_by_modality, "audio") if "audio" in items_by_modality: mm_inputs["audio"] = items_by_modality["audio"] # A list of audios if "video" in items_by_modality: mm_inputs["video"] = items_by_modality["video"] # A list of videos return mm_inputs def create_parser(self) -> "BaseMultiModalContentParser": return MultiModalContentParser(self) class AsyncMultiModalItemTracker(BaseMultiModalItemTracker[Awaitable[object]]): async def all_mm_data(self) -> MultiModalDataDict | None: if not self._items_by_modality: return None coros_by_modality = { modality: [item or asyncio.sleep(0) for item in items] for modality, items in self._items_by_modality.items() } items_by_modality: dict[str, list[object | None]] = { modality: await asyncio.gather(*coros) for modality, coros in coros_by_modality.items() } if "image" in items_by_modality and "image_embeds" in items_by_modality: raise ValueError("Mixing raw image and embedding inputs is not allowed") if "audio" in items_by_modality and "audio_embeds" in items_by_modality: raise ValueError("Mixing raw audio and embedding inputs is not allowed") mm_inputs = {} if "image_embeds" in items_by_modality: mm_inputs["image"] = _get_embeds_data(items_by_modality, "image") if "image" in items_by_modality: mm_inputs["image"] = items_by_modality["image"] # A list of images if "audio_embeds" in items_by_modality: mm_inputs["audio"] = _get_embeds_data(items_by_modality, "audio") if "audio" in items_by_modality: mm_inputs["audio"] = items_by_modality["audio"] # A list of audios if "video" in items_by_modality: mm_inputs["video"] = items_by_modality["video"] # A list of videos return mm_inputs def create_parser(self) -> "BaseMultiModalContentParser": return AsyncMultiModalContentParser(self) class BaseMultiModalContentParser(ABC): def __init__(self) -> None: 
super().__init__() # stores model placeholders list with corresponding # general MM placeholder: # { # "<##IMAGE##>": ["<image>", "<image>", "<image>"], # "<##AUDIO##>": ["<audio>", "<audio>"] # } self._placeholder_storage: dict[str, list] = defaultdict(list) def _add_placeholder(self, modality: ModalityStr, placeholder: str | None): mod_placeholder = MODALITY_PLACEHOLDERS_MAP[modality] if placeholder: self._placeholder_storage[mod_placeholder].append(placeholder) def mm_placeholder_storage(self) -> dict[str, list]: return dict(self._placeholder_storage) @abstractmethod def parse_image(self, image_url: str | None, uuid: str | None = None) -> None: raise NotImplementedError @abstractmethod def parse_image_embeds( self, image_embeds: str | dict[str, str] | None, uuid: str | None = None, ) -> None: raise NotImplementedError @abstractmethod def parse_image_pil( self, image_pil: Image.Image | None, uuid: str | None = None ) -> None: raise NotImplementedError @abstractmethod def parse_audio(self, audio_url: str | None, uuid: str | None = None) -> None: raise NotImplementedError @abstractmethod def parse_input_audio( self, input_audio: InputAudio | None, uuid: str | None = None ) -> None: raise NotImplementedError @abstractmethod def parse_audio_embeds( self, audio_embeds: str | dict[str, str] | None, uuid: str | None = None, ) -> None: raise NotImplementedError @abstractmethod def parse_video(self, video_url: str | None, uuid: str | None = None) -> None: raise NotImplementedError class MultiModalContentParser(BaseMultiModalContentParser): def __init__(self, tracker: MultiModalItemTracker) -> None: super().__init__() self._tracker = tracker multimodal_config = self._tracker.model_config.multimodal_config media_io_kwargs = getattr(multimodal_config, "media_io_kwargs", None) self._connector: MediaConnector = MEDIA_CONNECTOR_REGISTRY.load( envs.VLLM_MEDIA_CONNECTOR, media_io_kwargs=media_io_kwargs, allowed_local_media_path=tracker.allowed_local_media_path, 
allowed_media_domains=tracker.allowed_media_domains, ) @property def model_config(self) -> ModelConfig: return self._tracker.model_config def parse_image(self, image_url: str | None, uuid: str | None = None) -> None: image = self._connector.fetch_image(image_url) if image_url else None placeholder = self._tracker.add("image", image, uuid) self._add_placeholder("image", placeholder) def parse_image_embeds( self, image_embeds: str | dict[str, str] | None, uuid: str | None = None, ) -> None: mm_config = self.model_config.get_multimodal_config() if not mm_config.enable_mm_embeds: raise ValueError( "You must set `--enable-mm-embeds` to input `image_embeds`" ) if isinstance(image_embeds, dict): embeds = { k: self._connector.fetch_image_embedding(v) for k, v in image_embeds.items() } placeholder = self._tracker.add("image_embeds", embeds, uuid) if isinstance(image_embeds, str): embedding = self._connector.fetch_image_embedding(image_embeds) placeholder = self._tracker.add("image_embeds", embedding, uuid) if image_embeds is None: placeholder = self._tracker.add("image_embeds", None, uuid) self._add_placeholder("image", placeholder) def parse_audio_embeds( self, audio_embeds: str | dict[str, str] | None, uuid: str | None = None, ) -> None: mm_config = self.model_config.get_multimodal_config() if not mm_config.enable_mm_embeds: raise ValueError( "You must set `--enable-mm-embeds` to input `audio_embeds`" ) if isinstance(audio_embeds, dict): embeds = { k: self._connector.fetch_audio_embedding(v) for k, v in audio_embeds.items() } placeholder = self._tracker.add("audio_embeds", embeds, uuid) elif isinstance(audio_embeds, str): embedding = self._connector.fetch_audio_embedding(audio_embeds) placeholder = self._tracker.add("audio_embeds", embedding, uuid) else: placeholder = self._tracker.add("audio_embeds", None, uuid) self._add_placeholder("audio", placeholder) def parse_image_pil( self, image_pil: Image.Image | None, uuid: str | None = None ) -> None: placeholder = 
self._tracker.add("image", image_pil, uuid) self._add_placeholder("image", placeholder) def parse_audio(self, audio_url: str | None, uuid: str | None = None) -> None: audio = self._connector.fetch_audio(audio_url) if audio_url else None placeholder = self._tracker.add("audio", audio, uuid) self._add_placeholder("audio", placeholder) def parse_input_audio( self, input_audio: InputAudio | None, uuid: str | None = None ) -> None: if input_audio: audio_data = input_audio.get("data", "") audio_format = input_audio.get("format", "") if audio_data: audio_url = f"data:audio/{audio_format};base64,{audio_data}" else:
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/api_server.py
vllm/entrypoints/api_server.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
NOTE: This API server is used only for demonstrating usage of AsyncEngine
and simple performance benchmarks. It is not intended for
production use.
For production use, we recommend using our OpenAI compatible server.
We are also not going to accept PRs modifying this file, please
change `vllm/entrypoints/openai/api_server.py` instead.
"""

import asyncio
import json
import ssl
from argparse import Namespace
from collections.abc import AsyncGenerator
from typing import Any

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, Response, StreamingResponse

import vllm.envs as envs
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.entrypoints.launcher import serve_http
from vllm.entrypoints.utils import with_cancellation
from vllm.logger import init_logger
from vllm.sampling_params import SamplingParams
from vllm.usage.usage_lib import UsageContext
from vllm.utils import random_uuid
from vllm.utils.argparse_utils import FlexibleArgumentParser
from vllm.utils.system_utils import set_ulimit
from vllm.version import __version__ as VLLM_VERSION

logger = init_logger("vllm.entrypoints.api_server")

# Module-level singletons: the FastAPI app and the engine handle.
# `engine` is populated by init_app() before the server starts serving.
app = FastAPI()
engine = None


@app.get("/health")
async def health() -> Response:
    """Health check."""
    return Response(status_code=200)


@app.post("/generate")
async def generate(request: Request) -> Response:
    """Generate completion for the request.

    The request should be a JSON object with the following fields:
    - prompt: the prompt to use for the generation.
    - stream: whether to stream the results or not.
    - other fields: the sampling parameters (See `SamplingParams` for details).
    """
    request_dict = await request.json()
    return await _generate(request_dict, raw_request=request)


@with_cancellation
async def _generate(request_dict: dict, raw_request: Request) -> Response:
    """Run generation for one request and return either a streaming or a
    JSON response.

    Pops `prompt` and `stream` out of `request_dict`; everything that
    remains is forwarded as keyword arguments to `SamplingParams`.
    """
    prompt = request_dict.pop("prompt")
    stream = request_dict.pop("stream", False)
    # Since SamplingParams is created fresh per request, safe to skip clone
    sampling_params = SamplingParams(**request_dict, skip_clone=True)
    request_id = random_uuid()

    assert engine is not None
    results_generator = engine.generate(prompt, sampling_params, request_id)

    # Streaming case
    async def stream_results() -> AsyncGenerator[bytes, None]:
        # Each yielded chunk is a newline-delimited JSON object containing
        # the prompt concatenated with every candidate output so far.
        async for request_output in results_generator:
            prompt = request_output.prompt
            assert prompt is not None
            text_outputs = [prompt + output.text for output in request_output.outputs]
            ret = {"text": text_outputs}
            yield (json.dumps(ret) + "\n").encode("utf-8")

    if stream:
        return StreamingResponse(stream_results())

    # Non-streaming case: drain the generator and keep only the final output.
    final_output = None
    try:
        async for request_output in results_generator:
            final_output = request_output
    except asyncio.CancelledError:
        # 499 is the (nginx-originated) convention for "client closed request".
        return Response(status_code=499)

    assert final_output is not None
    prompt = final_output.prompt
    assert prompt is not None
    text_outputs = [prompt + output.text for output in final_output.outputs]
    ret = {"text": text_outputs}
    return JSONResponse(ret)


def build_app(args: Namespace) -> FastAPI:
    """Configure the module-level FastAPI app from CLI args and return it."""
    global app

    app.root_path = args.root_path
    return app


async def init_app(
    args: Namespace,
    llm_engine: AsyncLLMEngine | None = None,
) -> FastAPI:
    """Build the app and initialize the global engine.

    If `llm_engine` is provided (e.g. from tests) it is used directly;
    otherwise a new AsyncLLMEngine is constructed from the CLI args.
    """
    app = build_app(args)

    global engine

    engine_args = AsyncEngineArgs.from_cli_args(args)
    engine = (
        llm_engine
        if llm_engine is not None
        else AsyncLLMEngine.from_engine_args(
            engine_args, usage_context=UsageContext.API_SERVER
        )
    )
    app.state.engine_client = engine
    app.state.args = args
    return app


async def run_server(
    args: Namespace, llm_engine: AsyncLLMEngine | None = None, **uvicorn_kwargs: Any
) -> None:
    """Initialize the app/engine and serve HTTP until shutdown.

    Extra keyword arguments are passed through to uvicorn via serve_http.
    """
    logger.info("vLLM API server version %s", VLLM_VERSION)
    logger.info("args: %s", args)

    # Raise the file-descriptor soft limit so many concurrent connections
    # don't exhaust it.
    set_ulimit()

    app = await init_app(args, llm_engine)
    assert engine is not None

    shutdown_task = await serve_http(
        app,
        sock=None,
        enable_ssl_refresh=args.enable_ssl_refresh,
        host=args.host,
        port=args.port,
        log_level=args.log_level,
        timeout_keep_alive=envs.VLLM_HTTP_TIMEOUT_KEEP_ALIVE,
        ssl_keyfile=args.ssl_keyfile,
        ssl_certfile=args.ssl_certfile,
        ssl_ca_certs=args.ssl_ca_certs,
        ssl_cert_reqs=args.ssl_cert_reqs,
        **uvicorn_kwargs,
    )
    await shutdown_task


if __name__ == "__main__":
    parser = FlexibleArgumentParser()
    parser.add_argument("--host", type=str, default=None)
    parser.add_argument("--port", type=parser.check_port, default=8000)
    parser.add_argument("--ssl-keyfile", type=str, default=None)
    parser.add_argument("--ssl-certfile", type=str, default=None)
    parser.add_argument(
        "--ssl-ca-certs", type=str, default=None, help="The CA certificates file"
    )
    parser.add_argument(
        "--enable-ssl-refresh",
        action="store_true",
        default=False,
        help="Refresh SSL Context when SSL certificate files change",
    )
    parser.add_argument(
        "--ssl-cert-reqs",
        type=int,
        default=int(ssl.CERT_NONE),
        help="Whether client certificate is required (see stdlib ssl module's)",
    )
    parser.add_argument(
        "--root-path",
        type=str,
        default=None,
        help="FastAPI root_path when app is behind a path based routing proxy",
    )
    parser.add_argument("--log-level", type=str, default="debug")
    parser = AsyncEngineArgs.add_cli_args(parser)
    args = parser.parse_args()

    asyncio.run(run_server(args))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/tool.py
vllm/entrypoints/tool.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
import os
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any

from openai.types.responses.response_function_tool_call_output_item import (
    ResponseFunctionToolCallOutputItem,
)
from openai_harmony import Author, Message, Role, TextContent

from vllm.logger import init_logger
from vllm.utils import random_uuid

if TYPE_CHECKING:
    # Avoid circular import.
    from vllm.entrypoints.context import ConversationContext

logger = init_logger(__name__)

# Minimum gpt_oss package version these tool wrappers are known to work with.
MIN_GPT_OSS_VERSION = "0.0.7"


def validate_gpt_oss_install():
    """
    Check if the gpt-oss is installed and its version is at least 0.0.7.
    If not, raise an ImportError.
    """
    # Imported lazily so the module loads even when these helpers are absent.
    from importlib.metadata import PackageNotFoundError, version

    from packaging.version import InvalidVersion, Version

    try:
        pkg_version_str = version("gpt_oss")
        pkg_version = Version(pkg_version_str)
    except PackageNotFoundError:
        # `from None` drops the chained traceback; callers only need the
        # actionable message.
        raise ImportError("Package 'gpt_oss' is not installed.") from None
    except InvalidVersion as e:
        raise ImportError(f"Invalid version string for 'gpt_oss': {e}") from None

    if pkg_version < Version(MIN_GPT_OSS_VERSION):
        raise ImportError(
            f"gpt_oss >= {MIN_GPT_OSS_VERSION} is required, "
            f"but {pkg_version} is installed."
        ) from None


class Tool(ABC):
    """Abstract interface for a server-side tool invoked during generation."""

    @abstractmethod
    async def get_result(self, context: "ConversationContext") -> Any:
        """Run the tool against the last message of a harmony context."""
        pass

    @abstractmethod
    async def get_result_parsable_context(self, context: "ConversationContext") -> Any:
        """Run the tool against the last message of a parsable context."""
        pass


class HarmonyBrowserTool(Tool):
    """Web-browsing tool backed by gpt_oss's SimpleBrowserTool + Exa search.

    Disabled (``self.enabled = False``) when EXA_API_KEY is unset or gpt_oss
    is missing/outdated; callers are expected to check ``enabled``.
    """

    def __init__(self):
        self.enabled = True
        exa_api_key = os.getenv("EXA_API_KEY")
        if not exa_api_key:
            # No API key: degrade gracefully instead of failing startup.
            self.enabled = False
            logger.warning_once("EXA_API_KEY is not set, browsing is disabled")
            return

        try:
            validate_gpt_oss_install()
            from gpt_oss.tools.simple_browser import SimpleBrowserTool
            from gpt_oss.tools.simple_browser.backend import ExaBackend
        except ImportError as e:
            self.enabled = False
            logger.warning_once(
                "gpt_oss is not installed properly (%s), browsing is disabled", e
            )
            return

        browser_backend = ExaBackend(source="web", api_key=exa_api_key)
        self.browser_tool = SimpleBrowserTool(backend=browser_backend)
        logger.info_once("Browser tool initialized")

    async def get_result(self, context: "ConversationContext") -> Any:
        """Feed the context's last message to the browser and collect output."""
        from vllm.entrypoints.context import HarmonyContext

        assert isinstance(context, HarmonyContext)
        last_msg = context.messages[-1]
        tool_output_msgs = []
        # process() is an async generator of harmony Messages.
        async for msg in self.browser_tool.process(last_msg):
            tool_output_msgs.append(msg)
        return tool_output_msgs

    async def get_result_parsable_context(self, context: "ConversationContext") -> Any:
        raise NotImplementedError("Not implemented yet")

    @property
    def tool_config(self) -> Any:
        # Underlying gpt_oss tool config, used when rendering the prompt.
        return self.browser_tool.tool_config


class HarmonyPythonTool(Tool):
    """Code-interpreter tool backed by gpt_oss's Docker-based PythonTool.

    Construction only imports dependencies; ``validate()`` must be awaited to
    smoke-test the interpreter (it may flip ``enabled`` to False on failure).
    """

    def __init__(self):
        self.enabled = True

        try:
            validate_gpt_oss_install()
            from gpt_oss.tools.python_docker.docker_tool import PythonTool
        except ImportError as e:
            self.enabled = False
            logger.warning_once(
                "gpt_oss is not installed properly (%s), code interpreter is disabled",
                e,
            )
            return

        self.python_tool = PythonTool()

    async def validate(self):
        """Smoke-test the interpreter with a hello-world; disable on failure."""
        if not self.enabled:
            return

        try:
            message = Message(
                author=Author(role=Role.ASSISTANT),
                content=[TextContent(text="print('Hello, world!')")],
                channel="analysis",
                recipient="python",
                content_type="code",
            )
            msgs = []
            async for msg in self.python_tool.process(message):
                msgs.append(msg)
            # The docker tool should echo stdout verbatim.
            assert msgs[0].content[0].text == "Hello, world!\n"
        except Exception as e:
            # Broad catch is deliberate: any failure (docker missing, timeout,
            # bad output) just disables the tool rather than crashing startup.
            self.enabled = False
            logger.warning_once(
                "Code interpreter tool failed to initialize (%s), code "
                "interpreter is disabled",
                e,
            )
            return
        logger.info_once("Code interpreter tool initialized")

    async def get_result(self, context: "ConversationContext") -> Any:
        """Feed the context's last message to the interpreter, collect output."""
        from vllm.entrypoints.context import HarmonyContext

        assert isinstance(context, HarmonyContext)
        last_msg = context.messages[-1]
        tool_output_msgs = []
        async for msg in self.python_tool.process(last_msg):
            tool_output_msgs.append(msg)
        return tool_output_msgs

    async def get_result_parsable_context(self, context: "ConversationContext") -> Any:
        """
        This function converts parsable context types to harmony and back
        so we can use GPTOSS demo python tool
        """
        from vllm.entrypoints.context import ParsableContext

        assert isinstance(context, ParsableContext)
        last_msg = context.parser.response_messages[-1]
        # The parsable tool call carries JSON arguments; "code" holds source.
        # assumes the last response message is a function call with a "code"
        # argument — TODO confirm against ParsableContext producers.
        args = json.loads(last_msg.arguments)
        last_msg_harmony = Message(
            author=Author(role="assistant", name=None),
            content=[TextContent(text=args["code"])],
            channel="analysis",
            recipient="python",
            content_type="code",
        )

        tool_output_msgs = []
        async for msg in self.python_tool.process(last_msg_harmony):
            # Convert harmony output back to an OpenAI Responses output item.
            processed = ResponseFunctionToolCallOutputItem(
                id=f"fco_{random_uuid()}",
                type="function_call_output",
                call_id=f"call_{random_uuid()}",
                output=msg.content[0].text,
                status="completed",
            )
            tool_output_msgs.append(processed)
        return tool_output_msgs

    @property
    def tool_config(self) -> Any:
        # Underlying gpt_oss tool config, used when rendering the prompt.
        return self.python_tool.tool_config
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/serving_chat_stream_harmony.py
vllm/entrypoints/openai/serving_chat_stream_harmony.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Harmony-specific streaming delta extraction for chat completions.

This module handles the extraction of DeltaMessage objects from harmony
parser state during streaming chat completions.
"""

from openai_harmony import StreamableParser

from vllm.entrypoints.chat_utils import make_tool_call_id
from vllm.entrypoints.openai.protocol import (
    DeltaFunctionCall,
    DeltaMessage,
    DeltaToolCall,
)


def extract_harmony_streaming_delta(
    harmony_parser: StreamableParser,
    cur_channel: str | None,
    cur_recipient: str | None,
    prev_recipient: str | None,
    delta_text: str,
    include_reasoning: bool,
) -> tuple[DeltaMessage | None, bool]:
    """
    Extract a DeltaMessage from harmony parser state during streaming.

    Args:
        harmony_parser: The StreamableParser instance tracking parse state
        cur_channel: Current channel ("final", "analysis", "commentary", etc.)
        cur_recipient: Current recipient (e.g., "functions.my_func")
        prev_recipient: Previous recipient for detecting tool call transitions
        delta_text: The text delta to include in the message
        include_reasoning: Whether to include reasoning content

    Returns:
        A tuple of (DeltaMessage or None, tools_streamed_flag)
    """

    def _is_function_call(channel: str | None, recipient: str | None) -> bool:
        # A tool-call message lives on "commentary" or "analysis" and is
        # addressed to a "functions.*" recipient.
        return (
            channel in ("commentary", "analysis")
            and bool(recipient)
            and recipient.startswith("functions.")
        )

    # Plain assistant text for the user.
    if cur_channel == "final":
        return DeltaMessage(content=delta_text), False

    if _is_function_call(cur_channel, cur_recipient):
        # Completed tool-call messages so far determine the streaming index.
        base_index = sum(
            1
            for msg in harmony_parser.messages
            if _is_function_call(msg.channel, msg.recipient)
        )

        if prev_recipient != cur_recipient:
            # Recipient changed: a new tool call starts; emit its header.
            tool_name = cur_recipient.split("functions.", 1)[1]
            header_call = DeltaToolCall(
                id=make_tool_call_id(),
                type="function",
                function=DeltaFunctionCall(name=tool_name, arguments=""),
                index=base_index,
            )
            return DeltaMessage(tool_calls=[header_call]), True

        if delta_text:
            # Same tool call continuing: stream the argument fragment.
            args_call = DeltaToolCall(
                index=base_index,
                function=DeltaFunctionCall(arguments=delta_text),
            )
            return DeltaMessage(tool_calls=[args_call]), True

        # Nothing to emit for this token.
        return None, False

    if cur_channel == "commentary":
        # Tool call preambles meant to be shown to the user
        return DeltaMessage(content=delta_text), False

    if cur_channel == "analysis" and include_reasoning:
        return DeltaMessage(reasoning=delta_text), False

    # Analysis with reasoning suppressed, or an unknown channel.
    return None, False
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/orca_metrics.py
vllm/entrypoints/openai/orca_metrics.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Utility functions that create ORCA endpoint load report response headers.
"""

import json
from collections.abc import Mapping

from vllm.logger import init_logger
from vllm.v1.metrics.reader import Gauge, get_metrics_snapshot

logger = init_logger(__name__)


def create_orca_header(
    metrics_format: str, named_metrics: list[tuple[str, float]]
) -> Mapping[str, str] | None:
    """
    Creates ORCA headers named 'endpoint-load-metrics' in the specified format
    and adds custom metrics to named_metrics.

    ORCA headers format description:
    https://docs.google.com/document/d/1C1ybMmDKJIVlrbOLbywhu9iRYo4rilR-cT50OTtOFTs/edit?tab=t.0

    ORCA proto
    https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto

    Parameters:
    - metrics_format (str): The format of the header ('TEXT', 'JSON').
    - named_metrics (List[Tuple[str, float]]): List of tuples with metric names
      and their corresponding double values.

    Returns:
    - Optional[Mapping[str,str]]: A dictionary with header key as
      'endpoint-load-metrics' and values as the ORCA header strings with
      format prefix and data in with named_metrics in.
    """
    fmt = metrics_format.lower()
    if fmt not in ("text", "json"):
        # BUG FIX: previously logged the builtin `format` function instead of
        # the offending `metrics_format` argument.
        logger.warning(
            "Warning: `%s` format is not supported in the ORCA response header",
            metrics_format,
        )
        return None

    # Keep only well-typed (str name, float value) pairs; malformed entries
    # are silently dropped (matching the original behavior for both formats).
    valid_metrics = [
        (metric_name, value)
        for metric_name, value in named_metrics
        if isinstance(metric_name, str) and isinstance(value, float)
    ]

    header: dict[str, str] = {}
    if fmt == "text":
        # output example:
        # endpoint-load-metrics: TEXT named_metrics.kv_cache_utilization=0.4
        native_http_header = ", ".join(
            f"named_metrics.{metric_name}={value}"
            for metric_name, value in valid_metrics
        )
        header["endpoint-load-metrics"] = f"TEXT {native_http_header}"
    elif fmt == "json":
        # output example:
        # endpoint-load-metrics: JSON "named_metrics": {"custom-metric-util": 0.4}
        orca_report = {"named_metrics": dict(valid_metrics)}
        header["endpoint-load-metrics"] = f"JSON {json.dumps(orca_report)}"

    logger.info("Created ORCA header %s", header)
    return header


def get_named_metrics_from_prometheus() -> list[tuple[str, float]]:
    """
    Collects current metrics from Prometheus and returns some of them
    in the form of the `named_metrics` list for `create_orca_header()`.

    Parameters:
    - None

    Returns:
    - list[tuple[str, float]]: List of tuples of metric names and their values.
    """
    named_metrics: list[tuple[str, float]] = []
    # Map from prometheus metric names to ORCA named metrics.
    prometheus_to_orca_metrics = {
        "vllm:kv_cache_usage_perc": "kv_cache_usage_perc",
        "vllm:num_requests_waiting": "num_requests_waiting",
    }
    metrics = get_metrics_snapshot()
    for metric in metrics:
        orca_name = prometheus_to_orca_metrics.get(metric.name)
        # If this metric is mapped into ORCA, then add it to the report.
        # Note: Only Gauge metrics are currently supported.
        if orca_name is not None and isinstance(metric, Gauge):
            named_metrics.append((str(orca_name), float(metric.value)))
    return named_metrics


def metrics_header(metrics_format: str) -> Mapping[str, str] | None:
    """
    Creates ORCA headers named 'endpoint-load-metrics' in the specified format.
    Metrics are collected from Prometheus using
    `get_named_metrics_from_prometheus()`.

    ORCA headers format description:
    https://docs.google.com/document/d/1C1ybMmDKJIVlrbOLbywhu9iRYo4rilR-cT50OTtOFTs/edit?tab=t.0

    ORCA proto
    https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto

    Parameters:
    - metrics_format (str): The format of the header ('TEXT', 'JSON').

    Returns:
    - Optional[Mapping[str,str]]: A dictionary with header key as
      'endpoint-load-metrics' and values as the ORCA header strings with
      format prefix and data in with named_metrics in.
    """
    if not metrics_format:
        return None
    # Get named metrics from prometheus.
    named_metrics = get_named_metrics_from_prometheus()
    return create_orca_header(metrics_format, named_metrics)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/serving_chat.py
vllm/entrypoints/openai/serving_chat.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import json import time from collections.abc import AsyncGenerator, AsyncIterator from collections.abc import Sequence as GenericSequence from typing import Any, Final import jinja2 import partial_json_parser import regex as re from fastapi import Request from openai_harmony import Message as OpenAIMessage from vllm.engine.protocol import EngineClient from vllm.entrypoints.chat_utils import ( ChatTemplateContentFormatOption, ConversationMessage, get_history_tool_calls_cnt, make_tool_call_id, ) from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.parser.harmony_utils import ( get_developer_message, get_stop_tokens_for_assistant_actions, get_streamable_parser_for_assistant, get_system_message, parse_chat_inputs_to_harmony_messages, parse_chat_output, render_for_completion, ) from vllm.entrypoints.openai.protocol import ( ChatCompletionLogProb, ChatCompletionLogProbs, ChatCompletionLogProbsContent, ChatCompletionNamedToolChoiceParam, ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, DeltaFunctionCall, DeltaMessage, DeltaToolCall, ErrorResponse, PromptTokenUsageInfo, RequestResponseMetadata, ToolCall, UsageInfo, ) from vllm.entrypoints.openai.serving_chat_stream_harmony import ( extract_harmony_streaming_delta, ) from vllm.entrypoints.openai.serving_engine import ( GenerationError, OpenAIServing, clamp_prompt_logprobs, ) from vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.entrypoints.openai.utils import maybe_filter_parallel_tool_calls from vllm.entrypoints.utils import get_max_tokens, should_include_usage from vllm.inputs.data import TokensPrompt from vllm.logger import init_logger from vllm.logprobs import Logprob from vllm.outputs import CompletionOutput, RequestOutput from 
vllm.sampling_params import BeamSearchParams, SamplingParams from vllm.tokenizers import TokenizerLike from vllm.tokenizers.mistral import ( MistralTokenizer, maybe_serialize_tool_calls, truncate_tool_call_ids, validate_request_params, ) from vllm.tool_parsers import ToolParser from vllm.tool_parsers.mistral_tool_parser import MistralToolCall from vllm.utils.collection_utils import as_list from vllm.v1.sample.logits_processor import validate_logits_processors_parameters logger = init_logger(__name__) class OpenAIServingChat(OpenAIServing): def __init__( self, engine_client: EngineClient, models: OpenAIServingModels, response_role: str, *, request_logger: RequestLogger | None, chat_template: str | None, chat_template_content_format: ChatTemplateContentFormatOption, trust_request_chat_template: bool = False, return_tokens_as_token_ids: bool = False, reasoning_parser: str = "", enable_auto_tools: bool = False, exclude_tools_when_tool_choice_none: bool = False, tool_parser: str | None = None, enable_prompt_tokens_details: bool = False, enable_force_include_usage: bool = False, enable_log_outputs: bool = False, log_error_stack: bool = False, default_chat_template_kwargs: dict[str, Any] | None = None, ) -> None: super().__init__( engine_client=engine_client, models=models, request_logger=request_logger, return_tokens_as_token_ids=return_tokens_as_token_ids, log_error_stack=log_error_stack, ) self.response_role = response_role self.chat_template = chat_template self.chat_template_content_format: Final = chat_template_content_format self.trust_request_chat_template = trust_request_chat_template self.default_chat_template_kwargs = default_chat_template_kwargs or {} self.enable_log_outputs = enable_log_outputs # set up logits processors self.logits_processors = self.model_config.logits_processors # set up reasoning parser self.reasoning_parser = self._get_reasoning_parser( reasoning_parser_name=reasoning_parser ) # set up tool use self.enable_auto_tools: bool = 
enable_auto_tools self.tool_parser = self._get_tool_parser( tool_parser_name=tool_parser, enable_auto_tools=enable_auto_tools ) self.exclude_tools_when_tool_choice_none = exclude_tools_when_tool_choice_none self.enable_prompt_tokens_details = enable_prompt_tokens_details self.enable_force_include_usage = enable_force_include_usage self.default_sampling_params = self.model_config.get_diff_sampling_param() if self.default_sampling_params: source = self.model_config.generation_config source = "model" if source == "auto" else source logger.info( "Using default chat sampling params from %s: %s", source, self.default_sampling_params, ) if self.model_config.hf_config.model_type == "kimi_k2": self.tool_call_id_type = "kimi_k2" else: self.tool_call_id_type = "random" self.use_harmony = self.model_config.hf_config.model_type == "gpt_oss" if self.use_harmony: if "stop_token_ids" not in self.default_sampling_params: self.default_sampling_params["stop_token_ids"] = [] self.default_sampling_params["stop_token_ids"].extend( get_stop_tokens_for_assistant_actions() ) # NOTE(woosuk): While OpenAI's chat completion API supports browsing # for some models, currently vLLM doesn't support it. Please use the # Responses API instead. self.supports_browsing = False self.browser_tool = None # NOTE(woosuk): Chat completion API does not support code interpreter. # Please use the Responses API instead. self.supports_code_interpreter = False self.python_tool = None async def warmup(self) -> None: """ Warm up the chat template processing to avoid first-request latency. This method triggers Jinja2 template compilation and content format detection that would otherwise happen on the first real request, causing increased latency on the first request. 
""" logger.info("Warming up chat template processing...") start_time = time.perf_counter() try: # Get the tokenizer from the engine tokenizer = await self.engine_client.get_tokenizer() # Create a minimal dummy request dummy_request = ChatCompletionRequest( messages=[{"role": "user", "content": "warmup"}], model=None, max_completion_tokens=1, ) # Call _preprocess_chat to trigger template compilation # This forces: # 1. Chat template content format detection # 2. Jinja2 template compilation # 3. Tokenizer initialization for chat await self._preprocess_chat( dummy_request, tokenizer, dummy_request.messages, chat_template=self.chat_template, chat_template_content_format=self.chat_template_content_format, add_generation_prompt=True, continue_final_message=False, tool_dicts=None, documents=None, chat_template_kwargs=None, default_chat_template_kwargs=self.default_chat_template_kwargs, tool_parser=None, add_special_tokens=False, ) elapsed = (time.perf_counter() - start_time) * 1000 logger.info("Chat template warmup completed in %.1fms", elapsed) except Exception: # Log but don't fail server startup if warmup fails logger.exception("Chat template warmup failed") async def create_chat_completion( self, request: ChatCompletionRequest, raw_request: Request | None = None, ) -> AsyncGenerator[str, None] | ChatCompletionResponse | ErrorResponse: """ Chat Completion API similar to OpenAI's API. See https://platform.openai.com/docs/api-reference/chat/create for the API specification. This API mimics the OpenAI Chat Completion API. """ error_check_ret = await self._check_model(request) if error_check_ret is not None: logger.error("Error with model %s", error_check_ret) return error_check_ret # If the engine is dead, raise the engine's DEAD_ERROR. # This is required for the streaming case, where we return a # success status before we actually start generating text :). 
if self.engine_client.errored: raise self.engine_client.dead_error try: lora_request = self._maybe_get_adapters( request, supports_default_mm_loras=True ) model_name = self.models.model_name(lora_request) tokenizer = await self.engine_client.get_tokenizer() tool_parser = self.tool_parser if isinstance(tokenizer, MistralTokenizer): # because of issues with pydantic we need to potentially # re-serialize the tool_calls field of the request # for more info: see comment in `maybe_serialize_tool_calls` maybe_serialize_tool_calls(request) truncate_tool_call_ids(request) validate_request_params(request) # Check if tool parsing is unavailable (common condition) tool_parsing_unavailable = ( tool_parser is None and not isinstance(tokenizer, MistralTokenizer) and not self.use_harmony ) # Validate tool_choice when tool parsing is required but unavailable if tool_parsing_unavailable and request.tool_choice not in ( None, "none", ): if request.tool_choice == "auto" and not self.enable_auto_tools: # for hf tokenizers, "auto" tools requires # --enable-auto-tool-choice and --tool-call-parser return self.create_error_response( '"auto" tool choice requires ' "--enable-auto-tool-choice and --tool-call-parser to be set" ) elif request.tool_choice != "auto": # "required" or named tool requires tool parser return self.create_error_response( f'tool_choice="{request.tool_choice}" requires ' "--tool-call-parser to be set" ) if request.tools is None or ( request.tool_choice == "none" and self.exclude_tools_when_tool_choice_none ): tool_dicts = None else: tool_dicts = [tool.model_dump() for tool in request.tools] if not self.use_harmony: # Common case. 
error_check_ret = self._validate_chat_template( request_chat_template=request.chat_template, chat_template_kwargs=request.chat_template_kwargs, trust_request_chat_template=self.trust_request_chat_template, ) if error_check_ret is not None: return error_check_ret conversation, engine_prompts = await self._preprocess_chat( request, tokenizer, request.messages, chat_template=request.chat_template or self.chat_template, chat_template_content_format=self.chat_template_content_format, add_generation_prompt=request.add_generation_prompt, continue_final_message=request.continue_final_message, tool_dicts=tool_dicts, documents=request.documents, chat_template_kwargs=request.chat_template_kwargs, default_chat_template_kwargs=self.default_chat_template_kwargs, tool_parser=tool_parser, add_special_tokens=request.add_special_tokens, ) else: # For GPT-OSS. should_include_tools = tool_dicts is not None conversation, engine_prompts = self._make_request_with_harmony( request, should_include_tools ) except (ValueError, TypeError, RuntimeError, jinja2.TemplateError) as e: logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(f"{e} {e.__cause__}") request_id = ( f"chatcmpl-{self._base_request_id(raw_request, request.request_id)}" ) request_metadata = RequestResponseMetadata(request_id=request_id) if raw_request: raw_request.state.request_metadata = request_metadata # Extract data_parallel_rank from header (router can inject it) data_parallel_rank = self._get_data_parallel_rank(raw_request) # Schedule the request and get the result generator. generators: list[AsyncGenerator[RequestOutput, None]] = [] try: for i, engine_prompt in enumerate(engine_prompts): prompt_text, _, _ = self._get_prompt_components(engine_prompt) # If we are creating sub requests for multiple prompts, ensure that they # have unique request ids. 
sub_request_id = ( request_id if len(engine_prompts) == 1 else f"{request_id}_{i}" ) if self.default_sampling_params is None: self.default_sampling_params = {} max_tokens = get_max_tokens( max_model_len=self.max_model_len, request=request, input_length=len(engine_prompt["prompt_token_ids"]), default_sampling_params=self.default_sampling_params, ) sampling_params: SamplingParams | BeamSearchParams if request.use_beam_search: sampling_params = request.to_beam_search_params( max_tokens, self.default_sampling_params ) else: sampling_params = request.to_sampling_params( max_tokens, self.model_config.logits_processor_pattern, self.default_sampling_params, ) validate_logits_processors_parameters( self.logits_processors, sampling_params, ) self._log_inputs( sub_request_id, engine_prompt, params=sampling_params, lora_request=lora_request, ) trace_headers = ( None if raw_request is None else await self._get_trace_headers(raw_request.headers) ) if isinstance(sampling_params, BeamSearchParams): generator = self.beam_search( prompt=engine_prompt, request_id=sub_request_id, params=sampling_params, lora_request=lora_request, trace_headers=trace_headers, ) else: engine_request, tokenization_kwargs = await self._process_inputs( sub_request_id, engine_prompt, sampling_params, lora_request=lora_request, trace_headers=trace_headers, priority=request.priority, data_parallel_rank=data_parallel_rank, ) generator = self.engine_client.generate( engine_request, sampling_params, sub_request_id, lora_request=lora_request, trace_headers=trace_headers, priority=request.priority, prompt_text=prompt_text, tokenization_kwargs=tokenization_kwargs, data_parallel_rank=data_parallel_rank, ) generators.append(generator) except ValueError as e: return self.create_error_response(e) assert len(generators) == 1 (result_generator,) = generators # Streaming response if request.stream: return self.chat_completion_stream_generator( request, result_generator, request_id, model_name, conversation, tokenizer, 
request_metadata, ) try: return await self.chat_completion_full_generator( request, result_generator, request_id, model_name, conversation, tokenizer, request_metadata, ) except GenerationError as e: return self._convert_generation_error_to_response(e) except ValueError as e: return self.create_error_response(e) def get_chat_request_role(self, request: ChatCompletionRequest) -> str: if request.add_generation_prompt: return self.response_role return request.messages[-1]["role"] @staticmethod def _bracket_level(s: str, opening="{", closing="}") -> int: """ Calculate the current level of nested brackets in a given string. """ level = 0 for char in s: if char == opening: level += 1 elif char == closing: level -= 1 return level @staticmethod def _filter_delta_text(delta_text: str, previous_text: str) -> tuple[str, bool]: # remove last '},' of the tool definition stemming from the # "name"/"parameters" outer object or closing ']' of the tool list # count occurrences of opening and closing curly braces and # once level 0 is reached stop outputting text # if 0 is reached while parsing the delta_text we know the current # tool will finish in this current iteration bracket_level = OpenAIServingChat._bracket_level(previous_text) updated_delta, passed_zero = "", False for c in delta_text: if c == "{": bracket_level += 1 passed_zero = bracket_level == 0 elif c == "}": bracket_level -= 1 passed_zero = bracket_level == 0 if bracket_level != 0: updated_delta += c else: # if a comma is reached at level 0 we can stop if c == ",": break return updated_delta, passed_zero def extract_tool_call_required_streaming( self, previous_text: str, current_text: str | None, delta_text: str, function_name_returned: bool, tool_call_idx: int | None = None, ) -> tuple[DeltaMessage | None, bool]: if current_text is None or current_text == "": # if the current text is empty, we cannot parse it return None, function_name_returned try: obj = partial_json_parser.loads(current_text) except 
partial_json_parser.core.exceptions.MalformedJSON: logger.debug("not enough tokens to parse into JSON yet") obj = None # check if the current text is a valid array # containing a partial tool calling object # if not repeat if obj is None or not isinstance(obj, list) or not len(obj) > 0: function_name_returned = False delta_message = None else: _, finishes_previous_tool = OpenAIServingChat._filter_delta_text( delta_text, previous_text ) # take the last tool call from the generated list current_tool_call = obj[-1] # once parameters have been generated the name is complete as well if not finishes_previous_tool and ( "name" not in current_tool_call or "parameters" not in current_tool_call ): function_name_returned = False delta_message = None else: if not function_name_returned: # get partly generated arguments from the latest tool call param_match = re.search( r'.*"parameters":\s*(.*)', current_text, re.DOTALL ) arguments = param_match.group(1) if param_match else "" arguments, _ = OpenAIServingChat._filter_delta_text( arguments, previous_text ) # if this iteration finishes a previous tool call but a # new incomplete tool is already generated, take the # previous from the list if finishes_previous_tool and "parameters" not in current_tool_call: current_tool_call = obj[-2] function_name_returned = True tool_call_id = make_tool_call_id( id_type=self.tool_call_id_type, func_name=current_tool_call["name"], idx=tool_call_idx, ) delta_message = DeltaMessage( tool_calls=[ DeltaToolCall( id=tool_call_id, function=DeltaFunctionCall( name=current_tool_call["name"], arguments=arguments ), index=len(obj) - 1, type="function", ) ] ) else: delta_text, _ = OpenAIServingChat._filter_delta_text( delta_text, previous_text ) if delta_text != "": delta_message = DeltaMessage( tool_calls=[ DeltaToolCall( function=DeltaFunctionCall( # OpenAI API returns None # instead of name every time name=None, arguments=delta_text, ), index=len(obj) - 1, ) ] ) else: delta_message = None return 
delta_message, function_name_returned async def chat_completion_stream_generator( self, request: ChatCompletionRequest, result_generator: AsyncIterator[RequestOutput], request_id: str, model_name: str, conversation: list[ConversationMessage], tokenizer: TokenizerLike | None, request_metadata: RequestResponseMetadata, ) -> AsyncGenerator[str, None]: created_time = int(time.time()) chunk_object_type: Final = "chat.completion.chunk" first_iteration = True # Send response for each token for each request.n (index) num_choices = 1 if request.n is None else request.n previous_num_tokens = [0] * num_choices finish_reason_sent = [False] * num_choices num_prompt_tokens = 0 num_cached_tokens = None if self.use_harmony: harmony_parsers = [ get_streamable_parser_for_assistant() for _ in range(num_choices) ] harmony_tools_streamed = [False] * num_choices tools_streamed = [False] * num_choices if isinstance(request.tool_choice, ChatCompletionNamedToolChoiceParam): tool_choice_function_name = request.tool_choice.function.name else: tool_choice_function_name = None # Determine whether tools are in use with "auto" tool choice tool_choice_auto = ( not tool_choice_function_name and self._should_stream_with_auto_tool_parsing(request) ) all_previous_token_ids: list[list[int]] | None function_name_returned = [False] * num_choices if self.tool_call_id_type == "kimi_k2": history_tool_call_cnt = get_history_tool_calls_cnt(conversation) else: history_tool_call_cnt = 0 # Always track previous_texts for comprehensive output logging previous_texts = [""] * num_choices # Only one of these will be used, thus previous_texts and # all_previous_token_ids will not be used twice in the same iteration. 
if tool_choice_auto or self.reasoning_parser: # These are only required in "auto" tool choice case all_previous_token_ids = [[]] * num_choices # For reasoning parser and tool call all enabled added_content_delta_arr = [False] * num_choices reasoning_end_arr = [False] * num_choices else: all_previous_token_ids = None try: if self.reasoning_parser: if tokenizer is None: raise ValueError( "Tokenizer not available when `skip_tokenizer_init=True`" ) reasoning_parser = self.reasoning_parser( tokenizer, chat_template_kwargs=request.chat_template_kwargs, # type: ignore ) except RuntimeError as e: logger.exception("Error in reasoning parser creation.") data = self.create_streaming_error_response(str(e)) yield f"data: {data}\n\n" yield "data: [DONE]\n\n" return # Prepare the tool parser if it's needed try: if tool_choice_auto and self.tool_parser: if tokenizer is None: raise ValueError( "Tokenizer not available when `skip_tokenizer_init=True`" ) tool_parsers: list[ToolParser | None] = [ self.tool_parser(tokenizer) ] * num_choices else: tool_parsers = [None] * num_choices except Exception as e: logger.exception("Error in tool parser creation.") data = self.create_streaming_error_response(e) yield f"data: {data}\n\n" yield "data: [DONE]\n\n" return stream_options = request.stream_options include_usage, include_continuous_usage = should_include_usage( stream_options, self.enable_force_include_usage ) try: async for res in result_generator: if res.prompt_token_ids is not None: num_prompt_tokens = len(res.prompt_token_ids) if res.encoder_prompt_token_ids is not None: num_prompt_tokens += len(res.encoder_prompt_token_ids) # We need to do it here, because if there are exceptions in # the result_generator, it needs to be sent as the FIRST # response (by the try...catch). 
if first_iteration: num_cached_tokens = res.num_cached_tokens # Send first response for each request.n (index) with # the role role = self.get_chat_request_role(request) # NOTE num_choices defaults to 1 so this usually executes # once per request for i in range(num_choices): choice_data = ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage( role=role, content="", ), logprobs=None, finish_reason=None, ) # return prompt_token_ids at the first chunk ever chunk = ChatCompletionStreamResponse( id=request_id, object=chunk_object_type, created=created_time, choices=[choice_data], model=model_name, prompt_token_ids=( res.prompt_token_ids if request.return_token_ids else None ), ) # if continuous usage stats are requested, add it if include_continuous_usage: chunk.usage = UsageInfo( prompt_tokens=num_prompt_tokens, completion_tokens=0, total_tokens=num_prompt_tokens, ) data = chunk.model_dump_json(exclude_unset=True) yield f"data: {data}\n\n" # Send response to echo the input portion of the # last message if request.echo: last_msg_content: str | list[dict[str, str]] = "" if ( conversation and "content" in conversation[-1] and conversation[-1].get("role") == role ): last_msg_content = conversation[-1]["content"] or "" if last_msg_content: for i in range(num_choices): choice_data = ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage(content=last_msg_content), logprobs=None, finish_reason=None, ) chunk = ChatCompletionStreamResponse( id=request_id, object=chunk_object_type, created=created_time, choices=[choice_data], model=model_name, ) if include_continuous_usage: chunk.usage = UsageInfo( prompt_tokens=num_prompt_tokens, completion_tokens=0, total_tokens=num_prompt_tokens, ) data = chunk.model_dump_json(exclude_unset=True) yield f"data: {data}\n\n" first_iteration = False for output in res.outputs: i = output.index tool_parser = tool_parsers[i] if finish_reason_sent[i]:
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/speech_to_text.py
vllm/entrypoints/openai/speech_to_text.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import io import math import time from collections.abc import AsyncGenerator, Callable from functools import cached_property from typing import Literal, TypeAlias, TypeVar, cast import numpy as np from fastapi import Request from transformers import PreTrainedTokenizerBase import vllm.envs as envs from vllm.engine.protocol import EngineClient from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.protocol import ( DeltaMessage, ErrorResponse, RequestResponseMetadata, TranscriptionResponse, TranscriptionResponseStreamChoice, TranscriptionResponseVerbose, TranscriptionSegment, TranscriptionStreamResponse, TranslationResponse, TranslationResponseStreamChoice, TranslationResponseVerbose, TranslationSegment, TranslationStreamResponse, UsageInfo, VLLMValidationError, ) from vllm.entrypoints.openai.serving_engine import OpenAIServing, SpeechToTextRequest from vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.inputs.data import PromptType from vllm.logger import init_logger from vllm.model_executor.models import SupportsTranscription, supports_transcription from vllm.outputs import RequestOutput from vllm.tokenizers import get_tokenizer from vllm.utils.import_utils import PlaceholderModule try: import librosa except ImportError: librosa = PlaceholderModule("librosa") # type: ignore[assignment] SpeechToTextResponse: TypeAlias = TranscriptionResponse | TranslationResponse SpeechToTextResponseVerbose: TypeAlias = ( TranscriptionResponseVerbose | TranslationResponseVerbose ) SpeechToTextSegment: TypeAlias = TranscriptionSegment | TranslationSegment T = TypeVar("T", bound=SpeechToTextResponse) V = TypeVar("V", bound=SpeechToTextResponseVerbose) S = TypeVar("S", bound=SpeechToTextSegment) ResponseType: TypeAlias = ( TranscriptionResponse | TranslationResponse | TranscriptionResponseVerbose | 
TranslationResponseVerbose ) logger = init_logger(__name__) class OpenAISpeechToText(OpenAIServing): """Base class for speech-to-text operations like transcription and translation.""" def __init__( self, engine_client: EngineClient, models: OpenAIServingModels, *, request_logger: RequestLogger | None, return_tokens_as_token_ids: bool = False, task_type: Literal["transcribe", "translate"] = "transcribe", log_error_stack: bool = False, enable_force_include_usage: bool = False, ): super().__init__( engine_client=engine_client, models=models, request_logger=request_logger, return_tokens_as_token_ids=return_tokens_as_token_ids, log_error_stack=log_error_stack, ) self.default_sampling_params = self.model_config.get_diff_sampling_param() self.task_type = task_type self.asr_config = self.model_cls.get_speech_to_text_config( self.model_config, task_type ) self.enable_force_include_usage = enable_force_include_usage self.max_audio_filesize_mb = envs.VLLM_MAX_AUDIO_CLIP_FILESIZE_MB if self.model_cls.supports_segment_timestamp: self.tokenizer = cast( PreTrainedTokenizerBase, get_tokenizer( tokenizer_name=self.model_config.tokenizer, tokenizer_mode=self.model_config.tokenizer_mode, ), ) if self.default_sampling_params: logger.info( "Overwriting default completion sampling param with: %s", self.default_sampling_params, ) # Warm up audio preprocessing to avoid first-request latency self._warmup_audio_preprocessing() # Warm up input processor with dummy audio self._warmup_input_processor() def _warmup_audio_preprocessing(self) -> None: """Warm up audio processing libraries to avoid first-request latency. The first call to librosa functions (load, get_duration, mel-spectrogram) triggers JIT compilation and library initialization which can take ~7s. This method warms up these operations during server initialization. 
""" # Skip warmup if librosa is not installed (optional dependency) if isinstance(librosa, PlaceholderModule): return # Skip warmup if model doesn't support transcription if not supports_transcription(self.model_cls): return try: warmup_start = time.perf_counter() logger.info("Warming up audio preprocessing libraries...") # Create a minimal dummy audio (1 second of silence at target sample rate) dummy_audio = np.zeros(int(self.asr_config.sample_rate), dtype=np.float32) # Warm up librosa.load by using librosa functions on the dummy data # This initializes FFTW, numba JIT, and other audio processing libraries _ = librosa.get_duration(y=dummy_audio, sr=self.asr_config.sample_rate) # Warm up mel-spectrogram computation with model-specific parameters from vllm.transformers_utils.processor import ( cached_processor_from_config, ) processor = cached_processor_from_config(self.model_config) feature_extractor = None if hasattr(processor, "feature_extractor"): feature_extractor = processor.feature_extractor elif hasattr(processor, "audio_processor"): # For models like GraniteSpeech that use audio_processor audio_proc = processor.audio_processor if hasattr(audio_proc, "feature_extractor"): feature_extractor = audio_proc.feature_extractor # If audio_processor doesn't have feature_extractor, # skip mel-spectrogram warmup for these models if feature_extractor is not None: _ = librosa.feature.melspectrogram( y=dummy_audio, sr=self.asr_config.sample_rate, n_mels=getattr(feature_extractor, "n_mels", 128), n_fft=getattr(feature_extractor, "n_fft", 400), hop_length=getattr(feature_extractor, "hop_length", 160), ) warmup_elapsed = time.perf_counter() - warmup_start logger.info("Audio preprocessing warmup completed in %.2fs", warmup_elapsed) except Exception: # Don't fail initialization if warmup fails - log exception and continue logger.exception( "Audio preprocessing warmup failed (non-fatal): %s. 
" "First request may experience higher latency.", ) def _warmup_input_processor(self) -> None: """Warm up input processor with dummy audio to avoid first-request latency. The first call to input_processor.process_inputs() with multimodal audio triggers multimodal processing initialization which can take ~2.5s. This method processes a dummy audio request to warm up the pipeline. """ # Skip warmup if model doesn't support transcription if not supports_transcription(self.model_cls): return # Only warm up if model supports transcription methods if not hasattr(self.model_cls, "get_generation_prompt"): return try: from vllm.sampling_params import SamplingParams warmup_start = time.perf_counter() logger.info("Warming up multimodal input processor...") # Create minimal dummy audio (1 second of silence) dummy_audio = np.zeros(int(self.asr_config.sample_rate), dtype=np.float32) # Use the same method that _preprocess_speech_to_text uses # to create the prompt dummy_prompt = self.model_cls.get_generation_prompt( audio=dummy_audio, stt_config=self.asr_config, model_config=self.model_config, language="en", task_type=self.task_type, request_prompt="", to_language=None, ) # Create minimal sampling params dummy_params = SamplingParams( max_tokens=1, temperature=0.0, skip_clone=True, # Internal warmup, safe to skip clone ) # Process the dummy input through the input processor # This will trigger all the multimodal processing initialization _ = self.input_processor.process_inputs( request_id="warmup", prompt=dummy_prompt, params=dummy_params, ) warmup_elapsed = time.perf_counter() - warmup_start logger.info("Input processor warmup completed in %.2fs", warmup_elapsed) except Exception: # Don't fail initialization if warmup fails - log warning and continue logger.exception( "Input processor warmup failed (non-fatal): %s. " "First request may experience higher latency." 
) @cached_property def model_cls(self) -> type[SupportsTranscription]: from vllm.model_executor.model_loader import get_model_cls model_cls = get_model_cls(self.model_config) return cast(type[SupportsTranscription], model_cls) async def _preprocess_speech_to_text( self, request: SpeechToTextRequest, audio_data: bytes, ) -> tuple[list[PromptType], float]: # Validate request language = self.model_cls.validate_language(request.language) # Skip to_language validation to avoid extra logging for Whisper. to_language = ( self.model_cls.validate_language(request.to_language) if request.to_language else None ) if len(audio_data) / 1024**2 > self.max_audio_filesize_mb: raise VLLMValidationError( "Maximum file size exceeded", parameter="audio_filesize_mb", value=len(audio_data) / 1024**2, ) with io.BytesIO(audio_data) as bytes_: # NOTE resample to model SR here for efficiency. This is also a # pre-requisite for chunking, as it assumes Whisper SR. y, sr = librosa.load(bytes_, sr=self.asr_config.sample_rate) duration = librosa.get_duration(y=y, sr=sr) do_split_audio = ( self.asr_config.allow_audio_chunking and duration > self.asr_config.max_audio_clip_s ) chunks = [y] if not do_split_audio else self._split_audio(y, int(sr)) prompts = [] for chunk in chunks: # The model has control over the construction, as long as it # returns a valid PromptType. 
prompt = self.model_cls.get_generation_prompt( audio=chunk, stt_config=self.asr_config, model_config=self.model_config, language=language, task_type=self.task_type, request_prompt=request.prompt, to_language=to_language, ) if request.response_format == "verbose_json": if not isinstance(prompt, dict): raise VLLMValidationError( "Expected prompt to be a dict", parameter="prompt", value=type(prompt).__name__, ) prompt_dict = cast(dict, prompt) decoder_prompt = prompt.get("decoder_prompt") if not isinstance(decoder_prompt, str): raise VLLMValidationError( "Expected decoder_prompt to be str", parameter="decoder_prompt", value=type(decoder_prompt).__name__, ) prompt_dict["decoder_prompt"] = decoder_prompt.replace( "<|notimestamps|>", "<|0.00|>" ) prompts.append(prompt) return prompts, duration def _get_verbose_segments( self, tokens: tuple, request: SpeechToTextRequest, segment_class: type[SpeechToTextSegment], start_time: float = 0, ) -> list[SpeechToTextSegment]: """ Convert tokens to verbose segments. This method expects the model to produce timestamps as tokens (similar to Whisper). If the tokens do not include timestamp information, the segments may not be generated correctly. Note: Fields like avg_logprob, compression_ratio, and no_speech_prob are not supported in this implementation and will be None. See docs for details. """ BASE_OFFSET = 0.02 init_token = self.tokenizer.encode("<|0.00|>", add_special_tokens=False)[0] if tokens[-1] == self.tokenizer.eos_token_id: tokens = tokens[:-1] tokens_with_start = (init_token,) + tokens segments: list[SpeechToTextSegment] = [] last_timestamp_start = 0 if tokens_with_start[-2] < init_token and tokens_with_start[-1] >= init_token: tokens_with_start = tokens_with_start + (tokens_with_start[-1],) for idx, token in enumerate(tokens_with_start): # Timestamp tokens (e.g., <|0.00|>) are assumed to be sorted. # If the ordering is violated, this slicing may produce incorrect results. 
if ( token >= init_token and idx != 0 and tokens_with_start[idx - 1] >= init_token ): sliced_timestamp_tokens = tokens_with_start[last_timestamp_start:idx] start_timestamp = sliced_timestamp_tokens[0] - init_token end_timestamp = sliced_timestamp_tokens[-1] - init_token casting_segment = cast( SpeechToTextSegment, segment_class( id=len(segments), seek=start_time, start=start_time + BASE_OFFSET * start_timestamp, end=start_time + BASE_OFFSET * end_timestamp, temperature=request.temperature, text=self.tokenizer.decode(sliced_timestamp_tokens[1:-1]), tokens=sliced_timestamp_tokens[1:-1], ), ) segments.append(casting_segment) last_timestamp_start = idx return segments async def _create_speech_to_text( self, audio_data: bytes, request: SpeechToTextRequest, raw_request: Request, response_class: type[T | V], stream_generator_method: Callable[..., AsyncGenerator[str, None]], ) -> T | V | AsyncGenerator[str, None] | ErrorResponse: """Base method for speech-to-text operations like transcription and translation.""" error_check_ret = await self._check_model(request) if error_check_ret is not None: return error_check_ret # If the engine is dead, raise the engine's DEAD_ERROR. # This is required for the streaming case, where we return a # success status before we actually start generating text :). 
if self.engine_client.errored: raise self.engine_client.dead_error if request.response_format not in ["text", "json", "verbose_json"]: return self.create_error_response( ("Currently only support response_format") + ("`text`, `json` or `verbose_json`") ) if ( request.response_format == "verbose_json" and not self.model_cls.supports_segment_timestamp ): return self.create_error_response( f"Currently do not support verbose_json for {request.model}" ) if request.response_format == "verbose_json" and request.stream: return self.create_error_response( "verbose_json format doesn't support streaming case" ) request_id = f"{self.task_type}-{self._base_request_id(raw_request)}" request_metadata = RequestResponseMetadata(request_id=request_id) if raw_request: raw_request.state.request_metadata = request_metadata try: lora_request = self._maybe_get_adapters(request) prompts, duration_s = await self._preprocess_speech_to_text( request=request, audio_data=audio_data, ) except ValueError as e: logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(e) list_result_generator: list[AsyncGenerator[RequestOutput, None]] | None = None try: # Unlike most decoder-only models, whisper generation length is not # constrained by the size of the input audio, which is mapped to a # fixed-size log-mel-spectogram. Still, allow for fewer tokens to be # generated by respecting the extra completion tokens arg. 
if request.max_completion_tokens is None: default_max_tokens = self.model_config.max_model_len else: default_max_tokens = min( self.model_config.max_model_len, request.max_completion_tokens ) sampling_params = request.to_sampling_params( default_max_tokens, self.default_sampling_params ) self._log_inputs( request_id, # It will not display special tokens like <|startoftranscript|> request.prompt, params=sampling_params, lora_request=lora_request, ) list_result_generator = [ self.engine_client.generate( prompt, sampling_params, f"{request_id}_{i}", lora_request=lora_request, ) for i, prompt in enumerate(prompts) ] except ValueError as e: return self.create_error_response(e) if request.stream: return stream_generator_method( request, list_result_generator, request_id, request_metadata, duration_s ) # Non-streaming response. total_segments = [] text_parts = [] try: assert list_result_generator is not None segments_types: dict[str, type[SpeechToTextSegment]] = { "transcribe": TranscriptionSegment, "translate": TranslationSegment, } segment_class: type[SpeechToTextSegment] = segments_types[self.task_type] text = "" for idx, result_generator in enumerate(list_result_generator): async for op in result_generator: if request.response_format == "verbose_json": segments: list[SpeechToTextSegment] = ( self._get_verbose_segments( tokens=tuple(op.outputs[0].token_ids), segment_class=segment_class, request=request, start_time=idx * self.asr_config.max_audio_clip_s, ) ) total_segments.extend(segments) text_parts.extend([seg.text for seg in segments]) else: text_parts.append(op.outputs[0].text) text = "".join(text_parts) if self.task_type == "transcribe": final_response: ResponseType # add usage in TranscriptionResponse. 
usage = { "type": "duration", # rounded up as per openAI specs "seconds": int(math.ceil(duration_s)), } if request.response_format != "verbose_json": final_response = cast( T, TranscriptionResponse(text=text, usage=usage) ) else: final_response = cast( V, TranscriptionResponseVerbose( text=text, language=request.language, duration=str(duration_s), segments=total_segments, ), ) else: # no usage in response for translation task if request.response_format != "verbose_json": final_response = cast(T, TranslationResponse(text=text)) else: final_response = cast( V, TranslationResponseVerbose( text=text, language=request.language, duration=str(duration_s), segments=total_segments, ), ) return final_response except asyncio.CancelledError: return self.create_error_response("Client disconnected") except ValueError as e: return self.create_error_response(e) async def _speech_to_text_stream_generator( self, request: SpeechToTextRequest, list_result_generator: list[AsyncGenerator[RequestOutput, None]], request_id: str, request_metadata: RequestResponseMetadata, audio_duration_s: float, chunk_object_type: Literal["translation.chunk", "transcription.chunk"], response_stream_choice_class: type[TranscriptionResponseStreamChoice] | type[TranslationResponseStreamChoice], stream_response_class: type[TranscriptionStreamResponse] | type[TranslationStreamResponse], ) -> AsyncGenerator[str, None]: created_time = int(time.time()) model_name = request.model completion_tokens = 0 num_prompt_tokens = 0 include_usage = self.enable_force_include_usage or request.stream_include_usage include_continuous_usage = ( request.stream_continuous_usage_stats if include_usage and request.stream_continuous_usage_stats else False ) try: for result_generator in list_result_generator: async for res in result_generator: # On first result. 
if res.prompt_token_ids is not None: num_prompt_tokens = len(res.prompt_token_ids) if audio_tokens := self.model_cls.get_num_audio_tokens( audio_duration_s, self.asr_config, self.model_config ): num_prompt_tokens += audio_tokens # We need to do it here, because if there are exceptions in # the result_generator, it needs to be sent as the FIRST # response (by the try...catch). # Just one output (n=1) supported. assert len(res.outputs) == 1 output = res.outputs[0] delta_message = DeltaMessage(content=output.text) completion_tokens += len(output.token_ids) if output.finish_reason is None: # Still generating, send delta update. choice_data = response_stream_choice_class(delta=delta_message) else: # Model is finished generating. choice_data = response_stream_choice_class( delta=delta_message, finish_reason=output.finish_reason, stop_reason=output.stop_reason, ) chunk = stream_response_class( id=request_id, object=chunk_object_type, created=created_time, choices=[choice_data], model=model_name, ) # handle usage stats if requested & if continuous if include_continuous_usage: chunk.usage = UsageInfo( prompt_tokens=num_prompt_tokens, completion_tokens=completion_tokens, total_tokens=num_prompt_tokens + completion_tokens, ) data = chunk.model_dump_json(exclude_unset=True) yield f"data: {data}\n\n" # Once the final token is handled, if stream_options.include_usage # is sent, send the usage. 
if include_usage: final_usage = UsageInfo( prompt_tokens=num_prompt_tokens, completion_tokens=completion_tokens, total_tokens=num_prompt_tokens + completion_tokens, ) final_usage_chunk = stream_response_class( id=request_id, object=chunk_object_type, created=created_time, choices=[], model=model_name, usage=final_usage, ) final_usage_data = final_usage_chunk.model_dump_json( exclude_unset=True, exclude_none=True ) yield f"data: {final_usage_data}\n\n" # report to FastAPI middleware aggregate usage across all choices request_metadata.final_usage_info = UsageInfo( prompt_tokens=num_prompt_tokens, completion_tokens=completion_tokens, total_tokens=num_prompt_tokens + completion_tokens, ) except Exception as e: logger.exception("Error in %s stream generator.", self.task_type) data = self.create_streaming_error_response(e) yield f"data: {data}\n\n" # Send the final done message after all response.n are finished yield "data: [DONE]\n\n" def _split_audio( self, audio_data: np.ndarray, sample_rate: int ) -> list[np.ndarray]: chunk_size = sample_rate * self.asr_config.max_audio_clip_s overlap_size = sample_rate * self.asr_config.overlap_chunk_second chunks = [] i = 0 while i < audio_data.shape[-1]: if i + chunk_size >= audio_data.shape[-1]: # handle last chunk chunks.append(audio_data[..., i:]) break # Find the best split point in the overlap region search_start = i + chunk_size - overlap_size search_end = min(i + chunk_size, audio_data.shape[-1]) split_point = self._find_split_point(audio_data, search_start, search_end) # Extract chunk up to the split point chunks.append(audio_data[..., i:split_point]) i = split_point return chunks def _find_split_point(self, wav: np.ndarray, start_idx: int, end_idx: int) -> int: """Find the best point to split audio by looking for silence or low amplitude. 
Args: wav: Audio tensor [1, T] start_idx: Start index of search region end_idx: End index of search region Returns: Index of best splitting point """ segment = wav[start_idx:end_idx] # Calculate RMS energy in small windows min_energy = math.inf quietest_idx = 0 min_energy_window = self.asr_config.min_energy_split_window_size assert min_energy_window is not None for i in range(0, len(segment) - min_energy_window, min_energy_window): window = segment[i : i + min_energy_window] energy = (window**2).mean() ** 0.5 if energy < min_energy: quietest_idx = i + start_idx min_energy = energy return quietest_idx
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/serving_models.py
vllm/entrypoints/openai/serving_models.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from asyncio import Lock from collections import defaultdict from dataclasses import dataclass from http import HTTPStatus from vllm.engine.protocol import EngineClient from vllm.entrypoints.openai.protocol import ( ErrorInfo, ErrorResponse, LoadLoRAAdapterRequest, ModelCard, ModelList, ModelPermission, UnloadLoRAAdapterRequest, ) from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.lora.resolver import LoRAResolver, LoRAResolverRegistry from vllm.utils.counter import AtomicCounter logger = init_logger(__name__) @dataclass class BaseModelPath: name: str model_path: str @dataclass class LoRAModulePath: name: str path: str base_model_name: str | None = None class OpenAIServingModels: """Shared instance to hold data about the loaded base model(s) and adapters. Handles the routes: - /v1/models - /v1/load_lora_adapter - /v1/unload_lora_adapter """ def __init__( self, engine_client: EngineClient, base_model_paths: list[BaseModelPath], *, lora_modules: list[LoRAModulePath] | None = None, ): super().__init__() self.engine_client = engine_client self.base_model_paths = base_model_paths self.static_lora_modules = lora_modules self.lora_requests: dict[str, LoRARequest] = {} self.lora_id_counter = AtomicCounter(0) self.lora_resolvers: list[LoRAResolver] = [] for lora_resolver_name in LoRAResolverRegistry.get_supported_resolvers(): self.lora_resolvers.append( LoRAResolverRegistry.get_resolver(lora_resolver_name) ) self.lora_resolver_lock: dict[str, Lock] = defaultdict(Lock) self.input_processor = self.engine_client.input_processor self.io_processor = self.engine_client.io_processor self.model_config = self.engine_client.model_config self.max_model_len = self.model_config.max_model_len async def init_static_loras(self): """Loads all static LoRA modules. 
Raises if any fail to load""" if self.static_lora_modules is None: return for lora in self.static_lora_modules: load_request = LoadLoRAAdapterRequest( lora_path=lora.path, lora_name=lora.name ) load_result = await self.load_lora_adapter( request=load_request, base_model_name=lora.base_model_name ) if isinstance(load_result, ErrorResponse): raise ValueError(load_result.error.message) def is_base_model(self, model_name) -> bool: return any(model.name == model_name for model in self.base_model_paths) def model_name(self, lora_request: LoRARequest | None = None) -> str: """Returns the appropriate model name depending on the availability and support of the LoRA or base model. Parameters: - lora: LoRARequest that contain a base_model_name. Returns: - str: The name of the base model or the first available model path. """ if lora_request is not None: return lora_request.lora_name return self.base_model_paths[0].name async def show_available_models(self) -> ModelList: """Show available models. This includes the base model and all adapters""" model_cards = [ ModelCard( id=base_model.name, max_model_len=self.max_model_len, root=base_model.model_path, permission=[ModelPermission()], ) for base_model in self.base_model_paths ] lora_cards = [ ModelCard( id=lora.lora_name, root=lora.path, parent=lora.base_model_name if lora.base_model_name else self.base_model_paths[0].name, permission=[ModelPermission()], ) for lora in self.lora_requests.values() ] model_cards.extend(lora_cards) return ModelList(data=model_cards) async def load_lora_adapter( self, request: LoadLoRAAdapterRequest, base_model_name: str | None = None ) -> ErrorResponse | str: lora_name = request.lora_name # Ensure atomicity based on the lora name async with self.lora_resolver_lock[lora_name]: error_check_ret = await self._check_load_lora_adapter_request(request) if error_check_ret is not None: return error_check_ret lora_path = request.lora_path unique_id = self.lora_id_counter.inc(1) lora_request = LoRARequest( 
lora_name=lora_name, lora_int_id=unique_id, lora_path=lora_path ) if base_model_name is not None and self.is_base_model(base_model_name): lora_request.base_model_name = base_model_name # Validate that the adapter can be loaded into the engine # This will also preload it for incoming requests try: await self.engine_client.add_lora(lora_request) except Exception as e: error_type = "BadRequestError" status_code = HTTPStatus.BAD_REQUEST if "No adapter found" in str(e): error_type = "NotFoundError" status_code = HTTPStatus.NOT_FOUND return create_error_response( message=str(e), err_type=error_type, status_code=status_code ) self.lora_requests[lora_name] = lora_request logger.info( "Loaded new LoRA adapter: name '%s', path '%s'", lora_name, lora_path ) return f"Success: LoRA adapter '{lora_name}' added successfully." async def unload_lora_adapter( self, request: UnloadLoRAAdapterRequest ) -> ErrorResponse | str: lora_name = request.lora_name # Ensure atomicity based on the lora name async with self.lora_resolver_lock[lora_name]: error_check_ret = await self._check_unload_lora_adapter_request(request) if error_check_ret is not None: return error_check_ret # Safe to delete now since we hold the lock del self.lora_requests[lora_name] logger.info("Removed LoRA adapter: name '%s'", lora_name) return f"Success: LoRA adapter '{lora_name}' removed successfully." 
async def _check_load_lora_adapter_request(
    self, request: LoadLoRAAdapterRequest
) -> ErrorResponse | None:
    """Validate a load request.

    Returns an ErrorResponse describing the problem, or None when the
    request is acceptable.
    """
    # Check if both 'lora_name' and 'lora_path' are provided
    if not request.lora_name or not request.lora_path:
        return create_error_response(
            message="Both 'lora_name' and 'lora_path' must be provided.",
            err_type="InvalidUserInput",
            status_code=HTTPStatus.BAD_REQUEST,
        )

    # Check if the lora adapter with the given name already exists
    if request.lora_name in self.lora_requests:
        return create_error_response(
            message=f"The lora adapter '{request.lora_name}' has already been "
            "loaded.",
            err_type="InvalidUserInput",
            status_code=HTTPStatus.BAD_REQUEST,
        )

    return None

async def _check_unload_lora_adapter_request(
    self, request: UnloadLoRAAdapterRequest
) -> ErrorResponse | None:
    """Validate an unload request.

    Returns an ErrorResponse describing the problem, or None when the
    request is acceptable.
    """
    # Check if 'lora_name' is not provided return an error
    if not request.lora_name:
        return create_error_response(
            message="'lora_name' needs to be provided to unload a LoRA adapter.",
            err_type="InvalidUserInput",
            status_code=HTTPStatus.BAD_REQUEST,
        )

    # Check if the lora adapter with the given name exists
    if request.lora_name not in self.lora_requests:
        return create_error_response(
            message=f"The lora adapter '{request.lora_name}' cannot be found.",
            err_type="NotFoundError",
            status_code=HTTPStatus.NOT_FOUND,
        )

    return None

async def resolve_lora(self, lora_name: str) -> LoRARequest | ErrorResponse:
    """Attempt to resolve a LoRA adapter using available resolvers.

    Args:
        lora_name: Name/identifier of the LoRA adapter

    Returns:
        LoRARequest if found and loaded successfully.
        ErrorResponse (404) if no resolver finds the adapter.
        ErrorResponse (400) if adapter(s) are found but none load.
    """
    async with self.lora_resolver_lock[lora_name]:
        # First check if this LoRA is already loaded
        if lora_name in self.lora_requests:
            return self.lora_requests[lora_name]

        base_model_name = self.model_config.model
        unique_id = self.lora_id_counter.inc(1)
        found_adapter = False

        # Try to resolve using available resolvers
        for resolver in self.lora_resolvers:
            lora_request = await resolver.resolve_lora(base_model_name, lora_name)
            if lora_request is not None:
                found_adapter = True
                lora_request.lora_int_id = unique_id
                try:
                    await self.engine_client.add_lora(lora_request)
                    self.lora_requests[lora_name] = lora_request
                    logger.info(
                        "Resolved and loaded LoRA adapter '%s' using %s",
                        lora_name,
                        resolver.__class__.__name__,
                    )
                    return lora_request
                # FIX: narrowed from `except BaseException`. Catching
                # BaseException also swallowed asyncio.CancelledError (and
                # KeyboardInterrupt), so a cancelled request would keep
                # spinning through the remaining resolvers instead of
                # propagating the cancellation.
                except Exception as e:
                    logger.warning(
                        "Failed to load LoRA '%s' resolved by %s: %s. "
                        "Trying next resolver.",
                        lora_name,
                        resolver.__class__.__name__,
                        e,
                    )
                    continue

        if found_adapter:
            # An adapter was found, but all attempts to load it failed.
            return create_error_response(
                message=(
                    f"LoRA adapter '{lora_name}' was found but could not be loaded."
                ),
                err_type="BadRequestError",
                status_code=HTTPStatus.BAD_REQUEST,
            )
        else:
            # No adapter was found
            return create_error_response(
                message=f"LoRA adapter {lora_name} does not exist",
                err_type="NotFoundError",
                status_code=HTTPStatus.NOT_FOUND,
            )

def create_error_response(
    message: str,
    err_type: str = "BadRequestError",
    status_code: HTTPStatus = HTTPStatus.BAD_REQUEST,
) -> ErrorResponse:
    """Build a uniform ErrorResponse envelope for the LoRA management API."""
    return ErrorResponse(
        error=ErrorInfo(message=message, type=err_type, code=status_code.value)
    )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/serving_engine.py
vllm/entrypoints/openai/serving_engine.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import json import sys import time import traceback from collections.abc import AsyncGenerator, Callable, Iterable, Mapping from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass, field from http import HTTPStatus from typing import Any, ClassVar, Generic, TypeAlias, TypeVar import numpy as np from fastapi import Request from openai.types.responses import ( ToolChoiceFunction, ) from pydantic import ConfigDict, TypeAdapter from starlette.datastructures import Headers import vllm.envs as envs from vllm.beam_search import BeamSearchSequence, create_sort_beams_key_function from vllm.engine.protocol import EngineClient from vllm.entrypoints.chat_utils import ( ChatCompletionMessageParam, ChatTemplateContentFormatOption, ConversationMessage, apply_hf_chat_template, apply_mistral_chat_template, parse_chat_messages_futures, resolve_chat_template_content_format, ) from vllm.entrypoints.context import ( ConversationContext, HarmonyContext, ParsableContext, StreamingHarmonyContext, ) from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.protocol import ( ChatCompletionNamedToolChoiceParam, ChatCompletionRequest, ChatCompletionResponse, CompletionRequest, CompletionResponse, DetokenizeRequest, ErrorInfo, ErrorResponse, FunctionCall, FunctionDefinition, ResponseInputOutputItem, ResponsesRequest, TokenizeChatRequest, TokenizeCompletionRequest, TokenizeResponse, TranscriptionRequest, TranscriptionResponse, TranslationRequest, VLLMValidationError, ) from vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.entrypoints.pooling.classify.protocol import ( ClassificationChatRequest, ClassificationCompletionRequest, ClassificationRequest, ClassificationResponse, ) from vllm.entrypoints.pooling.embed.protocol import ( EmbeddingChatRequest, EmbeddingCompletionRequest, EmbeddingRequest, 
EmbeddingResponse, ) from vllm.entrypoints.pooling.pooling.protocol import ( IOProcessorRequest, PoolingResponse, ) from vllm.entrypoints.pooling.score.protocol import ( RerankRequest, ScoreRequest, ScoreResponse, ) from vllm.entrypoints.renderer import BaseRenderer, CompletionRenderer, RenderConfig from vllm.entrypoints.responses_utils import ( construct_input_messages, ) from vllm.entrypoints.serve.disagg.protocol import GenerateRequest, GenerateResponse from vllm.entrypoints.utils import _validate_truncation_size from vllm.inputs.data import PromptType, TokensPrompt from vllm.inputs.parse import ( PromptComponents, get_prompt_components, is_explicit_encoder_decoder_prompt, ) from vllm.logger import init_logger from vllm.logprobs import Logprob, PromptLogprobs from vllm.lora.request import LoRARequest from vllm.multimodal import MultiModalDataDict from vllm.outputs import CompletionOutput, PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.reasoning import ReasoningParser, ReasoningParserManager from vllm.sampling_params import BeamSearchParams, SamplingParams from vllm.tokenizers import TokenizerLike from vllm.tokenizers.deepseek_v32 import DeepseekV32Tokenizer from vllm.tokenizers.mistral import MistralTokenizer from vllm.tool_parsers import ToolParser, ToolParserManager from vllm.tracing import ( contains_trace_headers, extract_trace_headers, log_tracing_disabled_warning, ) from vllm.utils import random_uuid from vllm.utils.async_utils import ( AsyncMicrobatchTokenizer, collect_from_async_generator, make_async, merge_async_iterators, ) from vllm.utils.collection_utils import is_list_of from vllm.v1.engine import EngineCoreRequest class GenerationError(Exception): """raised when finish_reason indicates internal server error (500)""" def __init__(self, message: str = "Internal server error"): super().__init__(message) self.status_code = HTTPStatus.INTERNAL_SERVER_ERROR logger = init_logger(__name__) CompletionLikeRequest: 
TypeAlias = ( CompletionRequest | DetokenizeRequest | EmbeddingCompletionRequest | RerankRequest | ClassificationCompletionRequest | ScoreRequest | TokenizeCompletionRequest ) ChatLikeRequest: TypeAlias = ( ChatCompletionRequest | EmbeddingChatRequest | TokenizeChatRequest | ClassificationChatRequest ) SpeechToTextRequest: TypeAlias = TranscriptionRequest | TranslationRequest AnyRequest: TypeAlias = ( CompletionLikeRequest | ChatLikeRequest | SpeechToTextRequest | ResponsesRequest | IOProcessorRequest | GenerateRequest ) AnyResponse: TypeAlias = ( CompletionResponse | ChatCompletionResponse | EmbeddingResponse | TranscriptionResponse | TokenizeResponse | PoolingResponse | ClassificationResponse | ScoreResponse | GenerateResponse ) RequestT = TypeVar("RequestT", bound=AnyRequest) @dataclass(kw_only=True) class RequestProcessingMixin: """ Mixin for request processing, handling prompt preparation and engine input. """ engine_prompts: list[TokensPrompt] | None = field(default_factory=list) @dataclass(kw_only=True) class ResponseGenerationMixin: """ Mixin for response generation, managing result generators and final batch results. 
""" result_generator: ( AsyncGenerator[tuple[int, RequestOutput | PoolingRequestOutput], None] | None ) = None final_res_batch: list[RequestOutput | PoolingRequestOutput] = field( default_factory=list ) model_config = ConfigDict(arbitrary_types_allowed=True) @dataclass(kw_only=True) class ServeContext(RequestProcessingMixin, ResponseGenerationMixin, Generic[RequestT]): # Shared across all requests request: RequestT raw_request: Request | None = None model_name: str request_id: str created_time: int = field(default_factory=lambda: int(time.time())) lora_request: LoRARequest | None = None # Shared across most requests tokenizer: TokenizerLike | None = None @dataclass(kw_only=True) class ClassificationServeContext(ServeContext[ClassificationRequest]): pass @dataclass(kw_only=True) class EmbeddingServeContext(ServeContext[EmbeddingRequest]): chat_template: str | None = None chat_template_content_format: ChatTemplateContentFormatOption class OpenAIServing: request_id_prefix: ClassVar[str] = """ A short string prepended to every request’s ID (e.g. 
"embd", "classify") so you can easily tell “this ID came from Embedding vs Classification.” """ def __init__( self, engine_client: EngineClient, models: OpenAIServingModels, *, request_logger: RequestLogger | None, return_tokens_as_token_ids: bool = False, log_error_stack: bool = False, ): super().__init__() self.engine_client = engine_client self.models = models self.request_logger = request_logger self.return_tokens_as_token_ids = return_tokens_as_token_ids self._tokenizer_executor = ThreadPoolExecutor(max_workers=1) self._apply_mistral_chat_template_async = make_async( apply_mistral_chat_template, executor=self._tokenizer_executor ) self._async_tokenizer_pool: dict[TokenizerLike, AsyncMicrobatchTokenizer] = {} self.log_error_stack = log_error_stack self.input_processor = self.models.input_processor self.io_processor = self.models.io_processor self.model_config = self.models.model_config self.max_model_len = self.model_config.max_model_len def _get_tool_parser( self, tool_parser_name: str | None = None, enable_auto_tools: bool = False ) -> Callable[[TokenizerLike], ToolParser] | None: """Get the tool parser based on the name.""" parser = None if not enable_auto_tools or tool_parser_name is None: return parser logger.info('"auto" tool choice has been enabled.') try: if tool_parser_name == "pythonic" and self.model_config.model.startswith( "meta-llama/Llama-3.2" ): logger.warning( "Llama3.2 models may struggle to emit valid pythonic tool calls" ) parser = ToolParserManager.get_tool_parser(tool_parser_name) except Exception as e: raise TypeError( "Error: --enable-auto-tool-choice requires " f"tool_parser:'{tool_parser_name}' which has not " "been registered" ) from e return parser def _get_reasoning_parser( self, reasoning_parser_name: str, ) -> Callable[[TokenizerLike], ReasoningParser] | None: """Get the reasoning parser based on the name.""" parser = None if not reasoning_parser_name: return None try: parser = 
ReasoningParserManager.get_reasoning_parser(reasoning_parser_name) assert parser is not None except Exception as e: raise TypeError(f"{reasoning_parser_name=} has not been registered") from e return parser async def reset_mm_cache(self) -> None: self.input_processor.clear_mm_cache() await self.engine_client.reset_mm_cache() async def beam_search( self, prompt: PromptType, request_id: str, params: BeamSearchParams, lora_request: LoRARequest | None = None, trace_headers: Mapping[str, str] | None = None, ) -> AsyncGenerator[RequestOutput, None]: beam_width = params.beam_width max_tokens = params.max_tokens ignore_eos = params.ignore_eos temperature = params.temperature length_penalty = params.length_penalty include_stop_str_in_output = params.include_stop_str_in_output input_processor = self.input_processor tokenizer = input_processor.tokenizer if tokenizer is None: raise VLLMValidationError( "You cannot use beam search when `skip_tokenizer_init=True`", parameter="skip_tokenizer_init", value=True, ) eos_token_id: int = tokenizer.eos_token_id # type: ignore if is_explicit_encoder_decoder_prompt(prompt): raise NotImplementedError prompt_text: str | None prompt_token_ids: list[int] multi_modal_data: MultiModalDataDict | None if isinstance(prompt, str): prompt_text = prompt prompt_token_ids = [] multi_modal_data = None else: prompt_text = prompt.get("prompt") # type: ignore prompt_token_ids = prompt.get("prompt_token_ids", []) # type: ignore multi_modal_data = prompt.get("multi_modal_data") # type: ignore mm_processor_kwargs: dict[str, Any] | None = None # This is a workaround to fix multimodal beam search; this is a # bandaid fix for 2 small problems: # 1. Multi_modal_data on the processed_inputs currently resolves to # `None`. # 2. preprocessing above expands the multimodal placeholders. However, # this happens again in generation, so the double expansion causes # a mismatch. # TODO - would be ideal to handle this more gracefully. 
tokenized_length = len(prompt_token_ids) sort_beams_key = create_sort_beams_key_function(eos_token_id, length_penalty) logprobs_num = 2 * beam_width beam_search_params = SamplingParams( logprobs=logprobs_num, max_tokens=1, temperature=temperature, ) all_beams = [ BeamSearchSequence( tokens=prompt_token_ids, cum_logprob=0, logprobs=[], multi_modal_data=multi_modal_data, mm_processor_kwargs=mm_processor_kwargs, lora_request=lora_request, ) ] completed = [] for _ in range(max_tokens): prompts_batch, lora_req_batch = zip( *[ ( TokensPrompt( prompt_token_ids=beam.tokens, multi_modal_data=beam.multi_modal_data, mm_processor_kwargs=beam.mm_processor_kwargs, ), beam.lora_request, ) for beam in all_beams ] ) tasks = [] request_id_batch = f"{request_id}-{random_uuid()}" for i, (individual_prompt, lora_req) in enumerate( zip(prompts_batch, lora_req_batch) ): request_id_item = f"{request_id_batch}-beam-{i}" task = asyncio.create_task( collect_from_async_generator( self.engine_client.generate( individual_prompt, beam_search_params, request_id_item, lora_request=lora_req, trace_headers=trace_headers, ) ) ) tasks.append(task) output = [x[0] for x in await asyncio.gather(*tasks)] new_beams = [] # Store all new tokens generated by beam all_beams_token_id = [] # Store the cumulative probability of all tokens # generated by beam search all_beams_logprob = [] # Iterate through all beam inference results for i, result in enumerate(output): current_beam = all_beams[i] # check for error finish reason and abort beam search if result.outputs[0].finish_reason == "error": # yield error output and terminate beam search yield RequestOutput( request_id=request_id, prompt=prompt_text, outputs=[ CompletionOutput( index=0, text="", token_ids=[], cumulative_logprob=None, logprobs=None, finish_reason="error", ) ], finished=True, prompt_token_ids=prompt_token_ids, prompt_logprobs=None, ) return if result.outputs[0].logprobs is not None: logprobs = result.outputs[0].logprobs[0] 
all_beams_token_id.extend(list(logprobs.keys())) all_beams_logprob.extend( [ current_beam.cum_logprob + obj.logprob for obj in logprobs.values() ] ) # Handle the token for the end of sentence (EOS) all_beams_token_id = np.array(all_beams_token_id) all_beams_logprob = np.array(all_beams_logprob) if not ignore_eos: # Get the index position of eos token in all generated results eos_idx = np.where(all_beams_token_id == eos_token_id)[0] for idx in eos_idx: current_beam = all_beams[idx // logprobs_num] result = output[idx // logprobs_num] assert result.outputs[0].logprobs is not None logprobs_entry = result.outputs[0].logprobs[0] completed.append( BeamSearchSequence( tokens=current_beam.tokens + [eos_token_id] if include_stop_str_in_output else current_beam.tokens, logprobs=current_beam.logprobs + [logprobs_entry], cum_logprob=float(all_beams_logprob[idx]), finish_reason="stop", stop_reason=eos_token_id, ) ) # After processing, set the log probability of the eos condition # to negative infinity. 
all_beams_logprob[eos_idx] = -np.inf # Processing non-EOS tokens # Get indices of the top beam_width probabilities topn_idx = np.argpartition(np.negative(all_beams_logprob), beam_width)[ :beam_width ] for idx in topn_idx: current_beam = all_beams[idx // logprobs_num] result = output[idx // logprobs_num] token_id = int(all_beams_token_id[idx]) assert result.outputs[0].logprobs is not None logprobs_entry = result.outputs[0].logprobs[0] new_beams.append( BeamSearchSequence( tokens=current_beam.tokens + [token_id], logprobs=current_beam.logprobs + [logprobs_entry], lora_request=current_beam.lora_request, cum_logprob=float(all_beams_logprob[idx]), multi_modal_data=current_beam.multi_modal_data, mm_processor_kwargs=current_beam.mm_processor_kwargs, ) ) all_beams = new_beams completed.extend(all_beams) sorted_completed = sorted(completed, key=sort_beams_key, reverse=True) best_beams = sorted_completed[:beam_width] for beam in best_beams: if beam.tokens[-1] == eos_token_id and not ignore_eos: # Skip the eos token in the text. tokens = beam.tokens[tokenized_length:-1] else: tokens = beam.tokens[tokenized_length:] beam.text = tokenizer.decode(tokens) yield RequestOutput( request_id=request_id, prompt=prompt_text, outputs=[ CompletionOutput( text=beam.text, # type: ignore cumulative_logprob=beam.cum_logprob, token_ids=beam.tokens[tokenized_length:], index=i, logprobs=beam.logprobs, finish_reason=beam.finish_reason if beam.finish_reason is not None else "length", stop_reason=beam.stop_reason, ) for (i, beam) in enumerate(best_beams) ], finished=True, prompt_token_ids=prompt_token_ids, prompt_logprobs=None, ) def _get_renderer(self, tokenizer: TokenizerLike | None) -> BaseRenderer: """ Get a Renderer instance with the provided tokenizer. Uses shared async tokenizer pool for efficiency. 
""" return CompletionRenderer( model_config=self.model_config, tokenizer=tokenizer, async_tokenizer_pool=self._async_tokenizer_pool, ) def _build_render_config( self, request: Any, ) -> RenderConfig: """ Build and return a `RenderConfig` for an endpoint. Used by the renderer to control how prompts are prepared (e.g., tokenization and length handling). Endpoints should implement this with logic appropriate to their request type. """ raise NotImplementedError def _get_async_tokenizer(self, tokenizer) -> AsyncMicrobatchTokenizer: """ Return (and cache) an `AsyncMicrobatchTokenizer` bound to the given tokenizer. """ async_tokenizer = self._async_tokenizer_pool.get(tokenizer) if async_tokenizer is None: async_tokenizer = AsyncMicrobatchTokenizer(tokenizer) self._async_tokenizer_pool[tokenizer] = async_tokenizer return async_tokenizer async def _preprocess( self, ctx: ServeContext, ) -> ErrorResponse | None: """ Default preprocessing hook. Subclasses may override to prepare `ctx` (classification, embedding, etc.). """ return None def _build_response( self, ctx: ServeContext, ) -> AnyResponse | ErrorResponse: """ Default response builder. Subclass may override this method to return the appropriate response object. 
""" return self.create_error_response("unimplemented endpoint") async def handle( self, ctx: ServeContext, ) -> AnyResponse | ErrorResponse: generation: AsyncGenerator[AnyResponse | ErrorResponse, None] generation = self._pipeline(ctx) async for response in generation: return response return self.create_error_response("No response yielded from pipeline") async def _pipeline( self, ctx: ServeContext, ) -> AsyncGenerator[AnyResponse | ErrorResponse, None]: """Execute the request processing pipeline yielding responses.""" if error := await self._check_model(ctx.request): yield error if error := self._validate_request(ctx): yield error preprocess_ret = await self._preprocess(ctx) if isinstance(preprocess_ret, ErrorResponse): yield preprocess_ret generators_ret = await self._prepare_generators(ctx) if isinstance(generators_ret, ErrorResponse): yield generators_ret collect_ret = await self._collect_batch(ctx) if isinstance(collect_ret, ErrorResponse): yield collect_ret yield self._build_response(ctx) def _validate_request(self, ctx: ServeContext) -> ErrorResponse | None: truncate_prompt_tokens = getattr(ctx.request, "truncate_prompt_tokens", None) if ( truncate_prompt_tokens is not None and truncate_prompt_tokens > self.max_model_len ): return self.create_error_response( "truncate_prompt_tokens value is " "greater than max_model_len." " Please, select a smaller truncation size." 
) return None def _create_pooling_params( self, ctx: ServeContext, ) -> PoolingParams | ErrorResponse: if not hasattr(ctx.request, "to_pooling_params"): return self.create_error_response( "Request type does not support pooling parameters" ) return ctx.request.to_pooling_params() async def _prepare_generators( self, ctx: ServeContext, ) -> ErrorResponse | None: """Schedule the request and get the result generator.""" generators: list[ AsyncGenerator[RequestOutput | PoolingRequestOutput, None] ] = [] try: trace_headers = ( None if ctx.raw_request is None else await self._get_trace_headers(ctx.raw_request.headers) ) pooling_params = self._create_pooling_params(ctx) if isinstance(pooling_params, ErrorResponse): return pooling_params if ctx.engine_prompts is None: return self.create_error_response("Engine prompts not available") for i, engine_prompt in enumerate(ctx.engine_prompts): request_id_item = f"{ctx.request_id}-{i}" self._log_inputs( request_id_item, engine_prompt, params=pooling_params, lora_request=ctx.lora_request, ) generator = self.engine_client.encode( engine_prompt, pooling_params, request_id_item, lora_request=ctx.lora_request, trace_headers=trace_headers, priority=getattr(ctx.request, "priority", 0), ) generators.append(generator) ctx.result_generator = merge_async_iterators(*generators) return None except Exception as e: return self.create_error_response(e) async def _collect_batch( self, ctx: ServeContext, ) -> ErrorResponse | None: """Collect batch results from the result generator.""" try: if ctx.engine_prompts is None: return self.create_error_response("Engine prompts not available") num_prompts = len(ctx.engine_prompts) final_res_batch: list[RequestOutput | PoolingRequestOutput | None] final_res_batch = [None] * num_prompts if ctx.result_generator is None: return self.create_error_response("Result generator not available") async for i, res in ctx.result_generator: final_res_batch[i] = res if None in final_res_batch: return 
self.create_error_response( "Failed to generate results for all prompts" ) ctx.final_res_batch = [res for res in final_res_batch if res is not None] return None except Exception as e: return self.create_error_response(e) def create_error_response( self, message: str | Exception, err_type: str = "BadRequestError", status_code: HTTPStatus = HTTPStatus.BAD_REQUEST, param: str | None = None, ) -> ErrorResponse: exc: Exception | None = None if isinstance(message, Exception): exc = message from vllm.entrypoints.openai.protocol import VLLMValidationError if isinstance(exc, VLLMValidationError): err_type = "BadRequestError" status_code = HTTPStatus.BAD_REQUEST param = exc.parameter elif isinstance(exc, (ValueError, TypeError, RuntimeError)): # Common validation errors from user input err_type = "BadRequestError" status_code = HTTPStatus.BAD_REQUEST param = None elif exc.__class__.__name__ == "TemplateError": # jinja2.TemplateError (avoid importing jinja2) err_type = "BadRequestError" status_code = HTTPStatus.BAD_REQUEST param = None else: err_type = "InternalServerError" status_code = HTTPStatus.INTERNAL_SERVER_ERROR param = None message = str(exc) if self.log_error_stack: exc_type, _, _ = sys.exc_info() if exc_type is not None: traceback.print_exc() else: traceback.print_stack() return ErrorResponse( error=ErrorInfo( message=message, type=err_type, code=status_code.value, param=param, ) ) def create_streaming_error_response( self, message: str | Exception, err_type: str = "BadRequestError", status_code: HTTPStatus = HTTPStatus.BAD_REQUEST, param: str | None = None, ) -> str: json_str = json.dumps( self.create_error_response( message=message, err_type=err_type, status_code=status_code, param=param, ).model_dump() ) return json_str def _raise_if_error(self, finish_reason: str | None, request_id: str) -> None: """Raise GenerationError if finish_reason indicates an error.""" if finish_reason == "error": logger.error( "Request %s failed with an internal error during 
generation", request_id, ) raise GenerationError("Internal server error") def _convert_generation_error_to_response( self, e: GenerationError ) -> ErrorResponse: """Convert GenerationError to ErrorResponse.""" return self.create_error_response( str(e), err_type="InternalServerError", status_code=e.status_code, ) def _convert_generation_error_to_streaming_response( self, e: GenerationError ) -> str: """Convert GenerationError to streaming error response.""" return self.create_streaming_error_response( str(e), err_type="InternalServerError", status_code=e.status_code, ) async def _check_model( self, request: AnyRequest, ) -> ErrorResponse | None: error_response = None if self._is_model_supported(request.model): return None if request.model in self.models.lora_requests: return None if ( envs.VLLM_ALLOW_RUNTIME_LORA_UPDATING and request.model and (load_result := await self.models.resolve_lora(request.model)) ): if isinstance(load_result, LoRARequest): return None if ( isinstance(load_result, ErrorResponse) and load_result.error.code == HTTPStatus.BAD_REQUEST.value ): error_response = load_result return error_response or self.create_error_response( message=f"The model `{request.model}` does not exist.", err_type="NotFoundError", status_code=HTTPStatus.NOT_FOUND, param="model", ) def _get_active_default_mm_loras(self, request: AnyRequest) -> LoRARequest | None: """Determine if there are any active default multimodal loras.""" # TODO: Currently this is only enabled for chat completions # to be better aligned with only being enabled for .generate # when run offline. It would be nice to support additional # tasks types in the future. 
message_types = self._get_message_types(request) default_mm_loras = set() for lora in self.models.lora_requests.values(): # Best effort match for default multimodal lora adapters; # There is probably a better way to do this, but currently # this matches against the set of 'types' in any content lists # up until '_', e.g., to match audio_url -> audio if lora.lora_name in message_types: default_mm_loras.add(lora) # Currently only support default modality specific loras if # we have exactly one lora matched on the request. if len(default_mm_loras) == 1: return default_mm_loras.pop() return None def _maybe_get_adapters( self, request: AnyRequest, supports_default_mm_loras: bool = False, ) -> LoRARequest | None: if request.model in self.models.lora_requests: return self.models.lora_requests[request.model] # Currently only support default modality specific loras # if we have exactly one lora matched on the request. if supports_default_mm_loras: default_mm_lora = self._get_active_default_mm_loras(request) if default_mm_lora is not None: return default_mm_lora if self._is_model_supported(request.model): return None # if _check_model has been called earlier, this will be unreachable raise ValueError(f"The model `{request.model}` does not exist.") def _get_message_types(self, request: AnyRequest) -> set[str]: """Retrieve the set of types from message content dicts up until `_`; we use this to match potential multimodal data with default per modality loras. """ message_types: set[str] = set() if not hasattr(request, "messages"): return message_types messages = request.messages if messages is None or isinstance(messages, (str, bytes)):
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/serving_transcription.py
vllm/entrypoints/openai/serving_transcription.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import AsyncGenerator

from fastapi import Request

from vllm.engine.protocol import EngineClient
from vllm.entrypoints.logger import RequestLogger
from vllm.entrypoints.openai.protocol import (
    ErrorResponse,
    RequestResponseMetadata,
    TranscriptionRequest,
    TranscriptionResponse,
    TranscriptionResponseStreamChoice,
    TranscriptionResponseVerbose,
    TranscriptionStreamResponse,
    TranslationRequest,
    TranslationResponse,
    TranslationResponseStreamChoice,
    TranslationResponseVerbose,
    TranslationStreamResponse,
)
from vllm.entrypoints.openai.serving_models import OpenAIServingModels
from vllm.entrypoints.openai.speech_to_text import OpenAISpeechToText
from vllm.logger import init_logger
from vllm.outputs import RequestOutput

logger = init_logger(__name__)


class OpenAIServingTranscription(OpenAISpeechToText):
    """Handles transcription requests."""

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        request_logger: RequestLogger | None,
        return_tokens_as_token_ids: bool = False,
        log_error_stack: bool = False,
        enable_force_include_usage: bool = False,
    ):
        # task_type selects the transcription path in the shared
        # speech-to-text base class.
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            return_tokens_as_token_ids=return_tokens_as_token_ids,
            task_type="transcribe",
            log_error_stack=log_error_stack,
            enable_force_include_usage=enable_force_include_usage,
        )

    async def create_transcription(
        self, audio_data: bytes, request: TranscriptionRequest, raw_request: Request
    ) -> (
        TranscriptionResponse
        | TranscriptionResponseVerbose
        | AsyncGenerator[str, None]
        | ErrorResponse
    ):
        """Transcription API similar to OpenAI's API.

        See https://platform.openai.com/docs/api-reference/audio/createTranscription
        for the API specification. This API mimics the OpenAI transcription API.
        """
        # Delegate to the shared speech-to-text pipeline; "verbose_json"
        # selects the verbose response model, anything else the plain one.
        return await self._create_speech_to_text(
            audio_data=audio_data,
            request=request,
            raw_request=raw_request,
            response_class=(
                TranscriptionResponseVerbose
                if request.response_format == "verbose_json"
                else TranscriptionResponse
            ),
            stream_generator_method=self.transcription_stream_generator,
        )

    async def transcription_stream_generator(
        self,
        request: TranscriptionRequest,
        result_generator: list[AsyncGenerator[RequestOutput, None]],
        request_id: str,
        request_metadata: RequestResponseMetadata,
        audio_duration_s: float,
    ) -> AsyncGenerator[str, None]:
        """Yield SSE chunks for a streaming transcription response."""
        # Bind the transcription-specific chunk/choice/response types onto
        # the generic streaming generator in the base class.
        generator = self._speech_to_text_stream_generator(
            request=request,
            list_result_generator=result_generator,
            request_id=request_id,
            request_metadata=request_metadata,
            audio_duration_s=audio_duration_s,
            chunk_object_type="transcription.chunk",
            response_stream_choice_class=TranscriptionResponseStreamChoice,
            stream_response_class=TranscriptionStreamResponse,
        )
        async for chunk in generator:
            yield chunk


class OpenAIServingTranslation(OpenAISpeechToText):
    """Handles translation requests."""

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        request_logger: RequestLogger | None,
        return_tokens_as_token_ids: bool = False,
        log_error_stack: bool = False,
        enable_force_include_usage: bool = False,
    ):
        # task_type selects the translation path in the shared
        # speech-to-text base class.
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            return_tokens_as_token_ids=return_tokens_as_token_ids,
            task_type="translate",
            log_error_stack=log_error_stack,
            enable_force_include_usage=enable_force_include_usage,
        )

    async def create_translation(
        self, audio_data: bytes, request: TranslationRequest, raw_request: Request
    ) -> (
        TranslationResponse
        | TranslationResponseVerbose
        | AsyncGenerator[str, None]
        | ErrorResponse
    ):
        """Translation API similar to OpenAI's API.

        See https://platform.openai.com/docs/api-reference/audio/createTranslation
        for the API specification. This API mimics the OpenAI translation API.
        """
        # Delegate to the shared speech-to-text pipeline; "verbose_json"
        # selects the verbose response model, anything else the plain one.
        return await self._create_speech_to_text(
            audio_data=audio_data,
            request=request,
            raw_request=raw_request,
            response_class=(
                TranslationResponseVerbose
                if request.response_format == "verbose_json"
                else TranslationResponse
            ),
            stream_generator_method=self.translation_stream_generator,
        )

    async def translation_stream_generator(
        self,
        request: TranslationRequest,
        result_generator: list[AsyncGenerator[RequestOutput, None]],
        request_id: str,
        request_metadata: RequestResponseMetadata,
        audio_duration_s: float,
    ) -> AsyncGenerator[str, None]:
        """Yield SSE chunks for a streaming translation response."""
        # Bind the translation-specific chunk/choice/response types onto
        # the generic streaming generator in the base class.
        generator = self._speech_to_text_stream_generator(
            request=request,
            list_result_generator=result_generator,
            request_id=request_id,
            request_metadata=request_metadata,
            audio_duration_s=audio_duration_s,
            chunk_object_type="translation.chunk",
            response_stream_choice_class=TranslationResponseStreamChoice,
            stream_response_class=TranslationStreamResponse,
        )
        async for chunk in generator:
            yield chunk
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/utils.py
vllm/entrypoints/openai/utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from typing import TypeVar from fastapi import Request from fastapi.exceptions import RequestValidationError from vllm.entrypoints.openai.protocol import ( ChatCompletionRequest, ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice, ) # Used internally _ChatCompletionResponseChoiceT = TypeVar( "_ChatCompletionResponseChoiceT", ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice, ) def maybe_filter_parallel_tool_calls( choice: _ChatCompletionResponseChoiceT, request: ChatCompletionRequest ) -> _ChatCompletionResponseChoiceT: """Filter to first tool call only when parallel_tool_calls is False.""" if request.parallel_tool_calls: return choice if isinstance(choice, ChatCompletionResponseChoice) and choice.message.tool_calls: choice.message.tool_calls = choice.message.tool_calls[:1] elif ( isinstance(choice, ChatCompletionResponseStreamChoice) and choice.delta.tool_calls ): choice.delta.tool_calls = [ tool_call for tool_call in choice.delta.tool_calls if tool_call.index == 0 ] return choice async def validate_json_request(raw_request: Request): content_type = raw_request.headers.get("content-type", "").lower() media_type = content_type.split(";", maxsplit=1)[0] if media_type != "application/json": raise RequestValidationError( errors=["Unsupported Media Type: Only 'application/json' is allowed"] )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/__init__.py
vllm/entrypoints/openai/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/protocol.py
vllm/entrypoints/openai/protocol.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # Adapted from # https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py import json import time from http import HTTPStatus from typing import Annotated, Any, ClassVar, Literal, TypeAlias import regex as re import torch from fastapi import HTTPException, UploadFile from openai.types.chat.chat_completion_audio import ( ChatCompletionAudio as OpenAIChatCompletionAudio, ) from openai.types.chat.chat_completion_message import Annotation as OpenAIAnnotation from openai.types.responses import ( ResponseCodeInterpreterCallCodeDeltaEvent, ResponseCodeInterpreterCallCodeDoneEvent, ResponseCodeInterpreterCallCompletedEvent, ResponseCodeInterpreterCallInProgressEvent, ResponseCodeInterpreterCallInterpretingEvent, ResponseContentPartAddedEvent, ResponseContentPartDoneEvent, ResponseFunctionToolCall, ResponseInputItemParam, ResponseMcpCallArgumentsDeltaEvent, ResponseMcpCallArgumentsDoneEvent, ResponseMcpCallCompletedEvent, ResponseMcpCallInProgressEvent, ResponseOutputItem, ResponseOutputItemAddedEvent, ResponseOutputItemDoneEvent, ResponsePrompt, ResponseReasoningTextDeltaEvent, ResponseReasoningTextDoneEvent, ResponseStatus, ResponseWebSearchCallCompletedEvent, ResponseWebSearchCallInProgressEvent, ResponseWebSearchCallSearchingEvent, ) from openai.types.responses import ( ResponseCompletedEvent as OpenAIResponseCompletedEvent, ) from openai.types.responses import ResponseCreatedEvent as OpenAIResponseCreatedEvent from openai.types.responses import ( ResponseInProgressEvent as OpenAIResponseInProgressEvent, ) from openai.types.responses.response_reasoning_item import ( Content as ResponseReasoningTextContent, ) from openai_harmony import Message as OpenAIHarmonyMessage # Backward compatibility for OpenAI client versions try: # For older openai versions (< 1.100.0) from openai.types.responses import 
ResponseTextConfig except ImportError: # For newer openai versions (>= 1.100.0) from openai.types.responses import ResponseFormatTextConfig as ResponseTextConfig from openai.types.responses.response import IncompleteDetails, ToolChoice from openai.types.responses.tool import Tool from openai.types.shared import Metadata, Reasoning from pydantic import ( BaseModel, ConfigDict, Field, ValidationError, field_serializer, model_validator, ) from vllm.entrypoints.chat_utils import ChatCompletionMessageParam, make_tool_call_id from vllm.logger import init_logger from vllm.logprobs import Logprob from vllm.sampling_params import ( BeamSearchParams, RequestOutputKind, SamplingParams, StructuredOutputsParams, ) from vllm.utils import random_uuid from vllm.utils.import_utils import resolve_obj_by_qualname logger = init_logger(__name__) _LONG_INFO = torch.iinfo(torch.long) class OpenAIBaseModel(BaseModel): # OpenAI API does allow extra fields model_config = ConfigDict(extra="allow") # Cache class field names field_names: ClassVar[set[str] | None] = None @model_validator(mode="wrap") @classmethod def __log_extra_fields__(cls, data, handler): result = handler(data) if not isinstance(data, dict): return result field_names = cls.field_names if field_names is None: # Get all class field names and their potential aliases field_names = set() for field_name, field in cls.model_fields.items(): field_names.add(field_name) if alias := getattr(field, "alias", None): field_names.add(alias) cls.field_names = field_names # Compare against both field names and aliases if any(k not in field_names for k in data): logger.warning( "The following fields were present in the request but ignored: %s", data.keys() - field_names, ) return result class ErrorInfo(OpenAIBaseModel): message: str type: str param: str | None = None code: int class ErrorResponse(OpenAIBaseModel): error: ErrorInfo class VLLMValidationError(ValueError): """vLLM-specific validation error for request validation failures. 
Args: message: The error message describing the validation failure. parameter: Optional parameter name that failed validation. value: Optional value that was rejected during validation. """ def __init__( self, message: str, *, parameter: str | None = None, value: Any = None, ) -> None: super().__init__(message) self.parameter = parameter self.value = value def __str__(self): base = super().__str__() extras = [] if self.parameter is not None: extras.append(f"parameter={self.parameter}") if self.value is not None: extras.append(f"value={self.value}") return f"{base} ({', '.join(extras)})" if extras else base class ModelPermission(OpenAIBaseModel): id: str = Field(default_factory=lambda: f"modelperm-{random_uuid()}") object: str = "model_permission" created: int = Field(default_factory=lambda: int(time.time())) allow_create_engine: bool = False allow_sampling: bool = True allow_logprobs: bool = True allow_search_indices: bool = False allow_view: bool = True allow_fine_tuning: bool = False organization: str = "*" group: str | None = None is_blocking: bool = False class ModelCard(OpenAIBaseModel): id: str object: str = "model" created: int = Field(default_factory=lambda: int(time.time())) owned_by: str = "vllm" root: str | None = None parent: str | None = None max_model_len: int | None = None permission: list[ModelPermission] = Field(default_factory=list) class ModelList(OpenAIBaseModel): object: str = "list" data: list[ModelCard] = Field(default_factory=list) class PromptTokenUsageInfo(OpenAIBaseModel): cached_tokens: int | None = None class UsageInfo(OpenAIBaseModel): prompt_tokens: int = 0 total_tokens: int = 0 completion_tokens: int | None = 0 prompt_tokens_details: PromptTokenUsageInfo | None = None class RequestResponseMetadata(BaseModel): request_id: str final_usage_info: UsageInfo | None = None class JsonSchemaResponseFormat(OpenAIBaseModel): name: str description: str | None = None # schema is the field in openai but that causes conflicts with pydantic so # 
instead use json_schema with an alias json_schema: dict[str, Any] | None = Field(default=None, alias="schema") strict: bool | None = None class LegacyStructuralTag(OpenAIBaseModel): begin: str # schema is the field, but that causes conflicts with pydantic so # instead use structural_tag_schema with an alias structural_tag_schema: dict[str, Any] | None = Field(default=None, alias="schema") end: str class LegacyStructuralTagResponseFormat(OpenAIBaseModel): type: Literal["structural_tag"] structures: list[LegacyStructuralTag] triggers: list[str] class StructuralTagResponseFormat(OpenAIBaseModel): type: Literal["structural_tag"] format: Any AnyStructuralTagResponseFormat: TypeAlias = ( LegacyStructuralTagResponseFormat | StructuralTagResponseFormat ) class ResponseFormat(OpenAIBaseModel): # type must be "json_schema", "json_object", or "text" type: Literal["text", "json_object", "json_schema"] json_schema: JsonSchemaResponseFormat | None = None AnyResponseFormat: TypeAlias = ( ResponseFormat | StructuralTagResponseFormat | LegacyStructuralTagResponseFormat ) class StreamOptions(OpenAIBaseModel): include_usage: bool | None = True continuous_usage_stats: bool | None = False class FunctionDefinition(OpenAIBaseModel): name: str description: str | None = None parameters: dict[str, Any] | None = None class ChatCompletionToolsParam(OpenAIBaseModel): type: Literal["function"] = "function" function: FunctionDefinition class ChatCompletionNamedFunction(OpenAIBaseModel): name: str class ChatCompletionNamedToolChoiceParam(OpenAIBaseModel): function: ChatCompletionNamedFunction type: Literal["function"] = "function" # extra="forbid" is a workaround to have kwargs as a field, # see https://github.com/pydantic/pydantic/issues/3125 class LogitsProcessorConstructor(BaseModel): qualname: str args: list[Any] | None = None kwargs: dict[str, Any] | None = None model_config = ConfigDict(extra="forbid") LogitsProcessors = list[str | LogitsProcessorConstructor] def get_logits_processors( 
processors: LogitsProcessors | None, pattern: str | None ) -> list[Any] | None: if processors and pattern: logits_processors = [] for processor in processors: qualname = processor if isinstance(processor, str) else processor.qualname if not re.match(pattern, qualname): raise ValueError( f"Logits processor '{qualname}' is not allowed by this " "server. See --logits-processor-pattern engine argument " "for more information." ) try: logits_processor = resolve_obj_by_qualname(qualname) except Exception as e: raise ValueError( f"Logits processor '{qualname}' could not be resolved: {e}" ) from e if isinstance(processor, LogitsProcessorConstructor): logits_processor = logits_processor( *processor.args or [], **processor.kwargs or {} ) logits_processors.append(logits_processor) return logits_processors elif processors: raise ValueError( "The `logits_processors` argument is not supported by this " "server. See --logits-processor-pattern engine argument " "for more information." ) return None ResponseInputOutputItem: TypeAlias = ResponseInputItemParam | ResponseOutputItem class ResponsesRequest(OpenAIBaseModel): # Ordered by official OpenAI API documentation # https://platform.openai.com/docs/api-reference/responses/create background: bool | None = False include: ( list[ Literal[ "code_interpreter_call.outputs", "computer_call_output.output.image_url", "file_search_call.results", "message.input_image.image_url", "message.output_text.logprobs", "reasoning.encrypted_content", ], ] | None ) = None input: str | list[ResponseInputOutputItem] instructions: str | None = None max_output_tokens: int | None = None max_tool_calls: int | None = None metadata: Metadata | None = None model: str | None = None logit_bias: dict[str, float] | None = None parallel_tool_calls: bool | None = True previous_response_id: str | None = None prompt: ResponsePrompt | None = None reasoning: Reasoning | None = None service_tier: Literal["auto", "default", "flex", "scale", "priority"] = "auto" store: bool 
| None = True stream: bool | None = False temperature: float | None = None text: ResponseTextConfig | None = None tool_choice: ToolChoice = "auto" tools: list[Tool] = Field(default_factory=list) top_logprobs: int | None = 0 top_p: float | None = None top_k: int | None = None truncation: Literal["auto", "disabled"] | None = "disabled" user: str | None = None # --8<-- [start:responses-extra-params] request_id: str = Field( default_factory=lambda: f"resp_{random_uuid()}", description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " "through out the inference process and return in response." ), ) mm_processor_kwargs: dict[str, Any] | None = Field( default=None, description=("Additional kwargs to pass to the HF processor."), ) priority: int = Field( default=0, description=( "The priority of the request (lower means earlier handling; " "default: 0). Any priority other than 0 will raise an error " "if the served model does not use priority scheduling." ), ) cache_salt: str | None = Field( default=None, description=( "If specified, the prefix cache will be salted with the provided " "string to prevent an attacker to guess prompts in multi-user " "environments. The salt should be random, protected from " "access by 3rd parties, and long enough to be " "unpredictable (e.g., 43 characters base64-encoded, corresponding " "to 256 bit)." ), ) enable_response_messages: bool = Field( default=False, description=( "Dictates whether or not to return messages as part of the " "response object. Currently only supported for" "non-background and gpt-oss only. 
" ), ) # similar to input_messages / output_messages in ResponsesResponse # we take in previous_input_messages (ie in harmony format) # this cannot be used in conjunction with previous_response_id # TODO: consider supporting non harmony messages as well previous_input_messages: list[OpenAIHarmonyMessage | dict] | None = None # --8<-- [end:responses-extra-params] _DEFAULT_SAMPLING_PARAMS = { "temperature": 1.0, "top_p": 1.0, "top_k": 0, } def to_sampling_params( self, default_max_tokens: int, default_sampling_params: dict | None = None, ) -> SamplingParams: if self.max_output_tokens is None: max_tokens = default_max_tokens else: max_tokens = min(self.max_output_tokens, default_max_tokens) default_sampling_params = default_sampling_params or {} if (temperature := self.temperature) is None: temperature = default_sampling_params.get( "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"] ) if (top_p := self.top_p) is None: top_p = default_sampling_params.get( "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"] ) if (top_k := self.top_k) is None: top_k = default_sampling_params.get( "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"] ) stop_token_ids = default_sampling_params.get("stop_token_ids") # Structured output structured_outputs = None if self.text is not None and self.text.format is not None: response_format = self.text.format if ( response_format.type == "json_schema" and response_format.schema_ is not None ): structured_outputs = StructuredOutputsParams( json=response_format.schema_ ) elif response_format.type == "json_object": raise NotImplementedError("json_object is not supported") # TODO: add more parameters return SamplingParams.from_optional( temperature=temperature, top_p=top_p, top_k=top_k, max_tokens=max_tokens, logprobs=self.top_logprobs if self.is_include_output_logprobs() else None, stop_token_ids=stop_token_ids, output_kind=( RequestOutputKind.DELTA if self.stream else RequestOutputKind.FINAL_ONLY ), structured_outputs=structured_outputs, 
logit_bias=self.logit_bias, skip_clone=True, # Created fresh per request, safe to skip clone ) def is_include_output_logprobs(self) -> bool: """Check if the request includes output logprobs.""" if self.include is None: return False return ( isinstance(self.include, list) and "message.output_text.logprobs" in self.include ) @model_validator(mode="before") def validate_background(cls, data): if not data.get("background"): return data if not data.get("store", True): raise ValueError("background can only be used when `store` is true") return data @model_validator(mode="before") def validate_prompt(cls, data): if data.get("prompt") is not None: raise VLLMValidationError( "prompt template is not supported", parameter="prompt" ) return data @model_validator(mode="before") def check_cache_salt_support(cls, data): if data.get("cache_salt") is not None and ( not isinstance(data["cache_salt"], str) or not data["cache_salt"] ): raise ValueError( "Parameter 'cache_salt' must be a non-empty string if provided." ) return data @model_validator(mode="before") def function_call_parsing(cls, data): """Parse function_call dictionaries into ResponseFunctionToolCall objects. This ensures Pydantic can properly resolve union types in the input field. Function calls provided as dicts are converted to ResponseFunctionToolCall objects before validation, while invalid structures are left for Pydantic to reject with appropriate error messages. 
""" input_data = data.get("input") # Early return for None, strings, or bytes # (strings are iterable but shouldn't be processed) if input_data is None or isinstance(input_data, (str, bytes)): return data # Convert iterators (like ValidatorIterator) to list if not isinstance(input_data, list): try: input_data = list(input_data) except TypeError: # Not iterable, leave as-is for Pydantic to handle return data processed_input = [] for item in input_data: if isinstance(item, dict) and item.get("type") == "function_call": try: processed_input.append(ResponseFunctionToolCall(**item)) except ValidationError: # Let Pydantic handle validation for malformed function calls logger.debug( "Failed to parse function_call to ResponseFunctionToolCall, " "leaving for Pydantic validation" ) processed_input.append(item) else: processed_input.append(item) data["input"] = processed_input return data class ChatCompletionRequest(OpenAIBaseModel): # Ordered by official OpenAI API documentation # https://platform.openai.com/docs/api-reference/chat/create messages: list[ChatCompletionMessageParam] model: str | None = None frequency_penalty: float | None = 0.0 logit_bias: dict[str, float] | None = None logprobs: bool | None = False top_logprobs: int | None = 0 max_tokens: int | None = Field( default=None, deprecated="max_tokens is deprecated in favor of " "the max_completion_tokens field", ) max_completion_tokens: int | None = None n: int | None = 1 presence_penalty: float | None = 0.0 response_format: AnyResponseFormat | None = None seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max) stop: str | list[str] | None = [] stream: bool | None = False stream_options: StreamOptions | None = None temperature: float | None = None top_p: float | None = None tools: list[ChatCompletionToolsParam] | None = None tool_choice: ( Literal["none"] | Literal["auto"] | Literal["required"] | ChatCompletionNamedToolChoiceParam | None ) = "none" reasoning_effort: Literal["low", "medium", "high"] | 
None = None include_reasoning: bool = True parallel_tool_calls: bool | None = True # NOTE this will be ignored by vLLM user: str | None = None # --8<-- [start:chat-completion-sampling-params] use_beam_search: bool = False top_k: int | None = None min_p: float | None = None repetition_penalty: float | None = None length_penalty: float = 1.0 stop_token_ids: list[int] | None = [] include_stop_str_in_output: bool = False ignore_eos: bool = False min_tokens: int = 0 skip_special_tokens: bool = True spaces_between_special_tokens: bool = True truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None prompt_logprobs: int | None = None allowed_token_ids: list[int] | None = None bad_words: list[str] = Field(default_factory=list) # --8<-- [end:chat-completion-sampling-params] # --8<-- [start:chat-completion-extra-params] echo: bool = Field( default=False, description=( "If true, the new message will be prepended with the last message " "if they belong to the same role." ), ) add_generation_prompt: bool = Field( default=True, description=( "If true, the generation prompt will be added to the chat template. " "This is a parameter used by chat template in tokenizer config of the " "model." ), ) continue_final_message: bool = Field( default=False, description=( "If this is set, the chat will be formatted so that the final " "message in the chat is open-ended, without any EOS tokens. The " "model will continue this message rather than starting a new one. " 'This allows you to "prefill" part of the model\'s response for it. ' "Cannot be used at the same time as `add_generation_prompt`." ), ) add_special_tokens: bool = Field( default=False, description=( "If true, special tokens (e.g. BOS) will be added to the prompt " "on top of what is added by the chat template. " "For most models, the chat template takes care of adding the " "special tokens so this should be set to false (as is the " "default)." 
), ) documents: list[dict[str, str]] | None = Field( default=None, description=( "A list of dicts representing documents that will be accessible to " "the model if it is performing RAG (retrieval-augmented generation)." " If the template does not support RAG, this argument will have no " "effect. We recommend that each document should be a dict containing " '"title" and "text" keys.' ), ) chat_template: str | None = Field( default=None, description=( "A Jinja template to use for this conversion. " "As of transformers v4.44, default chat template is no longer " "allowed, so you must provide a chat template if the tokenizer " "does not define one." ), ) chat_template_kwargs: dict[str, Any] | None = Field( default=None, description=( "Additional keyword args to pass to the template renderer. " "Will be accessible by the chat template." ), ) mm_processor_kwargs: dict[str, Any] | None = Field( default=None, description=("Additional kwargs to pass to the HF processor."), ) structured_outputs: StructuredOutputsParams | None = Field( default=None, description="Additional kwargs for structured outputs", ) priority: int = Field( default=0, description=( "The priority of the request (lower means earlier handling; " "default: 0). Any priority other than 0 will raise an error " "if the served model does not use priority scheduling." ), ) request_id: str = Field( default_factory=random_uuid, description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " "through out the inference process and return in response." ), ) logits_processors: LogitsProcessors | None = Field( default=None, description=( "A list of either qualified names of logits processors, or " "constructor objects, to apply when sampling. 
A constructor is " "a JSON object with a required 'qualname' field specifying the " "qualified name of the processor class/factory, and optional " "'args' and 'kwargs' fields containing positional and keyword " "arguments. For example: {'qualname': " "'my_module.MyLogitsProcessor', 'args': [1, 2], 'kwargs': " "{'param': 'value'}}." ), ) return_tokens_as_token_ids: bool | None = Field( default=None, description=( "If specified with 'logprobs', tokens are represented " " as strings of the form 'token_id:{token_id}' so that tokens " "that are not JSON-encodable can be identified." ), ) return_token_ids: bool | None = Field( default=None, description=( "If specified, the result will include token IDs alongside the " "generated text. In streaming mode, prompt_token_ids is included " "only in the first chunk, and token_ids contains the delta tokens " "for each chunk. This is useful for debugging or when you " "need to map generated text back to input tokens." ), ) cache_salt: str | None = Field( default=None, description=( "If specified, the prefix cache will be salted with the provided " "string to prevent an attacker to guess prompts in multi-user " "environments. The salt should be random, protected from " "access by 3rd parties, and long enough to be " "unpredictable (e.g., 43 characters base64-encoded, corresponding " "to 256 bit)." ), ) kv_transfer_params: dict[str, Any] | None = Field( default=None, description="KVTransfer parameters used for disaggregated serving.", ) vllm_xargs: dict[str, str | int | float | list[str | int | float]] | None = Field( default=None, description=( "Additional request parameters with (list of) string or " "numeric values, used by custom extensions." 
), ) # --8<-- [end:chat-completion-extra-params] # Default sampling parameters for chat completion requests _DEFAULT_SAMPLING_PARAMS: dict = { "repetition_penalty": 1.0, "temperature": 1.0, "top_p": 1.0, "top_k": 0, "min_p": 0.0, } def to_beam_search_params( self, max_tokens: int, default_sampling_params: dict ) -> BeamSearchParams: n = self.n if self.n is not None else 1 if (temperature := self.temperature) is None: temperature = default_sampling_params.get( "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"] ) return BeamSearchParams( beam_width=n, max_tokens=max_tokens, ignore_eos=self.ignore_eos, temperature=temperature, length_penalty=self.length_penalty, include_stop_str_in_output=self.include_stop_str_in_output, ) def to_sampling_params( self, max_tokens: int, logits_processor_pattern: str | None, default_sampling_params: dict, ) -> SamplingParams: # Default parameters if (repetition_penalty := self.repetition_penalty) is None: repetition_penalty = default_sampling_params.get( "repetition_penalty", self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"], ) if (temperature := self.temperature) is None: temperature = default_sampling_params.get( "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"] ) if (top_p := self.top_p) is None: top_p = default_sampling_params.get( "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"] ) if (top_k := self.top_k) is None: top_k = default_sampling_params.get( "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"] ) if (min_p := self.min_p) is None: min_p = default_sampling_params.get( "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"] ) prompt_logprobs = self.prompt_logprobs if prompt_logprobs is None and self.echo: prompt_logprobs = self.top_logprobs response_format = self.response_format if response_format is not None: # If structured outputs wasn't already enabled, # we must enable it for these features to work if self.structured_outputs is None: self.structured_outputs = StructuredOutputsParams() # Set structured output params 
for response format if response_format.type == "json_object": self.structured_outputs.json_object = True elif response_format.type == "json_schema": json_schema = response_format.json_schema assert json_schema is not None self.structured_outputs.json = json_schema.json_schema elif response_format.type == "structural_tag": structural_tag = response_format assert structural_tag is not None and isinstance( structural_tag, ( LegacyStructuralTagResponseFormat, StructuralTagResponseFormat, ), ) s_tag_obj = structural_tag.model_dump(by_alias=True) self.structured_outputs.structural_tag = json.dumps(s_tag_obj) extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {} if self.kv_transfer_params: # Pass in kv_transfer_params via extra_args extra_args["kv_transfer_params"] = self.kv_transfer_params return SamplingParams.from_optional( n=self.n, presence_penalty=self.presence_penalty, frequency_penalty=self.frequency_penalty, repetition_penalty=repetition_penalty, temperature=temperature, top_p=top_p, top_k=top_k, min_p=min_p, seed=self.seed, stop=self.stop, stop_token_ids=self.stop_token_ids, logprobs=self.top_logprobs if self.logprobs else None, prompt_logprobs=prompt_logprobs, ignore_eos=self.ignore_eos, max_tokens=max_tokens, min_tokens=self.min_tokens, skip_special_tokens=self.skip_special_tokens, spaces_between_special_tokens=self.spaces_between_special_tokens, logits_processors=get_logits_processors( self.logits_processors, logits_processor_pattern ), include_stop_str_in_output=self.include_stop_str_in_output, truncate_prompt_tokens=self.truncate_prompt_tokens, output_kind=RequestOutputKind.DELTA if self.stream else RequestOutputKind.FINAL_ONLY, structured_outputs=self.structured_outputs, logit_bias=self.logit_bias, bad_words=self.bad_words, allowed_token_ids=self.allowed_token_ids, extra_args=extra_args or None, skip_clone=True, # Created fresh per request, safe to skip clone ) @model_validator(mode="before") @classmethod def validate_stream_options(cls, 
data): if data.get("stream_options") and not data.get("stream"): raise VLLMValidationError( "Stream options can only be defined when `stream=True`.",
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/run_batch.py
vllm/entrypoints/openai/run_batch.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import tempfile from argparse import Namespace from collections.abc import Awaitable, Callable from http import HTTPStatus from io import StringIO from typing import Any, TypeAlias import aiohttp import torch from prometheus_client import start_http_server from pydantic import TypeAdapter, field_validator from pydantic_core.core_schema import ValidationInfo from tqdm import tqdm from vllm.engine.arg_utils import AsyncEngineArgs, optional_type from vllm.engine.protocol import EngineClient from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.protocol import ( ChatCompletionRequest, ChatCompletionResponse, ErrorResponse, OpenAIBaseModel, ) from vllm.entrypoints.openai.serving_chat import OpenAIServingChat from vllm.entrypoints.openai.serving_models import BaseModelPath, OpenAIServingModels from vllm.entrypoints.pooling.embed.protocol import EmbeddingRequest, EmbeddingResponse from vllm.entrypoints.pooling.embed.serving import OpenAIServingEmbedding from vllm.entrypoints.pooling.score.protocol import ( RerankRequest, RerankResponse, ScoreRequest, ScoreResponse, ) from vllm.entrypoints.pooling.score.serving import ServingScores from vllm.logger import init_logger from vllm.reasoning import ReasoningParserManager from vllm.utils import random_uuid from vllm.utils.argparse_utils import FlexibleArgumentParser from vllm.version import __version__ as VLLM_VERSION logger = init_logger(__name__) BatchRequestInputBody: TypeAlias = ( ChatCompletionRequest | EmbeddingRequest | ScoreRequest | RerankRequest ) class BatchRequestInput(OpenAIBaseModel): """ The per-line object of the batch input file. NOTE: Currently only the `/v1/chat/completions` endpoint is supported. """ # A developer-provided per-request id that will be used to match outputs to # inputs. Must be unique for each request in a batch. 
custom_id: str # The HTTP method to be used for the request. Currently only POST is # supported. method: str # The OpenAI API relative URL to be used for the request. Currently # /v1/chat/completions is supported. url: str # The parameters of the request. body: BatchRequestInputBody @field_validator("body", mode="plain") @classmethod def check_type_for_url(cls, value: Any, info: ValidationInfo): # Use url to disambiguate models url: str = info.data["url"] if url == "/v1/chat/completions": return ChatCompletionRequest.model_validate(value) if url == "/v1/embeddings": return TypeAdapter(EmbeddingRequest).validate_python(value) if url.endswith("/score"): return ScoreRequest.model_validate(value) if url.endswith("/rerank"): return RerankRequest.model_validate(value) return TypeAdapter(BatchRequestInputBody).validate_python(value) class BatchResponseData(OpenAIBaseModel): # HTTP status code of the response. status_code: int = 200 # An unique identifier for the API request. request_id: str # The body of the response. body: ( ChatCompletionResponse | EmbeddingResponse | ScoreResponse | RerankResponse | None ) = None class BatchRequestOutput(OpenAIBaseModel): """ The per-line object of the batch output and error files """ id: str # A developer-provided per-request id that will be used to match outputs to # inputs. custom_id: str response: BatchResponseData | None # For requests that failed with a non-HTTP error, this will contain more # information on the cause of the failure. error: Any | None def make_arg_parser(parser: FlexibleArgumentParser): parser.add_argument( "-i", "--input-file", required=True, type=str, help="The path or url to a single input file. Currently supports local file " "paths, or the http protocol (http or https). If a URL is specified, " "the file should be available via HTTP GET.", ) parser.add_argument( "-o", "--output-file", required=True, type=str, help="The path or url to a single output file. 
Currently supports " "local file paths, or web (http or https) urls. If a URL is specified," " the file should be available via HTTP PUT.", ) parser.add_argument( "--output-tmp-dir", type=str, default=None, help="The directory to store the output file before uploading it " "to the output URL.", ) parser.add_argument( "--response-role", type=optional_type(str), default="assistant", help="The role name to return if `request.add_generation_prompt=True`.", ) parser = AsyncEngineArgs.add_cli_args(parser) parser.add_argument( "--max-log-len", type=int, default=None, help="Max number of prompt characters or prompt " "ID numbers being printed in log." "\n\nDefault: Unlimited", ) parser.add_argument( "--enable-metrics", action="store_true", help="Enable Prometheus metrics" ) parser.add_argument( "--url", type=str, default="0.0.0.0", help="URL to the Prometheus metrics server " "(only needed if enable-metrics is set).", ) parser.add_argument( "--port", type=int, default=8000, help="Port number for the Prometheus metrics server " "(only needed if enable-metrics is set).", ) parser.add_argument( "--enable-prompt-tokens-details", action="store_true", default=False, help="If set to True, enable prompt_tokens_details in usage.", ) parser.add_argument( "--enable-force-include-usage", action="store_true", default=False, help="If set to True, include usage on every request " "(even when stream_options is not specified)", ) return parser def parse_args(): parser = FlexibleArgumentParser(description="vLLM OpenAI-Compatible batch runner.") return make_arg_parser(parser).parse_args() # explicitly use pure text format, with a newline at the end # this makes it impossible to see the animation in the progress bar # but will avoid messing up with ray or multiprocessing, which wraps # each line of output with some prefix. 
_BAR_FORMAT = "{desc}: {percentage:3.0f}% Completed | {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]\n" # noqa: E501 class BatchProgressTracker: def __init__(self): self._total = 0 self._pbar: tqdm | None = None def submitted(self): self._total += 1 def completed(self): if self._pbar: self._pbar.update() def pbar(self) -> tqdm: enable_tqdm = ( not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0 ) self._pbar = tqdm( total=self._total, unit="req", desc="Running batch", mininterval=5, disable=not enable_tqdm, bar_format=_BAR_FORMAT, ) return self._pbar async def read_file(path_or_url: str) -> str: if path_or_url.startswith("http://") or path_or_url.startswith("https://"): async with aiohttp.ClientSession() as session, session.get(path_or_url) as resp: return await resp.text() else: with open(path_or_url, encoding="utf-8") as f: return f.read() async def write_local_file( output_path: str, batch_outputs: list[BatchRequestOutput] ) -> None: """ Write the responses to a local file. output_path: The path to write the responses to. batch_outputs: The list of batch outputs to write. """ # We should make this async, but as long as run_batch runs as a # standalone program, blocking the event loop won't affect performance. with open(output_path, "w", encoding="utf-8") as f: for o in batch_outputs: print(o.model_dump_json(), file=f) async def upload_data(output_url: str, data_or_file: str, from_file: bool) -> None: """ Upload a local file to a URL. output_url: The URL to upload the file to. data_or_file: Either the data to upload or the path to the file to upload. from_file: If True, data_or_file is the path to the file to upload. """ # Timeout is a common issue when uploading large files. # We retry max_retries times before giving up. max_retries = 5 # Number of seconds to wait before retrying. delay = 5 for attempt in range(1, max_retries + 1): try: # We increase the timeout to 1000 seconds to allow # for large files (default is 300). 
async with aiohttp.ClientSession( timeout=aiohttp.ClientTimeout(total=1000) ) as session: if from_file: with open(data_or_file, "rb") as file: async with session.put(output_url, data=file) as response: if response.status != 200: raise Exception( f"Failed to upload file.\n" f"Status: {response.status}\n" f"Response: {response.text()}" ) else: async with session.put(output_url, data=data_or_file) as response: if response.status != 200: raise Exception( f"Failed to upload data.\n" f"Status: {response.status}\n" f"Response: {response.text()}" ) except Exception as e: if attempt < max_retries: logger.error( "Failed to upload data (attempt %d). Error message: %s.\nRetrying in %d seconds...", # noqa: E501 attempt, e, delay, ) await asyncio.sleep(delay) else: raise Exception( f"Failed to upload data (attempt {attempt}). Error message: {str(e)}." # noqa: E501 ) from e async def write_file( path_or_url: str, batch_outputs: list[BatchRequestOutput], output_tmp_dir: str ) -> None: """ Write batch_outputs to a file or upload to a URL. path_or_url: The path or URL to write batch_outputs to. batch_outputs: The list of batch outputs to write. output_tmp_dir: The directory to store the output file before uploading it to the output URL. """ if path_or_url.startswith("http://") or path_or_url.startswith("https://"): if output_tmp_dir is None: logger.info("Writing outputs to memory buffer") output_buffer = StringIO() for o in batch_outputs: print(o.model_dump_json(), file=output_buffer) output_buffer.seek(0) logger.info("Uploading outputs to %s", path_or_url) await upload_data( path_or_url, output_buffer.read().strip().encode("utf-8"), from_file=False, ) else: # Write responses to a temporary file and then upload it to the URL. 
with tempfile.NamedTemporaryFile( mode="w", encoding="utf-8", dir=output_tmp_dir, prefix="tmp_batch_output_", suffix=".jsonl", ) as f: logger.info("Writing outputs to temporary local file %s", f.name) await write_local_file(f.name, batch_outputs) logger.info("Uploading outputs to %s", path_or_url) await upload_data(path_or_url, f.name, from_file=True) else: logger.info("Writing outputs to local file %s", path_or_url) await write_local_file(path_or_url, batch_outputs) def make_error_request_output( request: BatchRequestInput, error_msg: str ) -> BatchRequestOutput: batch_output = BatchRequestOutput( id=f"vllm-{random_uuid()}", custom_id=request.custom_id, response=BatchResponseData( status_code=HTTPStatus.BAD_REQUEST, request_id=f"vllm-batch-{random_uuid()}", ), error=error_msg, ) return batch_output async def make_async_error_request_output( request: BatchRequestInput, error_msg: str ) -> BatchRequestOutput: return make_error_request_output(request, error_msg) async def run_request( serving_engine_func: Callable, request: BatchRequestInput, tracker: BatchProgressTracker, ) -> BatchRequestOutput: response = await serving_engine_func(request.body) if isinstance( response, (ChatCompletionResponse, EmbeddingResponse, ScoreResponse, RerankResponse), ): batch_output = BatchRequestOutput( id=f"vllm-{random_uuid()}", custom_id=request.custom_id, response=BatchResponseData( body=response, request_id=f"vllm-batch-{random_uuid()}" ), error=None, ) elif isinstance(response, ErrorResponse): batch_output = BatchRequestOutput( id=f"vllm-{random_uuid()}", custom_id=request.custom_id, response=BatchResponseData( status_code=response.error.code, request_id=f"vllm-batch-{random_uuid()}", ), error=response, ) else: batch_output = make_error_request_output( request, error_msg="Request must not be sent in stream mode" ) tracker.completed() return batch_output def validate_run_batch_args(args): valid_reasoning_parsers = ReasoningParserManager.list_registered() if ( reasoning_parser := 
args.structured_outputs_config.reasoning_parser ) and reasoning_parser not in valid_reasoning_parsers: raise KeyError( f"invalid reasoning parser: {reasoning_parser} " f"(chose from {{ {','.join(valid_reasoning_parsers)} }})" ) async def run_batch( engine_client: EngineClient, args: Namespace, ) -> None: if args.served_model_name is not None: served_model_names = args.served_model_name else: served_model_names = [args.model] if args.enable_log_requests: request_logger = RequestLogger(max_log_len=args.max_log_len) else: request_logger = None base_model_paths = [ BaseModelPath(name=name, model_path=args.model) for name in served_model_names ] model_config = engine_client.model_config supported_tasks = await engine_client.get_supported_tasks() logger.info("Supported tasks: %s", supported_tasks) # Create the openai serving objects. openai_serving_models = OpenAIServingModels( engine_client=engine_client, base_model_paths=base_model_paths, lora_modules=None, ) openai_serving_chat = ( OpenAIServingChat( engine_client, openai_serving_models, args.response_role, request_logger=request_logger, chat_template=None, chat_template_content_format="auto", reasoning_parser=args.structured_outputs_config.reasoning_parser, enable_prompt_tokens_details=args.enable_prompt_tokens_details, enable_force_include_usage=args.enable_force_include_usage, default_chat_template_kwargs=getattr( args, "default_chat_template_kwargs", None ), ) if "generate" in supported_tasks else None ) openai_serving_embedding = ( OpenAIServingEmbedding( engine_client, openai_serving_models, request_logger=request_logger, chat_template=None, chat_template_content_format="auto", ) if "embed" in supported_tasks else None ) enable_serving_reranking = ( "classify" in supported_tasks and getattr(model_config.hf_config, "num_labels", 0) == 1 ) openai_serving_scores = ( ServingScores( engine_client, openai_serving_models, request_logger=request_logger, score_template=None, ) if ("embed" in supported_tasks or 
enable_serving_reranking) else None ) tracker = BatchProgressTracker() logger.info("Reading batch from %s...", args.input_file) # Submit all requests in the file to the engine "concurrently". response_futures: list[Awaitable[BatchRequestOutput]] = [] for request_json in (await read_file(args.input_file)).strip().split("\n"): # Skip empty lines. request_json = request_json.strip() if not request_json: continue request = BatchRequestInput.model_validate_json(request_json) # Determine the type of request and run it. if request.url == "/v1/chat/completions": chat_handler_fn = ( openai_serving_chat.create_chat_completion if openai_serving_chat is not None else None ) if chat_handler_fn is None: response_futures.append( make_async_error_request_output( request, error_msg="The model does not support Chat Completions API", ) ) continue response_futures.append(run_request(chat_handler_fn, request, tracker)) tracker.submitted() elif request.url == "/v1/embeddings": embed_handler_fn = ( openai_serving_embedding.create_embedding if openai_serving_embedding is not None else None ) if embed_handler_fn is None: response_futures.append( make_async_error_request_output( request, error_msg="The model does not support Embeddings API", ) ) continue response_futures.append(run_request(embed_handler_fn, request, tracker)) tracker.submitted() elif request.url.endswith("/score"): score_handler_fn = ( openai_serving_scores.create_score if openai_serving_scores is not None else None ) if score_handler_fn is None: response_futures.append( make_async_error_request_output( request, error_msg="The model does not support Scores API", ) ) continue response_futures.append(run_request(score_handler_fn, request, tracker)) tracker.submitted() elif request.url.endswith("/rerank"): rerank_handler_fn = ( openai_serving_scores.do_rerank if openai_serving_scores is not None else None ) if rerank_handler_fn is None: response_futures.append( make_async_error_request_output( request, error_msg="The model 
does not support Rerank API", ) ) continue response_futures.append(run_request(rerank_handler_fn, request, tracker)) tracker.submitted() else: response_futures.append( make_async_error_request_output( request, error_msg=f"URL {request.url} was used. " "Supported endpoints: /v1/chat/completions, /v1/embeddings," " /score, /rerank ." "See vllm/entrypoints/openai/api_server.py for supported " "score/rerank versions.", ) ) with tracker.pbar(): responses = await asyncio.gather(*response_futures) await write_file(args.output_file, responses, args.output_tmp_dir) async def main(args: Namespace): from vllm.entrypoints.openai.api_server import build_async_engine_client from vllm.usage.usage_lib import UsageContext validate_run_batch_args(args) async with build_async_engine_client( args, usage_context=UsageContext.OPENAI_BATCH_RUNNER, disable_frontend_multiprocessing=False, ) as engine_client: await run_batch(engine_client, args) if __name__ == "__main__": args = parse_args() logger.info("vLLM batch processing API version %s", VLLM_VERSION) logger.info("args: %s", args) # Start the Prometheus metrics server. LLMEngine uses the Prometheus client # to publish metrics at the /metrics endpoint. if args.enable_metrics: logger.info("Prometheus metrics enabled") start_http_server(port=args.port, addr=args.url) else: logger.info("Prometheus metrics disabled") asyncio.run(main(args))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/cli_args.py
vllm/entrypoints/openai/cli_args.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This file contains the command line arguments for the vLLM's OpenAI-compatible server. It is kept in a separate file for documentation purposes. """ import argparse import json import ssl from collections.abc import Sequence from dataclasses import field from typing import Any, Literal from pydantic.dataclasses import dataclass import vllm.envs as envs from vllm.config import config from vllm.engine.arg_utils import AsyncEngineArgs, optional_type from vllm.entrypoints.chat_utils import ( ChatTemplateContentFormatOption, validate_chat_template, ) from vllm.entrypoints.constants import ( H11_MAX_HEADER_COUNT_DEFAULT, H11_MAX_INCOMPLETE_EVENT_SIZE_DEFAULT, ) from vllm.entrypoints.openai.serving_models import LoRAModulePath from vllm.logger import init_logger from vllm.tool_parsers import ToolParserManager from vllm.utils.argparse_utils import FlexibleArgumentParser logger = init_logger(__name__) class LoRAParserAction(argparse.Action): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: str | Sequence[str] | None, option_string: str | None = None, ): if values is None: values = [] if isinstance(values, str): raise TypeError("Expected values to be a list") lora_list: list[LoRAModulePath] = [] for item in values: if item in [None, ""]: # Skip if item is None or empty string continue if "=" in item and "," not in item: # Old format: name=path name, path = item.split("=") lora_list.append(LoRAModulePath(name, path)) else: # Assume JSON format try: lora_dict = json.loads(item) lora = LoRAModulePath(**lora_dict) lora_list.append(lora) except json.JSONDecodeError: parser.error(f"Invalid JSON format for --lora-modules: {item}") except TypeError as e: parser.error( f"Invalid fields for --lora-modules: {item} - {str(e)}" ) setattr(namespace, self.dest, lora_list) @config @dataclass class FrontendArgs: """Arguments for the 
OpenAI-compatible frontend server.""" host: str | None = None """Host name.""" port: int = 8000 """Port number.""" uds: str | None = None """Unix domain socket path. If set, host and port arguments are ignored.""" uvicorn_log_level: Literal[ "debug", "info", "warning", "error", "critical", "trace" ] = "info" """Log level for uvicorn.""" disable_uvicorn_access_log: bool = False """Disable uvicorn access log.""" allow_credentials: bool = False """Allow credentials.""" allowed_origins: list[str] = field(default_factory=lambda: ["*"]) """Allowed origins.""" allowed_methods: list[str] = field(default_factory=lambda: ["*"]) """Allowed methods.""" allowed_headers: list[str] = field(default_factory=lambda: ["*"]) """Allowed headers.""" api_key: list[str] | None = None """If provided, the server will require one of these keys to be presented in the header.""" lora_modules: list[LoRAModulePath] | None = None """LoRA modules configurations in either 'name=path' format or JSON format or JSON list format. Example (old format): `'name=path'` Example (new format): `{\"name\": \"name\", \"path\": \"lora_path\", \"base_model_name\": \"id\"}`""" chat_template: str | None = None """The file path to the chat template, or the template in single-line form for the specified model.""" chat_template_content_format: ChatTemplateContentFormatOption = "auto" """The format to render message content within a chat template. * "string" will render the content as a string. Example: `"Hello World"` * "openai" will render the content as a list of dictionaries, similar to OpenAI schema. Example: `[{"type": "text", "text": "Hello world!"}]`""" trust_request_chat_template: bool = False """Whether to trust the chat template provided in the request. If False, the server will always use the chat template specified by `--chat-template` or the ones from tokenizer.""" default_chat_template_kwargs: dict[str, Any] | None = None """Default keyword arguments to pass to the chat template renderer. 
These will be merged with request-level chat_template_kwargs, with request values taking precedence. Useful for setting default behavior for reasoning models. Example: '{"enable_thinking": false}' to disable thinking mode by default for Qwen3/DeepSeek models.""" response_role: str = "assistant" """The role name to return if `request.add_generation_prompt=true`.""" ssl_keyfile: str | None = None """The file path to the SSL key file.""" ssl_certfile: str | None = None """The file path to the SSL cert file.""" ssl_ca_certs: str | None = None """The CA certificates file.""" enable_ssl_refresh: bool = False """Refresh SSL Context when SSL certificate files change""" ssl_cert_reqs: int = int(ssl.CERT_NONE) """Whether client certificate is required (see stdlib ssl module's).""" root_path: str | None = None """FastAPI root_path when app is behind a path based routing proxy.""" middleware: list[str] = field(default_factory=lambda: []) """Additional ASGI middleware to apply to the app. We accept multiple --middleware arguments. The value should be an import path. If a function is provided, vLLM will add it to the server using `@app.middleware('http')`. If a class is provided, vLLM will add it to the server using `app.add_middleware()`.""" return_tokens_as_token_ids: bool = False """When `--max-logprobs` is specified, represents single tokens as strings of the form 'token_id:{token_id}' so that tokens that are not JSON-encodable can be identified.""" disable_frontend_multiprocessing: bool = False """If specified, will run the OpenAI frontend server in the same process as the model serving engine.""" enable_request_id_headers: bool = False """If specified, API server will add X-Request-Id header to responses.""" enable_auto_tool_choice: bool = False """Enable auto tool choice for supported models. 
Use `--tool-call-parser` to specify which parser to use.""" exclude_tools_when_tool_choice_none: bool = False """If specified, exclude tool definitions in prompts when tool_choice='none'.""" tool_call_parser: str | None = None """Select the tool call parser depending on the model that you're using. This is used to parse the model-generated tool call into OpenAI API format. Required for `--enable-auto-tool-choice`. You can choose any option from the built-in parsers or register a plugin via `--tool-parser-plugin`.""" tool_parser_plugin: str = "" """Special the tool parser plugin write to parse the model-generated tool into OpenAI API format, the name register in this plugin can be used in `--tool-call-parser`.""" tool_server: str | None = None """Comma-separated list of host:port pairs (IPv4, IPv6, or hostname). Examples: 127.0.0.1:8000, [::1]:8000, localhost:1234. Or `demo` for demo purpose.""" log_config_file: str | None = envs.VLLM_LOGGING_CONFIG_PATH """Path to logging config JSON file for both vllm and uvicorn""" max_log_len: int | None = None """Max number of prompt characters or prompt ID numbers being printed in log. The default of None means unlimited.""" disable_fastapi_docs: bool = False """Disable FastAPI's OpenAPI schema, Swagger UI, and ReDoc endpoint.""" enable_prompt_tokens_details: bool = False """If set to True, enable prompt_tokens_details in usage.""" enable_server_load_tracking: bool = False """If set to True, enable tracking server_load_metrics in the app state.""" enable_force_include_usage: bool = False """If set to True, including usage on every request.""" enable_tokenizer_info_endpoint: bool = False """Enable the `/tokenizer_info` endpoint. May expose chat templates and other tokenizer configuration.""" enable_log_outputs: bool = False """If True, log model outputs (generations). 
Requires --enable-log-requests.""" h11_max_incomplete_event_size: int = H11_MAX_INCOMPLETE_EVENT_SIZE_DEFAULT """Maximum size (bytes) of an incomplete HTTP event (header or body) for h11 parser. Helps mitigate header abuse. Default: 4194304 (4 MB).""" h11_max_header_count: int = H11_MAX_HEADER_COUNT_DEFAULT """Maximum number of HTTP headers allowed in a request for h11 parser. Helps mitigate header abuse. Default: 256.""" log_error_stack: bool = envs.VLLM_SERVER_DEV_MODE """If set to True, log the stack trace of error responses""" tokens_only: bool = False """ If set to True, only enable the Tokens In<>Out endpoint. This is intended for use in a Disaggregated Everything setup. """ enable_offline_docs: bool = False """ Enable offline FastAPI documentation for air-gapped environments. Uses vendored static assets bundled with vLLM. """ @staticmethod def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: from vllm.engine.arg_utils import get_kwargs frontend_kwargs = get_kwargs(FrontendArgs) # Special case: allowed_origins, allowed_methods, allowed_headers all # need json.loads type # Should also remove nargs frontend_kwargs["allowed_origins"]["type"] = json.loads frontend_kwargs["allowed_methods"]["type"] = json.loads frontend_kwargs["allowed_headers"]["type"] = json.loads del frontend_kwargs["allowed_origins"]["nargs"] del frontend_kwargs["allowed_methods"]["nargs"] del frontend_kwargs["allowed_headers"]["nargs"] # Special case: default_chat_template_kwargs needs json.loads type frontend_kwargs["default_chat_template_kwargs"]["type"] = json.loads # Special case: LoRA modules need custom parser action and # optional_type(str) frontend_kwargs["lora_modules"]["type"] = optional_type(str) frontend_kwargs["lora_modules"]["action"] = LoRAParserAction # Special case: Middleware needs to append action frontend_kwargs["middleware"]["action"] = "append" frontend_kwargs["middleware"]["type"] = str if "nargs" in frontend_kwargs["middleware"]: del 
frontend_kwargs["middleware"]["nargs"] frontend_kwargs["middleware"]["default"] = [] # Special case: Tool call parser shows built-in options. valid_tool_parsers = list(ToolParserManager.list_registered()) parsers_str = ",".join(valid_tool_parsers) frontend_kwargs["tool_call_parser"]["metavar"] = ( f"{{{parsers_str}}} or name registered in --tool-parser-plugin" ) frontend_group = parser.add_argument_group( title="Frontend", description=FrontendArgs.__doc__, ) for key, value in frontend_kwargs.items(): frontend_group.add_argument(f"--{key.replace('_', '-')}", **value) return parser def make_arg_parser(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: """Create the CLI argument parser used by the OpenAI API server. We rely on the helper methods of `FrontendArgs` and `AsyncEngineArgs` to register all arguments instead of manually enumerating them here. This avoids code duplication and keeps the argument definitions in one place. """ parser.add_argument( "model_tag", type=str, nargs="?", help="The model tag to serve (optional if specified in config)", ) parser.add_argument( "--headless", action="store_true", default=False, help="Run in headless mode. See multi-node data parallel " "documentation for more details.", ) parser.add_argument( "--api-server-count", "-asc", type=int, default=1, help="How many API server processes to run.", ) parser.add_argument( "--config", help="Read CLI options from a config file. 
" "Must be a YAML with the following options: " "https://docs.vllm.ai/en/latest/configuration/serve_args.html", ) parser = FrontendArgs.add_cli_args(parser) parser = AsyncEngineArgs.add_cli_args(parser) return parser def validate_parsed_serve_args(args: argparse.Namespace): """Quick checks for model serve args that raise prior to loading.""" if hasattr(args, "subparser") and args.subparser != "serve": return # Ensure that the chat template is valid; raises if it likely isn't validate_chat_template(args.chat_template) # Enable auto tool needs a tool call parser to be valid if args.enable_auto_tool_choice and not args.tool_call_parser: raise TypeError("Error: --enable-auto-tool-choice requires --tool-call-parser") if args.enable_log_outputs and not args.enable_log_requests: raise TypeError("Error: --enable-log-outputs requires --enable-log-requests") def create_parser_for_docs() -> FlexibleArgumentParser: parser_for_docs = FlexibleArgumentParser( prog="-m vllm.entrypoints.openai.api_server" ) return make_arg_parser(parser_for_docs)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/serving_responses.py
vllm/entrypoints/openai/serving_responses.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import json import time import uuid from collections import deque from collections.abc import AsyncGenerator, AsyncIterator, Callable, Sequence from contextlib import AsyncExitStack from copy import copy from http import HTTPStatus from typing import Final import jinja2 from fastapi import Request from openai.types.responses import ( ResponseCodeInterpreterCallCodeDeltaEvent, ResponseCodeInterpreterCallCodeDoneEvent, ResponseCodeInterpreterCallCompletedEvent, ResponseCodeInterpreterCallInProgressEvent, ResponseCodeInterpreterCallInterpretingEvent, ResponseCodeInterpreterToolCallParam, ResponseContentPartAddedEvent, ResponseContentPartDoneEvent, ResponseFunctionCallArgumentsDeltaEvent, ResponseFunctionCallArgumentsDoneEvent, ResponseFunctionToolCall, ResponseFunctionWebSearch, ResponseOutputItem, ResponseOutputItemAddedEvent, ResponseOutputItemDoneEvent, ResponseOutputMessage, ResponseOutputText, ResponseReasoningItem, ResponseReasoningTextDeltaEvent, ResponseReasoningTextDoneEvent, ResponseStatus, ResponseTextDeltaEvent, ResponseTextDoneEvent, ResponseWebSearchCallCompletedEvent, ResponseWebSearchCallInProgressEvent, ResponseWebSearchCallSearchingEvent, response_function_web_search, response_text_delta_event, ) from openai.types.responses.response_output_text import Logprob, LogprobTopLogprob from openai.types.responses.response_reasoning_item import ( Content as ResponseReasoningTextContent, ) from openai.types.responses.tool import Mcp, Tool from openai_harmony import Message as OpenAIHarmonyMessage from pydantic import TypeAdapter from vllm import envs from vllm.engine.protocol import EngineClient from vllm.entrypoints.chat_utils import ( ChatCompletionMessageParam, ChatTemplateContentFormatOption, ) from vllm.entrypoints.context import ( ConversationContext, HarmonyContext, ParsableContext, SimpleContext, StreamingHarmonyContext, ) from 
vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.parser.harmony_utils import ( construct_harmony_previous_input_messages, get_developer_message, get_stop_tokens_for_assistant_actions, get_system_message, get_user_message, has_custom_tools, parse_output_message, parse_remaining_state, parse_response_input, render_for_completion, ) from vllm.entrypoints.openai.protocol import ( DeltaMessage, ErrorResponse, InputTokensDetails, OutputTokensDetails, RequestResponseMetadata, ResponseCompletedEvent, ResponseCreatedEvent, ResponseInProgressEvent, ResponseInputOutputMessage, ResponseReasoningPartAddedEvent, ResponseReasoningPartDoneEvent, ResponsesRequest, ResponsesResponse, ResponseUsage, StreamingResponsesResponse, VLLMValidationError, ) from vllm.entrypoints.openai.serving_engine import ( GenerationError, OpenAIServing, ) from vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.entrypoints.responses_utils import ( construct_input_messages, construct_tool_dicts, extract_tool_types, ) from vllm.entrypoints.tool_server import ToolServer from vllm.inputs.data import TokensPrompt from vllm.logger import init_logger from vllm.logprobs import Logprob as SampleLogprob from vllm.logprobs import SampleLogprobs from vllm.outputs import CompletionOutput from vllm.sampling_params import SamplingParams, StructuredOutputsParams from vllm.tokenizers import TokenizerLike from vllm.utils import random_uuid logger = init_logger(__name__) def _extract_allowed_tools_from_mcp_requests( tools: list[Tool], ) -> dict[str, list[str] | None]: """ Extract allowed_tools mapping from MCP tool requests. Returns a dictionary mapping server_label to allowed_tools list. Handles both list format and McpAllowedToolsMcpToolFilter object format. 
Special handling: - If allowed_tools is None, returns None (allows all tools) - If allowed_tools contains "*", returns None (allows all tools) - Otherwise, returns the list of specific tool names This function can be reused for both harmony and non-harmony MCP calls. """ allowed_tools_map: dict[str, list[str] | None] = {} for tool in tools: if not isinstance(tool, Mcp): continue # allowed_tools can be a list or an object with tool_names # Extract the actual list of tool names allowed_tools_val = None if tool.allowed_tools is not None: if isinstance(tool.allowed_tools, list): allowed_tools_val = tool.allowed_tools elif hasattr(tool.allowed_tools, "tool_names"): # It's an McpAllowedToolsMcpToolFilter object allowed_tools_val = tool.allowed_tools.tool_names # Normalize "*" to None (both mean "allow all tools") if allowed_tools_val is not None and "*" in allowed_tools_val: allowed_tools_val = None allowed_tools_map[tool.server_label] = allowed_tools_val return allowed_tools_map class OpenAIServingResponses(OpenAIServing): def __init__( self, engine_client: EngineClient, models: OpenAIServingModels, *, request_logger: RequestLogger | None, chat_template: str | None, chat_template_content_format: ChatTemplateContentFormatOption, return_tokens_as_token_ids: bool = False, reasoning_parser: str = "", enable_auto_tools: bool = False, tool_parser: str | None = None, tool_server: ToolServer | None = None, enable_prompt_tokens_details: bool = False, enable_force_include_usage: bool = False, enable_log_outputs: bool = False, log_error_stack: bool = False, ) -> None: super().__init__( engine_client=engine_client, models=models, request_logger=request_logger, return_tokens_as_token_ids=return_tokens_as_token_ids, log_error_stack=log_error_stack, ) self.chat_template = chat_template self.chat_template_content_format: Final = chat_template_content_format self.enable_log_outputs = enable_log_outputs self.reasoning_parser = self._get_reasoning_parser( 
def __init__(
    self,
    engine_client: EngineClient,
    models: OpenAIServingModels,
    *,
    request_logger: RequestLogger | None,
    chat_template: str | None,
    chat_template_content_format: ChatTemplateContentFormatOption,
    return_tokens_as_token_ids: bool = False,
    reasoning_parser: str = "",
    enable_auto_tools: bool = False,
    tool_parser: str | None = None,
    tool_server: ToolServer | None = None,
    enable_prompt_tokens_details: bool = False,
    enable_force_include_usage: bool = False,
    enable_log_outputs: bool = False,
    log_error_stack: bool = False,
) -> None:
    """Set up the Responses API handler on top of the shared serving base."""
    super().__init__(
        engine_client=engine_client,
        models=models,
        request_logger=request_logger,
        return_tokens_as_token_ids=return_tokens_as_token_ids,
        log_error_stack=log_error_stack,
    )

    self.chat_template = chat_template
    self.chat_template_content_format: Final = chat_template_content_format
    self.enable_log_outputs = enable_log_outputs

    self.reasoning_parser = self._get_reasoning_parser(
        reasoning_parser_name=reasoning_parser
    )

    self.enable_prompt_tokens_details = enable_prompt_tokens_details
    self.enable_force_include_usage = enable_force_include_usage
    self.default_sampling_params = self.model_config.get_diff_sampling_param()
    if self.default_sampling_params:
        source = self.model_config.generation_config
        source = "model" if source == "auto" else source
        logger.info(
            "Using default chat sampling params from %s: %s",
            source,
            self.default_sampling_params,
        )

    # If False (default), the "store" option is (silently) ignored and the
    # response is not stored. If True, the response is stored in memory.
    # NOTE(woosuk): This may not be intuitive for users, as the default
    # behavior in OpenAI's Responses API is to store the response, but
    # vLLM's default behavior is not.
    self.enable_store = envs.VLLM_ENABLE_RESPONSES_API_STORE
    if self.enable_store:
        logger.warning_once(
            "`VLLM_ENABLE_RESPONSES_API_STORE` is enabled. This may "
            "cause a memory leak since we never remove responses from "
            "the store."
        )

    self.use_harmony = self.model_config.hf_config.model_type == "gpt_oss"
    if self.use_harmony:
        logger.warning(
            "For gpt-oss, we ignore --enable-auto-tool-choice "
            "and always enable tool use."
        )
        # OpenAI models have two EOS-like tokens: <|return|> and <|call|>.
        # We need to add them to the stop token ids.
        if "stop_token_ids" not in self.default_sampling_params:
            self.default_sampling_params["stop_token_ids"] = []
        self.default_sampling_params["stop_token_ids"].extend(
            get_stop_tokens_for_assistant_actions()
        )

    self.enable_auto_tools = enable_auto_tools

    # set up tool use
    self.tool_parser = self._get_tool_parser(
        tool_parser_name=tool_parser, enable_auto_tools=enable_auto_tools
    )

    # HACK(woosuk)/HACK(wuhang): simple in-memory stores.
    # FIXME: with enable_store=True these are never evicted, so they can
    # leak memory over the lifetime of the server.
    self.response_store: dict[str, ResponsesResponse] = {}
    self.response_store_lock = asyncio.Lock()
    self.msg_store: dict[str, list[ChatCompletionMessageParam]] = {}
    self.event_store: dict[
        str, tuple[deque[StreamingResponsesResponse], asyncio.Event]
    ] = {}
    self.background_tasks: dict[str, asyncio.Task] = {}

    self.tool_server = tool_server
We should use a better store. # FIXME: If enable_store=True, this may cause a memory leak since we # never remove messages from the store. self.msg_store: dict[str, list[ChatCompletionMessageParam]] = {} # HACK(wuhang): This is a hack. We should use a better store. # FIXME: If enable_store=True, this may cause a memory leak since we # never remove events from the store. self.event_store: dict[ str, tuple[deque[StreamingResponsesResponse], asyncio.Event] ] = {} self.background_tasks: dict[str, asyncio.Task] = {} self.tool_server = tool_server def _validate_generator_input( self, engine_prompt: TokensPrompt ) -> ErrorResponse | None: """Add validations to the input to the generator here.""" if self.max_model_len <= len(engine_prompt["prompt_token_ids"]): error_message = ( "The engine prompt length" f" {len(engine_prompt['prompt_token_ids'])} " f"exceeds the max_model_len {self.max_model_len}. " "Please reduce prompt." ) return self.create_error_response( err_type="invalid_request_error", message=error_message, status_code=HTTPStatus.BAD_REQUEST, param="input", ) return None def _validate_create_responses_input( self, request: ResponsesRequest ) -> ErrorResponse | None: if self.use_harmony and request.is_include_output_logprobs(): return self.create_error_response( err_type="invalid_request_error", message="logprobs are not supported with gpt-oss models", status_code=HTTPStatus.BAD_REQUEST, param="logprobs", ) if request.store and not self.enable_store and request.background: return self.create_error_response( err_type="invalid_request_error", message=( "This vLLM engine does not support `store=True` and " "therefore does not support the background mode. To " "enable these features, set the environment variable " "`VLLM_ENABLE_RESPONSES_API_STORE=1` when launching " "the vLLM server." 
), status_code=HTTPStatus.BAD_REQUEST, param="background", ) if request.previous_input_messages and request.previous_response_id: return self.create_error_response( err_type="invalid_request_error", message="Only one of `previous_input_messages` and " "`previous_response_id` can be set.", status_code=HTTPStatus.BAD_REQUEST, param="previous_response_id", ) return None async def create_responses( self, request: ResponsesRequest, raw_request: Request | None = None, ) -> ( AsyncGenerator[StreamingResponsesResponse, None] | ResponsesResponse | ErrorResponse ): error_check_ret = await self._check_model(request) if error_check_ret is not None: logger.error("Error with model %s", error_check_ret) return error_check_ret maybe_validation_error = self._validate_create_responses_input(request) if maybe_validation_error is not None: return maybe_validation_error # If the engine is dead, raise the engine's DEAD_ERROR. # This is required for the streaming case, where we return a # success status before we actually start generating text :). if self.engine_client.errored: raise self.engine_client.dead_error if request.store and not self.enable_store: # Disable the store option. # NOTE(woosuk): Although returning an error is possible, we opted # to implicitly disable store and process the request anyway, as # we assume most users do not intend to actually store the response # (i.e., their request's `store=True` just because it's the default # value). request.store = False # Handle the previous response ID. 
async def create_responses(
    self,
    request: ResponsesRequest,
    raw_request: Request | None = None,
) -> (
    AsyncGenerator[StreamingResponsesResponse, None]
    | ResponsesResponse
    | ErrorResponse
):
    """Entry point for the Responses API: validate, build the prompt, and
    dispatch to streaming / background / full generation."""
    error_check_ret = await self._check_model(request)
    if error_check_ret is not None:
        logger.error("Error with model %s", error_check_ret)
        return error_check_ret

    maybe_validation_error = self._validate_create_responses_input(request)
    if maybe_validation_error is not None:
        return maybe_validation_error

    # If the engine is dead, raise the engine's DEAD_ERROR.
    # This is required for the streaming case, where we return a
    # success status before we actually start generating text :).
    if self.engine_client.errored:
        raise self.engine_client.dead_error

    if request.store and not self.enable_store:
        # Disable the store option.
        # NOTE(woosuk): Although returning an error is possible, we opted
        # to implicitly disable store and process the request anyway, as
        # we assume most users do not intend to actually store the response
        # (i.e., their request's `store=True` just because it's the default
        # value).
        request.store = False

    # Handle the previous response ID.
    prev_response_id = request.previous_response_id
    if prev_response_id is not None:
        async with self.response_store_lock:
            prev_response = self.response_store.get(prev_response_id)
            if prev_response is None:
                return self._make_not_found_error(prev_response_id)
    else:
        prev_response = None

    try:
        lora_request = self._maybe_get_adapters(request)
        model_name = self.models.model_name(lora_request)
        tokenizer = await self.engine_client.get_tokenizer()
        if self.use_harmony:
            messages, engine_prompts = self._make_request_with_harmony(
                request, prev_response
            )
        else:
            messages, engine_prompts = await self._make_request(
                request, prev_response, tokenizer
            )
    except (
        ValueError,
        TypeError,
        RuntimeError,
        jinja2.TemplateError,
        NotImplementedError,
    ) as e:
        logger.exception("Error in preprocessing prompt inputs")
        return self.create_error_response(f"{e} {e.__cause__}")

    request_metadata = RequestResponseMetadata(request_id=request.request_id)
    if raw_request:
        raw_request.state.request_metadata = request_metadata

    # Schedule the request and get the result generator.
    generators: list[AsyncGenerator[ConversationContext, None]] = []

    builtin_tool_list: list[str] = []
    if self.tool_server is not None:
        if self.tool_server.has_tool("browser"):
            builtin_tool_list.append("browser")
        if self.tool_server.has_tool("python"):
            builtin_tool_list.append("python")
        if self.tool_server.has_tool("container"):
            builtin_tool_list.append("container")
    if self.tool_server is not None:
        available_tools = builtin_tool_list
    else:
        assert len(builtin_tool_list) == 0
        available_tools = []

    try:
        for engine_prompt in engine_prompts:
            maybe_error = self._validate_generator_input(engine_prompt)
            if maybe_error is not None:
                return maybe_error
            default_max_tokens = self.max_model_len - len(
                engine_prompt["prompt_token_ids"]
            )
            sampling_params = request.to_sampling_params(
                default_max_tokens, self.default_sampling_params
            )
            trace_headers = (
                None
                if raw_request is None
                else await self._get_trace_headers(raw_request.headers)
            )

            context: ConversationContext
            if self.use_harmony:
                if request.stream:
                    context = StreamingHarmonyContext(messages, available_tools)
                else:
                    context = HarmonyContext(messages, available_tools)
            else:
                if envs.VLLM_USE_EXPERIMENTAL_PARSER_CONTEXT:
                    # This is a feature in development for parsing
                    # tokens during generation instead of at the end
                    context = ParsableContext(
                        response_messages=messages,
                        tokenizer=tokenizer,
                        reasoning_parser_cls=self.reasoning_parser,
                        request=request,
                        tool_parser_cls=self.tool_parser,
                        available_tools=available_tools,
                        chat_template=self.chat_template,
                        chat_template_content_format=self.chat_template_content_format,
                    )
                else:
                    context = SimpleContext()
                # NOTE(review): nesting reconstructed from whitespace-mangled
                # source — confirm whether the structured-tag preparation
                # applies to the ParsableContext branch as well.
                if self.reasoning_parser is not None:
                    reasoning_parser = self.reasoning_parser(tokenizer)
                    if sampling_params.structured_outputs is None:
                        sampling_params.structured_outputs = (
                            StructuredOutputsParams()
                        )
                    struct_out = sampling_params.structured_outputs
                    if struct_out.all_non_structural_tag_constraints_none():
                        sampling_params.structured_outputs.structural_tag = (
                            reasoning_parser.prepare_structured_tag(
                                sampling_params.structured_outputs.structural_tag,
                                self.tool_server,
                            )
                        )

            generator = self._generate_with_builtin_tools(
                request_id=request.request_id,
                engine_prompt=engine_prompt,
                sampling_params=sampling_params,
                context=context,
                lora_request=lora_request,
                priority=request.priority,
                trace_headers=trace_headers,
            )
            generators.append(generator)
    except ValueError as e:
        return self.create_error_response(e)

    assert len(generators) == 1
    (result_generator,) = generators

    # Store the input messages.
    if request.store:
        self.msg_store[request.request_id] = messages

    if request.background:
        created_time = int(time.time())
        response = ResponsesResponse.from_request(
            request,
            sampling_params,
            model_name=model_name,
            created_time=created_time,
            output=[],
            status="queued",
            usage=None,
        )
        async with self.response_store_lock:
            self.response_store[response.id] = response

        # Run the request in the background.
        if request.stream:
            task = asyncio.create_task(
                self._run_background_request_stream(
                    request,
                    sampling_params,
                    result_generator,
                    context,
                    model_name,
                    tokenizer,
                    request_metadata,
                    created_time,
                ),
                name=f"create_{request.request_id}",
            )
        else:
            task = asyncio.create_task(
                self._run_background_request(
                    request,
                    sampling_params,
                    result_generator,
                    context,
                    model_name,
                    tokenizer,
                    request_metadata,
                    created_time,
                ),
                name=f"create_{response.id}",
            )

        # For cleanup.
        response_id = response.id
        self.background_tasks[response_id] = task
        task.add_done_callback(
            lambda _: self.background_tasks.pop(response_id, None)
        )
        if request.stream:
            return self.responses_background_stream_generator(request.request_id)
        return response

    if request.stream:
        return self.responses_stream_generator(
            request,
            sampling_params,
            result_generator,
            context,
            model_name,
            tokenizer,
            request_metadata,
        )

    try:
        return await self.responses_full_generator(
            request,
            sampling_params,
            result_generator,
            context,
            model_name,
            tokenizer,
            request_metadata,
        )
    except GenerationError as e:
        return self._convert_generation_error_to_response(e)
    except Exception as e:
        return self.create_error_response(e)
async def _make_request(
    self,
    request: ResponsesRequest,
    prev_response: ResponsesResponse | None,
    tokenizer: TokenizerLike,
):
    """Build (messages, engine_prompts) for the non-harmony path."""
    tool_dicts = construct_tool_dicts(request.tools, request.tool_choice)
    # Assemble the conversation, folding in any stored prior turn.
    messages = construct_input_messages(
        request_instructions=request.instructions,
        request_input=request.input,
        prev_msg=self.msg_store.get(prev_response.id) if prev_response else None,
        prev_response_output=prev_response.output if prev_response else None,
    )
    _, engine_prompts = await self._preprocess_chat(
        request,
        tokenizer,
        messages,
        tool_dicts=tool_dicts,
        tool_parser=self.tool_parser,
        chat_template=self.chat_template,
        chat_template_content_format=self.chat_template_content_format,
    )
    return messages, engine_prompts
request.cache_salt return messages, [engine_prompt] async def _initialize_tool_sessions( self, request: ResponsesRequest, context: ConversationContext, exit_stack: AsyncExitStack, ): # we should only initialize the tool session if the request needs tools if len(request.tools) == 0: return mcp_tools = { tool.server_label: tool for tool in request.tools if tool.type == "mcp" } await context.init_tool_sessions( self.tool_server, exit_stack, request.request_id, mcp_tools ) async def responses_full_generator( self, request: ResponsesRequest, sampling_params: SamplingParams, result_generator: AsyncIterator[ConversationContext], context: ConversationContext, model_name: str, tokenizer: TokenizerLike, request_metadata: RequestResponseMetadata, created_time: int | None = None, ) -> ErrorResponse | ResponsesResponse: if created_time is None: created_time = int(time.time()) async with AsyncExitStack() as exit_stack: try: await self._initialize_tool_sessions(request, context, exit_stack) async for _ in result_generator: pass except asyncio.CancelledError: return self.create_error_response("Client disconnected") except ValueError as e: return self.create_error_response(e) # NOTE: Implementation of stauts is still WIP, but for now # we guarantee that if the status is not "completed", it is accurate. # "completed" is implemented as the "catch-all" for now. 
async def responses_full_generator(
    self,
    request: ResponsesRequest,
    sampling_params: SamplingParams,
    result_generator: AsyncIterator[ConversationContext],
    context: ConversationContext,
    model_name: str,
    tokenizer: TokenizerLike,
    request_metadata: RequestResponseMetadata,
    created_time: int | None = None,
) -> ErrorResponse | ResponsesResponse:
    """Drain the generator to completion and build the final response."""
    if created_time is None:
        created_time = int(time.time())

    async with AsyncExitStack() as exit_stack:
        try:
            await self._initialize_tool_sessions(request, context, exit_stack)
            async for _ in result_generator:
                pass
        except asyncio.CancelledError:
            return self.create_error_response("Client disconnected")
        except ValueError as e:
            return self.create_error_response(e)

    # NOTE: Implementation of status is still WIP, but for now
    # we guarantee that if the status is not "completed", it is accurate.
    # "completed" is implemented as the "catch-all" for now.
    status: ResponseStatus = "completed"
    input_messages: ResponseInputOutputMessage | None = None
    output_messages: ResponseInputOutputMessage | None = None

    if self.use_harmony:
        assert isinstance(context, HarmonyContext)
        output = self._make_response_output_items_with_harmony(context)
        if request.enable_response_messages:
            input_messages = context.messages[: context.num_init_messages]
            output_messages = context.messages[context.num_init_messages :]
        num_tool_output_tokens = context.num_tool_output_tokens
        if len(output) > 0:
            if context.finish_reason == "length":
                status = "incomplete"
            elif context.finish_reason == "abort":
                status = "cancelled"
            else:
                self._raise_if_error(context.finish_reason, request.request_id)
        else:
            status = "incomplete"
    elif isinstance(context, ParsableContext):
        output = context.parser.make_response_output_items_from_parsable_context()
        if request.enable_response_messages:
            input_messages = context.input_messages
            output_messages = context.output_messages
        # TODO: Calculate usage.
        # assert final_res.prompt_token_ids is not None
        num_tool_output_tokens = 0
    else:
        assert isinstance(context, SimpleContext)
        # Use final_output which has accumulated text/token_ids/logprobs
        final_res = context.final_output
        assert final_res is not None
        assert len(final_res.outputs) == 1
        final_output = final_res.outputs[0]

        # finish_reason='error' indicates retryable internal error
        self._raise_if_error(final_output.finish_reason, request.request_id)

        output = self._make_response_output_items(request, final_output, tokenizer)
        if request.enable_response_messages:
            input_messages = context.input_messages
            output_messages = context.output_messages

        # Calculate usage.
        assert final_res.prompt_token_ids is not None
        num_tool_output_tokens = 0

    assert isinstance(context, (SimpleContext, HarmonyContext, ParsableContext))
    num_prompt_tokens = context.num_prompt_tokens
    num_generated_tokens = context.num_output_tokens
    num_cached_tokens = context.num_cached_tokens
    num_reasoning_tokens = context.num_reasoning_tokens

    usage = ResponseUsage(
        input_tokens=num_prompt_tokens,
        output_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
        input_tokens_details=InputTokensDetails(
            cached_tokens=num_cached_tokens,
            input_tokens_per_turn=[
                turn.input_tokens for turn in context.all_turn_metrics
            ],
            cached_tokens_per_turn=[
                turn.cached_input_tokens for turn in context.all_turn_metrics
            ],
        ),
        output_tokens_details=OutputTokensDetails(
            reasoning_tokens=num_reasoning_tokens,
            tool_output_tokens=num_tool_output_tokens,
            output_tokens_per_turn=[
                turn.output_tokens for turn in context.all_turn_metrics
            ],
            tool_output_tokens_per_turn=[
                turn.tool_output_tokens for turn in context.all_turn_metrics
            ],
        ),
    )

    response = ResponsesResponse.from_request(
        request,
        sampling_params,
        input_messages=input_messages,
        output_messages=output_messages,
        model_name=model_name,
        created_time=created_time,
        output=output,
        status=status,
        usage=usage,
    )

    if request.store:
        async with self.response_store_lock:
            stored_response = self.response_store.get(response.id)
            # If the response is already cancelled, don't update it.
            if stored_response is None or stored_response.status != "cancelled":
                self.response_store[response.id] = response
    return response
if stored_response is None or stored_response.status != "cancelled": self.response_store[response.id] = response return response def _topk_logprobs( self, logprobs: dict[int, SampleLogprob], top_logprobs: int, tokenizer: TokenizerLike, ) -> list[LogprobTopLogprob]: """Returns the top-k logprobs from the logprobs dictionary.""" out = [] for i, (token_id, _logprob) in enumerate(logprobs.items()): if i >= top_logprobs: break text = ( _logprob.decoded_token if _logprob.decoded_token is not None else tokenizer.decode([token_id]) ) out.append( LogprobTopLogprob( token=text, logprob=max(_logprob.logprob, -9999.0), bytes=list(text.encode("utf-8", errors="replace")), ) ) return out def _create_response_logprobs( self, token_ids: Sequence[int], logprobs: SampleLogprobs | None, tokenizer: TokenizerLike, top_logprobs: int | None = None, ) -> list[Logprob]: assert logprobs is not None, "logprobs must be provided" assert len(token_ids) == len(logprobs), ( "token_ids and logprobs.token_ids must have the same length" ) out = [] for i, token_id in enumerate(token_ids): logprob = logprobs[i] token_logprob = logprob[token_id] text = ( token_logprob.decoded_token if token_logprob.decoded_token is not None else tokenizer.decode([token_id]) ) out.append( Logprob( token=text, logprob=max(token_logprob.logprob, -9999.0), bytes=list(text.encode("utf-8", errors="replace")), top_logprobs=( self._topk_logprobs( logprob, top_logprobs=top_logprobs, tokenizer=tokenizer ) if top_logprobs else [] ), ) ) return out def _create_stream_response_logprobs( self, token_ids: Sequence[int], logprobs: SampleLogprobs | None, tokenizer: TokenizerLike, top_logprobs: int | None = None, ) -> list[response_text_delta_event.Logprob]: lgs = self._create_response_logprobs( token_ids=token_ids, logprobs=logprobs, tokenizer=tokenizer, top_logprobs=top_logprobs, ) return [ response_text_delta_event.Logprob( token=lg.token, logprob=lg.logprob, top_logprobs=[ response_text_delta_event.LogprobTopLogprob( token=tl.token, 
logprob=tl.logprob ) for tl in lg.top_logprobs ], ) for lg in lgs ] def _make_response_output_items( self, request: ResponsesRequest, final_output: CompletionOutput, tokenizer: TokenizerLike, ) -> list[ResponseOutputItem]: if self.reasoning_parser: try:
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/serving_completion.py
vllm/entrypoints/openai/serving_completion.py
def __init__(
    self,
    engine_client: EngineClient,
    models: OpenAIServingModels,
    *,
    request_logger: RequestLogger | None,
    return_tokens_as_token_ids: bool = False,
    enable_prompt_tokens_details: bool = False,
    enable_force_include_usage: bool = False,
    log_error_stack: bool = False,
):
    """Set up the Completions API handler on top of the shared serving base."""
    super().__init__(
        engine_client=engine_client,
        models=models,
        request_logger=request_logger,
        return_tokens_as_token_ids=return_tokens_as_token_ids,
        log_error_stack=log_error_stack,
    )

    # set up logits processors
    self.logits_processors = self.model_config.logits_processors

    self.enable_prompt_tokens_details = enable_prompt_tokens_details
    self.default_sampling_params = self.model_config.get_diff_sampling_param()
    self.enable_force_include_usage = enable_force_include_usage
    if self.default_sampling_params:
        source = self.model_config.generation_config
        source = "model" if source == "auto" else source
        logger.info(
            "Using default completion sampling params from %s: %s",
            source,
            self.default_sampling_params,
        )
async def create_completion(
    self,
    request: CompletionRequest,
    raw_request: Request | None = None,
) -> AsyncGenerator[str, None] | CompletionResponse | ErrorResponse:
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/completions/create
    for the API specification. This API mimics the OpenAI Completion API.

    NOTE: Currently we do not support the following feature:
        - suffix (the language models we currently support do not support
          suffix)
    """
    error_check_ret = await self._check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    # If the engine is dead, raise the engine's DEAD_ERROR.
    # This is required for the streaming case, where we return a
    # success status before we actually start generating text :).
    if self.engine_client.errored:
        raise self.engine_client.dead_error

    # Return error for unsupported features.
    if request.suffix is not None:
        return self.create_error_response("suffix is not currently supported")
    if request.echo and request.prompt_embeds is not None:
        return self.create_error_response("Echo is unsupported with prompt embeds.")
    if request.prompt_logprobs is not None and request.prompt_embeds is not None:
        return self.create_error_response(
            "prompt_logprobs is not compatible with prompt embeds."
        )

    request_id = f"cmpl-{self._base_request_id(raw_request, request.request_id)}"
    created_time = int(time.time())

    request_metadata = RequestResponseMetadata(request_id=request_id)
    if raw_request:
        raw_request.state.request_metadata = request_metadata

    try:
        lora_request = self._maybe_get_adapters(request)

        if self.model_config.skip_tokenizer_init:
            tokenizer = None
        else:
            tokenizer = await self.engine_client.get_tokenizer()

        renderer = self._get_renderer(tokenizer)
        engine_prompts = await renderer.render_prompt_and_embeds(
            prompt_or_prompts=request.prompt,
            prompt_embeds=request.prompt_embeds,
            config=self._build_render_config(request),
        )
    # Consolidated from four byte-identical handlers (ValueError, TypeError,
    # RuntimeError, jinja2.TemplateError) — behavior is unchanged.
    except (ValueError, TypeError, RuntimeError, jinja2.TemplateError) as e:
        logger.exception("Error in preprocessing prompt inputs")
        return self.create_error_response(str(e))

    # Extract data_parallel_rank from header (router can inject it)
    data_parallel_rank = self._get_data_parallel_rank(raw_request)

    # Schedule the request and get the result generator.
    generators: list[AsyncGenerator[RequestOutput, None]] = []
    try:
        for i, engine_prompt in enumerate(engine_prompts):
            prompt_text, prompt_token_ids, prompt_embeds = (
                self._get_prompt_components(engine_prompt)
            )

            input_length = None
            if prompt_token_ids is not None:
                input_length = len(prompt_token_ids)
            elif prompt_embeds is not None:
                input_length = len(prompt_embeds)
            else:
                raise NotImplementedError

            if self.default_sampling_params is None:
                self.default_sampling_params = {}

            max_tokens = get_max_tokens(
                max_model_len=self.max_model_len,
                request=request,
                input_length=input_length,
                default_sampling_params=self.default_sampling_params,
            )

            sampling_params: SamplingParams | BeamSearchParams
            if request.use_beam_search:
                sampling_params = request.to_beam_search_params(
                    max_tokens, self.default_sampling_params
                )
            else:
                sampling_params = request.to_sampling_params(
                    max_tokens,
                    self.model_config.logits_processor_pattern,
                    self.default_sampling_params,
                )

            validate_logits_processors_parameters(
                self.logits_processors,
                sampling_params,
            )

            request_id_item = f"{request_id}-{i}"

            self._log_inputs(
                request_id_item,
                engine_prompt,
                params=sampling_params,
                lora_request=lora_request,
            )

            trace_headers = (
                None
                if raw_request is None
                else await self._get_trace_headers(raw_request.headers)
            )

            # Mypy inconsistently requires this second cast in different
            # environments. It shouldn't be necessary (redundant from above)
            # but pre-commit in CI fails without it.
            engine_prompt = cast(EmbedsPrompt | TokensPrompt, engine_prompt)
            if isinstance(sampling_params, BeamSearchParams):
                generator = self.beam_search(
                    prompt=engine_prompt,
                    request_id=request_id,
                    params=sampling_params,
                    lora_request=lora_request,
                    trace_headers=trace_headers,
                )
            else:
                engine_request, tokenization_kwargs = await self._process_inputs(
                    request_id_item,
                    engine_prompt,
                    sampling_params,
                    lora_request=lora_request,
                    trace_headers=trace_headers,
                    priority=request.priority,
                    data_parallel_rank=data_parallel_rank,
                )
                generator = self.engine_client.generate(
                    engine_request,
                    sampling_params,
                    request_id_item,
                    lora_request=lora_request,
                    trace_headers=trace_headers,
                    priority=request.priority,
                    prompt_text=prompt_text,
                    tokenization_kwargs=tokenization_kwargs,
                    data_parallel_rank=data_parallel_rank,
                )

            generators.append(generator)
    except ValueError as e:
        return self.create_error_response(e)

    result_generator = merge_async_iterators(*generators)

    model_name = self.models.model_name(lora_request)
    num_prompts = len(engine_prompts)

    # We do not stream the results when using beam search.
    stream = request.stream and not request.use_beam_search

    # Streaming response
    if stream:
        return self.completion_stream_generator(
            request,
            engine_prompts,
            result_generator,
            request_id,
            created_time,
            model_name,
            num_prompts=num_prompts,
            tokenizer=tokenizer,
            request_metadata=request_metadata,
        )

    # Non-streaming response
    final_res_batch: list[RequestOutput | None] = [None] * num_prompts
    try:
        async for i, res in result_generator:
            final_res_batch[i] = res

        for i, final_res in enumerate(final_res_batch):
            assert final_res is not None

            # The output should contain the input text
            # We did not pass it into vLLM engine to avoid being redundant
            # with the inputs token IDs
            if final_res.prompt is None:
                engine_prompt = engine_prompts[i]
                final_res.prompt = (
                    None
                    if is_embeds_prompt(engine_prompt)
                    else engine_prompt.get("prompt")
                )

        final_res_batch_checked = cast(list[RequestOutput], final_res_batch)
        response = self.request_output_to_completion_response(
            final_res_batch_checked,
            request,
            request_id,
            created_time,
            model_name,
            tokenizer,
            request_metadata,
        )
    except asyncio.CancelledError:
        return self.create_error_response("Client disconnected")
    except GenerationError as e:
        return self._convert_generation_error_to_response(e)
    except ValueError as e:
        return self.create_error_response(e)

    # When user requests streaming but we don't stream, we still need to
    # return a streaming response with a single event.
    if request.stream:
        response_json = response.model_dump_json()

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return fake_stream_generator()

    return response
async def completion_stream_generator(
    self,
    request: CompletionRequest,
    engine_prompts: list[TokensPrompt | EmbedsPrompt],
    result_generator: AsyncIterator[tuple[int, RequestOutput]],
    request_id: str,
    created_time: int,
    model_name: str,
    num_prompts: int,
    tokenizer: TokenizerLike | None,
    request_metadata: RequestResponseMetadata,
) -> AsyncGenerator[str, None]:
    """Yield SSE chunks for a streaming completion request."""
    num_choices = 1 if request.n is None else request.n
    previous_text_lens = [0] * num_choices * num_prompts
    previous_num_tokens = [0] * num_choices * num_prompts
    has_echoed = [False] * num_choices * num_prompts
    num_prompt_tokens = [0] * num_prompts
    num_cached_tokens = None
    first_iteration = True

    stream_options = request.stream_options
    include_usage, include_continuous_usage = should_include_usage(
        stream_options, self.enable_force_include_usage
    )

    try:
        async for prompt_idx, res in result_generator:
            prompt_token_ids = res.prompt_token_ids
            prompt_logprobs = res.prompt_logprobs
            if first_iteration:
                num_cached_tokens = res.num_cached_tokens
                first_iteration = False

            prompt_text = res.prompt
            if prompt_text is None:
                engine_prompt = engine_prompts[prompt_idx]
                prompt_text = (
                    None
                    if is_embeds_prompt(engine_prompt)
                    else engine_prompt.get("prompt")
                )

            # Prompt details are excluded from later streamed outputs
            if prompt_token_ids is not None:
                num_prompt_tokens[prompt_idx] = len(prompt_token_ids)

            delta_token_ids: GenericSequence[int]
            out_logprobs: GenericSequence[dict[int, Logprob] | None] | None

            for output in res.outputs:
                i = output.index + prompt_idx * num_choices

                # Useful when request.return_token_ids is True
                # Returning prompt token IDs shares the same logic
                # with the echo implementation.
                prompt_token_ids_to_return: list[int] | None = None
                assert request.max_tokens is not None
                if request.echo and not has_echoed[i]:
                    assert prompt_token_ids is not None
                    if request.return_token_ids:
                        prompt_text = ""
                    assert prompt_text is not None
                    if request.max_tokens == 0:
                        # only return the prompt
                        delta_text = prompt_text
                        delta_token_ids = prompt_token_ids
                        out_logprobs = prompt_logprobs
                    else:
                        # echo the prompt and first token
                        delta_text = prompt_text + output.text
                        delta_token_ids = [
                            *prompt_token_ids,
                            *output.token_ids,
                        ]
                        out_logprobs = [
                            *(prompt_logprobs or []),
                            *(output.logprobs or []),
                        ]
                    # NOTE(review): placement reconstructed from
                    # whitespace-mangled source — confirm it applies to
                    # both echo sub-branches.
                    prompt_token_ids_to_return = prompt_token_ids
                    has_echoed[i] = True
                else:
                    # return just the delta
                    delta_text = output.text
                    delta_token_ids = output.token_ids
                    out_logprobs = output.logprobs

                    # has_echoed[i] is reused here to indicate whether
                    # we have already returned the prompt token IDs.
                    if not has_echoed[i] and request.return_token_ids:
                        prompt_token_ids_to_return = prompt_token_ids
                        has_echoed[i] = True

                if (
                    not delta_text
                    and not delta_token_ids
                    and not previous_num_tokens[i]
                ):
                    # Chunked prefill case, don't return empty chunks
                    continue

                if request.logprobs is not None:
                    assert out_logprobs is not None, "Did not output logprobs"
                    logprobs = self._create_completion_logprobs(
                        token_ids=delta_token_ids,
                        top_logprobs=out_logprobs,
                        num_output_top_logprobs=request.logprobs,
                        tokenizer=tokenizer,
                        initial_text_offset=previous_text_lens[i],
                        return_as_token_id=request.return_tokens_as_token_ids,
                    )
                else:
                    logprobs = None

                previous_text_lens[i] += len(output.text)
                previous_num_tokens[i] += len(output.token_ids)
                finish_reason = output.finish_reason
                stop_reason = output.stop_reason

                self._raise_if_error(finish_reason, request_id)

                chunk = CompletionStreamResponse(
                    id=request_id,
                    created=created_time,
                    model=model_name,
                    choices=[
                        CompletionResponseStreamChoice(
                            index=i,
                            text=delta_text,
                            logprobs=logprobs,
                            finish_reason=finish_reason,
                            stop_reason=stop_reason,
                            prompt_token_ids=prompt_token_ids_to_return,
                            token_ids=(
                                as_list(output.token_ids)
                                if request.return_token_ids
                                else None
                            ),
                        )
                    ],
                )
                if include_continuous_usage:
                    prompt_tokens = num_prompt_tokens[prompt_idx]
                    completion_tokens = previous_num_tokens[i]
                    chunk.usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )

                response_json = chunk.model_dump_json(exclude_unset=False)
                yield f"data: {response_json}\n\n"

        total_prompt_tokens = sum(num_prompt_tokens)
        total_completion_tokens = sum(previous_num_tokens)
        final_usage_info = UsageInfo(
            prompt_tokens=total_prompt_tokens,
            completion_tokens=total_completion_tokens,
            total_tokens=total_prompt_tokens + total_completion_tokens,
        )
        if self.enable_prompt_tokens_details and num_cached_tokens:
            final_usage_info.prompt_tokens_details = PromptTokenUsageInfo(
                cached_tokens=num_cached_tokens
            )

        if include_usage:
            final_usage_chunk = CompletionStreamResponse(
                id=request_id,
                created=created_time,
                model=model_name,
                choices=[],
                usage=final_usage_info,
            )
            final_usage_data = final_usage_chunk.model_dump_json(
                exclude_unset=False, exclude_none=True
            )
            yield f"data: {final_usage_data}\n\n"

        # report to FastAPI middleware aggregate usage across all choices
        request_metadata.final_usage_info = final_usage_info
    except GenerationError as e:
        yield f"data: {self._convert_generation_error_to_streaming_response(e)}\n\n"
    except Exception as e:
        logger.exception("Error in completion stream generator.")
        data = self.create_streaming_error_response(e)
        yield f"data: {data}\n\n"
    yield "data: [DONE]\n\n"
def request_output_to_completion_response(
    self,
    final_res_batch: list[RequestOutput],
    request: CompletionRequest,
    request_id: str,
    created_time: int,
    model_name: str,
    tokenizer: TokenizerLike | None,
    request_metadata: RequestResponseMetadata,
) -> CompletionResponse:
    """Aggregate finished RequestOutputs into one CompletionResponse."""
    choices: list[CompletionResponseChoice] = []
    num_prompt_tokens = 0
    num_generated_tokens = 0
    kv_transfer_params = None
    last_final_res = None

    for final_res in final_res_batch:
        last_final_res = final_res
        prompt_token_ids = final_res.prompt_token_ids
        assert prompt_token_ids is not None
        prompt_logprobs = clamp_prompt_logprobs(final_res.prompt_logprobs)
        prompt_text = final_res.prompt

        token_ids: GenericSequence[int]
        out_logprobs: GenericSequence[dict[int, Logprob] | None] | None

        for output in final_res.outputs:
            self._raise_if_error(output.finish_reason, request_id)
            assert request.max_tokens is not None
            if request.echo:
                if request.return_token_ids:
                    prompt_text = ""
                assert prompt_text is not None
                if request.max_tokens == 0:
                    # Echo-only: the prompt itself is the whole output.
                    token_ids = prompt_token_ids
                    out_logprobs = prompt_logprobs
                    output_text = prompt_text
                else:
                    # Echo the prompt followed by the generated text.
                    token_ids = [*prompt_token_ids, *output.token_ids]
                    if request.logprobs is None:
                        out_logprobs = None
                    else:
                        assert prompt_logprobs is not None
                        assert output.logprobs is not None
                        out_logprobs = [
                            *prompt_logprobs,
                            *output.logprobs,
                        ]
                    output_text = prompt_text + output.text
            else:
                token_ids = output.token_ids
                out_logprobs = output.logprobs
                output_text = output.text

            if request.logprobs is not None:
                assert out_logprobs is not None, "Did not output logprobs"
                logprobs = self._create_completion_logprobs(
                    token_ids=token_ids,
                    top_logprobs=out_logprobs,
                    tokenizer=tokenizer,
                    num_output_top_logprobs=request.logprobs,
                    return_as_token_id=request.return_tokens_as_token_ids,
                )
            else:
                logprobs = None

            choices.append(
                CompletionResponseChoice(
                    index=len(choices),
                    text=output_text,
                    logprobs=logprobs,
                    finish_reason=output.finish_reason,
                    stop_reason=output.stop_reason,
                    prompt_logprobs=final_res.prompt_logprobs,
                    prompt_token_ids=(
                        prompt_token_ids if request.return_token_ids else None
                    ),
                    token_ids=(
                        as_list(output.token_ids)
                        if request.return_token_ids
                        else None
                    ),
                )
            )
            num_generated_tokens += len(output.token_ids)

        num_prompt_tokens += len(prompt_token_ids)

    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    if (
        self.enable_prompt_tokens_details
        and last_final_res
        and last_final_res.num_cached_tokens
    ):
        usage.prompt_tokens_details = PromptTokenUsageInfo(
            cached_tokens=last_final_res.num_cached_tokens
        )

    request_metadata.final_usage_info = usage

    if final_res_batch:
        kv_transfer_params = final_res_batch[0].kv_transfer_params

    return CompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
        kv_transfer_params=kv_transfer_params,
    )
self.enable_prompt_tokens_details and last_final_res and last_final_res.num_cached_tokens ): usage.prompt_tokens_details = PromptTokenUsageInfo( cached_tokens=last_final_res.num_cached_tokens ) request_metadata.final_usage_info = usage if final_res_batch: kv_transfer_params = final_res_batch[0].kv_transfer_params return CompletionResponse( id=request_id, created=created_time, model=model_name, choices=choices, usage=usage, kv_transfer_params=kv_transfer_params, ) def _create_completion_logprobs( self, token_ids: GenericSequence[int], top_logprobs: GenericSequence[dict[int, Logprob] | None], num_output_top_logprobs: int, tokenizer: TokenizerLike | None, initial_text_offset: int = 0, return_as_token_id: bool | None = None, ) -> CompletionLogProbs: """Create logprobs for OpenAI Completion API.""" out_text_offset: list[int] = [] out_token_logprobs: list[float | None] = [] out_tokens: list[str] = [] out_top_logprobs: list[dict[str, float] | None] = [] last_token_len = 0 should_return_as_token_id = ( return_as_token_id if return_as_token_id is not None else self.return_tokens_as_token_ids ) for i, token_id in enumerate(token_ids): step_top_logprobs = top_logprobs[i] if step_top_logprobs is None: if should_return_as_token_id: token = f"token_id:{token_id}" else: if tokenizer is None: raise VLLMValidationError( "Unable to get tokenizer because " "`skip_tokenizer_init=True`", parameter="skip_tokenizer_init", value=True, ) token = tokenizer.decode(token_id) out_tokens.append(token) out_token_logprobs.append(None) out_top_logprobs.append(None) else: step_token = step_top_logprobs[token_id] token = self._get_decoded_token( step_token, token_id, tokenizer, return_as_token_id=should_return_as_token_id, ) token_logprob = max(step_token.logprob, -9999.0) out_tokens.append(token) out_token_logprobs.append(token_logprob) # makes sure to add the top num_output_top_logprobs + 1 # logprobs, as defined in the openai API # (cf. 
https://github.com/openai/openai-openapi/blob/ # 893ba52242dbd5387a97b96444ee1c742cfce9bd/openapi.yaml#L7153) out_top_logprobs.append( { # Convert float("-inf") to the # JSON-serializable float that OpenAI uses self._get_decoded_token( top_lp[1], top_lp[0], tokenizer, return_as_token_id=should_return_as_token_id, ): max(top_lp[1].logprob, -9999.0) for i, top_lp in enumerate(step_top_logprobs.items()) if num_output_top_logprobs >= i } ) if len(out_text_offset) == 0: out_text_offset.append(initial_text_offset) else: out_text_offset.append(out_text_offset[-1] + last_token_len) last_token_len = len(token) return CompletionLogProbs( text_offset=out_text_offset, token_logprobs=out_token_logprobs, tokens=out_tokens, top_logprobs=out_top_logprobs, ) def _build_render_config( self, request: CompletionRequest, max_input_length: int | None = None, ) -> RenderConfig: # Validate max_tokens before using it if request.max_tokens is not None and request.max_tokens > self.max_model_len: raise VLLMValidationError( f"'max_tokens' ({request.max_tokens}) cannot be greater than " f"the model's maximum context length ({self.max_model_len}).", parameter="max_tokens", value=request.max_tokens, ) max_input_tokens_len = self.max_model_len - (request.max_tokens or 0) return RenderConfig( max_length=max_input_tokens_len, truncate_prompt_tokens=request.truncate_prompt_tokens, add_special_tokens=request.add_special_tokens, cache_salt=request.cache_salt, needs_detokenization=bool(request.echo and not request.return_token_ids), )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/api_server.py
vllm/entrypoints/openai/api_server.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import hashlib import importlib import inspect import json import multiprocessing import multiprocessing.forkserver as forkserver import os import secrets import signal import socket import tempfile import uuid from argparse import Namespace from collections.abc import AsyncGenerator, AsyncIterator, Awaitable from contextlib import asynccontextmanager from http import HTTPStatus from typing import Annotated, Any import model_hosting_container_standards.sagemaker as sagemaker_standards import pydantic import uvloop from fastapi import APIRouter, Depends, FastAPI, Form, HTTPException, Request from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse, StreamingResponse from starlette.concurrency import iterate_in_threadpool from starlette.datastructures import URL, Headers, MutableHeaders, State from starlette.types import ASGIApp, Message, Receive, Scope, Send import vllm.envs as envs from vllm.engine.arg_utils import AsyncEngineArgs from vllm.engine.protocol import EngineClient from vllm.entrypoints.anthropic.protocol import ( AnthropicError, AnthropicErrorResponse, AnthropicMessagesRequest, AnthropicMessagesResponse, ) from vllm.entrypoints.anthropic.serving_messages import AnthropicServingMessages from vllm.entrypoints.launcher import serve_http from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.cli_args import make_arg_parser, validate_parsed_serve_args from vllm.entrypoints.openai.orca_metrics import metrics_header from vllm.entrypoints.openai.protocol import ( ChatCompletionRequest, ChatCompletionResponse, CompletionRequest, CompletionResponse, ErrorInfo, ErrorResponse, ResponsesRequest, ResponsesResponse, StreamingResponsesResponse, TranscriptionRequest, TranscriptionResponseVariant, TranslationRequest, 
TranslationResponseVariant, ) from vllm.entrypoints.openai.serving_chat import OpenAIServingChat from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion from vllm.entrypoints.openai.serving_engine import OpenAIServing from vllm.entrypoints.openai.serving_models import ( BaseModelPath, OpenAIServingModels, ) from vllm.entrypoints.openai.serving_responses import OpenAIServingResponses from vllm.entrypoints.openai.serving_transcription import ( OpenAIServingTranscription, OpenAIServingTranslation, ) from vllm.entrypoints.openai.utils import validate_json_request from vllm.entrypoints.pooling.classify.serving import ServingClassification from vllm.entrypoints.pooling.embed.serving import OpenAIServingEmbedding from vllm.entrypoints.pooling.pooling.serving import OpenAIServingPooling from vllm.entrypoints.pooling.score.serving import ServingScores from vllm.entrypoints.serve.disagg.serving import ServingTokens from vllm.entrypoints.serve.elastic_ep.middleware import ( ScalingMiddleware, ) from vllm.entrypoints.serve.tokenize.serving import OpenAIServingTokenization from vllm.entrypoints.tool_server import DemoToolServer, MCPToolServer, ToolServer from vllm.entrypoints.utils import ( cli_env_setup, load_aware_call, log_non_default_args, process_chat_template, process_lora_modules, with_cancellation, ) from vllm.logger import init_logger from vllm.reasoning import ReasoningParserManager from vllm.tasks import POOLING_TASKS from vllm.tool_parsers import ToolParserManager from vllm.usage.usage_lib import UsageContext from vllm.utils.argparse_utils import FlexibleArgumentParser from vllm.utils.gc_utils import freeze_gc_heap from vllm.utils.network_utils import is_valid_ipv6_address from vllm.utils.system_utils import decorate_logs, set_ulimit from vllm.version import __version__ as VLLM_VERSION prometheus_multiproc_dir: tempfile.TemporaryDirectory # Cannot use __name__ (https://github.com/vllm-project/vllm/pull/4765) logger = 
init_logger("vllm.entrypoints.openai.api_server") ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL = "endpoint-load-metrics-format" _running_tasks: set[asyncio.Task] = set() @asynccontextmanager async def lifespan(app: FastAPI): try: if app.state.log_stats: engine_client: EngineClient = app.state.engine_client async def _force_log(): while True: await asyncio.sleep(envs.VLLM_LOG_STATS_INTERVAL) await engine_client.do_log_stats() task = asyncio.create_task(_force_log()) _running_tasks.add(task) task.add_done_callback(_running_tasks.remove) else: task = None # Mark the startup heap as static so that it's ignored by GC. # Reduces pause times of oldest generation collections. freeze_gc_heap() try: yield finally: if task is not None: task.cancel() finally: # Ensure app state including engine ref is gc'd del app.state @asynccontextmanager async def build_async_engine_client( args: Namespace, *, usage_context: UsageContext = UsageContext.OPENAI_API_SERVER, disable_frontend_multiprocessing: bool | None = None, client_config: dict[str, Any] | None = None, ) -> AsyncIterator[EngineClient]: if os.getenv("VLLM_WORKER_MULTIPROC_METHOD") == "forkserver": # The executor is expected to be mp. 
# Pre-import heavy modules in the forkserver process logger.debug("Setup forkserver with pre-imports") multiprocessing.set_start_method("forkserver") multiprocessing.set_forkserver_preload(["vllm.v1.engine.async_llm"]) forkserver.ensure_running() logger.debug("Forkserver setup complete!") # Context manager to handle engine_client lifecycle # Ensures everything is shutdown and cleaned up on error/exit engine_args = AsyncEngineArgs.from_cli_args(args) if client_config: engine_args._api_process_count = client_config.get("client_count", 1) engine_args._api_process_rank = client_config.get("client_index", 0) if disable_frontend_multiprocessing is None: disable_frontend_multiprocessing = bool(args.disable_frontend_multiprocessing) async with build_async_engine_client_from_engine_args( engine_args, usage_context=usage_context, disable_frontend_multiprocessing=disable_frontend_multiprocessing, client_config=client_config, ) as engine: yield engine @asynccontextmanager async def build_async_engine_client_from_engine_args( engine_args: AsyncEngineArgs, *, usage_context: UsageContext = UsageContext.OPENAI_API_SERVER, disable_frontend_multiprocessing: bool = False, client_config: dict[str, Any] | None = None, ) -> AsyncIterator[EngineClient]: """ Create EngineClient, either: - in-process using the AsyncLLMEngine Directly - multiprocess using AsyncLLMEngine RPC Returns the Client or None if the creation failed. """ # Create the EngineConfig (determines if we can use V1). 
vllm_config = engine_args.create_engine_config(usage_context=usage_context) if disable_frontend_multiprocessing: logger.warning("V1 is enabled, but got --disable-frontend-multiprocessing.") from vllm.v1.engine.async_llm import AsyncLLM async_llm: AsyncLLM | None = None # Don't mutate the input client_config client_config = dict(client_config) if client_config else {} client_count = client_config.pop("client_count", 1) client_index = client_config.pop("client_index", 0) try: async_llm = AsyncLLM.from_vllm_config( vllm_config=vllm_config, usage_context=usage_context, enable_log_requests=engine_args.enable_log_requests, aggregate_engine_logging=engine_args.aggregate_engine_logging, disable_log_stats=engine_args.disable_log_stats, client_addresses=client_config, client_count=client_count, client_index=client_index, ) # Don't keep the dummy data in memory assert async_llm is not None await async_llm.reset_mm_cache() yield async_llm finally: if async_llm: async_llm.shutdown() router = APIRouter() def base(request: Request) -> OpenAIServing: # Reuse the existing instance return tokenization(request) def models(request: Request) -> OpenAIServingModels: return request.app.state.openai_serving_models def responses(request: Request) -> OpenAIServingResponses | None: return request.app.state.openai_serving_responses def messages(request: Request) -> AnthropicServingMessages: return request.app.state.anthropic_serving_messages def chat(request: Request) -> OpenAIServingChat | None: return request.app.state.openai_serving_chat def completion(request: Request) -> OpenAIServingCompletion | None: return request.app.state.openai_serving_completion def tokenization(request: Request) -> OpenAIServingTokenization: return request.app.state.openai_serving_tokenization def transcription(request: Request) -> OpenAIServingTranscription: return request.app.state.openai_serving_transcription def translation(request: Request) -> OpenAIServingTranslation: return 
request.app.state.openai_serving_translation def engine_client(request: Request) -> EngineClient: return request.app.state.engine_client def generate_tokens(request: Request) -> ServingTokens | None: return request.app.state.serving_tokens @router.get("/load") async def get_server_load_metrics(request: Request): # This endpoint returns the current server load metrics. # It tracks requests utilizing the GPU from the following routes: # - /v1/chat/completions # - /v1/completions # - /v1/audio/transcriptions # - /v1/audio/translations # - /v1/embeddings # - /pooling # - /classify # - /score # - /v1/score # - /rerank # - /v1/rerank # - /v2/rerank return JSONResponse(content={"server_load": request.app.state.server_load_metrics}) @router.get("/v1/models") async def show_available_models(raw_request: Request): handler = models(raw_request) models_ = await handler.show_available_models() return JSONResponse(content=models_.model_dump()) @router.get("/version") async def show_version(): ver = {"version": VLLM_VERSION} return JSONResponse(content=ver) async def _convert_stream_to_sse_events( generator: AsyncGenerator[StreamingResponsesResponse, None], ) -> AsyncGenerator[str, None]: """Convert the generator to a stream of events in SSE format""" async for event in generator: event_type = getattr(event, "type", "unknown") # https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format event_data = ( f"event: {event_type}\ndata: {event.model_dump_json(indent=None)}\n\n" ) yield event_data @router.post( "/v1/responses", dependencies=[Depends(validate_json_request)], responses={ HTTPStatus.OK.value: {"content": {"text/event-stream": {}}}, HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse}, HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse}, HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse}, }, ) @with_cancellation async def create_responses(request: ResponsesRequest, raw_request: Request): handler = 
responses(raw_request) if handler is None: return base(raw_request).create_error_response( message="The model does not support Responses API" ) try: generator = await handler.create_responses(request, raw_request) except Exception as e: raise HTTPException( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e) ) from e if isinstance(generator, ErrorResponse): return JSONResponse( content=generator.model_dump(), status_code=generator.error.code ) elif isinstance(generator, ResponsesResponse): return JSONResponse(content=generator.model_dump()) return StreamingResponse( content=_convert_stream_to_sse_events(generator), media_type="text/event-stream" ) @router.get("/v1/responses/{response_id}") async def retrieve_responses( response_id: str, raw_request: Request, starting_after: int | None = None, stream: bool | None = False, ): handler = responses(raw_request) if handler is None: return base(raw_request).create_error_response( message="The model does not support Responses API" ) try: response = await handler.retrieve_responses( response_id, starting_after=starting_after, stream=stream, ) except Exception as e: raise HTTPException( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e) ) from e if isinstance(response, ErrorResponse): return JSONResponse( content=response.model_dump(), status_code=response.error.code ) elif isinstance(response, ResponsesResponse): return JSONResponse(content=response.model_dump()) return StreamingResponse( content=_convert_stream_to_sse_events(response), media_type="text/event-stream" ) @router.post("/v1/responses/{response_id}/cancel") async def cancel_responses(response_id: str, raw_request: Request): handler = responses(raw_request) if handler is None: return base(raw_request).create_error_response( message="The model does not support Responses API" ) try: response = await handler.cancel_responses(response_id) except Exception as e: raise HTTPException( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, 
detail=str(e) ) from e if isinstance(response, ErrorResponse): return JSONResponse( content=response.model_dump(), status_code=response.error.code ) return JSONResponse(content=response.model_dump()) @router.post( "/v1/messages", dependencies=[Depends(validate_json_request)], responses={ HTTPStatus.OK.value: {"content": {"text/event-stream": {}}}, HTTPStatus.BAD_REQUEST.value: {"model": AnthropicErrorResponse}, HTTPStatus.NOT_FOUND.value: {"model": AnthropicErrorResponse}, HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": AnthropicErrorResponse}, }, ) @with_cancellation @load_aware_call async def create_messages(request: AnthropicMessagesRequest, raw_request: Request): def translate_error_response(response: ErrorResponse) -> JSONResponse: anthropic_error = AnthropicErrorResponse( error=AnthropicError( type=response.error.type, message=response.error.message, ) ) return JSONResponse( status_code=response.error.code, content=anthropic_error.model_dump() ) handler = messages(raw_request) if handler is None: error = base(raw_request).create_error_response( message="The model does not support Messages API" ) return translate_error_response(error) try: generator = await handler.create_messages(request, raw_request) except Exception as e: logger.exception("Error in create_messages: %s", e) return JSONResponse( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, content=AnthropicErrorResponse( error=AnthropicError( type="internal_error", message=str(e), ) ).model_dump(), ) if isinstance(generator, ErrorResponse): return translate_error_response(generator) elif isinstance(generator, AnthropicMessagesResponse): resp = generator.model_dump(exclude_none=True) logger.debug("Anthropic Messages Response: %s", resp) return JSONResponse(content=resp) return StreamingResponse(content=generator, media_type="text/event-stream") @router.post( "/v1/chat/completions", dependencies=[Depends(validate_json_request)], responses={ HTTPStatus.OK.value: {"content": {"text/event-stream": {}}}, 
HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse}, HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse}, HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse}, }, ) @with_cancellation @load_aware_call async def create_chat_completion(request: ChatCompletionRequest, raw_request: Request): metrics_header_format = raw_request.headers.get( ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL, "" ) handler = chat(raw_request) if handler is None: return base(raw_request).create_error_response( message="The model does not support Chat Completions API" ) try: generator = await handler.create_chat_completion(request, raw_request) except Exception as e: raise HTTPException( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e) ) from e if isinstance(generator, ErrorResponse): return JSONResponse( content=generator.model_dump(), status_code=generator.error.code ) elif isinstance(generator, ChatCompletionResponse): return JSONResponse( content=generator.model_dump(), headers=metrics_header(metrics_header_format), ) return StreamingResponse(content=generator, media_type="text/event-stream") @router.post( "/v1/completions", dependencies=[Depends(validate_json_request)], responses={ HTTPStatus.OK.value: {"content": {"text/event-stream": {}}}, HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse}, HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse}, HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse}, }, ) @with_cancellation @load_aware_call async def create_completion(request: CompletionRequest, raw_request: Request): metrics_header_format = raw_request.headers.get( ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL, "" ) handler = completion(raw_request) if handler is None: return base(raw_request).create_error_response( message="The model does not support Completions API" ) try: generator = await handler.create_completion(request, raw_request) except OverflowError as e: raise HTTPException( status_code=HTTPStatus.BAD_REQUEST.value, detail=str(e) ) from e 
except Exception as e: raise HTTPException( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e) ) from e if isinstance(generator, ErrorResponse): return JSONResponse( content=generator.model_dump(), status_code=generator.error.code ) elif isinstance(generator, CompletionResponse): return JSONResponse( content=generator.model_dump(), headers=metrics_header(metrics_header_format), ) return StreamingResponse(content=generator, media_type="text/event-stream") @router.post( "/v1/audio/transcriptions", responses={ HTTPStatus.OK.value: {"content": {"text/event-stream": {}}}, HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse}, HTTPStatus.UNPROCESSABLE_ENTITY.value: {"model": ErrorResponse}, HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse}, }, ) @with_cancellation @load_aware_call async def create_transcriptions( raw_request: Request, request: Annotated[TranscriptionRequest, Form()] ): handler = transcription(raw_request) if handler is None: return base(raw_request).create_error_response( message="The model does not support Transcriptions API" ) audio_data = await request.file.read() try: generator = await handler.create_transcription(audio_data, request, raw_request) except Exception as e: raise HTTPException( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e) ) from e if isinstance(generator, ErrorResponse): return JSONResponse( content=generator.model_dump(), status_code=generator.error.code ) elif isinstance(generator, TranscriptionResponseVariant): return JSONResponse(content=generator.model_dump()) return StreamingResponse(content=generator, media_type="text/event-stream") @router.post( "/v1/audio/translations", responses={ HTTPStatus.OK.value: {"content": {"text/event-stream": {}}}, HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse}, HTTPStatus.UNPROCESSABLE_ENTITY.value: {"model": ErrorResponse}, HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse}, }, ) @with_cancellation @load_aware_call async def 
create_translations( request: Annotated[TranslationRequest, Form()], raw_request: Request ): handler = translation(raw_request) if handler is None: return base(raw_request).create_error_response( message="The model does not support Translations API" ) audio_data = await request.file.read() try: generator = await handler.create_translation(audio_data, request, raw_request) except Exception as e: raise HTTPException( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e) ) from e if isinstance(generator, ErrorResponse): return JSONResponse( content=generator.model_dump(), status_code=generator.error.code ) elif isinstance(generator, TranslationResponseVariant): return JSONResponse(content=generator.model_dump()) return StreamingResponse(content=generator, media_type="text/event-stream") def load_log_config(log_config_file: str | None) -> dict | None: if not log_config_file: return None try: with open(log_config_file) as f: return json.load(f) except Exception as e: logger.warning( "Failed to load log config from file %s: error %s", log_config_file, e ) return None class AuthenticationMiddleware: """ Pure ASGI middleware that authenticates each request by checking if the Authorization Bearer token exists and equals anyof "{api_key}". Notes ----- There are two cases in which authentication is skipped: 1. The HTTP method is OPTIONS. 2. The request path doesn't start with /v1 (e.g. /health). 
""" def __init__(self, app: ASGIApp, tokens: list[str]) -> None: self.app = app self.api_tokens = [hashlib.sha256(t.encode("utf-8")).digest() for t in tokens] def verify_token(self, headers: Headers) -> bool: authorization_header_value = headers.get("Authorization") if not authorization_header_value: return False scheme, _, param = authorization_header_value.partition(" ") if scheme.lower() != "bearer": return False param_hash = hashlib.sha256(param.encode("utf-8")).digest() token_match = False for token_hash in self.api_tokens: token_match |= secrets.compare_digest(param_hash, token_hash) return token_match def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]: if scope["type"] not in ("http", "websocket") or scope["method"] == "OPTIONS": # scope["type"] can be "lifespan" or "startup" for example, # in which case we don't need to do anything return self.app(scope, receive, send) root_path = scope.get("root_path", "") url_path = URL(scope=scope).path.removeprefix(root_path) headers = Headers(scope=scope) # Type narrow to satisfy mypy. if url_path.startswith("/v1") and not self.verify_token(headers): response = JSONResponse(content={"error": "Unauthorized"}, status_code=401) return response(scope, receive, send) return self.app(scope, receive, send) class XRequestIdMiddleware: """ Middleware the set's the X-Request-Id header for each response to a random uuid4 (hex) value if the header isn't already present in the request, otherwise use the provided request id. """ def __init__(self, app: ASGIApp) -> None: self.app = app def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]: if scope["type"] not in ("http", "websocket"): return self.app(scope, receive, send) # Extract the request headers. request_headers = Headers(scope=scope) async def send_with_request_id(message: Message) -> None: """ Custom send function to mutate the response headers and append X-Request-Id to it. 
""" if message["type"] == "http.response.start": response_headers = MutableHeaders(raw=message["headers"]) request_id = request_headers.get("X-Request-Id", uuid.uuid4().hex) response_headers.append("X-Request-Id", request_id) await send(message) return self.app(scope, receive, send_with_request_id) def _extract_content_from_chunk(chunk_data: dict) -> str: """Extract content from a streaming response chunk.""" try: from vllm.entrypoints.openai.protocol import ( ChatCompletionStreamResponse, CompletionStreamResponse, ) # Try using Completion types for type-safe parsing if chunk_data.get("object") == "chat.completion.chunk": chat_response = ChatCompletionStreamResponse.model_validate(chunk_data) if chat_response.choices and chat_response.choices[0].delta.content: return chat_response.choices[0].delta.content elif chunk_data.get("object") == "text_completion": completion_response = CompletionStreamResponse.model_validate(chunk_data) if completion_response.choices and completion_response.choices[0].text: return completion_response.choices[0].text except pydantic.ValidationError: # Fallback to manual parsing if "choices" in chunk_data and chunk_data["choices"]: choice = chunk_data["choices"][0] if "delta" in choice and choice["delta"].get("content"): return choice["delta"]["content"] elif choice.get("text"): return choice["text"] return "" class SSEDecoder: """Robust Server-Sent Events decoder for streaming responses.""" def __init__(self): self.buffer = "" self.content_buffer = [] def decode_chunk(self, chunk: bytes) -> list[dict]: """Decode a chunk of SSE data and return parsed events.""" import json try: chunk_str = chunk.decode("utf-8") except UnicodeDecodeError: # Skip malformed chunks return [] self.buffer += chunk_str events = [] # Process complete lines while "\n" in self.buffer: line, self.buffer = self.buffer.split("\n", 1) line = line.rstrip("\r") # Handle CRLF if line.startswith("data: "): data_str = line[6:].strip() if data_str == "[DONE]": 
events.append({"type": "done"}) elif data_str: try: event_data = json.loads(data_str) events.append({"type": "data", "data": event_data}) except json.JSONDecodeError: # Skip malformed JSON continue return events def extract_content(self, event_data: dict) -> str: """Extract content from event data.""" return _extract_content_from_chunk(event_data) def add_content(self, content: str) -> None: """Add content to the buffer.""" if content: self.content_buffer.append(content) def get_complete_content(self) -> str: """Get the complete buffered content.""" return "".join(self.content_buffer) def _log_streaming_response(response, response_body: list) -> None: """Log streaming response with robust SSE parsing.""" from starlette.concurrency import iterate_in_threadpool sse_decoder = SSEDecoder() chunk_count = 0 def buffered_iterator(): nonlocal chunk_count for chunk in response_body: chunk_count += 1 yield chunk # Parse SSE events from chunk events = sse_decoder.decode_chunk(chunk) for event in events: if event["type"] == "data": content = sse_decoder.extract_content(event["data"]) sse_decoder.add_content(content) elif event["type"] == "done": # Log complete content when done full_content = sse_decoder.get_complete_content() if full_content: # Truncate if too long if len(full_content) > 2048: full_content = full_content[:2048] + "" "...[truncated]" logger.info( "response_body={streaming_complete: content=%r, chunks=%d}", full_content, chunk_count, ) else: logger.info( "response_body={streaming_complete: no_content, chunks=%d}", chunk_count, ) return response.body_iterator = iterate_in_threadpool(buffered_iterator()) logger.info("response_body={streaming_started: chunks=%d}", len(response_body)) def _log_non_streaming_response(response_body: list) -> None: """Log non-streaming response.""" try: decoded_body = response_body[0].decode() logger.info("response_body={%s}", decoded_body) except UnicodeDecodeError: logger.info("response_body={<binary_data>}") def build_app(args: 
Namespace) -> FastAPI: if args.disable_fastapi_docs: app = FastAPI( openapi_url=None, docs_url=None, redoc_url=None, lifespan=lifespan ) elif args.enable_offline_docs: app = FastAPI(docs_url=None, redoc_url=None, lifespan=lifespan) else: app = FastAPI(lifespan=lifespan) app.state.args = args from vllm.entrypoints.serve import register_vllm_serve_api_routers register_vllm_serve_api_routers(app) from vllm.entrypoints.sagemaker.routes import register_sagemaker_routes register_sagemaker_routes(router) app.include_router(router) app.root_path = args.root_path from vllm.entrypoints.pooling import register_pooling_api_routers register_pooling_api_routers(app) app.add_middleware( CORSMiddleware, allow_origins=args.allowed_origins, allow_credentials=args.allow_credentials, allow_methods=args.allowed_methods, allow_headers=args.allowed_headers, ) @app.exception_handler(HTTPException) async def http_exception_handler(_: Request, exc: HTTPException): err = ErrorResponse( error=ErrorInfo( message=exc.detail, type=HTTPStatus(exc.status_code).phrase, code=exc.status_code, ) ) return JSONResponse(err.model_dump(), status_code=exc.status_code) @app.exception_handler(RequestValidationError) async def validation_exception_handler(_: Request, exc: RequestValidationError): from vllm.entrypoints.openai.protocol import VLLMValidationError param = None for error in exc.errors(): if "ctx" in error and "error" in error["ctx"]: ctx_error = error["ctx"]["error"] if isinstance(ctx_error, VLLMValidationError): param = ctx_error.parameter break exc_str = str(exc) errors_str = str(exc.errors()) if exc.errors() and errors_str and errors_str != exc_str:
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/parser/responses_parser.py
vllm/entrypoints/openai/parser/responses_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import logging from collections.abc import Callable from openai.types.responses import ResponseFunctionToolCall, ResponseOutputItem from openai.types.responses.response_function_tool_call_output_item import ( ResponseFunctionToolCallOutputItem, ) from openai.types.responses.response_output_item import McpCall from openai.types.responses.response_output_message import ResponseOutputMessage from openai.types.responses.response_output_text import ResponseOutputText from openai.types.responses.response_reasoning_item import ( Content, ResponseReasoningItem, ) from vllm.entrypoints.constants import MCP_PREFIX from vllm.entrypoints.openai.protocol import ResponseInputOutputItem, ResponsesRequest from vllm.outputs import CompletionOutput from vllm.reasoning.abs_reasoning_parsers import ReasoningParser from vllm.tokenizers import TokenizerLike from vllm.tool_parsers.abstract_tool_parser import ToolParser from vllm.utils import random_uuid logger = logging.getLogger(__name__) class ResponsesParser: """Incremental parser over completion tokens with reasoning support.""" def __init__( self, *, tokenizer: TokenizerLike, reasoning_parser_cls: Callable[[TokenizerLike], ReasoningParser], response_messages: list[ResponseInputOutputItem], request: ResponsesRequest, tool_parser_cls: Callable[[TokenizerLike], ToolParser] | None, ): self.response_messages: list[ResponseInputOutputItem] = ( # TODO: initial messages may not be properly typed response_messages ) self.num_init_messages = len(response_messages) self.tokenizer = tokenizer self.request = request self.reasoning_parser_instance = reasoning_parser_cls(tokenizer) self.tool_parser_instance = None if tool_parser_cls is not None: self.tool_parser_instance = tool_parser_cls(tokenizer) def process(self, output: CompletionOutput) -> "ResponsesParser": reasoning_content, content = self.reasoning_parser_instance.extract_reasoning( 
output.text, request=self.request ) if reasoning_content: self.response_messages.append( ResponseReasoningItem( type="reasoning", id=f"rs_{random_uuid()}", summary=[], content=[ Content( type="reasoning_text", text=reasoning_content, ) ], ) ) function_calls: list[ResponseFunctionToolCall] = [] if self.tool_parser_instance is not None: tool_call_info = self.tool_parser_instance.extract_tool_calls( content if content is not None else "", request=self.request, # type: ignore ) if tool_call_info is not None and tool_call_info.tools_called: # extract_tool_calls() returns a list of tool calls. function_calls.extend( ResponseFunctionToolCall( id=f"fc_{random_uuid()}", call_id=f"call_{random_uuid()}", type="function_call", status="completed", name=tool_call.function.name, arguments=tool_call.function.arguments, ) for tool_call in tool_call_info.tool_calls ) content = tool_call_info.content if content and content.strip() == "": content = None if content: self.response_messages.append( ResponseOutputMessage( type="message", id=f"msg_{random_uuid()}", status="completed", role="assistant", content=[ ResponseOutputText( annotations=[], # TODO type="output_text", text=content, logprobs=None, # TODO ) ], ) ) if len(function_calls) > 0: self.response_messages.extend(function_calls) return self def make_response_output_items_from_parsable_context( self, ) -> list[ResponseOutputItem]: """Given a list of sentences, construct ResponseOutput Items.""" response_messages = self.response_messages[self.num_init_messages :] output_messages: list[ResponseOutputItem] = [] for message in response_messages: if not isinstance(message, ResponseFunctionToolCallOutputItem): output_messages.append(message) else: if len(output_messages) == 0: raise ValueError( "Cannot have a FunctionToolCallOutput before FunctionToolCall." 
) if isinstance(output_messages[-1], ResponseFunctionToolCall): mcp_message = McpCall( id=f"{MCP_PREFIX}{random_uuid()}", arguments=output_messages[-1].arguments, name=output_messages[-1].name, server_label=output_messages[ -1 ].name, # TODO: store the server label type="mcp_call", status="completed", output=message.output, # TODO: support error output ) output_messages[-1] = mcp_message return output_messages def get_responses_parser_for_simple_context( *, tokenizer: TokenizerLike, reasoning_parser_cls: Callable[[TokenizerLike], ReasoningParser], response_messages: list[ResponseInputOutputItem], request: ResponsesRequest, tool_parser_cls, ) -> ResponsesParser: """Factory function to create a ResponsesParser with optional reasoning parser. Returns: ResponsesParser instance configured with the provided parser """ return ResponsesParser( tokenizer=tokenizer, reasoning_parser_cls=reasoning_parser_cls, response_messages=response_messages, request=request, tool_parser_cls=tool_parser_cls, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/parser/__init__.py
vllm/entrypoints/openai/parser/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/openai/parser/harmony_utils.py
vllm/entrypoints/openai/parser/harmony_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import datetime import json from collections.abc import Iterable, Sequence from typing import Literal from openai.types.responses import ( ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage, ResponseOutputText, ResponseReasoningItem, ) from openai.types.responses.response_function_web_search import ( ActionFind, ActionOpenPage, ActionSearch, ResponseFunctionWebSearch, ) from openai.types.responses.response_output_item import McpCall from openai.types.responses.response_reasoning_item import ( Content as ResponseReasoningTextContent, ) from openai.types.responses.tool import Tool from openai_harmony import ( Author, ChannelConfig, Conversation, DeveloperContent, HarmonyEncodingName, Message, ReasoningEffort, Role, StreamableParser, SystemContent, TextContent, ToolDescription, load_harmony_encoding, ) from openai_harmony import Message as OpenAIHarmonyMessage from openai_harmony import Role as OpenAIHarmonyRole from vllm import envs from vllm.entrypoints.openai.protocol import ( ChatCompletionToolsParam, ResponseInputOutputItem, ResponsesRequest, ) from vllm.utils import random_uuid REASONING_EFFORT = { "high": ReasoningEffort.HIGH, "medium": ReasoningEffort.MEDIUM, "low": ReasoningEffort.LOW, } _harmony_encoding = None # Builtin tools that should be included in the system message when # they are available and requested by the user. # Tool args are provided by MCP tool descriptions. Output # of the tools are stringified. MCP_BUILTIN_TOOLS: set[str] = { "web_search_preview", "code_interpreter", "container", } def has_custom_tools(tool_types: set[str]) -> bool: """ Checks if the given tool types are custom tools (i.e. 
any tool other than MCP buildin tools) """ return not tool_types.issubset(MCP_BUILTIN_TOOLS) def get_encoding(): global _harmony_encoding if _harmony_encoding is None: _harmony_encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS) return _harmony_encoding def get_system_message( model_identity: str | None = None, reasoning_effort: Literal["high", "medium", "low"] | None = None, start_date: str | None = None, browser_description: str | None = None, python_description: str | None = None, container_description: str | None = None, instructions: str | None = None, with_custom_tools: bool = False, ) -> Message: sys_msg_content = SystemContent.new() if model_identity is not None: sys_msg_content = sys_msg_content.with_model_identity(model_identity) if instructions is not None and envs.VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS: current_identity = sys_msg_content.model_identity new_identity = ( f"{current_identity}\n{instructions}" if current_identity else instructions ) sys_msg_content = sys_msg_content.with_model_identity(new_identity) if reasoning_effort is not None: sys_msg_content = sys_msg_content.with_reasoning_effort( REASONING_EFFORT[reasoning_effort] ) if start_date is None: # NOTE(woosuk): This brings non-determinism in vLLM. Be careful. 
start_date = datetime.datetime.now().strftime("%Y-%m-%d") sys_msg_content = sys_msg_content.with_conversation_start_date(start_date) if browser_description is not None: sys_msg_content = sys_msg_content.with_tools(browser_description) if python_description is not None: sys_msg_content = sys_msg_content.with_tools(python_description) if container_description is not None: sys_msg_content = sys_msg_content.with_tools(container_description) if not with_custom_tools: channel_config = sys_msg_content.channel_config invalid_channel = "commentary" new_config = ChannelConfig.require_channels( [c for c in channel_config.valid_channels if c != invalid_channel] ) sys_msg_content = sys_msg_content.with_channel_config(new_config) sys_msg = Message.from_role_and_content(Role.SYSTEM, sys_msg_content) return sys_msg def create_tool_definition(tool: ChatCompletionToolsParam | Tool): if isinstance(tool, ChatCompletionToolsParam): return ToolDescription.new( name=tool.function.name, description=tool.function.description, parameters=tool.function.parameters, ) return ToolDescription.new( name=tool.name, description=tool.description, parameters=tool.parameters, ) def get_developer_message( instructions: str | None = None, tools: list[Tool | ChatCompletionToolsParam] | None = None, ) -> Message: dev_msg_content = DeveloperContent.new() if instructions is not None and not envs.VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS: dev_msg_content = dev_msg_content.with_instructions(instructions) if tools is not None: function_tools: list[Tool | ChatCompletionToolsParam] = [] for tool in tools: if tool.type in ( "web_search_preview", "code_interpreter", "container", ): pass elif tool.type == "function": function_tools.append(tool) else: raise ValueError(f"tool type {tool.type} not supported") if function_tools: function_tool_descriptions = [ create_tool_definition(tool) for tool in function_tools ] dev_msg_content = dev_msg_content.with_function_tools( function_tool_descriptions ) dev_msg = 
Message.from_role_and_content(Role.DEVELOPER, dev_msg_content) return dev_msg def get_user_message(content: str) -> Message: return Message.from_role_and_content(Role.USER, content) def parse_response_input( response_msg: ResponseInputOutputItem, prev_responses: list[ResponseOutputItem | ResponseReasoningItem], ) -> Message: if not isinstance(response_msg, dict): response_msg = response_msg.model_dump() if "type" not in response_msg or response_msg["type"] == "message": role = response_msg["role"] content = response_msg["content"] if role == "system": # User is trying to set a system message. Change it to: # <|start|>developer<|message|># Instructions # {instructions}<|end|> role = "developer" text_prefix = "Instructions:\n" else: text_prefix = "" if isinstance(content, str): msg = Message.from_role_and_content(role, text_prefix + content) else: contents = [TextContent(text=text_prefix + c["text"]) for c in content] msg = Message.from_role_and_contents(role, contents) if role == "assistant": msg = msg.with_channel("final") elif response_msg["type"] == "function_call_output": call_id = response_msg["call_id"] call_response: ResponseFunctionToolCall | None = None for prev_response in reversed(prev_responses): if ( isinstance(prev_response, ResponseFunctionToolCall) and prev_response.call_id == call_id ): call_response = prev_response break if call_response is None: raise ValueError(f"No call message found for {call_id}") msg = Message.from_author_and_content( Author.new(Role.TOOL, f"functions.{call_response.name}"), response_msg["output"], ) elif response_msg["type"] == "reasoning": content = response_msg["content"] assert len(content) == 1 msg = Message.from_role_and_content(Role.ASSISTANT, content[0]["text"]) elif response_msg["type"] == "function_call": msg = Message.from_role_and_content(Role.ASSISTANT, response_msg["arguments"]) msg = msg.with_channel("commentary") msg = msg.with_recipient(f"functions.{response_msg['name']}") msg = msg.with_content_type("json") 
else: raise ValueError(f"Unknown input type: {response_msg['type']}") return msg def parse_chat_inputs_to_harmony_messages(chat_msgs: list) -> list[Message]: """ Parse a list of messages from request.messages in the Chat Completion API to Harmony messages. """ msgs: list[Message] = [] tool_id_names: dict[str, str] = {} # Collect tool id to name mappings for tool response recipient values for chat_msg in chat_msgs: for tool_call in chat_msg.get("tool_calls", []): tool_id_names[tool_call.get("id")] = tool_call.get("function", {}).get( "name" ) for chat_msg in chat_msgs: msgs.extend(parse_chat_input_to_harmony_message(chat_msg, tool_id_names)) msgs = auto_drop_analysis_messages(msgs) return msgs def auto_drop_analysis_messages(msgs: list[Message]) -> list[Message]: """ Harmony models expect the analysis messages (representing raw chain of thought) to be dropped after an assistant message to the final channel is produced from the reasoning of those messages. The openai-harmony library does this if the very last assistant message is to the final channel, but it does not handle the case where we're in longer multi-turn conversations and the client gave us reasoning content from previous turns of the conversation with multiple assistant messages to the final channel in the conversation. So, we find the index of the last assistant message to the final channel and drop all analysis messages that precede it, leaving only the analysis messages that are relevant to the current part of the conversation. 
""" last_assistant_final_index = -1 for i in range(len(msgs) - 1, -1, -1): msg = msgs[i] if msg.author.role == "assistant" and msg.channel == "final": last_assistant_final_index = i break cleaned_msgs: list[Message] = [] for i, msg in enumerate(msgs): if i < last_assistant_final_index and msg.channel == "analysis": continue cleaned_msgs.append(msg) return cleaned_msgs def flatten_chat_text_content(content: str | list | None) -> str | None: """ Extract the text parts from a chat message content field and flatten them into a single string. """ if isinstance(content, list): return "".join( item.get("text", "") for item in content if isinstance(item, dict) and item.get("type") == "text" ) return content def parse_chat_input_to_harmony_message( chat_msg, tool_id_names: dict[str, str] | None = None ) -> list[Message]: """ Parse a message from request.messages in the Chat Completion API to Harmony messages. """ tool_id_names = tool_id_names or {} if not isinstance(chat_msg, dict): # Handle Pydantic models chat_msg = chat_msg.model_dump(exclude_none=True) role = chat_msg.get("role") msgs: list[Message] = [] # Assistant message with tool calls tool_calls = chat_msg.get("tool_calls", []) if role == "assistant" and tool_calls: content = flatten_chat_text_content(chat_msg.get("content")) if content: commentary_msg = Message.from_role_and_content(Role.ASSISTANT, content) commentary_msg = commentary_msg.with_channel("commentary") msgs.append(commentary_msg) reasoning_content = chat_msg.get("reasoning") or chat_msg.get( "reasoning_content" ) if reasoning_content: analysis_msg = Message.from_role_and_content( Role.ASSISTANT, reasoning_content ) analysis_msg = analysis_msg.with_channel("analysis") msgs.append(analysis_msg) for call in tool_calls: func = call.get("function", {}) name = func.get("name", "") arguments = func.get("arguments", "") or "" msg = Message.from_role_and_content(Role.ASSISTANT, arguments) msg = msg.with_channel("commentary") msg = 
msg.with_recipient(f"functions.{name}") # Officially, this should be `<|constrain|>json` but there is not clear # evidence that improves accuracy over `json` and some anecdotes to the # contrary. Further testing of the different content_types is needed. msg = msg.with_content_type("json") msgs.append(msg) return msgs # Tool role message (tool output) if role == "tool": tool_call_id = chat_msg.get("tool_call_id", "") name = tool_id_names.get(tool_call_id, "") content = chat_msg.get("content", "") or "" content = flatten_chat_text_content(content) msg = ( Message.from_author_and_content( Author.new(Role.TOOL, f"functions.{name}"), content ) .with_channel("commentary") .with_recipient("assistant") ) return [msg] # Non-tool reasoning content reasoning_content = chat_msg.get("reasoning") or chat_msg.get("reasoning_content") if role == "assistant" and reasoning_content: analysis_msg = Message.from_role_and_content(Role.ASSISTANT, reasoning_content) analysis_msg = analysis_msg.with_channel("analysis") msgs.append(analysis_msg) # Default: user/assistant/system messages with content content = chat_msg.get("content") or "" if content is None: content = "" if isinstance(content, str): contents = [TextContent(text=content)] else: # TODO: Support refusal. contents = [TextContent(text=c.get("text", "")) for c in content] # Only add assistant messages if they have content, as reasoning or tool calling # assistant messages were already added above. if role == "assistant" and contents and contents[0].text: msg = Message.from_role_and_contents(role, contents) # Send non-tool assistant messages to the final channel msg = msg.with_channel("final") msgs.append(msg) # For user/system/developer messages, add them directly even if no content. 
elif role != "assistant": msg = Message.from_role_and_contents(role, contents) msgs.append(msg) return msgs def parse_input_to_harmony_message(chat_msg) -> list[Message]: """ Parse a message from request.previous_input_messages in the Responsees API to Harmony messages. """ if not isinstance(chat_msg, dict): # Handle Pydantic models chat_msg = chat_msg.model_dump(exclude_none=True) role = chat_msg.get("role") # Assistant message with tool calls tool_calls = chat_msg.get("tool_calls") if role == "assistant" and tool_calls: msgs: list[Message] = [] for call in tool_calls: func = call.get("function", {}) name = func.get("name", "") arguments = func.get("arguments", "") or "" msg = Message.from_role_and_content(Role.ASSISTANT, arguments) msg = msg.with_channel("commentary") msg = msg.with_recipient(f"functions.{name}") msg = msg.with_content_type("json") msgs.append(msg) return msgs # Tool role message (tool output) if role == "tool": name = chat_msg.get("name", "") content = chat_msg.get("content", "") or "" content = flatten_chat_text_content(content) msg = Message.from_author_and_content( Author.new(Role.TOOL, f"functions.{name}"), content ).with_channel("commentary") return [msg] # Default: user/assistant/system messages with content content = chat_msg.get("content", "") if isinstance(content, str): contents = [TextContent(text=content)] else: # TODO: Support refusal. 
contents = [TextContent(text=c.get("text", "")) for c in content] msg = Message.from_role_and_contents(role, contents) return [msg] def construct_harmony_previous_input_messages( request: ResponsesRequest, ) -> list[OpenAIHarmonyMessage]: messages: list[OpenAIHarmonyMessage] = [] if request.previous_input_messages: for message in request.previous_input_messages: # Handle both OpenAIHarmonyMessage objects and dictionary inputs if isinstance(message, OpenAIHarmonyMessage): message_role = message.author.role # To match OpenAI, instructions, reasoning and tools are # always taken from the most recent Responses API request # not carried over from previous requests if ( message_role == OpenAIHarmonyRole.SYSTEM or message_role == OpenAIHarmonyRole.DEVELOPER ): continue messages.append(message) else: harmony_messages = parse_input_to_harmony_message(message) for harmony_msg in harmony_messages: message_role = harmony_msg.author.role # To match OpenAI, instructions, reasoning and tools are # always taken from the most recent Responses API request # not carried over from previous requests if ( message_role == OpenAIHarmonyRole.SYSTEM or message_role == OpenAIHarmonyRole.DEVELOPER ): continue messages.append(harmony_msg) return messages def render_for_completion(messages: list[Message]) -> list[int]: conversation = Conversation.from_messages(messages) token_ids = get_encoding().render_conversation_for_completion( conversation, Role.ASSISTANT ) return token_ids def _parse_browser_tool_call(message: Message, recipient: str) -> ResponseOutputItem: """Parse browser tool calls (search, open, find) into web search items.""" if len(message.content) != 1: raise ValueError("Invalid number of contents in browser message") content = message.content[0] # Parse JSON args (with retry detection) try: browser_call = json.loads(content.text) except json.JSONDecodeError: json_retry_output_message = ( f"Invalid JSON args, caught and retried: {content.text}" ) browser_call = { "query": 
json_retry_output_message, "url": json_retry_output_message, "pattern": json_retry_output_message, } # Create appropriate action based on recipient if recipient == "browser.search": action = ActionSearch( query=f"cursor:{browser_call.get('query', '')}", type="search" ) elif recipient == "browser.open": action = ActionOpenPage( url=f"cursor:{browser_call.get('url', '')}", type="open_page" ) elif recipient == "browser.find": action = ActionFind( pattern=browser_call.get("pattern", ""), url=f"cursor:{browser_call.get('url', '')}", type="find", ) else: raise ValueError(f"Unknown browser action: {recipient}") return ResponseFunctionWebSearch( id=f"ws_{random_uuid()}", action=action, status="completed", type="web_search_call", ) def _parse_function_call(message: Message, recipient: str) -> list[ResponseOutputItem]: """Parse function calls into function tool call items.""" function_name = recipient.split(".")[-1] output_items = [] for content in message.content: random_id = random_uuid() response_item = ResponseFunctionToolCall( arguments=content.text, call_id=f"call_{random_id}", type="function_call", name=function_name, id=f"fc_{random_id}", ) output_items.append(response_item) return output_items def _parse_reasoning_content(message: Message) -> list[ResponseOutputItem]: """Parse reasoning/analysis content into reasoning items.""" output_items = [] for content in message.content: reasoning_item = ResponseReasoningItem( id=f"rs_{random_uuid()}", summary=[], type="reasoning", content=[ ResponseReasoningTextContent(text=content.text, type="reasoning_text") ], status=None, ) output_items.append(reasoning_item) return output_items def _parse_final_message(message: Message) -> ResponseOutputItem: """Parse final channel messages into output message items.""" contents = [] for content in message.content: output_text = ResponseOutputText( text=content.text, annotations=[], # TODO type="output_text", logprobs=None, # TODO ) contents.append(output_text) return 
ResponseOutputMessage( id=f"msg_{random_uuid()}", content=contents, role=message.author.role, status="completed", type="message", ) def _parse_mcp_recipient(recipient: str) -> tuple[str, str]: """ Parse MCP recipient into (server_label, tool_name). For dotted recipients like "repo_browser.list": - server_label: "repo_browser" (namespace/server) - tool_name: "list" (specific tool) For simple recipients like "filesystem": - server_label: "filesystem" - tool_name: "filesystem" """ if "." in recipient: server_label = recipient.split(".")[0] tool_name = recipient.split(".")[-1] else: server_label = recipient tool_name = recipient return server_label, tool_name def _parse_mcp_call(message: Message, recipient: str) -> list[ResponseOutputItem]: """Parse MCP calls into MCP call items.""" server_label, tool_name = _parse_mcp_recipient(recipient) output_items = [] for content in message.content: response_item = McpCall( arguments=content.text, type="mcp_call", name=tool_name, server_label=server_label, id=f"mcp_{random_uuid()}", status="completed", ) output_items.append(response_item) return output_items def parse_output_message(message: Message) -> list[ResponseOutputItem]: """ Parse a Harmony message into a list of output response items. """ if message.author.role != "assistant": # This is a message from a tool to the assistant (e.g., search result). # Don't include it in the final output for now. This aligns with # OpenAI's behavior on models like o4-mini. 
return [] output_items: list[ResponseOutputItem] = [] recipient = message.recipient if recipient is not None: # Browser tool calls if recipient.startswith("browser."): output_items.append(_parse_browser_tool_call(message, recipient)) # Function calls (should only happen on commentary channel) elif message.channel == "commentary" and recipient.startswith("functions."): output_items.extend(_parse_function_call(message, recipient)) # Built-in tools are treated as reasoning elif recipient.startswith(("python", "browser", "container")): # Built-in tool recipients (python/browser/container) # generate reasoning output output_items.extend(_parse_reasoning_content(message)) # All other recipients are MCP calls else: output_items.extend(_parse_mcp_call(message, recipient)) # No recipient - handle based on channel for non-tool messages elif message.channel == "analysis": output_items.extend(_parse_reasoning_content(message)) elif message.channel == "commentary": # Per Harmony format, commentary channel can contain preambles to calling # multiple functions - explanatory text with no recipient output_items.extend(_parse_reasoning_content(message)) elif message.channel == "final": output_items.append(_parse_final_message(message)) else: raise ValueError(f"Unknown channel: {message.channel}") return output_items def parse_remaining_state(parser: StreamableParser) -> list[ResponseOutputItem]: if not parser.current_content: return [] if parser.current_role != Role.ASSISTANT: return [] current_recipient = parser.current_recipient if current_recipient is not None and current_recipient.startswith("browser."): return [] if current_recipient and parser.current_channel in ("commentary", "analysis"): if current_recipient.startswith("functions."): rid = random_uuid() return [ ResponseFunctionToolCall( arguments=parser.current_content, call_id=f"call_{rid}", type="function_call", name=current_recipient.split(".")[-1], id=f"fc_{rid}", status="in_progress", ) ] # Built-in tools (python, 
browser, container) should be treated as reasoning elif not ( current_recipient.startswith("python") or current_recipient.startswith("browser") or current_recipient.startswith("container") ): # All other recipients are MCP calls rid = random_uuid() server_label, tool_name = _parse_mcp_recipient(current_recipient) return [ McpCall( arguments=parser.current_content, type="mcp_call", name=tool_name, server_label=server_label, id=f"mcp_{rid}", status="in_progress", ) ] if parser.current_channel == "commentary": return [ ResponseReasoningItem( id=f"rs_{random_uuid()}", summary=[], type="reasoning", content=[ ResponseReasoningTextContent( text=parser.current_content, type="reasoning_text" ) ], status=None, ) ] if parser.current_channel == "analysis": return [ ResponseReasoningItem( id=f"rs_{random_uuid()}", summary=[], type="reasoning", content=[ ResponseReasoningTextContent( text=parser.current_content, type="reasoning_text" ) ], status=None, ) ] if parser.current_channel == "final": output_text = ResponseOutputText( text=parser.current_content, annotations=[], # TODO type="output_text", logprobs=None, # TODO ) text_item = ResponseOutputMessage( id=f"msg_{random_uuid()}", content=[output_text], role="assistant", # if the parser still has messages (ie if the generator got cut # abruptly), this should be incomplete status="incomplete", type="message", ) return [text_item] return [] def get_stop_tokens_for_assistant_actions() -> list[int]: return get_encoding().stop_tokens_for_assistant_actions() def get_streamable_parser_for_assistant() -> StreamableParser: return StreamableParser(get_encoding(), role=Role.ASSISTANT) def parse_output_into_messages(token_ids: Iterable[int]) -> StreamableParser: parser = get_streamable_parser_for_assistant() for token_id in token_ids: parser.process(token_id) return parser def parse_chat_output( token_ids: Sequence[int], ) -> tuple[str | None, str | None, bool]: """ Parse the output of a Harmony chat completion into reasoning and final 
content. Note that when the `openai` tool parser is used, serving_chat only uses this for the reasoning content and gets the final content from the tool call parser. When the `openai` tool parser is not enabled, or when `GptOssReasoningParser` is in use,this needs to return the final content without any tool calls parsed. Empty reasoning or final content is returned as None instead of an empty string. """ parser = parse_output_into_messages(token_ids) output_msgs = parser.messages is_tool_call = False # TODO: update this when tool call is supported # Get completed messages from the parser reasoning_texts = [ msg.content[0].text for msg in output_msgs if msg.channel == "analysis" ] final_texts = [ msg.content[0].text for msg in output_msgs if msg.channel != "analysis" ] # Extract partial messages from the parser if parser.current_channel == "analysis" and parser.current_content: reasoning_texts.append(parser.current_content) elif parser.current_channel != "analysis" and parser.current_content: final_texts.append(parser.current_content) # Flatten multiple messages into a single string reasoning: str | None = "\n".join(reasoning_texts) final_content: str | None = "\n".join(final_texts) # Return None instead of empty string since existing callers check for None reasoning = reasoning or None final_content = final_content or None return reasoning, final_content, is_tool_call
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/sagemaker/__init__.py
vllm/entrypoints/sagemaker/__init__.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """SageMaker-specific integration for vLLM."""
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/sagemaker/routes.py
vllm/entrypoints/sagemaker/routes.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""SageMaker-compatible HTTP routes.

Exposes the two endpoints SageMaker model hosting requires:

* ``/ping``        -- health check (GET and POST).
* ``/invocations`` -- a single entry point that dispatches an arbitrary
  request body to the matching OpenAI-style handler based on which request
  schema the body validates against.
"""
import json
from collections.abc import Awaitable, Callable
from http import HTTPStatus
from typing import Any

import model_hosting_container_standards.sagemaker as sagemaker_standards
import pydantic
from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.responses import JSONResponse, Response

from vllm.entrypoints.openai.api_server import (
    base,
    chat,
    completion,
    create_chat_completion,
    create_completion,
    validate_json_request,
)
from vllm.entrypoints.openai.protocol import (
    ChatCompletionRequest,
    CompletionRequest,
    ErrorResponse,
)
from vllm.entrypoints.openai.serving_engine import OpenAIServing
from vllm.entrypoints.pooling.classify.api_router import classify, create_classify
from vllm.entrypoints.pooling.classify.protocol import ClassificationRequest
from vllm.entrypoints.pooling.embed.api_router import create_embedding, embedding
from vllm.entrypoints.pooling.embed.protocol import EmbeddingRequest
from vllm.entrypoints.pooling.pooling.api_router import create_pooling, pooling
from vllm.entrypoints.pooling.pooling.protocol import PoolingRequest
from vllm.entrypoints.pooling.score.api_router import (
    create_score,
    do_rerank,
    rerank,
    score,
)
from vllm.entrypoints.pooling.score.protocol import RerankRequest, ScoreRequest
from vllm.entrypoints.serve.instrumentator.health import health

# TODO: RequestType = TypeForm[BaseModel] when recognized by type checkers
# (requires typing_extensions >= 4.13)
RequestType = Any

# A "get handler" fn returns the serving object for an endpoint, or None when
# the running model does not support that endpoint.
GetHandlerFn = Callable[[Request], OpenAIServing | None]
EndpointFn = Callable[[RequestType, Request], Awaitable[Any]]

# NOTE: Items defined earlier take higher priority
INVOCATION_TYPES: list[tuple[RequestType, tuple[GetHandlerFn, EndpointFn]]] = [
    (ChatCompletionRequest, (chat, create_chat_completion)),
    (CompletionRequest, (completion, create_completion)),
    (EmbeddingRequest, (embedding, create_embedding)),
    (ClassificationRequest, (classify, create_classify)),
    (ScoreRequest, (score, create_score)),
    (RerankRequest, (rerank, do_rerank)),
    (PoolingRequest, (pooling, create_pooling)),
]

# NOTE: Construct the TypeAdapters only once
INVOCATION_VALIDATORS = [
    (pydantic.TypeAdapter(request_type), (get_handler, endpoint))
    for request_type, (get_handler, endpoint) in INVOCATION_TYPES
]


def register_sagemaker_routes(router: APIRouter):
    """Register the SageMaker ``/ping`` and ``/invocations`` routes on *router*.

    Returns the same router for convenient chaining.
    """

    @router.post("/ping", response_class=Response)
    @router.get("/ping", response_class=Response)
    @sagemaker_standards.register_ping_handler
    async def ping(raw_request: Request) -> Response:
        """Ping check. Endpoint required for SageMaker"""
        return await health(raw_request)

    @router.post(
        "/invocations",
        dependencies=[Depends(validate_json_request)],
        responses={
            HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
            HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value: {"model": ErrorResponse},
            HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
        },
    )
    @sagemaker_standards.register_invocation_handler
    @sagemaker_standards.stateful_session_manager()
    @sagemaker_standards.inject_adapter_id(adapter_path="model")
    async def invocations(raw_request: Request):
        """For SageMaker, routes requests based on the request type."""
        try:
            body = await raw_request.json()
        except json.JSONDecodeError as e:
            raise HTTPException(
                status_code=HTTPStatus.BAD_REQUEST.value,
                detail=f"JSON decode error: {e}",
            ) from e

        # Keep only the endpoints whose serving handler is actually enabled
        # for the running model.
        valid_endpoints = [
            (validator, endpoint)
            for validator, (get_handler, endpoint) in INVOCATION_VALIDATORS
            if get_handler(raw_request) is not None
        ]

        # First schema that validates wins (INVOCATION_TYPES order = priority).
        for request_validator, endpoint in valid_endpoints:
            try:
                request = request_validator.validate_python(body)
            except pydantic.ValidationError:
                continue

            return await endpoint(request, raw_request)

        # NOTE(review): `_type` is a private TypeAdapter attribute — works on
        # current pydantic v2, but worth confirming on upgrades.
        type_names = [
            t.__name__ if isinstance(t := validator._type, type) else str(t)
            for validator, _ in valid_endpoints
        ]
        msg = f"Cannot find suitable handler for request. Expected one of: {type_names}"
        res = base(raw_request).create_error_response(message=msg)
        return JSONResponse(content=res.model_dump(), status_code=res.error.code)

    return router
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/__init__.py
vllm/entrypoints/pooling/__init__.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from fastapi import FastAPI


def register_pooling_api_routers(app: FastAPI):
    """Attach every pooling-related API router to *app*."""
    # Routers are imported inside the function body, as in the original
    # — presumably to defer heavy vllm imports / avoid import cycles
    # (TODO confirm).
    from vllm.entrypoints.pooling.classify.api_router import router as classify_router
    from vllm.entrypoints.pooling.embed.api_router import router as embed_router
    from vllm.entrypoints.pooling.pooling.api_router import router as pooling_router
    from vllm.entrypoints.pooling.score.api_router import router as score_router

    # Registration order preserved: classify, embed, score, pooling.
    for sub_router in (classify_router, embed_router, score_router, pooling_router):
        app.include_router(sub_router)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/score/serving.py
vllm/entrypoints/pooling/score/serving.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Serving layer for the Score API and the JinaAI-style Rerank API."""
import asyncio
import time
from collections.abc import AsyncGenerator, Mapping
from typing import Any

from fastapi import Request

from vllm.engine.protocol import EngineClient
from vllm.entrypoints.logger import RequestLogger
from vllm.entrypoints.openai.protocol import (
    ErrorResponse,
    UsageInfo,
)
from vllm.entrypoints.openai.serving_engine import OpenAIServing
from vllm.entrypoints.openai.serving_models import OpenAIServingModels
from vllm.entrypoints.pooling.score.protocol import (
    RerankDocument,
    RerankRequest,
    RerankResponse,
    RerankResult,
    RerankUsage,
    ScoreRequest,
    ScoreResponse,
    ScoreResponseData,
)
from vllm.entrypoints.score_utils import (
    ScoreContentPartParam,
    ScoreMultiModalParam,
    _cosine_similarity,
    _validate_score_input_lens,
    compress_token_type_ids,
    get_score_prompt,
)
from vllm.entrypoints.utils import _validate_truncation_size
from vllm.inputs.data import TokensPrompt
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.outputs import PoolingRequestOutput, ScoringRequestOutput
from vllm.tokenizers import TokenizerLike
from vllm.tokenizers.mistral import MistralTokenizer
from vllm.utils.async_utils import make_async, merge_async_iterators

logger = init_logger(__name__)


class ServingScores(OpenAIServing):
    """Implements /score and /rerank on top of embedding or cross-encoder models."""

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        request_logger: RequestLogger | None,
        score_template: str | None = None,
        log_error_stack: bool = False,
    ) -> None:
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            log_error_stack=log_error_stack,
        )
        self.score_template = score_template

    async def _embedding_score(
        self,
        tokenizer: TokenizerLike,
        texts_1: list[str],
        texts_2: list[str],
        request: RerankRequest | ScoreRequest,
        request_id: str,
        tokenization_kwargs: dict[str, Any] | None = None,
        # fixed: annotation was the redundant `LoRARequest | None | None`
        lora_request: LoRARequest | None = None,
        trace_headers: Mapping[str, str] | None = None,
    ) -> list[PoolingRequestOutput] | ErrorResponse:
        """Score text pairs with an embedding model via cosine similarity.

        Embeds all of ``texts_1 + texts_2`` in one batch, then pairs
        ``texts_1[i]`` with ``texts_2[i]`` (broadcasting a single
        ``texts_1`` entry across all of ``texts_2``).
        """
        input_texts = texts_1 + texts_2

        engine_prompts: list[TokensPrompt] = []

        tokenize_async = make_async(
            tokenizer.__call__, executor=self._tokenizer_executor
        )

        tokenization_kwargs = tokenization_kwargs or {}
        # Tokenize all inputs concurrently on the tokenizer executor.
        tokenized_prompts = await asyncio.gather(
            *(tokenize_async(t, **tokenization_kwargs) for t in input_texts)
        )

        for tok_result, input_text in zip(tokenized_prompts, input_texts):
            text_token_prompt = self._validate_input(
                request, tok_result["input_ids"], input_text
            )
            engine_prompts.append(
                TokensPrompt(prompt_token_ids=text_token_prompt["prompt_token_ids"])
            )

        # Schedule the request and get the result generator.
        generators: list[AsyncGenerator[PoolingRequestOutput, None]] = []

        pooling_params = request.to_pooling_params()

        try:
            pooling_params.verify("embed", self.model_config)
        except ValueError as e:
            return self.create_error_response(str(e))

        for i, engine_prompt in enumerate(engine_prompts):
            request_id_item = f"{request_id}-{i}"

            self._log_inputs(
                request_id_item,
                input_texts[i],
                params=pooling_params,
                lora_request=lora_request,
            )

            generators.append(
                self.engine_client.encode(
                    engine_prompt,
                    pooling_params,
                    request_id_item,
                    lora_request=lora_request,
                    trace_headers=trace_headers,
                    priority=request.priority,
                )
            )

        result_generator = merge_async_iterators(*generators)

        # Non-streaming response: collect every embedding at its input index.
        embeddings: list[PoolingRequestOutput | None] = [None] * len(engine_prompts)

        async for i, res in result_generator:
            embeddings[i] = res

        emb_texts_1: list[PoolingRequestOutput] = []
        emb_texts_2: list[PoolingRequestOutput] = []
        for i in range(len(texts_1)):
            assert (emb := embeddings[i]) is not None
            emb_texts_1.append(emb)
        for i in range(len(texts_1), len(embeddings)):
            assert (emb := embeddings[i]) is not None
            emb_texts_2.append(emb)

        # Broadcast a single query embedding across all documents (1:N case).
        if len(emb_texts_1) == 1:
            emb_texts_1 = emb_texts_1 * len(emb_texts_2)

        final_res_batch = _cosine_similarity(
            tokenizer=tokenizer, embed_1=emb_texts_1, embed_2=emb_texts_2
        )

        return final_res_batch

    def _preprocess_score(
        self,
        request: RerankRequest | ScoreRequest,
        tokenizer: TokenizerLike,
        tokenization_kwargs: dict[str, Any],
        data_1: str | ScoreContentPartParam,
        data_2: str | ScoreContentPartParam,
    ) -> tuple[str, TokensPrompt]:
        """Build and validate a single cross-encoder prompt for one pair."""
        model_config = self.model_config

        full_prompt, engine_prompt = get_score_prompt(
            model_config=model_config,
            data_1=data_1,
            data_2=data_2,
            tokenizer=tokenizer,
            tokenization_kwargs=tokenization_kwargs,
            score_template=self.score_template,
        )
        self._validate_input(request, engine_prompt["prompt_token_ids"], full_prompt)

        if request.mm_processor_kwargs is not None:
            engine_prompt["mm_processor_kwargs"] = request.mm_processor_kwargs

        return full_prompt, engine_prompt

    async def _cross_encoding_score(
        self,
        tokenizer: TokenizerLike,
        data_1: list[str] | list[ScoreContentPartParam],
        data_2: list[str] | list[ScoreContentPartParam],
        request: RerankRequest | ScoreRequest,
        request_id: str,
        tokenization_kwargs: dict[str, Any] | None = None,
        # fixed: annotation was the redundant `LoRARequest | None | None`
        lora_request: LoRARequest | None = None,
        trace_headers: Mapping[str, str] | None = None,
    ) -> list[PoolingRequestOutput] | ErrorResponse:
        """Score pairs with a cross-encoder model (one prompt per pair)."""
        request_prompts: list[str] = []
        engine_prompts: list[TokensPrompt] = []

        # Broadcast a single query across all documents (1:N case).
        if len(data_1) == 1:
            data_1 = data_1 * len(data_2)

        if isinstance(tokenizer, MistralTokenizer):
            raise ValueError("MistralTokenizer not supported for cross-encoding")

        tokenization_kwargs = tokenization_kwargs or {}

        input_pairs = list(zip(data_1, data_2))

        preprocess_async = make_async(
            self._preprocess_score, executor=self._tokenizer_executor
        )

        preprocessed_prompts = await asyncio.gather(
            *(
                preprocess_async(
                    request=request,
                    tokenizer=tokenizer,
                    tokenization_kwargs=tokenization_kwargs,
                    data_1=t1,
                    data_2=t2,
                )
                for t1, t2 in input_pairs
            )
        )

        for full_prompt, engine_prompt in preprocessed_prompts:
            request_prompts.append(full_prompt)
            engine_prompts.append(engine_prompt)

        # Schedule the request and get the result generator.
        generators: list[AsyncGenerator[PoolingRequestOutput, None]] = []

        default_pooling_params = request.to_pooling_params()

        try:
            default_pooling_params.verify("score", self.model_config)
        except ValueError as e:
            return self.create_error_response(str(e))

        for i, engine_prompt in enumerate(engine_prompts):
            request_id_item = f"{request_id}-{i}"

            self._log_inputs(
                request_id_item,
                request_prompts[i],
                params=default_pooling_params,
                lora_request=lora_request,
            )

            # Token-type ids (if present) ride along in compressed form via
            # per-request pooling params; clone so the default stays clean.
            if token_type_ids := engine_prompt.pop("token_type_ids", None):
                pooling_params = default_pooling_params.clone()
                compressed = compress_token_type_ids(token_type_ids)
                pooling_params.extra_kwargs = {"compressed_token_type_ids": compressed}
            else:
                pooling_params = default_pooling_params

            generator = self.engine_client.encode(
                engine_prompt,
                pooling_params,
                request_id_item,
                lora_request=lora_request,
                trace_headers=trace_headers,
                priority=request.priority,
            )

            generators.append(generator)

        result_generator = merge_async_iterators(*generators)

        # Non-streaming response
        final_res_batch: list[PoolingRequestOutput | None] = [None] * len(
            engine_prompts
        )

        async for i, res in result_generator:
            final_res_batch[i] = res

        return [out for out in final_res_batch if out is not None]

    async def _run_scoring(
        self,
        data_1: list[str] | str | ScoreMultiModalParam,
        data_2: list[str] | str | ScoreMultiModalParam,
        request: ScoreRequest | RerankRequest,
        request_id: str,
        raw_request: Request | None = None,
    ) -> list[PoolingRequestOutput] | ErrorResponse:
        """Normalize inputs and dispatch to cross-encoder or embedding scoring."""
        lora_request = self._maybe_get_adapters(request)

        tokenizer = await self.engine_client.get_tokenizer()

        truncate_prompt_tokens = getattr(request, "truncate_prompt_tokens", None)

        tokenization_kwargs: dict[str, Any] = {}
        _validate_truncation_size(
            self.max_model_len, truncate_prompt_tokens, tokenization_kwargs
        )

        trace_headers = (
            None
            if raw_request is None
            else await self._get_trace_headers(raw_request.headers)
        )

        if not self.model_config.is_multimodal_model and (
            isinstance(data_1, dict) or isinstance(data_2, dict)
        ):
            raise ValueError(
                f"MultiModalParam is not supported for {self.model_config.architecture}"  # noqa: E501
            )

        # Normalize both sides to lists of items.
        if isinstance(data_1, str):
            data_1 = [data_1]
        elif isinstance(data_1, dict):
            data_1 = data_1.get("content")  # type: ignore[assignment]

        if isinstance(data_2, str):
            data_2 = [data_2]
        elif isinstance(data_2, dict):
            data_2 = data_2.get("content")  # type: ignore[assignment]

        _validate_score_input_lens(data_1, data_2)  # type: ignore[arg-type]

        if self.model_config.is_cross_encoder:
            return await self._cross_encoding_score(
                tokenizer=tokenizer,
                data_1=data_1,  # type: ignore[arg-type]
                data_2=data_2,  # type: ignore[arg-type]
                request=request,
                request_id=request_id,
                tokenization_kwargs=tokenization_kwargs,
                lora_request=lora_request,
                trace_headers=trace_headers,
            )
        else:
            return await self._embedding_score(
                tokenizer=tokenizer,
                texts_1=data_1,  # type: ignore[arg-type]
                texts_2=data_2,  # type: ignore[arg-type]
                request=request,
                request_id=request_id,
                tokenization_kwargs=tokenization_kwargs,
                lora_request=lora_request,
                trace_headers=trace_headers,
            )

    async def create_score(
        self,
        request: ScoreRequest,
        raw_request: Request | None = None,
    ) -> ScoreResponse | ErrorResponse:
        """
        Score API similar to Sentence Transformers cross encoder

        See https://sbert.net/docs/package_reference/cross_encoder
        """
        error_check_ret = await self._check_model(request)
        if error_check_ret is not None:
            return error_check_ret

        request_id = f"score-{self._base_request_id(raw_request)}"
        created_time = int(time.time())

        try:
            final_res_batch = await self._run_scoring(
                request.text_1,
                request.text_2,
                request,
                request_id,
                raw_request,
            )

            if isinstance(final_res_batch, ErrorResponse):
                return final_res_batch

            return self.request_output_to_score_response(
                final_res_batch,
                request_id,
                created_time,
                self.models.model_name(),
            )
        except asyncio.CancelledError:
            return self.create_error_response("Client disconnected")
        except ValueError as e:
            # TODO: Use a vllm-specific Validation Error
            return self.create_error_response(str(e))

    async def do_rerank(
        self, request: RerankRequest, raw_request: Request | None = None
    ) -> RerankResponse | ErrorResponse:
        """
        Rerank API based on JinaAI's rerank API; implements the same
        API interface. Designed for compatibility with off-the-shelf
        tooling, since this is a common standard for reranking APIs

        See example client implementations at
        https://github.com/infiniflow/ragflow/blob/main/rag/llm/rerank_model.py
        numerous clients use this standard.
        """
        error_check_ret = await self._check_model(request)
        if error_check_ret is not None:
            return error_check_ret

        request_id = f"rerank-{self._base_request_id(raw_request)}"
        documents = request.documents
        # top_n <= 0 means "return everything"; the default covers both the
        # plain list and the multimodal dict form of `documents`.
        top_n = (
            request.top_n
            if request.top_n > 0
            else (
                len(documents)
                if isinstance(documents, list)
                else len(documents["content"])
            )
        )

        try:
            final_res_batch = await self._run_scoring(
                request.query,
                documents,
                request,
                request_id,
                raw_request,
            )

            if isinstance(final_res_batch, ErrorResponse):
                return final_res_batch

            return self.request_output_to_rerank_response(
                final_res_batch,
                request_id,
                self.models.model_name(),
                documents,
                top_n,
            )
        except asyncio.CancelledError:
            return self.create_error_response("Client disconnected")
        except ValueError as e:
            # TODO: Use a vllm-specific Validation Error
            return self.create_error_response(str(e))

    def request_output_to_score_response(
        self,
        final_res_batch: list[PoolingRequestOutput],
        request_id: str,
        created_time: int,
        model_name: str,
    ) -> ScoreResponse:
        """Convert raw pooling outputs into a ScoreResponse with usage info."""
        items: list[ScoreResponseData] = []
        num_prompt_tokens = 0

        for idx, final_res in enumerate(final_res_batch):
            classify_res = ScoringRequestOutput.from_base(final_res)

            item = ScoreResponseData(
                index=idx,
                score=classify_res.outputs.score,
            )
            prompt_token_ids = final_res.prompt_token_ids
            items.append(item)
            num_prompt_tokens += len(prompt_token_ids)

        usage = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            total_tokens=num_prompt_tokens,
        )

        return ScoreResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            data=items,
            usage=usage,
        )

    def request_output_to_rerank_response(
        self,
        final_res_batch: list[PoolingRequestOutput],
        request_id: str,
        model_name: str,
        documents: list[str] | ScoreMultiModalParam,
        top_n: int,
    ) -> RerankResponse:
        """
        Convert the output of do_rank to a RerankResponse
        """
        results: list[RerankResult] = []
        num_prompt_tokens = 0
        for idx, final_res in enumerate(final_res_batch):
            classify_res = ScoringRequestOutput.from_base(final_res)

            result = RerankResult(
                index=idx,
                document=RerankDocument(text=documents[idx])
                if isinstance(documents, list)
                else RerankDocument(multi_modal=documents["content"][idx]),
                relevance_score=classify_res.outputs.score,
            )
            results.append(result)
            prompt_token_ids = final_res.prompt_token_ids
            num_prompt_tokens += len(prompt_token_ids)

        # sort by relevance, then return the top n if set.
        # BUGFIX: compare against len(results), not len(documents) — when
        # `documents` is a multimodal dict, len() counts its keys (not the
        # documents), so top_n truncation silently never applied. For the
        # list form len(results) == len(documents), so behavior is unchanged.
        results.sort(key=lambda x: x.relevance_score, reverse=True)
        if top_n < len(results):
            results = results[:top_n]

        return RerankResponse(
            id=request_id,
            model=model_name,
            results=results,
            usage=RerankUsage(
                total_tokens=num_prompt_tokens, prompt_tokens=num_prompt_tokens
            ),
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/score/__init__.py
vllm/entrypoints/pooling/score/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/score/api_router.py
vllm/entrypoints/pooling/score/api_router.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""FastAPI routes for the Score and Rerank APIs (with legacy /v1, /v2 aliases)."""
from http import HTTPStatus

from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.responses import JSONResponse
from typing_extensions import assert_never

from vllm.entrypoints.openai.protocol import ErrorResponse
from vllm.entrypoints.openai.utils import validate_json_request
from vllm.entrypoints.pooling.score.protocol import (
    RerankRequest,
    RerankResponse,
    ScoreRequest,
    ScoreResponse,
)
from vllm.entrypoints.pooling.score.serving import ServingScores
from vllm.entrypoints.utils import load_aware_call, with_cancellation
from vllm.logger import init_logger

router = APIRouter()
logger = init_logger(__name__)


def score(request: Request) -> ServingScores | None:
    """Return the ServingScores handler, or None if the model lacks score support."""
    return request.app.state.openai_serving_scores


def rerank(request: Request) -> ServingScores | None:
    """Return the ServingScores handler (rerank shares the scores handler)."""
    return request.app.state.openai_serving_scores


@router.post(
    "/score",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_score(request: ScoreRequest, raw_request: Request):
    """Score a batch of text pairs; 404-style error when unsupported."""
    handler = score(raw_request)
    if handler is None:
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Score API"
        )
    try:
        generator = await handler.create_score(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e
    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )
    elif isinstance(generator, ScoreResponse):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)


@router.post(
    "/v1/score",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_score_v1(request: ScoreRequest, raw_request: Request):
    """Deprecated alias for /score kept for backward compatibility."""
    logger.warning(
        "To indicate that Score API is not part of standard OpenAI API, we "
        "have moved it to `/score`. Please update your client accordingly."
    )

    return await create_score(request, raw_request)


@router.post(
    "/rerank",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def do_rerank(request: RerankRequest, raw_request: Request):
    """Rerank documents against a query (JinaAI-compatible API)."""
    handler = rerank(raw_request)
    if handler is None:
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Rerank (Score) API"
        )
    try:
        generator = await handler.do_rerank(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e
    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )
    elif isinstance(generator, RerankResponse):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)


@router.post(
    "/v1/rerank",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
async def do_rerank_v1(request: RerankRequest, raw_request: Request):
    """Deprecated alias for /rerank kept for backward compatibility."""
    logger.warning_once(
        "To indicate that the rerank API is not part of the standard OpenAI"
        " API, we have located it at `/rerank`. Please update your client "
        "accordingly. (Note: Conforms to JinaAI rerank API)"
    )

    return await do_rerank(request, raw_request)


@router.post(
    "/v2/rerank",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
async def do_rerank_v2(request: RerankRequest, raw_request: Request):
    """Version alias for /rerank (same behavior)."""
    return await do_rerank(request, raw_request)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/score/protocol.py
vllm/entrypoints/pooling/score/protocol.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Pydantic request/response models for the Score and Rerank APIs.

The ``--8<--`` markers below are documentation snippet anchors — do not
remove them.
"""
import time
from typing import Annotated, Any

from pydantic import (
    BaseModel,
    Field,
)

from vllm import PoolingParams
from vllm.config.pooler import get_use_activation
from vllm.entrypoints.openai.protocol import OpenAIBaseModel, UsageInfo
from vllm.entrypoints.score_utils import ScoreContentPartParam, ScoreMultiModalParam
from vllm.utils import random_uuid


class ScoreRequest(OpenAIBaseModel):
    """Request body for /score: score each text_1/text_2 pair."""

    model: str | None = None
    text_1: list[str] | str | ScoreMultiModalParam
    text_2: list[str] | str | ScoreMultiModalParam
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None

    # --8<-- [start:score-extra-params]
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )

    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )

    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )

    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )

    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:score-extra-params]

    def to_pooling_params(self):
        # get_use_activation resolves the deprecated softmax/activation
        # fields against use_activation.
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            use_activation=get_use_activation(self),
        )


class RerankRequest(OpenAIBaseModel):
    """Request body for /rerank (JinaAI-compatible): rank documents by query."""

    model: str | None = None
    query: str | ScoreMultiModalParam
    documents: list[str] | ScoreMultiModalParam
    # 0 (the default) means "return all documents".
    top_n: int = Field(default_factory=lambda: 0)
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None

    # --8<-- [start:rerank-extra-params]
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )

    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )

    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )

    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )

    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:rerank-extra-params]

    def to_pooling_params(self):
        # Same deprecation resolution as ScoreRequest.to_pooling_params.
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            use_activation=get_use_activation(self),
        )


class RerankDocument(BaseModel):
    """One reranked document: exactly one of text / multi_modal is set."""

    text: str | None = None
    multi_modal: ScoreContentPartParam | None = None


class RerankResult(BaseModel):
    """A single rerank hit: original index, document, and its score."""

    index: int
    document: RerankDocument
    relevance_score: float


class RerankUsage(BaseModel):
    """Token accounting for a rerank call."""

    prompt_tokens: int
    total_tokens: int


class RerankResponse(OpenAIBaseModel):
    """Top-level /rerank response body."""

    id: str
    model: str
    usage: RerankUsage
    results: list[RerankResult]


class ScoreResponseData(OpenAIBaseModel):
    """A single score entry in a ScoreResponse."""

    index: int
    object: str = "score"
    score: float


class ScoreResponse(OpenAIBaseModel):
    """Top-level /score response body."""

    id: str = Field(default_factory=lambda: f"embd-{random_uuid()}")
    object: str = "list"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    data: list[ScoreResponseData]
    usage: UsageInfo
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/pooling/serving.py
vllm/entrypoints/pooling/pooling/serving.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Serving layer for the Pooling API (completion-, chat-, and IOProcessor-style)."""
import asyncio
import json
import time
from collections.abc import AsyncGenerator, Sequence
from typing import Final, cast

import jinja2
from fastapi import Request
from typing_extensions import assert_never

from vllm.engine.protocol import EngineClient
from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption
from vllm.entrypoints.logger import RequestLogger
from vllm.entrypoints.openai.protocol import (
    ErrorResponse,
    UsageInfo,
)
from vllm.entrypoints.openai.serving_engine import OpenAIServing
from vllm.entrypoints.openai.serving_models import OpenAIServingModels
from vllm.entrypoints.pooling.pooling.protocol import (
    IOProcessorRequest,
    IOProcessorResponse,
    PoolingBytesResponse,
    PoolingChatRequest,
    PoolingCompletionRequest,
    PoolingRequest,
    PoolingResponse,
    PoolingResponseData,
)
from vllm.entrypoints.renderer import RenderConfig
from vllm.entrypoints.utils import _validate_truncation_size
from vllm.logger import init_logger
from vllm.outputs import PoolingRequestOutput
from vllm.tasks import PoolingTask, SupportedTask
from vllm.utils.async_utils import merge_async_iterators
from vllm.utils.serial_utils import (
    EmbedDType,
    EncodingFormat,
    Endianness,
    encode_pooling_bytes,
    encode_pooling_output,
)

logger = init_logger(__name__)


class OpenAIServingPooling(OpenAIServing):
    """Handles /pooling requests for completion, chat, and IOProcessor inputs."""

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        supported_tasks: tuple[SupportedTask, ...],
        request_logger: RequestLogger | None,
        chat_template: str | None,
        chat_template_content_format: ChatTemplateContentFormatOption,
        trust_request_chat_template: bool = False,
        log_error_stack: bool = False,
    ) -> None:
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            log_error_stack=log_error_stack,
        )

        self.supported_tasks = supported_tasks
        self.chat_template = chat_template
        self.chat_template_content_format: Final = chat_template_content_format
        self.trust_request_chat_template = trust_request_chat_template

    async def create_pooling(
        self,
        request: PoolingRequest,
        raw_request: Request | None = None,
    ) -> PoolingResponse | IOProcessorResponse | PoolingBytesResponse | ErrorResponse:
        """
        See https://platform.openai.com/docs/api-reference/embeddings/create
        for the API specification. This API mimics the OpenAI Embedding API.
        """
        error_check_ret = await self._check_model(request)
        if error_check_ret is not None:
            return error_check_ret

        model_name = self.models.model_name()
        request_id = f"pool-{self._base_request_id(raw_request)}"
        created_time = int(time.time())

        is_io_processor_request = isinstance(request, IOProcessorRequest)

        # Phase 1: resolve adapters/tokenizer and render the engine prompts
        # for whichever of the three request flavors this is.
        try:
            lora_request = self._maybe_get_adapters(request)

            if self.model_config.skip_tokenizer_init:
                tokenizer = None
            else:
                tokenizer = await self.engine_client.get_tokenizer()

            renderer = self._get_renderer(tokenizer)

            if getattr(request, "dimensions", None) is not None:
                return self.create_error_response(
                    "dimensions is currently not supported"
                )

            truncate_prompt_tokens = getattr(request, "truncate_prompt_tokens", None)
            # NOTE(review): the validated value is not forwarded from here;
            # truncation for completion requests is applied via
            # _build_render_config — this call appears to serve as an early
            # bounds check (confirm).
            truncate_prompt_tokens = _validate_truncation_size(
                self.max_model_len, truncate_prompt_tokens
            )

            if is_io_processor_request:
                if self.io_processor is None:
                    raise ValueError(
                        "No IOProcessor plugin installed. Please refer "
                        "to the documentation and to the "
                        "'prithvi_geospatial_mae_io_processor' "
                        "offline inference example for more details."
                    )
                validated_prompt = self.io_processor.parse_request(request)
                engine_prompts = await self.io_processor.pre_process_async(
                    prompt=validated_prompt, request_id=request_id
                )
                # Normalize a single prompt to a one-element list; strings and
                # byte sequences count as single prompts, not sequences.
                if not isinstance(engine_prompts, Sequence) or isinstance(
                    engine_prompts, (str, bytes, bytearray)
                ):
                    engine_prompts = [engine_prompts]
            elif isinstance(request, PoolingChatRequest):
                error_check_ret = self._validate_chat_template(
                    request_chat_template=request.chat_template,
                    chat_template_kwargs=request.chat_template_kwargs,
                    trust_request_chat_template=self.trust_request_chat_template,
                )
                if error_check_ret is not None:
                    return error_check_ret
                _, engine_prompts = await self._preprocess_chat(
                    request,
                    tokenizer,
                    request.messages,
                    chat_template=request.chat_template or self.chat_template,
                    chat_template_content_format=self.chat_template_content_format,
                    # In pooling requests, we are not generating tokens,
                    # so there is no need to append extra tokens to the input
                    add_generation_prompt=False,
                    continue_final_message=False,
                    add_special_tokens=request.add_special_tokens,
                )
            elif isinstance(request, PoolingCompletionRequest):
                engine_prompts = await renderer.render_prompt(
                    prompt_or_prompts=request.input,
                    config=self._build_render_config(request),
                )
            else:
                raise ValueError(f"Unsupported request of type {type(request)}")
        except (ValueError, TypeError, jinja2.TemplateError) as e:
            logger.exception("Error in preprocessing prompt inputs")
            return self.create_error_response(str(e))

        # Schedule the request and get the result generator.
        generators: list[AsyncGenerator[PoolingRequestOutput, None]] = []
        try:
            if is_io_processor_request:
                assert self.io_processor is not None and isinstance(
                    request, IOProcessorRequest
                )
                pooling_params = self.io_processor.validate_or_generate_params()
            else:
                pooling_params = request.to_pooling_params()

                pooling_task: PoolingTask
                if request.task is None:
                    # Pick a default task by fixed preference order.
                    if "token_embed" in self.supported_tasks:
                        pooling_task = "token_embed"
                    elif "token_classify" in self.supported_tasks:
                        pooling_task = "token_classify"
                    elif "plugin" in self.supported_tasks:
                        pooling_task = "plugin"
                    else:
                        return self.create_error_response(
                            f"pooling_task must be one of {self.supported_tasks}."
                        )
                else:
                    pooling_task = request.task

                if pooling_task not in self.supported_tasks:
                    return self.create_error_response(
                        f"Task {pooling_task} is not supported, it"
                        f" must be one of {self.supported_tasks}."
                    )

                try:
                    pooling_params.verify(pooling_task, self.model_config)
                except ValueError as e:
                    return self.create_error_response(str(e))

            for i, engine_prompt in enumerate(engine_prompts):
                request_id_item = f"{request_id}-{i}"

                self._log_inputs(
                    request_id_item,
                    engine_prompt,
                    params=pooling_params,
                    lora_request=lora_request,
                )

                trace_headers = (
                    None
                    if raw_request is None
                    else await self._get_trace_headers(raw_request.headers)
                )

                generator = self.engine_client.encode(
                    engine_prompt,
                    pooling_params,
                    request_id_item,
                    lora_request=lora_request,
                    trace_headers=trace_headers,
                    priority=request.priority,
                )

                generators.append(generator)
        except ValueError as e:
            # TODO: Use a vllm-specific Validation Error
            return self.create_error_response(str(e))

        result_generator = merge_async_iterators(*generators)

        # IOProcessor requests post-process the raw generator themselves.
        if is_io_processor_request:
            assert self.io_processor is not None
            output = await self.io_processor.post_process_async(
                model_output=result_generator,
                request_id=request_id,
            )
            return self.io_processor.output_to_response(output)

        assert isinstance(request, (PoolingCompletionRequest, PoolingChatRequest))
        num_prompts = len(engine_prompts)

        # Non-streaming response
        final_res_batch: list[PoolingRequestOutput | None]
        final_res_batch = [None] * num_prompts
        try:
            async for i, res in result_generator:
                final_res_batch[i] = res

            assert all(final_res is not None for final_res in final_res_batch)

            final_res_batch_checked = cast(list[PoolingRequestOutput], final_res_batch)

            response = self.request_output_to_pooling_response(
                final_res_batch_checked,
                request_id,
                created_time,
                model_name,
                request.encoding_format,
                request.embed_dtype,
                request.endianness,
            )
        except asyncio.CancelledError:
            return self.create_error_response("Client disconnected")
        except ValueError as e:
            # TODO: Use a vllm-specific Validation Error
            return self.create_error_response(str(e))

        return response

    def request_output_to_pooling_response(
        self,
        final_res_batch: list[PoolingRequestOutput],
        request_id: str,
        created_time: int,
        model_name: str,
        encoding_format: EncodingFormat,
        embed_dtype: EmbedDType,
        endianness: Endianness,
    ) -> PoolingResponse | PoolingBytesResponse:
        """Serialize pooling outputs per the requested encoding format."""

        def encode_float_base64():
            # JSON body: one PoolingResponseData per prompt plus token usage.
            items: list[PoolingResponseData] = []
            num_prompt_tokens = 0

            for idx, final_res in enumerate(final_res_batch):
                item = PoolingResponseData(
                    index=idx,
                    data=encode_pooling_output(
                        final_res,
                        encoding_format=encoding_format,
                        embed_dtype=embed_dtype,
                        endianness=endianness,
                    ),
                )
                prompt_token_ids = final_res.prompt_token_ids

                items.append(item)
                num_prompt_tokens += len(prompt_token_ids)

            usage = UsageInfo(
                prompt_tokens=num_prompt_tokens,
                total_tokens=num_prompt_tokens,
            )

            return PoolingResponse(
                id=request_id,
                created=created_time,
                model=model_name,
                data=items,
                usage=usage,
            )

        def encode_bytes(bytes_only: bool) -> PoolingBytesResponse:
            # Raw-bytes body; metadata rides in a response header unless the
            # caller asked for bytes only.
            content, items, usage = encode_pooling_bytes(
                pooling_outputs=final_res_batch,
                embed_dtype=embed_dtype,
                endianness=endianness,
            )

            headers = (
                None
                if bytes_only
                else {
                    "metadata": json.dumps(
                        {
                            "id": request_id,
                            "created": created_time,
                            "model": model_name,
                            "data": items,
                            "usage": usage,
                        }
                    )
                }
            )

            return PoolingBytesResponse(
                content=content,
                headers=headers,
            )

        if encoding_format == "float" or encoding_format == "base64":
            return encode_float_base64()
        elif encoding_format == "bytes" or encoding_format == "bytes_only":
            return encode_bytes(bytes_only=encoding_format == "bytes_only")
        else:
            assert_never(encoding_format)

    def _build_render_config(self, request: PoolingCompletionRequest) -> RenderConfig:
        """Render settings for completion-style pooling prompts."""
        return RenderConfig(
            max_length=self.max_model_len,
            truncate_prompt_tokens=request.truncate_prompt_tokens,
            add_special_tokens=request.add_special_tokens,
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/pooling/__init__.py
vllm/entrypoints/pooling/pooling/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/pooling/api_router.py
vllm/entrypoints/pooling/pooling/api_router.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from http import HTTPStatus from fastapi import APIRouter, Depends, HTTPException, Request from fastapi.responses import JSONResponse, StreamingResponse from typing_extensions import assert_never from vllm.entrypoints.openai.protocol import ErrorResponse from vllm.entrypoints.openai.utils import validate_json_request from vllm.entrypoints.pooling.pooling.protocol import ( IOProcessorResponse, PoolingBytesResponse, PoolingRequest, PoolingResponse, ) from vllm.entrypoints.pooling.pooling.serving import OpenAIServingPooling from vllm.entrypoints.utils import load_aware_call, with_cancellation router = APIRouter() def pooling(request: Request) -> OpenAIServingPooling | None: return request.app.state.openai_serving_pooling @router.post( "/pooling", dependencies=[Depends(validate_json_request)], responses={ HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse}, HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse}, }, ) @with_cancellation @load_aware_call async def create_pooling(request: PoolingRequest, raw_request: Request): handler = pooling(raw_request) if handler is None: base_server = raw_request.app.state.openai_serving_tokenization return base_server.create_error_response( message="The model does not support Pooling API" ) try: generator = await handler.create_pooling(request, raw_request) except Exception as e: raise HTTPException( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e) ) from e if isinstance(generator, ErrorResponse): return JSONResponse( content=generator.model_dump(), status_code=generator.error.code ) elif isinstance(generator, (PoolingResponse, IOProcessorResponse)): return JSONResponse(content=generator.model_dump()) elif isinstance(generator, PoolingBytesResponse): return StreamingResponse( content=generator.content, headers=generator.headers, media_type=generator.media_type, ) assert_never(generator)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/pooling/protocol.py
vllm/entrypoints/pooling/pooling/protocol.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Request/response models for the vLLM Pooling API."""

import time
from typing import Generic, TypeAlias, TypeVar

from pydantic import (
    Field,
)

from vllm import PoolingParams
from vllm.config.pooler import get_use_activation
from vllm.entrypoints.openai.protocol import OpenAIBaseModel, UsageInfo
from vllm.entrypoints.pooling.embed.protocol import (
    EmbeddingChatRequest,
    EmbeddingCompletionRequest,
)
from vllm.tasks import PoolingTask
from vllm.utils import random_uuid
from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness


class PoolingCompletionRequest(EmbeddingCompletionRequest):
    """Completion-style pooling request.

    Extends the embedding completion request with an optional pooling
    ``task`` selector and activation controls (``softmax`` and
    ``activation`` are deprecated aliases for ``use_activation``).
    """

    task: PoolingTask | None = None
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )
    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "If it is a classify or token_classify task, the default is True; "
        "for other tasks, this value should be None.",
    )

    def to_pooling_params(self):
        """Convert the request fields into engine-level ``PoolingParams``."""
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            dimensions=self.dimensions,
            normalize=self.normalize,
            # get_use_activation resolves the deprecated softmax/activation
            # aliases against use_activation.
            use_activation=get_use_activation(self),
        )


class PoolingChatRequest(EmbeddingChatRequest):
    """Chat-style pooling request; mirrors PoolingCompletionRequest."""

    task: PoolingTask | None = None
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )
    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "If it is a classify or token_classify task, the default is True; "
        "for other tasks, this value should be None.",
    )

    def to_pooling_params(self):
        """Convert the request fields into engine-level ``PoolingParams``."""
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            dimensions=self.dimensions,
            normalize=self.normalize,
            # get_use_activation resolves the deprecated softmax/activation
            # aliases against use_activation.
            use_activation=get_use_activation(self),
        )


# Generic payload type for IOProcessor plugin requests/responses.
T = TypeVar("T")


class IOProcessorRequest(OpenAIBaseModel, Generic[T]):
    """Request whose input is interpreted by an IOProcessor plugin."""

    model: str | None = None

    priority: int = Field(default=0)
    """
    The priority of the request (lower means earlier handling;
    default: 0). Any priority other than 0 will raise an error
    if the served model does not use priority scheduling.
    """

    data: T
    task: PoolingTask = "plugin"
    encoding_format: EncodingFormat = "float"
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    endianness: Endianness = Field(
        default="native",
        description=(
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior."
            "This parameter will affect base64 and binary_response."
        ),
    )

    def to_pooling_params(self):
        """Plugin requests carry no tunables; use default ``PoolingParams``."""
        return PoolingParams()


class IOProcessorResponse(OpenAIBaseModel, Generic[T]):
    """Response produced by an IOProcessor plugin."""

    request_id: str | None = None
    """
    The request_id associated with this response
    """

    created_at: int = Field(default_factory=lambda: int(time.time()))

    data: T
    """
    When using IOProcessor plugins, the actual output is generated by the
    plugin itself. Hence, we use a generic type for the response data
    """


# Union of every request shape accepted by the /pooling endpoint.
PoolingRequest: TypeAlias = (
    PoolingCompletionRequest | PoolingChatRequest | IOProcessorRequest
)


class PoolingResponseData(OpenAIBaseModel):
    """One pooled result: index into the input batch plus the encoded data."""

    index: int
    object: str = "pooling"
    # Nested/flat float lists for "float" encoding, str for base64.
    data: list[list[float]] | list[float] | str


class PoolingResponse(OpenAIBaseModel):
    """Batch pooling response in the OpenAI list-object shape."""

    id: str = Field(default_factory=lambda: f"pool-{random_uuid()}")
    object: str = "list"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    data: list[PoolingResponseData]
    usage: UsageInfo


class PoolingBytesResponse(OpenAIBaseModel):
    """Raw-bytes pooling response; metadata is carried in HTTP headers."""

    content: list[bytes]
    headers: dict[str, str] | None = None
    media_type: str = "application/octet-stream"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/embed/serving.py
vllm/entrypoints/pooling/embed/serving.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import json from collections.abc import AsyncGenerator, Mapping from typing import Any, Final, cast import torch from fastapi import Request from fastapi.responses import Response from typing_extensions import assert_never, override from vllm.engine.protocol import EngineClient from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.protocol import ( ErrorResponse, UsageInfo, ) from vllm.entrypoints.openai.serving_engine import ( EmbeddingServeContext, OpenAIServing, ServeContext, ) from vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.entrypoints.pooling.embed.protocol import ( EmbeddingBytesResponse, EmbeddingChatRequest, EmbeddingCompletionRequest, EmbeddingRequest, EmbeddingResponse, EmbeddingResponseData, ) from vllm.entrypoints.renderer import RenderConfig from vllm.inputs.data import TokensPrompt from vllm.logger import init_logger from vllm.outputs import ( EmbeddingRequestOutput, PoolingOutput, PoolingRequestOutput, RequestOutput, ) from vllm.pooling_params import PoolingParams from vllm.utils.async_utils import merge_async_iterators from vllm.utils.collection_utils import chunk_list from vllm.utils.serial_utils import ( EmbedDType, EncodingFormat, Endianness, encode_pooling_bytes, encode_pooling_output, ) logger = init_logger(__name__) class EmbeddingMixin(OpenAIServing): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) pooler_config = self.model_config.pooler_config # Avoid repeated attribute lookups self.supports_chunked_processing = bool( pooler_config and pooler_config.enable_chunked_processing ) self.max_embed_len = ( pooler_config.max_embed_len if pooler_config and pooler_config.max_embed_len else None ) @override async def _preprocess( self, ctx: ServeContext, ) -> ErrorResponse | None: ctx = 
cast(EmbeddingServeContext, ctx) try: ctx.lora_request = self._maybe_get_adapters(ctx.request) tokenizer = await self.engine_client.get_tokenizer() renderer = self._get_renderer(tokenizer) if isinstance(ctx.request, EmbeddingChatRequest): _, ctx.engine_prompts = await self._preprocess_chat( ctx.request, tokenizer, ctx.request.messages, chat_template=ctx.request.chat_template or ctx.chat_template, chat_template_content_format=ctx.chat_template_content_format, add_generation_prompt=ctx.request.add_generation_prompt, continue_final_message=ctx.request.continue_final_message, add_special_tokens=ctx.request.add_special_tokens, ) else: ctx.engine_prompts = await renderer.render_prompt( prompt_or_prompts=ctx.request.input, config=self._build_render_config(ctx.request), ) return None except (ValueError, TypeError) as e: logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(str(e)) def _build_render_config(self, request: EmbeddingCompletionRequest) -> RenderConfig: # Set max_length based on chunked processing capability if self._should_use_chunked_processing(request): max_length = None else: max_length = self.max_embed_len or self.max_model_len return RenderConfig( max_length=max_length, truncate_prompt_tokens=request.truncate_prompt_tokens, add_special_tokens=request.add_special_tokens, ) @override def _build_response( self, ctx: ServeContext, ) -> EmbeddingResponse | Response | ErrorResponse: final_res_batch_checked = cast(list[PoolingRequestOutput], ctx.final_res_batch) encoding_format: EncodingFormat = ctx.request.encoding_format embed_dtype: EmbedDType = ctx.request.embed_dtype endianness: Endianness = ctx.request.endianness def encode_float_base64(): items: list[EmbeddingResponseData] = [] num_prompt_tokens = 0 for idx, final_res in enumerate(final_res_batch_checked): item = EmbeddingResponseData( index=idx, embedding=encode_pooling_output( final_res, encoding_format=encoding_format, embed_dtype=embed_dtype, endianness=endianness, 
), ) prompt_token_ids = final_res.prompt_token_ids items.append(item) num_prompt_tokens += len(prompt_token_ids) usage = UsageInfo( prompt_tokens=num_prompt_tokens, total_tokens=num_prompt_tokens, ) return EmbeddingResponse( id=ctx.request_id, created=ctx.created_time, model=ctx.model_name, data=items, usage=usage, ) def encode_bytes(bytes_only: bool) -> EmbeddingBytesResponse: content, items, usage = encode_pooling_bytes( pooling_outputs=final_res_batch_checked, embed_dtype=embed_dtype, endianness=endianness, ) headers = ( None if bytes_only else { "metadata": json.dumps( { "id": ctx.request_id, "created": ctx.created_time, "model": ctx.model_name, "data": items, "usage": usage, } ) } ) return EmbeddingBytesResponse(content=content, headers=headers) if encoding_format == "float" or encoding_format == "base64": return encode_float_base64() elif encoding_format == "bytes" or encoding_format == "bytes_only": return encode_bytes(bytes_only=encoding_format == "bytes_only") else: assert_never(encoding_format) def _get_max_position_embeddings(self) -> int: """Get the model's effective maximum sequence length for chunking.""" return self.model_config.max_model_len def _should_use_chunked_processing(self, request) -> bool: """Check if chunked processing should be used for this request.""" return ( isinstance(request, (EmbeddingCompletionRequest, EmbeddingChatRequest)) and self.supports_chunked_processing ) async def _process_chunked_request( self, ctx: EmbeddingServeContext, token_ids: list[int], pooling_params, trace_headers, prompt_idx: int, ) -> list[AsyncGenerator[PoolingRequestOutput, None]]: """Process a single prompt using chunked processing.""" generators: list[AsyncGenerator[PoolingRequestOutput, None]] = [] # Split into chunks using max_position_embeddings max_pos_embeddings = self._get_max_position_embeddings() # Process all chunks for MEAN aggregation for chunk_idx, chunk_tokens in enumerate( chunk_list(token_ids, max_pos_embeddings) ): # Create a request ID 
for this chunk chunk_request_id = f"{ctx.request_id}-prompt-{prompt_idx}-chunk-{chunk_idx}" # Create engine prompt for this chunk chunk_engine_prompt = TokensPrompt(prompt_token_ids=chunk_tokens) # Log the chunk self._log_inputs( chunk_request_id, chunk_engine_prompt, params=pooling_params, lora_request=ctx.lora_request, ) # Create generator for this chunk and wrap it to return indices original_generator = self.engine_client.encode( chunk_engine_prompt, pooling_params, chunk_request_id, lora_request=ctx.lora_request, trace_headers=trace_headers, priority=getattr(ctx.request, "priority", 0), ) generators.append(original_generator) return generators def _validate_input( self, request, input_ids: list[int], input_text: str, ) -> TokensPrompt: """Override to support chunked processing for embedding requests.""" token_num = len(input_ids) # Note: EmbeddingRequest doesn't have max_tokens if isinstance(request, (EmbeddingCompletionRequest, EmbeddingChatRequest)): # Check if chunked processing is enabled for pooling models enable_chunked = self._should_use_chunked_processing(request) # Use max_position_embeddings for chunked processing decisions max_pos_embeddings = self._get_max_position_embeddings() # Determine the effective max length for validation if self.max_embed_len is not None: # Use max_embed_len for validation instead of max_model_len length_type = "maximum embedding input length" max_length_value = self.max_embed_len else: # Fall back to max_model_len validation (original behavior) length_type = "maximum context length" max_length_value = self.max_model_len validation_error_msg = ( "This model's {length_type} is {max_length_value} tokens. " "However, you requested {token_num} tokens in the input for " "embedding generation. Please reduce the length of the input." ) chunked_processing_error_msg = ( "This model's {length_type} is {max_length_value} tokens. " "However, you requested {token_num} tokens in the input for " "embedding generation. 
Please reduce the length of the input " "or enable chunked processing." ) # Check if input exceeds max length if token_num > max_length_value: raise ValueError( validation_error_msg.format( length_type=length_type, max_length_value=max_length_value, token_num=token_num, ) ) # Check for chunked processing # when exceeding max_position_embeddings if token_num > max_pos_embeddings: if enable_chunked: # Allow long inputs when chunked processing is enabled logger.info( "Input length %s exceeds max_position_embeddings " "%s, will use chunked processing", token_num, max_pos_embeddings, ) else: raise ValueError( chunked_processing_error_msg.format( length_type="maximum position embeddings length", max_length_value=max_pos_embeddings, token_num=token_num, ) ) return TokensPrompt(prompt=input_text, prompt_token_ids=input_ids) # For other request types, use the parent's implementation return super()._validate_input(request, input_ids, input_text) async def _create_single_prompt_generator( self, ctx: EmbeddingServeContext, engine_prompt: TokensPrompt, pooling_params: PoolingParams, trace_headers: Mapping[str, str] | None, prompt_index: int, ) -> AsyncGenerator[RequestOutput | PoolingRequestOutput, None]: """Create a generator for a single prompt using standard processing.""" request_id_item = f"{ctx.request_id}-{prompt_index}" self._log_inputs( request_id_item, engine_prompt, params=pooling_params, lora_request=ctx.lora_request, ) # Return the original generator without wrapping return self.engine_client.encode( engine_prompt, pooling_params, request_id_item, lora_request=ctx.lora_request, trace_headers=trace_headers, priority=getattr(ctx.request, "priority", 0), ) @override async def _prepare_generators( self, ctx: ServeContext, ) -> ErrorResponse | None: """Override to support chunked processing.""" ctx = cast(EmbeddingServeContext, ctx) # Check if we should use chunked processing use_chunked = self._should_use_chunked_processing(ctx.request) # If no chunked processing 
needed, delegate to parent class if not use_chunked: return await super()._prepare_generators(ctx) # Custom logic for chunked processing generators: list[ AsyncGenerator[RequestOutput | PoolingRequestOutput, None] ] = [] try: trace_headers = ( None if ctx.raw_request is None else await self._get_trace_headers(ctx.raw_request.headers) ) pooling_params = self._create_pooling_params(ctx) if isinstance(pooling_params, ErrorResponse): return pooling_params # Verify and set the task for pooling params try: pooling_params.verify("embed", self.model_config) except ValueError as e: return self.create_error_response(str(e)) if ctx.engine_prompts is None: return self.create_error_response("Engine prompts not available") max_pos_embeddings = self._get_max_position_embeddings() for i, engine_prompt in enumerate(ctx.engine_prompts): # Check if this specific prompt needs chunked processing if "prompt_token_ids" in engine_prompt: prompt_token_ids = engine_prompt["prompt_token_ids"] if len(prompt_token_ids) > max_pos_embeddings: # Use chunked processing for this prompt chunk_generators = await self._process_chunked_request( ctx, prompt_token_ids, pooling_params, trace_headers, i, ) generators.extend(chunk_generators) continue # Normal processing for short prompts or non-token prompts generator = await self._create_single_prompt_generator( ctx, engine_prompt, pooling_params, trace_headers, i ) generators.append(generator) ctx.result_generator = merge_async_iterators(*generators) return None except Exception as e: # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) @override async def _collect_batch( self, ctx: ServeContext, ) -> ErrorResponse | None: """Collect and aggregate batch results with support for chunked processing. For chunked requests, performs online aggregation to minimize memory usage. For regular requests, collects results normally. 
""" ctx = cast(EmbeddingServeContext, ctx) try: if ctx.engine_prompts is None: return self.create_error_response("Engine prompts not available") # Check if we used chunked processing use_chunked = self._should_use_chunked_processing(ctx.request) if not use_chunked: return await super()._collect_batch(ctx=ctx) if ctx.result_generator is None: return self.create_error_response("Result generator not available") # Online aggregation for chunked requests to # minimize memory usage # Track aggregation state for each prompt prompt_aggregators: dict[int, dict[str, Any]] = {} short_prompts_results: dict[int, PoolingRequestOutput] = {} async for result_idx, result in ctx.result_generator: if "-chunk-" in result.request_id: # Extract prompt_idx from chunked request_id parts = result.request_id.split("-") try: prompt_idx = int(parts[parts.index("prompt") + 1]) except (ValueError, IndexError): # Fallback: extract from result_idx if parsing fails prompt_idx = result_idx # Initialize aggregator for this prompt if needed if prompt_idx not in prompt_aggregators: prompt_aggregators[prompt_idx] = { "weighted_sum": None, "total_weight": 0, "chunk_count": 0, "request_id": result.request_id.split("-chunk-")[0], } aggregator = prompt_aggregators[prompt_idx] # MEAN pooling with online weighted averaging # Ensure result is PoolingRequestOutput # for embedding processing if not isinstance(result, PoolingRequestOutput): return self.create_error_response( f"Expected PoolingRequestOutput for " f"chunked embedding, got " f"{type(result).__name__}" ) # Handle both PoolingOutput and # EmbeddingOutput types if hasattr(result.outputs, "data"): # PoolingOutput case embedding_data = result.outputs.data elif hasattr(result.outputs, "embedding"): # EmbeddingOutput case - # convert embedding list to tensor embedding_data = result.outputs.embedding else: return self.create_error_response( f"Unsupported output type: {type(result.outputs).__name__}" ) if not isinstance(embedding_data, torch.Tensor): 
embedding_data = torch.tensor( embedding_data, dtype=torch.float32 ) if result.prompt_token_ids is None: return self.create_error_response( "prompt_token_ids cannot be None for chunked processing" ) weight = len(result.prompt_token_ids) weighted_embedding = embedding_data.to(dtype=torch.float32) * weight if aggregator["weighted_sum"] is None: # First chunk aggregator["weighted_sum"] = weighted_embedding else: # Accumulate aggregator["weighted_sum"] += weighted_embedding aggregator["total_weight"] += weight aggregator["chunk_count"] += 1 else: # Non-chunked result - extract prompt_idx from request_id parts = result.request_id.split("-") try: # Last part should be prompt index prompt_idx = int(parts[-1]) except (ValueError, IndexError): prompt_idx = result_idx # Fallback to result_idx short_prompts_results[prompt_idx] = cast( PoolingRequestOutput, result ) # Finalize aggregated results final_res_batch: list[PoolingRequestOutput | EmbeddingRequestOutput] = [] num_prompts = len(ctx.engine_prompts) for prompt_idx in range(num_prompts): if prompt_idx in prompt_aggregators: # Finalize MEAN aggregation for this chunked prompt aggregator = prompt_aggregators[prompt_idx] weighted_sum = aggregator["weighted_sum"] total_weight = aggregator["total_weight"] if ( weighted_sum is not None and isinstance(weighted_sum, torch.Tensor) and isinstance(total_weight, (int, float)) and total_weight > 0 ): # Compute final mean embedding final_embedding = weighted_sum / total_weight # Create a PoolingRequestOutput # for the aggregated result pooling_output_data = PoolingOutput(data=final_embedding) # Get original prompt token IDs for this prompt original_prompt = ctx.engine_prompts[prompt_idx] if "prompt_token_ids" not in original_prompt: return self.create_error_response( f"Chunked prompt {prompt_idx} does not contain " "token IDs" ) original_token_ids = original_prompt["prompt_token_ids"] pooling_request_output = PoolingRequestOutput( request_id=aggregator["request_id"], 
prompt_token_ids=original_token_ids, outputs=pooling_output_data, num_cached_tokens=0, finished=True, ) final_res_batch.append(pooling_request_output) else: return self.create_error_response( f"Failed to aggregate chunks for prompt {prompt_idx}" ) elif prompt_idx in short_prompts_results: final_res_batch.append( cast(PoolingRequestOutput, short_prompts_results[prompt_idx]) ) else: return self.create_error_response( f"Result not found for prompt {prompt_idx}" ) ctx.final_res_batch = cast( list[RequestOutput | PoolingRequestOutput], final_res_batch ) return None except Exception as e: return self.create_error_response(str(e)) class OpenAIServingEmbedding(EmbeddingMixin): request_id_prefix = "embd" def __init__( self, engine_client: EngineClient, models: OpenAIServingModels, *, request_logger: RequestLogger | None, chat_template: str | None, chat_template_content_format: ChatTemplateContentFormatOption, trust_request_chat_template: bool = False, log_error_stack: bool = False, ) -> None: super().__init__( engine_client=engine_client, models=models, request_logger=request_logger, log_error_stack=log_error_stack, ) self.chat_template = chat_template self.chat_template_content_format: Final = chat_template_content_format self.trust_request_chat_template = trust_request_chat_template async def create_embedding( self, request: EmbeddingRequest, raw_request: Request | None = None, ) -> EmbeddingResponse | ErrorResponse: """ Embedding API similar to OpenAI's API. See https://platform.openai.com/docs/api-reference/embeddings/create for the API specification. This API mimics the OpenAI Embedding API. 
""" model_name = self.models.model_name() request_id = ( f"{self.request_id_prefix}-" f"{self._base_request_id(raw_request, request.request_id)}" ) ctx = EmbeddingServeContext( request=request, raw_request=raw_request, model_name=model_name, request_id=request_id, chat_template=self.chat_template, chat_template_content_format=self.chat_template_content_format, ) return await super().handle(ctx) # type: ignore @override def _create_pooling_params( self, ctx: ServeContext[EmbeddingRequest], ) -> PoolingParams | ErrorResponse: pooling_params = super()._create_pooling_params(ctx) if isinstance(pooling_params, ErrorResponse): return pooling_params try: pooling_params.verify("embed", self.model_config) except ValueError as e: return self.create_error_response(str(e)) return pooling_params async def _preprocess( self, ctx: ServeContext, ) -> ErrorResponse | None: if isinstance(ctx.request, EmbeddingChatRequest): error_check_ret = self._validate_chat_template( request_chat_template=ctx.request.chat_template, chat_template_kwargs=ctx.request.chat_template_kwargs, trust_request_chat_template=self.trust_request_chat_template, ) if error_check_ret is not None: return error_check_ret return await super()._preprocess(ctx)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/embed/conftest.py
vllm/entrypoints/pooling/embed/conftest.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Pytest configuration for vLLM pooling embed tests."""
import warnings

import torch

from vllm.platforms import current_platform


def pytest_collection_modifyitems(config, items):
    """Configure ROCm-specific settings based on collected tests."""
    if not current_platform.is_rocm():
        return

    # On ROCm, force math SDP and turn off the flash/mem-efficient kernels
    # to work around HF Transformers accuracy issues:
    # https://github.com/vllm-project/vllm/issues/30167
    # TODO: Remove once ROCm SDP accuracy issues are resolved on HuggingFace
    cuda_backends = torch.backends.cuda
    cuda_backends.enable_flash_sdp(False)
    cuda_backends.enable_mem_efficient_sdp(False)
    cuda_backends.enable_math_sdp(True)

    warnings.warn(
        "ROCm: Disabled flash_sdp and mem_efficient_sdp, enabled math_sdp "
        "to avoid HuggingFace Transformers accuracy issues",
        UserWarning,
        stacklevel=1,
    )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/embed/__init__.py
vllm/entrypoints/pooling/embed/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/embed/api_router.py
vllm/entrypoints/pooling/embed/api_router.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from http import HTTPStatus from fastapi import APIRouter, Depends, HTTPException, Request from fastapi.responses import JSONResponse, StreamingResponse from typing_extensions import assert_never from vllm.entrypoints.openai.protocol import ErrorResponse from vllm.entrypoints.openai.utils import validate_json_request from vllm.entrypoints.pooling.embed.protocol import ( EmbeddingBytesResponse, EmbeddingRequest, EmbeddingResponse, ) from vllm.entrypoints.pooling.embed.serving import OpenAIServingEmbedding from vllm.entrypoints.utils import load_aware_call, with_cancellation router = APIRouter() def embedding(request: Request) -> OpenAIServingEmbedding | None: return request.app.state.openai_serving_embedding @router.post( "/v1/embeddings", dependencies=[Depends(validate_json_request)], responses={ HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse}, HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse}, }, ) @with_cancellation @load_aware_call async def create_embedding( request: EmbeddingRequest, raw_request: Request, ): handler = embedding(raw_request) if handler is None: base_server = raw_request.app.state.openai_serving_tokenization return base_server.create_error_response( message="The model does not support Embeddings API" ) try: generator = await handler.create_embedding(request, raw_request) except Exception as e: raise HTTPException( status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e) ) from e if isinstance(generator, ErrorResponse): return JSONResponse( content=generator.model_dump(), status_code=generator.error.code ) elif isinstance(generator, EmbeddingResponse): return JSONResponse(content=generator.model_dump()) elif isinstance(generator, EmbeddingBytesResponse): return StreamingResponse( content=generator.content, headers=generator.headers, media_type=generator.media_type, ) assert_never(generator)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/embed/protocol.py
vllm/entrypoints/pooling/embed/protocol.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Pydantic request/response models for the /v1/embeddings endpoint.

import time
from typing import Annotated, Any, TypeAlias

from pydantic import (
    Field,
    model_validator,
)

from vllm import PoolingParams
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
from vllm.entrypoints.openai.protocol import OpenAIBaseModel, UsageInfo
from vllm.utils import random_uuid
from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness


# Embeddings request with raw text / token-id input (OpenAI-compatible shape).
class EmbeddingCompletionRequest(OpenAIBaseModel):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/embeddings
    model: str | None = None
    input: list[int] | list[list[int]] | str | list[str]
    encoding_format: EncodingFormat = "float"
    dimensions: int | None = None
    user: str | None = None
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None

    # vLLM-specific extensions beyond the OpenAI schema.
    # --8<-- [start:embedding-extra-params]
    add_special_tokens: bool = Field(
        default=True,
        description=(
            "If true (the default), special tokens (e.g. BOS) will be added to "
            "the prompt."
        ),
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    normalize: bool | None = Field(
        default=None,
        description="Whether to normalize the embeddings outputs. Default is True.",
    )
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    endianness: Endianness = Field(
        default="native",
        description=(
            # NOTE(review): the two fragments below concatenate without a space
            # ("behavior.This parameter") in the rendered schema — looks like a
            # typo; confirm before changing the user-visible string.
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior."
            "This parameter will affect base64 and binary_response."
        ),
    )
    # --8<-- [end:embedding-extra-params]

    def to_pooling_params(self):
        # Map request-level knobs onto the engine's PoolingParams.
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            dimensions=self.dimensions,
            normalize=self.normalize,
        )


# Embeddings request expressed as chat messages rendered via a chat template.
class EmbeddingChatRequest(OpenAIBaseModel):
    model: str | None = None
    messages: list[ChatCompletionMessageParam]

    encoding_format: EncodingFormat = "float"
    dimensions: int | None = None
    user: str | None = None
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None

    # --8<-- [start:chat-embedding-extra-params]
    add_generation_prompt: bool = Field(
        default=False,
        description=(
            "If true, the generation prompt will be added to the chat template. "
            "This is a parameter used by chat template in tokenizer config of the "
            "model."
        ),
    )
    continue_final_message: bool = Field(
        default=False,
        description=(
            "If this is set, the chat will be formatted so that the final "
            "message in the chat is open-ended, without any EOS tokens. The "
            "model will continue this message rather than starting a new one. "
            'This allows you to "prefill" part of the model\'s response for it. '
            "Cannot be used at the same time as `add_generation_prompt`."
        ),
    )
    add_special_tokens: bool = Field(
        default=False,
        description=(
            "If true, special tokens (e.g. BOS) will be added to the prompt "
            "on top of what is added by the chat template. "
            "For most models, the chat template takes care of adding the "
            "special tokens so this should be set to false (as is the "
            "default)."
        ),
    )
    chat_template: str | None = Field(
        default=None,
        description=(
            "A Jinja template to use for this conversion. "
            "As of transformers v4.44, default chat template is no longer "
            "allowed, so you must provide a chat template if the tokenizer "
            "does not define one."
        ),
    )
    chat_template_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=(
            "Additional keyword args to pass to the template renderer. "
            "Will be accessible by the chat template."
        ),
    )
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    normalize: bool | None = Field(
        default=None,
        description="Whether to normalize the embeddings outputs. Default is True.",
    )
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    endianness: Endianness = Field(
        default="native",
        description=(
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior."
            "This parameter will affect base64 and binary_response."
        ),
    )
    # --8<-- [end:chat-embedding-extra-params]

    @model_validator(mode="before")
    @classmethod
    def check_generation_prompt(cls, data):
        # The two options are mutually exclusive: a prompt cannot both be
        # continued in-place and have a fresh generation prompt appended.
        if data.get("continue_final_message") and data.get("add_generation_prompt"):
            raise ValueError(
                "Cannot set both `continue_final_message` and "
                "`add_generation_prompt` to True."
            )
        return data

    def to_pooling_params(self):
        # Same mapping as EmbeddingCompletionRequest.to_pooling_params.
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            dimensions=self.dimensions,
            normalize=self.normalize,
        )


# Either request flavor is accepted by the endpoint.
EmbeddingRequest: TypeAlias = EmbeddingCompletionRequest | EmbeddingChatRequest


# One embedding vector in the response; `embedding` is a list of floats or a
# base64-encoded string depending on the requested encoding_format.
class EmbeddingResponseData(OpenAIBaseModel):
    index: int
    object: str = "embedding"
    embedding: list[float] | str


# Top-level JSON response for /v1/embeddings.
class EmbeddingResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"embd-{random_uuid()}")
    object: str = "list"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    data: list[EmbeddingResponseData]
    usage: UsageInfo


# Raw-bytes response variant, streamed as application/octet-stream.
class EmbeddingBytesResponse(OpenAIBaseModel):
    content: list[bytes]
    headers: dict[str, str] | None = None
    media_type: str = "application/octet-stream"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/classify/serving.py
vllm/entrypoints/pooling/classify/serving.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Serving layer for the /classify endpoint: preprocessing of completion- and
# chat-style classification requests and conversion of pooling outputs into
# ClassificationResponse objects.

from http import HTTPStatus
from typing import cast

import jinja2
import numpy as np
from fastapi import Request

from vllm.engine.protocol import EngineClient
from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption
from vllm.entrypoints.logger import RequestLogger
from vllm.entrypoints.openai.protocol import (
    ChatCompletionRequest,
    ErrorResponse,
    UsageInfo,
)
from vllm.entrypoints.openai.serving_engine import (
    ClassificationServeContext,
    OpenAIServing,
    ServeContext,
)
from vllm.entrypoints.openai.serving_models import OpenAIServingModels
from vllm.entrypoints.pooling.classify.protocol import (
    ClassificationChatRequest,
    ClassificationCompletionRequest,
    ClassificationData,
    ClassificationRequest,
    ClassificationResponse,
)
from vllm.entrypoints.renderer import RenderConfig
from vllm.logger import init_logger
from vllm.outputs import ClassificationOutput, PoolingRequestOutput
from vllm.pooling_params import PoolingParams

logger = init_logger(__name__)


class ClassificationMixin(OpenAIServing):
    # Attributes supplied by the concrete subclass (see ServingClassification).
    chat_template: str | None
    chat_template_content_format: ChatTemplateContentFormatOption
    trust_request_chat_template: bool

    async def _preprocess(
        self,
        ctx: ServeContext,
    ) -> ErrorResponse | None:
        """
        Process classification inputs: tokenize text, resolve adapters,
        and prepare model-specific inputs.
        """
        ctx = cast(ClassificationServeContext, ctx)
        try:
            ctx.tokenizer = await self.engine_client.get_tokenizer()
            request_obj = ctx.request

            if isinstance(request_obj, ClassificationChatRequest):
                # Chat-style request: render messages through the chat template.
                chat_request = request_obj
                messages = chat_request.messages
                trust_request_chat_template = getattr(
                    self,
                    "trust_request_chat_template",
                    False,
                )
                # Reject request-supplied templates unless explicitly trusted.
                ret = self._validate_chat_template(
                    request_chat_template=chat_request.chat_template,
                    chat_template_kwargs=chat_request.chat_template_kwargs,
                    trust_request_chat_template=trust_request_chat_template,
                )
                if ret:
                    return ret

                _, engine_prompts = await self._preprocess_chat(
                    cast(ChatCompletionRequest, chat_request),
                    ctx.tokenizer,
                    messages,
                    chat_template=(
                        chat_request.chat_template
                        or getattr(self, "chat_template", None)
                    ),
                    chat_template_content_format=cast(
                        ChatTemplateContentFormatOption,
                        getattr(self, "chat_template_content_format", "auto"),
                    ),
                    # Classification never continues/extends generation.
                    add_generation_prompt=False,
                    continue_final_message=False,
                    add_special_tokens=chat_request.add_special_tokens,
                )
                ctx.engine_prompts = engine_prompts
            elif isinstance(request_obj, ClassificationCompletionRequest):
                # Completion-style request: plain string or list of strings.
                completion_request = request_obj
                input_data = completion_request.input
                if input_data in (None, ""):
                    return self.create_error_response(
                        "Input or messages must be provided",
                        status_code=HTTPStatus.BAD_REQUEST,
                    )
                # An explicitly empty list is valid and yields no prompts.
                if isinstance(input_data, list) and not input_data:
                    ctx.engine_prompts = []
                    return None

                renderer = self._get_renderer(ctx.tokenizer)
                prompt_input = cast(str | list[str], input_data)
                ctx.engine_prompts = await renderer.render_prompt(
                    prompt_or_prompts=prompt_input,
                    config=self._build_render_config(completion_request),
                )
            else:
                return self.create_error_response(
                    "Invalid classification request type",
                    status_code=HTTPStatus.BAD_REQUEST,
                )

            return None
        except (ValueError, TypeError, jinja2.TemplateError) as e:
            logger.exception("Error in preprocessing prompt inputs")
            return self.create_error_response(str(e))

    def _build_response(
        self,
        ctx: ServeContext,
    ) -> ClassificationResponse | ErrorResponse:
        """
        Convert model outputs to a formatted classification response
        with probabilities and labels.
        """
        ctx = cast(ClassificationServeContext, ctx)
        items: list[ClassificationData] = []
        num_prompt_tokens = 0

        final_res_batch_checked = cast(list[PoolingRequestOutput], ctx.final_res_batch)

        for idx, final_res in enumerate(final_res_batch_checked):
            classify_res = ClassificationOutput.from_base(final_res.outputs)

            probs = classify_res.probs
            # Label is looked up from the HF config's id2label map; None when
            # the config has no such map or the index is missing.
            predicted_index = int(np.argmax(probs))
            label = getattr(self.model_config.hf_config, "id2label", {}).get(
                predicted_index
            )

            item = ClassificationData(
                index=idx,
                label=label,
                probs=probs,
                num_classes=len(probs),
            )

            items.append(item)
            prompt_token_ids = final_res.prompt_token_ids
            num_prompt_tokens += len(prompt_token_ids)

        # Pooling produces no completion tokens, so total == prompt tokens.
        usage = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            total_tokens=num_prompt_tokens,
        )

        return ClassificationResponse(
            id=ctx.request_id,
            created=ctx.created_time,
            model=ctx.model_name,
            data=items,
            usage=usage,
        )

    def _build_render_config(self, request: ClassificationRequest) -> RenderConfig:
        # Rendering options derived from the request plus the model's max length.
        return RenderConfig(
            max_length=self.max_model_len,
            truncate_prompt_tokens=request.truncate_prompt_tokens,
            add_special_tokens=request.add_special_tokens,
        )


class ServingClassification(ClassificationMixin):
    # Prefix used when generating request ids (see create_classify).
    request_id_prefix = "classify"

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        request_logger: RequestLogger | None,
        chat_template: str | None = None,
        chat_template_content_format: ChatTemplateContentFormatOption = "auto",
        trust_request_chat_template: bool = False,
        log_error_stack: bool = False,
    ) -> None:
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            log_error_stack=log_error_stack,
        )

        # Stored for use by ClassificationMixin._preprocess.
        self.chat_template = chat_template
        self.chat_template_content_format = chat_template_content_format
        self.trust_request_chat_template = trust_request_chat_template

    async def create_classify(
        self,
        request: ClassificationRequest,
        raw_request: Request,
    ) -> ClassificationResponse | ErrorResponse:
        """Build a serve context for the request and delegate to the base handler."""
        model_name = self.models.model_name()
        request_id = f"{self.request_id_prefix}-{self._base_request_id(raw_request)}"

        ctx = ClassificationServeContext(
            request=request,
            raw_request=raw_request,
            model_name=model_name,
            request_id=request_id,
        )

        return await super().handle(ctx)  # type: ignore

    def _create_pooling_params(
        self,
        ctx: ServeContext[ClassificationRequest],
    ) -> PoolingParams | ErrorResponse:
        """Create pooling params and verify they are valid for classification."""
        pooling_params = super()._create_pooling_params(ctx)
        if isinstance(pooling_params, ErrorResponse):
            return pooling_params

        try:
            pooling_params.verify("classify", self.model_config)
        except ValueError as e:
            return self.create_error_response(str(e))

        return pooling_params
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/classify/__init__.py
vllm/entrypoints/pooling/classify/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/classify/api_router.py
vllm/entrypoints/pooling/classify/api_router.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""FastAPI router exposing the /classify endpoint."""

from http import HTTPStatus

from fastapi import APIRouter, Depends, HTTPException, Request
from starlette.responses import JSONResponse
from typing_extensions import assert_never

from vllm.entrypoints.openai.protocol import ErrorResponse
from vllm.entrypoints.openai.utils import validate_json_request
from vllm.entrypoints.pooling.classify.protocol import (
    ClassificationRequest,
    ClassificationResponse,
)
from vllm.entrypoints.pooling.classify.serving import ServingClassification
from vllm.entrypoints.utils import load_aware_call, with_cancellation

router = APIRouter()


def classify(request: Request) -> ServingClassification | None:
    """Return the classification handler stored on app state, if one is served."""
    return request.app.state.openai_serving_classification


@router.post("/classify", dependencies=[Depends(validate_json_request)])
@with_cancellation
@load_aware_call
async def create_classify(request: ClassificationRequest, raw_request: Request):
    """Handle a classification request and map the result to an HTTP response."""
    handler = classify(raw_request)
    if handler is None:
        # No classification model is being served; report the error through
        # the always-available tokenization server.
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Classification API"
        )

    try:
        result = await handler.create_classify(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    # Dispatch on the handler's result type; guard-style ifs with early return.
    if isinstance(result, ErrorResponse):
        return JSONResponse(
            content=result.model_dump(), status_code=result.error.code
        )
    if isinstance(result, ClassificationResponse):
        return JSONResponse(content=result.model_dump())
    assert_never(result)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/pooling/classify/protocol.py
vllm/entrypoints/pooling/classify/protocol.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Pydantic request/response models for the /classify endpoint.

import time
from typing import Annotated, Any, TypeAlias

from pydantic import (
    Field,
)

from vllm import PoolingParams
from vllm.config.pooler import get_use_activation
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
from vllm.entrypoints.openai.protocol import OpenAIBaseModel, UsageInfo
from vllm.utils import random_uuid


# Classification request with raw string input(s).
class ClassificationCompletionRequest(OpenAIBaseModel):
    model: str | None = None
    input: list[str] | str
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None
    user: str | None = None

    # vLLM-specific extensions.
    # --8<-- [start:classification-extra-params]
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    add_special_tokens: bool = Field(
        default=True,
        description=(
            "If true (the default), special tokens (e.g. BOS) will be added to "
            "the prompt."
        ),
    )
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    # softmax/activation are legacy aliases folded into use_activation by
    # get_use_activation (see to_pooling_params).
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )
    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:classification-extra-params]

    def to_pooling_params(self):
        # Map request-level knobs onto the engine's PoolingParams.
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            use_activation=get_use_activation(self),
        )


# Classification request expressed as chat messages rendered via a template.
class ClassificationChatRequest(OpenAIBaseModel):
    model: str | None = None
    messages: list[ChatCompletionMessageParam]
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None
    user: str | None = None

    # --8<-- [start:chat-classification-extra-params]
    add_generation_prompt: bool = Field(
        default=False,
        description=(
            "If true, the generation prompt will be added to the chat template. "
            "This is a parameter used by chat template in tokenizer config of the "
            "model."
        ),
    )
    add_special_tokens: bool = Field(
        default=False,
        description=(
            "If true, special tokens (e.g. BOS) will be added to the prompt "
            "on top of what is added by the chat template. "
            "For most models, the chat template takes care of adding the "
            "special tokens so this should be set to false (as is the "
            "default)."
        ),
    )
    chat_template: str | None = Field(
        default=None,
        description=(
            "A Jinja template to use for this conversion. "
            "As of transformers v4.44, default chat template is no longer "
            "allowed, so you must provide a chat template if the tokenizer "
            "does not define one."
        ),
    )
    chat_template_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=(
            "Additional keyword args to pass to the template renderer. "
            "Will be accessible by the chat template."
        ),
    )
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )
    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:chat-classification-extra-params]

    def to_pooling_params(self):
        # Same mapping as ClassificationCompletionRequest.to_pooling_params.
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            use_activation=get_use_activation(self),
        )


# Either request flavor is accepted by the endpoint.
ClassificationRequest: TypeAlias = (
    ClassificationCompletionRequest | ClassificationChatRequest
)


# One classification result; label may be None when the model config has no
# id2label mapping for the predicted index.
class ClassificationData(OpenAIBaseModel):
    index: int
    label: str | None
    probs: list[float]
    num_classes: int


# Top-level JSON response for /classify.
class ClassificationResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"classify-{random_uuid()}")
    object: str = "list"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    data: list[ClassificationData]
    usage: UsageInfo
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/anthropic/serving_messages.py
vllm/entrypoints/anthropic/serving_messages.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/vllm/vllm/entrypoints/openai/serving_chat.py
"""Anthropic Messages API serving handler"""

import json
import logging
import time
from collections.abc import AsyncGenerator
from typing import Any

from fastapi import Request

from vllm.engine.protocol import EngineClient
from vllm.entrypoints.anthropic.protocol import (
    AnthropicContentBlock,
    AnthropicDelta,
    AnthropicError,
    AnthropicMessagesRequest,
    AnthropicMessagesResponse,
    AnthropicStreamEvent,
    AnthropicUsage,
)
from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption
from vllm.entrypoints.logger import RequestLogger
from vllm.entrypoints.openai.protocol import (
    ChatCompletionNamedToolChoiceParam,
    ChatCompletionRequest,
    ChatCompletionResponse,
    ChatCompletionStreamResponse,
    ChatCompletionToolsParam,
    ErrorResponse,
    StreamOptions,
)
from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
from vllm.entrypoints.openai.serving_models import OpenAIServingModels

logger = logging.getLogger(__name__)


def wrap_data_with_event(data: str, event: str):
    # Format one server-sent event with an explicit event name.
    return f"event: {event}\ndata: {data}\n\n"


class AnthropicServingMessages(OpenAIServingChat):
    """Handler for Anthropic Messages API requests"""

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        response_role: str,
        *,
        request_logger: RequestLogger | None,
        chat_template: str | None,
        chat_template_content_format: ChatTemplateContentFormatOption,
        return_tokens_as_token_ids: bool = False,
        reasoning_parser: str = "",
        enable_auto_tools: bool = False,
        tool_parser: str | None = None,
        enable_prompt_tokens_details: bool = False,
        enable_force_include_usage: bool = False,
    ):
        super().__init__(
            engine_client=engine_client,
            models=models,
            response_role=response_role,
            request_logger=request_logger,
            chat_template=chat_template,
            chat_template_content_format=chat_template_content_format,
            return_tokens_as_token_ids=return_tokens_as_token_ids,
            reasoning_parser=reasoning_parser,
            enable_auto_tools=enable_auto_tools,
            tool_parser=tool_parser,
            enable_prompt_tokens_details=enable_prompt_tokens_details,
            enable_force_include_usage=enable_force_include_usage,
        )
        # OpenAI finish_reason -> Anthropic stop_reason.
        self.stop_reason_map = {
            "stop": "end_turn",
            "length": "max_tokens",
            "tool_calls": "tool_use",
        }

    def _convert_anthropic_to_openai_request(
        self, anthropic_request: AnthropicMessagesRequest
    ) -> ChatCompletionRequest:
        """Convert Anthropic message format to OpenAI format"""
        openai_messages = []

        # Add system message if provided
        if anthropic_request.system:
            if isinstance(anthropic_request.system, str):
                openai_messages.append(
                    {"role": "system", "content": anthropic_request.system}
                )
            else:
                # List of content blocks: concatenate all text blocks.
                system_prompt = ""
                for block in anthropic_request.system:
                    if block.type == "text" and block.text:
                        system_prompt += block.text
                openai_messages.append({"role": "system", "content": system_prompt})

        for msg in anthropic_request.messages:
            openai_msg: dict[str, Any] = {"role": msg.role}  # type: ignore

            if isinstance(msg.content, str):
                openai_msg["content"] = msg.content
            else:
                # Handle complex content blocks
                content_parts: list[dict[str, Any]] = []
                tool_calls: list[dict[str, Any]] = []

                for block in msg.content:
                    if block.type == "text" and block.text:
                        content_parts.append({"type": "text", "text": block.text})
                    elif block.type == "image" and block.source:
                        content_parts.append(
                            {
                                "type": "image_url",
                                "image_url": {"url": block.source.get("data", "")},
                            }
                        )
                    elif block.type == "tool_use":
                        # Convert tool use to function call format
                        tool_call = {
                            "id": block.id or f"call_{int(time.time())}",
                            "type": "function",
                            "function": {
                                "name": block.name or "",
                                "arguments": json.dumps(block.input or {}),
                            },
                        }
                        tool_calls.append(tool_call)
                    elif block.type == "tool_result":
                        if msg.role == "user":
                            # User tool results become standalone "tool" messages.
                            openai_messages.append(
                                {
                                    "role": "tool",
                                    "tool_call_id": block.id or "",
                                    "content": str(block.content)
                                    if block.content
                                    else "",
                                }
                            )
                        else:
                            # Assistant tool result becomes regular text
                            tool_result_text = (
                                str(block.content) if block.content else ""
                            )
                            content_parts.append(
                                {
                                    "type": "text",
                                    "text": f"Tool result: {tool_result_text}",
                                }
                            )

                # Add tool calls to the message if any
                if tool_calls:
                    openai_msg["tool_calls"] = tool_calls  # type: ignore

                # Add content parts if any
                if content_parts:
                    # A single text part collapses to a plain string.
                    if len(content_parts) == 1 and content_parts[0]["type"] == "text":
                        openai_msg["content"] = content_parts[0]["text"]
                    else:
                        openai_msg["content"] = content_parts  # type: ignore
                elif not tool_calls:
                    # Nothing usable in this message; drop it.
                    continue

            openai_messages.append(openai_msg)

        req = ChatCompletionRequest(
            model=anthropic_request.model,
            messages=openai_messages,
            max_tokens=anthropic_request.max_tokens,
            max_completion_tokens=anthropic_request.max_tokens,
            stop=anthropic_request.stop_sequences,
            temperature=anthropic_request.temperature,
            top_p=anthropic_request.top_p,
            top_k=anthropic_request.top_k,
        )
        if anthropic_request.stream:
            req.stream = anthropic_request.stream
            # Usage is always requested so stream conversion can emit
            # Anthropic usage fields.
            req.stream_options = StreamOptions.validate(
                {"include_usage": True, "continuous_usage_stats": True}
            )
        # Map Anthropic tool_choice onto the OpenAI equivalents.
        if anthropic_request.tool_choice is None:
            req.tool_choice = None
        elif anthropic_request.tool_choice.type == "auto":
            req.tool_choice = "auto"
        elif anthropic_request.tool_choice.type == "any":
            req.tool_choice = "required"
        elif anthropic_request.tool_choice.type == "tool":
            req.tool_choice = ChatCompletionNamedToolChoiceParam.model_validate(
                {
                    "type": "function",
                    "function": {"name": anthropic_request.tool_choice.name},
                }
            )

        tools = []
        if anthropic_request.tools is None:
            return req
        for tool in anthropic_request.tools:
            tools.append(
                ChatCompletionToolsParam.model_validate(
                    {
                        "type": "function",
                        "function": {
                            "name": tool.name,
                            "description": tool.description,
                            "parameters": tool.input_schema,
                        },
                    }
                )
            )
        # Tools present but no explicit choice: default to "auto".
        if req.tool_choice is None:
            req.tool_choice = "auto"
        req.tools = tools
        return req

    async def create_messages(
        self,
        request: AnthropicMessagesRequest,
        raw_request: Request | None = None,
    ) -> AsyncGenerator[str, None] | AnthropicMessagesResponse | ErrorResponse:
        """
        Messages API similar to Anthropic's API.

        See https://docs.anthropic.com/en/api/messages for the API specification.
        This API mimics the Anthropic messages API.
        """
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Received messages request %s", request.model_dump_json())
        chat_req = self._convert_anthropic_to_openai_request(request)
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Convert to OpenAI request %s", chat_req.model_dump_json())
        generator = await self.create_chat_completion(chat_req, raw_request)

        if isinstance(generator, ErrorResponse):
            return generator
        elif isinstance(generator, ChatCompletionResponse):
            # Non-streaming path: convert the full response in one shot.
            return self.messages_full_converter(generator)

        # Streaming path: wrap the SSE stream in an Anthropic-event converter.
        return self.message_stream_converter(generator)

    def messages_full_converter(
        self,
        generator: ChatCompletionResponse,
    ) -> AnthropicMessagesResponse:
        """Convert a complete OpenAI chat response into an Anthropic response."""
        result = AnthropicMessagesResponse(
            id=generator.id,
            content=[],
            model=generator.model,
            usage=AnthropicUsage(
                input_tokens=generator.usage.prompt_tokens,
                output_tokens=generator.usage.completion_tokens,
            ),
        )
        # Only the first choice is converted.
        if generator.choices[0].finish_reason == "stop":
            result.stop_reason = "end_turn"
        elif generator.choices[0].finish_reason == "length":
            result.stop_reason = "max_tokens"
        elif generator.choices[0].finish_reason == "tool_calls":
            result.stop_reason = "tool_use"

        content: list[AnthropicContentBlock] = [
            AnthropicContentBlock(
                type="text",
                text=generator.choices[0].message.content
                if generator.choices[0].message.content
                else "",
            )
        ]
        for tool_call in generator.choices[0].message.tool_calls:
            anthropic_tool_call = AnthropicContentBlock(
                type="tool_use",
                id=tool_call.id,
                name=tool_call.function.name,
                input=json.loads(tool_call.function.arguments),
            )
            content += [anthropic_tool_call]
        result.content = content
        return result

    async def message_stream_converter(
        self,
        generator: AsyncGenerator[str, None],
    ) -> AsyncGenerator[str, None]:
        """Translate an OpenAI SSE chat stream into Anthropic stream events.

        State machine: first chunk -> message_start; text/tool deltas ->
        content_block_start/delta/stop; usage-only final chunk ->
        message_delta; [DONE] -> message_stop.
        """
        try:
            first_item = True
            finish_reason = None
            # Index of the Anthropic content block currently being emitted.
            content_block_index = 0
            content_block_started = False
            async for item in generator:
                if item.startswith("data:"):
                    data_str = item[5:].strip().rstrip("\n")
                    if data_str == "[DONE]":
                        stop_message = AnthropicStreamEvent(
                            type="message_stop",
                        )
                        data = stop_message.model_dump_json(
                            exclude_unset=True, exclude_none=True
                        )
                        yield wrap_data_with_event(data, "message_stop")
                        yield "data: [DONE]\n\n"
                    else:
                        origin_chunk = ChatCompletionStreamResponse.model_validate_json(
                            data_str
                        )
                        if first_item:
                            # First chunk carries id/model -> message_start.
                            chunk = AnthropicStreamEvent(
                                type="message_start",
                                message=AnthropicMessagesResponse(
                                    id=origin_chunk.id,
                                    content=[],
                                    model=origin_chunk.model,
                                    usage=AnthropicUsage(
                                        input_tokens=origin_chunk.usage.prompt_tokens
                                        if origin_chunk.usage
                                        else 0,
                                        output_tokens=0,
                                    ),
                                ),
                            )
                            first_item = False
                            data = chunk.model_dump_json(exclude_unset=True)
                            yield wrap_data_with_event(data, "message_start")
                            continue

                        # last chunk including usage info
                        if len(origin_chunk.choices) == 0:
                            if content_block_started:
                                stop_chunk = AnthropicStreamEvent(
                                    index=content_block_index,
                                    type="content_block_stop",
                                )
                                data = stop_chunk.model_dump_json(exclude_unset=True)
                                yield wrap_data_with_event(data, "content_block_stop")
                            stop_reason = self.stop_reason_map.get(
                                finish_reason or "stop"
                            )
                            chunk = AnthropicStreamEvent(
                                type="message_delta",
                                delta=AnthropicDelta(stop_reason=stop_reason),
                                usage=AnthropicUsage(
                                    input_tokens=origin_chunk.usage.prompt_tokens
                                    if origin_chunk.usage
                                    else 0,
                                    output_tokens=origin_chunk.usage.completion_tokens
                                    if origin_chunk.usage
                                    else 0,
                                ),
                            )
                            data = chunk.model_dump_json(exclude_unset=True)
                            yield wrap_data_with_event(data, "message_delta")
                            continue

                        # Remember finish_reason; it is emitted with the final
                        # usage-only chunk above.
                        if origin_chunk.choices[0].finish_reason is not None:
                            finish_reason = origin_chunk.choices[0].finish_reason
                            continue

                        # content
                        if origin_chunk.choices[0].delta.content is not None:
                            if not content_block_started:
                                chunk = AnthropicStreamEvent(
                                    index=content_block_index,
                                    type="content_block_start",
                                    content_block=AnthropicContentBlock(
                                        type="text", text=""
                                    ),
                                )
                                data = chunk.model_dump_json(exclude_unset=True)
                                yield wrap_data_with_event(data, "content_block_start")
                                content_block_started = True
                            if origin_chunk.choices[0].delta.content == "":
                                continue
                            chunk = AnthropicStreamEvent(
                                index=content_block_index,
                                type="content_block_delta",
                                delta=AnthropicDelta(
                                    type="text_delta",
                                    text=origin_chunk.choices[0].delta.content,
                                ),
                            )
                            data = chunk.model_dump_json(exclude_unset=True)
                            yield wrap_data_with_event(data, "content_block_delta")
                            continue
                        # tool calls
                        elif len(origin_chunk.choices[0].delta.tool_calls) > 0:
                            tool_call = origin_chunk.choices[0].delta.tool_calls[0]
                            if tool_call.id is not None:
                                # A new tool call: close any open block first.
                                if content_block_started:
                                    stop_chunk = AnthropicStreamEvent(
                                        index=content_block_index,
                                        type="content_block_stop",
                                    )
                                    data = stop_chunk.model_dump_json(
                                        exclude_unset=True
                                    )
                                    yield wrap_data_with_event(
                                        data, "content_block_stop"
                                    )
                                    content_block_started = False
                                    content_block_index += 1
                                chunk = AnthropicStreamEvent(
                                    index=content_block_index,
                                    type="content_block_start",
                                    content_block=AnthropicContentBlock(
                                        type="tool_use",
                                        id=tool_call.id,
                                        name=tool_call.function.name
                                        if tool_call.function
                                        else None,
                                        input={},
                                    ),
                                )
                                data = chunk.model_dump_json(exclude_unset=True)
                                yield wrap_data_with_event(data, "content_block_start")
                                content_block_started = True
                            else:
                                # Continuation of the current tool call's
                                # argument JSON.
                                chunk = AnthropicStreamEvent(
                                    index=content_block_index,
                                    type="content_block_delta",
                                    delta=AnthropicDelta(
                                        type="input_json_delta",
                                        partial_json=tool_call.function.arguments
                                        if tool_call.function
                                        else None,
                                    ),
                                )
                                data = chunk.model_dump_json(exclude_unset=True)
                                yield wrap_data_with_event(data, "content_block_delta")
                            continue
                else:
                    # Non-SSE line from upstream: surface as an error event.
                    error_response = AnthropicStreamEvent(
                        type="error",
                        error=AnthropicError(
                            type="internal_error",
                            message="Invalid data format received",
                        ),
                    )
                    data = error_response.model_dump_json(exclude_unset=True)
                    yield wrap_data_with_event(data, "error")
                    yield "data: [DONE]\n\n"
        except Exception as e:
            logger.exception("Error in message stream converter.")
            error_response = AnthropicStreamEvent(
                type="error",
                error=AnthropicError(type="internal_error", message=str(e)),
            )
            data = error_response.model_dump_json(exclude_unset=True)
            yield wrap_data_with_event(data, "error")
            yield "data: [DONE]\n\n"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/anthropic/__init__.py
vllm/entrypoints/anthropic/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/anthropic/protocol.py
vllm/entrypoints/anthropic/protocol.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Pydantic models for Anthropic API protocol"""

import time
from typing import Any, Literal, Optional

from pydantic import BaseModel, field_validator


class AnthropicError(BaseModel):
    """Error structure for Anthropic API"""

    # Machine-readable error category (e.g. "internal_error").
    type: str
    # Human-readable description of the error.
    message: str


class AnthropicErrorResponse(BaseModel):
    """Error response structure for Anthropic API"""

    type: Literal["error"] = "error"
    error: AnthropicError


class AnthropicUsage(BaseModel):
    """Token usage information"""

    input_tokens: int
    output_tokens: int
    # Cache accounting fields are optional; omitted when caching is unused.
    cache_creation_input_tokens: int | None = None
    cache_read_input_tokens: int | None = None


class AnthropicContentBlock(BaseModel):
    """Content block in message.

    A single block may represent plain text, an image, a tool invocation,
    or a tool result; only the fields relevant to its ``type`` are set.
    """

    type: Literal["text", "image", "tool_use", "tool_result"]
    text: str | None = None
    # For image content
    source: dict[str, Any] | None = None
    # For tool use/result
    id: str | None = None
    name: str | None = None
    input: dict[str, Any] | None = None
    content: str | list[dict[str, Any]] | None = None
    is_error: bool | None = None


class AnthropicMessage(BaseModel):
    """Message structure"""

    role: Literal["user", "assistant"]
    # Either a plain string or a list of typed content blocks.
    content: str | list[AnthropicContentBlock]


class AnthropicTool(BaseModel):
    """Tool definition"""

    name: str
    description: str | None = None
    # JSON schema describing the tool's input payload.
    input_schema: dict[str, Any]

    @field_validator("input_schema")
    @classmethod
    def validate_input_schema(cls, v):
        """Ensure the schema is a dict and has a top-level "type" key."""
        if not isinstance(v, dict):
            raise ValueError("input_schema must be a dictionary")
        if "type" not in v:
            v["type"] = "object"  # Default to object type
        return v


class AnthropicToolChoice(BaseModel):
    """Tool Choice definition"""

    # "auto": model decides; "any": must use some tool; "tool": use `name`.
    type: Literal["auto", "any", "tool"]
    name: str | None = None


class AnthropicMessagesRequest(BaseModel):
    """Anthropic Messages API request"""

    model: str
    messages: list[AnthropicMessage]
    # Required by the Anthropic API (unlike OpenAI's optional max_tokens).
    max_tokens: int
    metadata: dict[str, Any] | None = None
    stop_sequences: list[str] | None = None
    stream: bool | None = False
    system: str | list[AnthropicContentBlock] | None = None
    temperature: float | None = None
    tool_choice: AnthropicToolChoice | None = None
    tools: list[AnthropicTool] | None = None
    top_k: int | None = None
    top_p: float | None = None

    @field_validator("model")
    @classmethod
    def validate_model(cls, v):
        """Reject empty model names."""
        if not v:
            raise ValueError("Model is required")
        return v

    @field_validator("max_tokens")
    @classmethod
    def validate_max_tokens(cls, v):
        """Reject non-positive token budgets."""
        if v <= 0:
            raise ValueError("max_tokens must be positive")
        return v


class AnthropicDelta(BaseModel):
    """Delta for streaming responses.

    Used both for content-block deltas (``text_delta`` /
    ``input_json_delta``) and for message-level deltas (stop info).
    """

    type: Literal["text_delta", "input_json_delta"] | None = None
    text: str | None = None
    partial_json: str | None = None
    # Message delta
    stop_reason: (
        Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"] | None
    ) = None
    stop_sequence: str | None = None


class AnthropicStreamEvent(BaseModel):
    """Streaming event.

    One model covers every SSE event type; unused fields stay ``None`` and
    callers serialize with ``exclude_unset``/``exclude_none``.
    """

    type: Literal[
        "message_start",
        "message_delta",
        "message_stop",
        "content_block_start",
        "content_block_delta",
        "content_block_stop",
        "ping",
        "error",
    ]
    # Forward reference: AnthropicMessagesResponse is declared below.
    message: Optional["AnthropicMessagesResponse"] = None
    delta: AnthropicDelta | None = None
    content_block: AnthropicContentBlock | None = None
    index: int | None = None
    error: AnthropicError | None = None
    usage: AnthropicUsage | None = None


class AnthropicMessagesResponse(BaseModel):
    """Anthropic Messages API response"""

    id: str
    type: Literal["message"] = "message"
    role: Literal["assistant"] = "assistant"
    content: list[AnthropicContentBlock]
    model: str
    stop_reason: (
        Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"] | None
    ) = None
    stop_sequence: str | None = None
    usage: AnthropicUsage | None = None

    def model_post_init(self, __context):
        # Fall back to a millisecond-timestamp id when the caller passed "".
        if not self.id:
            self.id = f"msg_{int(time.time() * 1000)}"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/collect_env.py
vllm/entrypoints/cli/collect_env.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse import typing from vllm.collect_env import main as collect_env_main from vllm.entrypoints.cli.types import CLISubcommand if typing.TYPE_CHECKING: from vllm.utils.argparse_utils import FlexibleArgumentParser else: FlexibleArgumentParser = argparse.ArgumentParser class CollectEnvSubcommand(CLISubcommand): """The `collect-env` subcommand for the vLLM CLI.""" name = "collect-env" @staticmethod def cmd(args: argparse.Namespace) -> None: """Collect information about the environment.""" collect_env_main() def subparser_init( self, subparsers: argparse._SubParsersAction ) -> FlexibleArgumentParser: return subparsers.add_parser( "collect-env", help="Start collecting environment information.", description="Start collecting environment information.", usage="vllm collect-env", ) def cmd_init() -> list[CLISubcommand]: return [CollectEnvSubcommand()]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/main.py
vllm/entrypoints/cli/main.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""The CLI entrypoints of vLLM

Note that all future modules must be lazily loaded within main
to avoid certain eager import breakage."""

import importlib.metadata
import sys

from vllm.logger import init_logger

logger = init_logger(__name__)


def main():
    """Entry point for the `vllm` CLI: build the parser tree and dispatch.

    All subcommand modules are imported lazily here (not at module top
    level) per the module docstring, so that `import vllm...cli.main`
    stays cheap and avoids eager-import breakage.
    """
    import vllm.entrypoints.cli.benchmark.main
    import vllm.entrypoints.cli.collect_env
    import vllm.entrypoints.cli.openai
    import vllm.entrypoints.cli.run_batch
    import vllm.entrypoints.cli.serve
    from vllm.entrypoints.utils import VLLM_SUBCMD_PARSER_EPILOG, cli_env_setup
    from vllm.utils.argparse_utils import FlexibleArgumentParser

    # Order here controls the order subcommands are registered (and hence
    # listed in --help output).
    CMD_MODULES = [
        vllm.entrypoints.cli.openai,
        vllm.entrypoints.cli.serve,
        vllm.entrypoints.cli.benchmark.main,
        vllm.entrypoints.cli.collect_env,
        vllm.entrypoints.cli.run_batch,
    ]

    cli_env_setup()

    # For 'vllm bench *': use CPU instead of UnspecifiedPlatform by default
    if len(sys.argv) > 1 and sys.argv[1] == "bench":
        logger.debug(
            "Bench command detected, must ensure current platform is not "
            "UnspecifiedPlatform to avoid device type inference error"
        )
        from vllm import platforms

        if platforms.current_platform.is_unspecified():
            from vllm.platforms.cpu import CpuPlatform

            # Mutates global platform state before any parser/engine code runs.
            platforms.current_platform = CpuPlatform()
            logger.info(
                "Unspecified platform detected, switching to CPU Platform instead."
            )

    parser = FlexibleArgumentParser(
        description="vLLM CLI",
        epilog=VLLM_SUBCMD_PARSER_EPILOG.format(subcmd="[subcommand]"),
    )
    parser.add_argument(
        "-v",
        "--version",
        action="version",
        version=importlib.metadata.version("vllm"),
    )

    # required=False so bare `vllm` falls through to print_help() below.
    subparsers = parser.add_subparsers(required=False, dest="subparser")

    cmds = {}
    for cmd_module in CMD_MODULES:
        new_cmds = cmd_module.cmd_init()
        for cmd in new_cmds:
            # Each subcommand wires its own parser and dispatch function.
            cmd.subparser_init(subparsers).set_defaults(dispatch_function=cmd.cmd)
            cmds[cmd.name] = cmd
    args = parser.parse_args()
    if args.subparser in cmds:
        # Give the chosen subcommand a chance to reject bad arg combinations.
        cmds[args.subparser].validate(args)

    if hasattr(args, "dispatch_function"):
        args.dispatch_function(args)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/__init__.py
vllm/entrypoints/cli/__init__.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm.entrypoints.cli.benchmark.latency import BenchmarkLatencySubcommand from vllm.entrypoints.cli.benchmark.mm_processor import ( BenchmarkMMProcessorSubcommand, ) from vllm.entrypoints.cli.benchmark.serve import BenchmarkServingSubcommand from vllm.entrypoints.cli.benchmark.startup import BenchmarkStartupSubcommand from vllm.entrypoints.cli.benchmark.sweep import BenchmarkSweepSubcommand from vllm.entrypoints.cli.benchmark.throughput import BenchmarkThroughputSubcommand __all__: list[str] = [ "BenchmarkLatencySubcommand", "BenchmarkMMProcessorSubcommand", "BenchmarkServingSubcommand", "BenchmarkStartupSubcommand", "BenchmarkSweepSubcommand", "BenchmarkThroughputSubcommand", ]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/types.py
vllm/entrypoints/cli/types.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse import typing if typing.TYPE_CHECKING: from vllm.utils.argparse_utils import FlexibleArgumentParser else: FlexibleArgumentParser = argparse.ArgumentParser class CLISubcommand: """Base class for CLI argument handlers.""" name: str @staticmethod def cmd(args: argparse.Namespace) -> None: raise NotImplementedError("Subclasses should implement this method") def validate(self, args: argparse.Namespace) -> None: # No validation by default pass def subparser_init( self, subparsers: argparse._SubParsersAction ) -> FlexibleArgumentParser: raise NotImplementedError("Subclasses should implement this method")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/serve.py
vllm/entrypoints/cli/serve.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import argparse
import signal

import uvloop

import vllm
import vllm.envs as envs
from vllm.entrypoints.cli.types import CLISubcommand
from vllm.entrypoints.openai.api_server import (
    run_server,
    run_server_worker,
    setup_server,
)
from vllm.entrypoints.openai.cli_args import make_arg_parser, validate_parsed_serve_args
from vllm.entrypoints.utils import VLLM_SUBCMD_PARSER_EPILOG
from vllm.logger import init_logger
from vllm.usage.usage_lib import UsageContext
from vllm.utils.argparse_utils import FlexibleArgumentParser
from vllm.utils.network_utils import get_tcp_uri
from vllm.utils.system_utils import decorate_logs, set_process_title
from vllm.v1.engine.core import EngineCoreProc
from vllm.v1.engine.utils import CoreEngineProcManager, launch_core_engines
from vllm.v1.executor import Executor
from vllm.v1.executor.multiproc_executor import MultiprocExecutor
from vllm.v1.metrics.prometheus import setup_multiprocess_prometheus
from vllm.v1.utils import APIServerProcessManager, wait_for_completion_or_failure

logger = init_logger(__name__)

# User-facing description shown by `vllm serve --help`.
DESCRIPTION = """Launch a local OpenAI-compatible API server to serve LLM
completions via HTTP. Defaults to Qwen/Qwen3-0.6B if no model is specified.

Search by using: `--help=<ConfigGroup>` to explore options by section
(e.g., --help=ModelConfig, --help=Frontend)
Use `--help=all` to show all available flags at once.
"""


class ServeSubcommand(CLISubcommand):
    """The `serve` subcommand for the vLLM CLI."""

    name = "serve"

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        """Dispatch to one of three serving modes.

        - headless (or api_server_count < 1): engines only, no HTTP front end
        - api_server_count > 1: multi-process API servers
        - otherwise: a single API server in this process
        """
        # If model is specified in CLI (as positional arg), it takes precedence
        if hasattr(args, "model_tag") and args.model_tag is not None:
            args.model = args.model_tag

        if args.headless or args.api_server_count < 1:
            run_headless(args)
        else:
            if args.api_server_count > 1:
                run_multi_api_server(args)
            else:
                # Single API server (this process).
                uvloop.run(run_server(args))

    def validate(self, args: argparse.Namespace) -> None:
        """Reject invalid serve-argument combinations before dispatch."""
        validate_parsed_serve_args(args)

    def subparser_init(
        self, subparsers: argparse._SubParsersAction
    ) -> FlexibleArgumentParser:
        """Register the `serve` parser and attach the full engine arg set."""
        serve_parser = subparsers.add_parser(
            self.name,
            help="Launch a local OpenAI-compatible API server to serve LLM "
            "completions via HTTP.",
            description=DESCRIPTION,
            usage="vllm serve [model_tag] [options]",
        )
        serve_parser = make_arg_parser(serve_parser)
        serve_parser.epilog = VLLM_SUBCMD_PARSER_EPILOG.format(subcmd=self.name)
        return serve_parser


def cmd_init() -> list[CLISubcommand]:
    """Return the subcommand instances contributed by this module."""
    return [ServeSubcommand()]


def run_headless(args: argparse.Namespace):
    """Run engine processes with no API front end in this process.

    Used for secondary data-parallel nodes and for multi-node PP/TP
    workers; blocks until an engine exits or a shutdown signal arrives.

    Raises:
        ValueError: if args are incompatible with headless mode.
    """
    if args.api_server_count > 1:
        raise ValueError("api_server_count can't be set in headless mode")

    # Create the EngineConfig.
    engine_args = vllm.AsyncEngineArgs.from_cli_args(args)
    usage_context = UsageContext.OPENAI_API_SERVER
    vllm_config = engine_args.create_engine_config(
        usage_context=usage_context, headless=True
    )

    if engine_args.data_parallel_hybrid_lb:
        raise ValueError("data_parallel_hybrid_lb is not applicable in headless mode")

    parallel_config = vllm_config.parallel_config
    local_engine_count = parallel_config.data_parallel_size_local

    if local_engine_count <= 0:
        raise ValueError("data_parallel_size_local must be > 0 in headless mode")

    shutdown_requested = False

    # Catch SIGTERM and SIGINT to allow graceful shutdown.
    def signal_handler(signum, frame):
        nonlocal shutdown_requested
        logger.debug("Received %d signal.", signum)
        if not shutdown_requested:
            shutdown_requested = True
            # SystemExit unwinds into the try/finally below so the engine
            # manager is closed cleanly.
            raise SystemExit

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    if parallel_config.node_rank_within_dp > 0:
        from vllm.version import __version__ as VLLM_VERSION

        # Run headless workers (for multi-node PP/TP).
        host = parallel_config.master_addr
        head_node_address = f"{host}:{parallel_config.master_port}"
        logger.info(
            "Launching vLLM (v%s) headless multiproc executor, "
            "with head node address %s for torch.distributed process group.",
            VLLM_VERSION,
            head_node_address,
        )

        # inline=True keeps the monitor in this process so the call blocks.
        executor = MultiprocExecutor(vllm_config, monitor_workers=False)
        executor.start_worker_monitor(inline=True)
        return

    host = parallel_config.data_parallel_master_ip
    port = parallel_config.data_parallel_rpc_port
    handshake_address = get_tcp_uri(host, port)

    logger.info(
        "Launching %d data parallel engine(s) in headless mode, "
        "with head node address %s.",
        local_engine_count,
        handshake_address,
    )

    # Create the engines.
    engine_manager = CoreEngineProcManager(
        target_fn=EngineCoreProc.run_engine_core,
        local_engine_count=local_engine_count,
        start_index=vllm_config.parallel_config.data_parallel_rank,
        local_start_index=0,
        vllm_config=vllm_config,
        local_client=False,
        handshake_address=handshake_address,
        executor_class=Executor.get_class(vllm_config),
        log_stats=not engine_args.disable_log_stats,
    )

    try:
        # Block until any engine process exits.
        engine_manager.join_first()
    finally:
        logger.info("Shutting down.")
        engine_manager.close()


def run_multi_api_server(args: argparse.Namespace):
    """Launch engines plus one or more API server processes and wait.

    Coordinates startup ordering between core engines and API servers,
    including the delayed-start path for external/hybrid DP load-balancer
    ranks > 0.
    """
    assert not args.headless
    num_api_servers: int = args.api_server_count
    assert num_api_servers > 0

    if num_api_servers > 1:
        # Multiple API server processes share metrics via multiprocess mode.
        setup_multiprocess_prometheus()

    # Bind the listen socket up front so workers can inherit it.
    listen_address, sock = setup_server(args)

    engine_args = vllm.AsyncEngineArgs.from_cli_args(args)
    engine_args._api_process_count = num_api_servers
    # -1 marks this parent process as "not an API worker".
    engine_args._api_process_rank = -1

    usage_context = UsageContext.OPENAI_API_SERVER
    vllm_config = engine_args.create_engine_config(usage_context=usage_context)

    if num_api_servers > 1 and envs.VLLM_ALLOW_RUNTIME_LORA_UPDATING:
        raise ValueError(
            "VLLM_ALLOW_RUNTIME_LORA_UPDATING cannot be used with api_server_count > 1"
        )

    executor_class = Executor.get_class(vllm_config)
    log_stats = not engine_args.disable_log_stats

    parallel_config = vllm_config.parallel_config

    dp_rank = parallel_config.data_parallel_rank
    external_dp_lb = parallel_config.data_parallel_external_lb
    hybrid_dp_lb = parallel_config.data_parallel_hybrid_lb
    # Only rank 0 may run this path unless an external/hybrid LB is in use.
    assert external_dp_lb or hybrid_dp_lb or dp_rank == 0

    api_server_manager: APIServerProcessManager | None = None

    with launch_core_engines(
        vllm_config, executor_class, log_stats, num_api_servers
    ) as (local_engine_manager, coordinator, addresses):
        # Construct common args for the APIServerProcessManager up-front.
        api_server_manager_kwargs = dict(
            target_server_fn=run_api_server_worker_proc,
            listen_address=listen_address,
            sock=sock,
            args=args,
            num_servers=num_api_servers,
            input_addresses=addresses.inputs,
            output_addresses=addresses.outputs,
            stats_update_address=coordinator.get_stats_publish_address()
            if coordinator
            else None,
        )

        # For dp ranks > 0 in external/hybrid DP LB modes, we must delay the
        # start of the API servers until the local engine is started
        # (after the launcher context manager exits),
        # since we get the front-end stats update address from the coordinator
        # via the handshake with the local engine.
        if dp_rank == 0 or not (external_dp_lb or hybrid_dp_lb):
            # Start API servers using the manager.
            api_server_manager = APIServerProcessManager(**api_server_manager_kwargs)

    # NOTE(review): the source formatting was mangled; the comment above
    # ("after the launcher context manager exits") indicates the delayed
    # start below runs after the `with` block — confirm against upstream.
    # Start API servers now if they weren't already started.
    if api_server_manager is None:
        api_server_manager_kwargs["stats_update_address"] = (
            addresses.frontend_stats_publish_address
        )
        api_server_manager = APIServerProcessManager(**api_server_manager_kwargs)

    # Wait for API servers
    wait_for_completion_or_failure(
        api_server_manager=api_server_manager,
        engine_manager=local_engine_manager,
        coordinator=coordinator,
    )


def run_api_server_worker_proc(
    listen_address, sock, args, client_config=None, **uvicorn_kwargs
) -> None:
    """Entrypoint for individual API server worker processes."""
    client_config = client_config or {}
    server_index = client_config.get("client_index", 0)

    # Set process title and add process-specific prefix to stdout and stderr.
    set_process_title("APIServer", str(server_index))
    decorate_logs()

    uvloop.run(
        run_server_worker(listen_address, sock, args, client_config, **uvicorn_kwargs)
    )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/run_batch.py
vllm/entrypoints/cli/run_batch.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse import asyncio import importlib.metadata import typing from vllm.entrypoints.cli.types import CLISubcommand from vllm.entrypoints.utils import VLLM_SUBCMD_PARSER_EPILOG from vllm.logger import init_logger if typing.TYPE_CHECKING: from vllm.utils.argparse_utils import FlexibleArgumentParser else: FlexibleArgumentParser = argparse.ArgumentParser logger = init_logger(__name__) class RunBatchSubcommand(CLISubcommand): """The `run-batch` subcommand for vLLM CLI.""" name = "run-batch" @staticmethod def cmd(args: argparse.Namespace) -> None: from vllm.entrypoints.openai.run_batch import main as run_batch_main logger.info( "vLLM batch processing API version %s", importlib.metadata.version("vllm") ) logger.info("args: %s", args) # Start the Prometheus metrics server. # LLMEngine uses the Prometheus client # to publish metrics at the /metrics endpoint. if args.enable_metrics: from prometheus_client import start_http_server logger.info("Prometheus metrics enabled") start_http_server(port=args.port, addr=args.url) else: logger.info("Prometheus metrics disabled") asyncio.run(run_batch_main(args)) def subparser_init( self, subparsers: argparse._SubParsersAction ) -> FlexibleArgumentParser: from vllm.entrypoints.openai.run_batch import make_arg_parser run_batch_parser = subparsers.add_parser( self.name, help="Run batch prompts and write results to file.", description=( "Run batch prompts using vLLM's OpenAI-compatible API.\n" "Supports local or HTTP input/output files." ), usage="vllm run-batch -i INPUT.jsonl -o OUTPUT.jsonl --model <model>", ) run_batch_parser = make_arg_parser(run_batch_parser) run_batch_parser.epilog = VLLM_SUBCMD_PARSER_EPILOG.format(subcmd=self.name) return run_batch_parser def cmd_init() -> list[CLISubcommand]: return [RunBatchSubcommand()]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/openai.py
vllm/entrypoints/cli/openai.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import argparse
import os
import signal
import sys
from typing import TYPE_CHECKING

from openai import OpenAI
from openai.types.chat import ChatCompletionMessageParam

from vllm.entrypoints.cli.types import CLISubcommand

if TYPE_CHECKING:
    from vllm.utils.argparse_utils import FlexibleArgumentParser
else:
    FlexibleArgumentParser = argparse.ArgumentParser


def _register_signal_handlers():
    """Exit the interactive session cleanly on SIGINT/SIGTSTP.

    NOTE(review): SIGTSTP does not exist on Windows — presumably these
    interactive commands are POSIX-only; confirm before relying on it.
    """

    def signal_handler(sig, frame):
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTSTP, signal_handler)


def _interactive_cli(args: argparse.Namespace) -> tuple[str, OpenAI]:
    """Set up an interactive session: signal handlers, client, model name.

    Returns:
        (model_name, client): the model to query (explicit ``--model-name``
        or the first model reported by the server) and a configured client.
    """
    _register_signal_handlers()

    base_url = args.url
    api_key = args.api_key or os.environ.get("OPENAI_API_KEY", "EMPTY")
    openai_client = OpenAI(api_key=api_key, base_url=base_url)

    if args.model_name:
        model_name = args.model_name
    else:
        # Default to the first model the server advertises.
        available_models = openai_client.models.list()
        model_name = available_models.data[0].id

    print(f"Using model: {model_name}")

    return model_name, openai_client


def _print_chat_stream(stream) -> str:
    """Print a chat-completion stream as it arrives; return the full text."""
    output = ""
    for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.content:
            output += delta.content
            print(delta.content, end="", flush=True)
    print()
    return output


def _print_completion_stream(stream) -> str:
    """Print a text-completion stream as it arrives; return the full text."""
    output = ""
    for chunk in stream:
        text = chunk.choices[0].text
        if text is not None:
            output += text
            print(text, end="", flush=True)
    print()
    return output


def chat(system_prompt: str | None, model_name: str, client: OpenAI) -> None:
    """Run the interactive chat REPL until EOF (Ctrl-D).

    Maintains the running conversation (optionally seeded with a system
    prompt) and streams each assistant reply to stdout.
    """
    conversation: list[ChatCompletionMessageParam] = []
    if system_prompt is not None:
        conversation.append({"role": "system", "content": system_prompt})

    print("Please enter a message for the chat model:")
    while True:
        try:
            input_message = input("> ")
        except EOFError:
            break
        conversation.append({"role": "user", "content": input_message})

        stream = client.chat.completions.create(
            model=model_name, messages=conversation, stream=True
        )
        output = _print_chat_stream(stream)
        conversation.append({"role": "assistant", "content": output})


def _add_query_options(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
    """Add the connection options shared by `chat` and `complete`."""
    parser.add_argument(
        "--url",
        type=str,
        default="http://localhost:8000/v1",
        help="url of the running OpenAI-Compatible RESTful API server",
    )
    parser.add_argument(
        "--model-name",
        type=str,
        default=None,
        help=(
            "The model name used in prompt completion, default to "
            "the first model in list models API call."
        ),
    )
    parser.add_argument(
        "--api-key",
        type=str,
        default=None,
        help=(
            "API key for OpenAI services. If provided, this api key "
            "will overwrite the api key obtained through environment variables."
            " It is important to note that this option only applies to the "
            "OpenAI-compatible API endpoints and NOT other endpoints that may "
            "be present in the server. See the security guide in the vLLM docs "
            "for more details."
        ),
    )
    return parser


class ChatCommand(CLISubcommand):
    """The `chat` subcommand for the vLLM CLI."""

    name = "chat"

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        """Run one quick-mode exchange, or the interactive chat REPL."""
        model_name, client = _interactive_cli(args)
        system_prompt = args.system_prompt

        if args.quick:
            # Single-shot mode: one user message, print reply, exit.
            conversation: list[ChatCompletionMessageParam] = []
            if system_prompt is not None:
                conversation.append({"role": "system", "content": system_prompt})
            conversation.append({"role": "user", "content": args.quick})
            stream = client.chat.completions.create(
                model=model_name, messages=conversation, stream=True
            )
            output = _print_chat_stream(stream)
            conversation.append({"role": "assistant", "content": output})
            return

        # Fix: the original inlined a verbatim copy of chat()'s REPL loop
        # here; delegate to the shared helper instead (identical behavior).
        chat(system_prompt, model_name, client)

    @staticmethod
    def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
        """Add CLI arguments for the chat command."""
        _add_query_options(parser)
        parser.add_argument(
            "--system-prompt",
            type=str,
            default=None,
            help=(
                "The system prompt to be added to the chat template, "
                "used for models that support system prompts."
            ),
        )
        parser.add_argument(
            "-q",
            "--quick",
            type=str,
            metavar="MESSAGE",
            help=("Send a single prompt as MESSAGE and print the response, then exit."),
        )
        return parser

    def subparser_init(
        self, subparsers: argparse._SubParsersAction
    ) -> FlexibleArgumentParser:
        """Register the `chat` parser on the given subparsers action."""
        parser = subparsers.add_parser(
            "chat",
            help="Generate chat completions via the running API server.",
            description="Generate chat completions via the running API server.",
            usage="vllm chat [options]",
        )
        return ChatCommand.add_cli_args(parser)


class CompleteCommand(CLISubcommand):
    """The `complete` subcommand for the vLLM CLI."""

    name = "complete"

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        """Run one quick-mode completion, or the interactive prompt loop."""
        model_name, client = _interactive_cli(args)

        kwargs = {
            "model": model_name,
            "stream": True,
        }
        if args.max_tokens:
            kwargs["max_tokens"] = args.max_tokens

        if args.quick:
            stream = client.completions.create(prompt=args.quick, **kwargs)
            _print_completion_stream(stream)
            return

        print("Please enter prompt to complete:")
        while True:
            try:
                input_prompt = input("> ")
            except EOFError:
                break
            stream = client.completions.create(prompt=input_prompt, **kwargs)
            _print_completion_stream(stream)

    @staticmethod
    def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
        """Add CLI arguments for the complete command."""
        _add_query_options(parser)
        parser.add_argument(
            "--max-tokens",
            type=int,
            help="Maximum number of tokens to generate per output sequence.",
        )
        parser.add_argument(
            "-q",
            "--quick",
            type=str,
            metavar="PROMPT",
            help="Send a single prompt and print the completion output, then exit.",
        )
        return parser

    def subparser_init(
        self, subparsers: argparse._SubParsersAction
    ) -> FlexibleArgumentParser:
        """Register the `complete` parser on the given subparsers action."""
        parser = subparsers.add_parser(
            "complete",
            help=(
                "Generate text completions based on the given prompt "
                "via the running API server."
            ),
            description=(
                "Generate text completions based on the given prompt "
                "via the running API server."
            ),
            usage="vllm complete [options]",
        )
        return CompleteCommand.add_cli_args(parser)


def cmd_init() -> list[CLISubcommand]:
    """Return the subcommand instances contributed by this module."""
    return [ChatCommand(), CompleteCommand()]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/benchmark/mm_processor.py
vllm/entrypoints/cli/benchmark/mm_processor.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse from vllm.benchmarks.mm_processor import add_cli_args, main from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase class BenchmarkMMProcessorSubcommand(BenchmarkSubcommandBase): """The `mm-processor` subcommand for `vllm bench`.""" name = "mm-processor" help = "Benchmark multimodal processor latency across different configurations." @classmethod def add_cli_args(cls, parser: argparse.ArgumentParser) -> None: add_cli_args(parser) @staticmethod def cmd(args: argparse.Namespace) -> None: main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/benchmark/latency.py
vllm/entrypoints/cli/benchmark/latency.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse from vllm.benchmarks.latency import add_cli_args, main from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase class BenchmarkLatencySubcommand(BenchmarkSubcommandBase): """The `latency` subcommand for `vllm bench`.""" name = "latency" help = "Benchmark the latency of a single batch of requests." @classmethod def add_cli_args(cls, parser: argparse.ArgumentParser) -> None: add_cli_args(parser) @staticmethod def cmd(args: argparse.Namespace) -> None: main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/benchmark/sweep.py
vllm/entrypoints/cli/benchmark/sweep.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse from vllm.benchmarks.sweep.cli import add_cli_args, main from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase class BenchmarkSweepSubcommand(BenchmarkSubcommandBase): """The `sweep` subcommand for `vllm bench`.""" name = "sweep" help = "Benchmark for a parameter sweep." @classmethod def add_cli_args(cls, parser: argparse.ArgumentParser) -> None: add_cli_args(parser) @staticmethod def cmd(args: argparse.Namespace) -> None: main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/benchmark/startup.py
vllm/entrypoints/cli/benchmark/startup.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse from vllm.benchmarks.startup import add_cli_args, main from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase class BenchmarkStartupSubcommand(BenchmarkSubcommandBase): """The `startup` subcommand for `vllm bench`.""" name = "startup" help = "Benchmark the startup time of vLLM models." @classmethod def add_cli_args(cls, parser: argparse.ArgumentParser) -> None: add_cli_args(parser) @staticmethod def cmd(args: argparse.Namespace) -> None: main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/benchmark/main.py
vllm/entrypoints/cli/benchmark/main.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse import typing from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase from vllm.entrypoints.cli.types import CLISubcommand from vllm.entrypoints.utils import VLLM_SUBCMD_PARSER_EPILOG if typing.TYPE_CHECKING: from vllm.utils.argparse_utils import FlexibleArgumentParser else: FlexibleArgumentParser = argparse.ArgumentParser class BenchmarkSubcommand(CLISubcommand): """The `bench` subcommand for the vLLM CLI.""" name = "bench" help = "vLLM bench subcommand." @staticmethod def cmd(args: argparse.Namespace) -> None: args.dispatch_function(args) def validate(self, args: argparse.Namespace) -> None: pass def subparser_init( self, subparsers: argparse._SubParsersAction ) -> FlexibleArgumentParser: bench_parser = subparsers.add_parser( self.name, help=self.help, description=self.help, usage=f"vllm {self.name} <bench_type> [options]", ) bench_subparsers = bench_parser.add_subparsers(required=True, dest="bench_type") for cmd_cls in BenchmarkSubcommandBase.__subclasses__(): cmd_subparser = bench_subparsers.add_parser( cmd_cls.name, help=cmd_cls.help, description=cmd_cls.help, usage=f"vllm {self.name} {cmd_cls.name} [options]", ) cmd_subparser.set_defaults(dispatch_function=cmd_cls.cmd) cmd_cls.add_cli_args(cmd_subparser) cmd_subparser.epilog = VLLM_SUBCMD_PARSER_EPILOG.format( subcmd=f"{self.name} {cmd_cls.name}" ) return bench_parser def cmd_init() -> list[CLISubcommand]: return [BenchmarkSubcommand()]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/benchmark/__init__.py
vllm/entrypoints/cli/benchmark/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/benchmark/serve.py
vllm/entrypoints/cli/benchmark/serve.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import argparse

from vllm.benchmarks.serve import add_cli_args, main
from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase


class BenchmarkServingSubcommand(BenchmarkSubcommandBase):
    """The `serve` subcommand for `vllm bench`."""

    name = "serve"
    help = "Benchmark the online serving throughput."

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
        # Delegate argument registration to the benchmark implementation.
        add_cli_args(parser)

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/benchmark/throughput.py
vllm/entrypoints/cli/benchmark/throughput.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import argparse

from vllm.benchmarks.throughput import add_cli_args, main
from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase


class BenchmarkThroughputSubcommand(BenchmarkSubcommandBase):
    """The `throughput` subcommand for `vllm bench`."""

    name = "throughput"
    help = "Benchmark offline inference throughput."

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
        # Delegate argument registration to the benchmark implementation.
        add_cli_args(parser)

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/cli/benchmark/base.py
vllm/entrypoints/cli/benchmark/base.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import argparse

from vllm.entrypoints.cli.types import CLISubcommand


class BenchmarkSubcommandBase(CLISubcommand):
    """The base class of subcommands for `vllm bench`."""

    # Help text shown for the subcommand; subclasses must set this.
    help: str

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
        """Add the CLI arguments to the parser."""
        raise NotImplementedError

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        """Run the benchmark.

        Args:
            args: The arguments to the command.
        """
        raise NotImplementedError
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/__init__.py
vllm/entrypoints/serve/__init__.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from fastapi import FastAPI

import vllm.envs as envs
from vllm.logger import init_logger

logger = init_logger(__name__)


def register_vllm_serve_api_routers(app: FastAPI):
    """Attach all vLLM serve API routers to *app*.

    Imports are deliberately local so each router module is only loaded
    when routers are actually registered.
    """
    if envs.VLLM_SERVER_DEV_MODE:
        logger.warning(
            "SECURITY WARNING: Development endpoints are enabled! "
            "This should NOT be used in production!"
        )

    from vllm.entrypoints.serve.lora.api_router import (
        attach_router as attach_lora_router,
    )

    attach_lora_router(app)

    from vllm.entrypoints.serve.elastic_ep.api_router import (
        attach_router as attach_elastic_ep_router,
    )

    attach_elastic_ep_router(app)

    from vllm.entrypoints.serve.profile.api_router import (
        attach_router as attach_profile_router,
    )

    attach_profile_router(app)

    from vllm.entrypoints.serve.sleep.api_router import (
        attach_router as attach_sleep_router,
    )

    attach_sleep_router(app)

    from vllm.entrypoints.serve.rpc.api_router import (
        attach_router as attach_rpc_router,
    )

    attach_rpc_router(app)

    from vllm.entrypoints.serve.cache.api_router import (
        attach_router as attach_cache_router,
    )

    attach_cache_router(app)

    from vllm.entrypoints.serve.tokenize.api_router import (
        attach_router as attach_tokenize_router,
    )

    attach_tokenize_router(app)

    from vllm.entrypoints.serve.disagg.api_router import (
        attach_router as attach_disagg_router,
    )

    attach_disagg_router(app)

    from vllm.entrypoints.serve.rlhf.api_router import (
        attach_router as attach_rlhf_router,
    )

    attach_rlhf_router(app)

    from vllm.entrypoints.serve.instrumentator.metrics import (
        attach_router as attach_metrics_router,
    )

    attach_metrics_router(app)

    from vllm.entrypoints.serve.instrumentator.health import (
        attach_router as attach_health_router,
    )

    attach_health_router(app)

    from vllm.entrypoints.serve.instrumentator.offline_docs import (
        attach_router as attach_offline_docs_router,
    )

    attach_offline_docs_router(app)

    from vllm.entrypoints.serve.instrumentator.server_info import (
        attach_router as attach_server_info_router,
    )

    attach_server_info_router(app)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/sleep/__init__.py
vllm/entrypoints/serve/sleep/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/sleep/api_router.py
vllm/entrypoints/serve/sleep/api_router.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from fastapi import APIRouter, FastAPI, Request
from fastapi.responses import JSONResponse, Response

import vllm.envs as envs
from vllm.engine.protocol import EngineClient
from vllm.logger import init_logger

logger = init_logger(__name__)


def engine_client(request: Request) -> EngineClient:
    """Fetch the engine client stashed on the app state."""
    return request.app.state.engine_client


router = APIRouter()


@router.post("/sleep")
async def sleep(raw_request: Request):
    # get POST params
    level = raw_request.query_params.get("level", "1")
    await engine_client(raw_request).sleep(int(level))
    # FIXME: in v0 with frontend multiprocessing, the sleep command
    # is sent but does not finish yet when we return a response.
    return Response(status_code=200)


@router.post("/wake_up")
async def wake_up(raw_request: Request):
    tags = raw_request.query_params.getlist("tags")
    if tags == []:
        # set to None to wake up all tags if no tags are provided
        tags = None
    logger.info("wake up the engine with tags: %s", tags)
    await engine_client(raw_request).wake_up(tags)
    # FIXME: in v0 with frontend multiprocessing, the wake-up command
    # is sent but does not finish yet when we return a response.
    return Response(status_code=200)


@router.get("/is_sleeping")
async def is_sleeping(raw_request: Request):
    logger.info("check whether the engine is sleeping")
    is_sleeping = await engine_client(raw_request).is_sleeping()
    return JSONResponse(content={"is_sleeping": is_sleeping})


def attach_router(app: FastAPI):
    # Sleep endpoints are development-only.
    if not envs.VLLM_SERVER_DEV_MODE:
        return
    app.include_router(router)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/instrumentator/metrics.py
vllm/entrypoints/serve/instrumentator/metrics.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import prometheus_client
import regex as re
from fastapi import FastAPI, Response
from prometheus_client import make_asgi_app
from prometheus_fastapi_instrumentator import Instrumentator
from starlette.routing import Mount

from vllm.v1.metrics.prometheus import get_prometheus_registry


class PrometheusResponse(Response):
    # Exposition format content type, e.g. "text/plain; version=0.0.4; ...".
    media_type = prometheus_client.CONTENT_TYPE_LATEST


def attach_router(app: FastAPI):
    """Mount prometheus metrics to a FastAPI app."""
    registry = get_prometheus_registry()

    # `response_class=PrometheusResponse` is needed to return an HTTP response
    # with header "Content-Type: text/plain; version=0.0.4; charset=utf-8"
    # instead of the default "application/json" which is incorrect.
    # See https://github.com/trallnag/prometheus-fastapi-instrumentator/issues/163#issue-1296092364
    Instrumentator(
        excluded_handlers=[
            "/metrics",
            "/health",
            "/load",
            "/ping",
            "/version",
            "/server_info",
        ],
        registry=registry,
    ).add().instrument(app).expose(app, response_class=PrometheusResponse)

    # Add prometheus asgi middleware to route /metrics requests
    metrics_route = Mount("/metrics", make_asgi_app(registry=registry))

    # Workaround for 307 Redirect for /metrics
    metrics_route.path_regex = re.compile("^/metrics(?P<path>.*)$")
    app.routes.append(metrics_route)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/instrumentator/offline_docs.py
vllm/entrypoints/serve/instrumentator/offline_docs.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Offline FastAPI documentation support for air-gapped environments."""

import pathlib

from fastapi import FastAPI
from fastapi.openapi.docs import (
    get_swagger_ui_html,
    get_swagger_ui_oauth2_redirect_html,
)
from fastapi.staticfiles import StaticFiles

from vllm.logger import init_logger

logger = init_logger(__name__)


def attach_router(app: FastAPI) -> None:
    """Attach offline docs router if enabled via args."""
    args = getattr(app.state, "args", None)
    if args is None or not getattr(args, "enable_offline_docs", False):
        return

    # Vendored swagger assets live next to this module.
    static_dir = pathlib.Path(__file__).parent / "static"
    if not static_dir.exists():
        logger.warning(
            "Static directory not found at %s. Offline docs will not be available.",
            static_dir,
        )
        return

    app.mount("/static", StaticFiles(directory=str(static_dir)), name="static")

    @app.get("/docs", include_in_schema=False)
    async def custom_swagger_ui_html():
        # Serve Swagger UI from the local static mount instead of a CDN.
        return get_swagger_ui_html(
            openapi_url=app.openapi_url,
            title=app.title + " - Swagger UI",
            oauth2_redirect_url=app.swagger_ui_oauth2_redirect_url,
            swagger_js_url="/static/swagger-ui-bundle.js",
            swagger_css_url="/static/swagger-ui.css",
        )

    @app.get(app.swagger_ui_oauth2_redirect_url, include_in_schema=False)
    async def swagger_ui_redirect():
        return get_swagger_ui_oauth2_redirect_html()

    logger.info("Offline documentation enabled with vendored static assets")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/instrumentator/health.py
vllm/entrypoints/serve/instrumentator/health.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from fastapi import APIRouter, Request
from fastapi.responses import Response

from vllm.engine.protocol import EngineClient
from vllm.logger import init_logger
from vllm.v1.engine.exceptions import EngineDeadError

logger = init_logger(__name__)

router = APIRouter()


def engine_client(request: Request) -> EngineClient:
    """Fetch the engine client stashed on the app state."""
    return request.app.state.engine_client


@router.get("/health", response_class=Response)
async def health(raw_request: Request) -> Response:
    """Health check."""
    try:
        await engine_client(raw_request).check_health()
        return Response(status_code=200)
    except EngineDeadError:
        # A dead engine maps to Service Unavailable.
        return Response(status_code=503)


def attach_router(app):
    app.include_router(router)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/instrumentator/server_info.py
vllm/entrypoints/serve/instrumentator/server_info.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from typing import Annotated, Literal

import pydantic
from fastapi import APIRouter, FastAPI, Query, Request
from fastapi.responses import JSONResponse

import vllm.envs as envs
from vllm.config import VllmConfig
from vllm.logger import init_logger

logger = init_logger(__name__)

router = APIRouter()

PydanticVllmConfig = pydantic.TypeAdapter(VllmConfig)


@router.get("/server_info")
async def show_server_info(
    raw_request: Request,
    config_format: Annotated[Literal["text", "json"], Query()] = "text",
):
    """Return the server's VllmConfig as text or JSON."""
    vllm_config: VllmConfig = raw_request.app.state.vllm_config
    server_info = {
        "vllm_config": str(vllm_config)
        if config_format == "text"
        else PydanticVllmConfig.dump_python(vllm_config, mode="json", fallback=str)
        # fallback=str is needed to handle e.g. torch.dtype
    }
    return JSONResponse(content=server_info)


def attach_router(app: FastAPI):
    # Server-info endpoint is development-only.
    if not envs.VLLM_SERVER_DEV_MODE:
        return
    app.include_router(router)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/instrumentator/__init__.py
vllm/entrypoints/serve/instrumentator/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/elastic_ep/middleware.py
vllm/entrypoints/serve/elastic_ep/middleware.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from collections.abc import Awaitable

from fastapi.responses import JSONResponse
from starlette.types import ASGIApp, Receive, Scope, Send

# Global variable to track scaling state
_scaling_elastic_ep = False


def get_scaling_elastic_ep():
    """Return whether an elastic-EP scale operation is in progress."""
    return _scaling_elastic_ep


def set_scaling_elastic_ep(value):
    """Set the global scaling flag consulted by ScalingMiddleware."""
    global _scaling_elastic_ep
    _scaling_elastic_ep = value


class ScalingMiddleware:
    """
    Middleware that checks if the model is currently scaling and
    returns a 503 Service Unavailable response if it is.

    This middleware applies to all HTTP requests and prevents
    processing when the model is in a scaling state.
    """

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]:
        # Non-HTTP traffic (e.g. lifespan, websocket) passes through untouched.
        if scope["type"] != "http":
            return self.app(scope, receive, send)

        # Check global scaling state
        if get_scaling_elastic_ep():
            # Return 503 Service Unavailable response
            response = JSONResponse(
                content={
                    "error": "The model is currently scaling. Please try again later."
                },
                status_code=503,
            )
            return response(scope, receive, send)

        return self.app(scope, receive, send)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/elastic_ep/__init__.py
vllm/entrypoints/serve/elastic_ep/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/elastic_ep/api_router.py
vllm/entrypoints/serve/elastic_ep/api_router.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import json
from http import HTTPStatus

from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse

from vllm.engine.protocol import EngineClient
from vllm.entrypoints.openai.api_server import validate_json_request
from vllm.entrypoints.openai.protocol import (
    ErrorResponse,
)
from vllm.entrypoints.serve.elastic_ep.middleware import (
    get_scaling_elastic_ep,
    set_scaling_elastic_ep,
)
from vllm.logger import init_logger

logger = init_logger(__name__)


def engine_client(request: Request) -> EngineClient:
    """Fetch the engine client stashed on the app state."""
    return request.app.state.engine_client


router = APIRouter()


@router.post(
    "/scale_elastic_ep",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"model": dict},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.REQUEST_TIMEOUT.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
async def scale_elastic_ep(raw_request: Request):
    """Scale the engine to a new data-parallel size.

    Expects a JSON body with `new_data_parallel_size` (positive int) and
    optional `drain_timeout` (seconds, default 120).
    """
    try:
        body = await raw_request.json()
    except json.JSONDecodeError as e:
        raise HTTPException(status_code=400, detail="Invalid JSON format") from e

    new_data_parallel_size = body.get("new_data_parallel_size")
    drain_timeout = body.get("drain_timeout", 120)  # Default 2 minutes

    if new_data_parallel_size is None:
        raise HTTPException(
            status_code=400, detail="new_data_parallel_size is required"
        )
    if not isinstance(new_data_parallel_size, int) or new_data_parallel_size <= 0:
        raise HTTPException(
            status_code=400,
            detail="new_data_parallel_size must be a positive integer",
        )
    if not isinstance(drain_timeout, int) or drain_timeout <= 0:
        raise HTTPException(
            status_code=400, detail="drain_timeout must be a positive integer"
        )

    # Set scaling flag to prevent new requests
    set_scaling_elastic_ep(True)

    client = engine_client(raw_request)
    try:
        await client.scale_elastic_ep(new_data_parallel_size, drain_timeout)
        return JSONResponse(
            {
                "message": f"Scaled to {new_data_parallel_size} data parallel engines",
            }
        )
    except TimeoutError as e:
        raise HTTPException(
            status_code=408,
            detail="Scale failed due to request drain timeout "
            f"after {drain_timeout} seconds",
        ) from e
    except Exception as e:
        logger.error("Scale failed: %s", e)
        raise HTTPException(status_code=500, detail="Scale failed") from e
    finally:
        # Always clear the flag so traffic resumes even on failure.
        set_scaling_elastic_ep(False)


@router.post("/is_scaling_elastic_ep")
async def is_scaling_elastic_ep(raw_request: Request):
    return JSONResponse({"is_scaling_elastic_ep": get_scaling_elastic_ep()})


def attach_router(app: FastAPI):
    app.include_router(router)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/disagg/serving.py
vllm/entrypoints/serve/disagg/serving.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import asyncio
import time
from collections.abc import AsyncGenerator
from collections.abc import Sequence as GenericSequence

from fastapi import Request

from vllm.engine.protocol import EngineClient
from vllm.entrypoints.logger import RequestLogger
from vllm.entrypoints.openai.protocol import (
    ChatCompletionLogProb,
    ChatCompletionLogProbs,
    ChatCompletionLogProbsContent,
    ErrorResponse,
    PromptTokenUsageInfo,
    RequestResponseMetadata,
    UsageInfo,
)
from vllm.entrypoints.openai.serving_engine import OpenAIServing, clamp_prompt_logprobs
from vllm.entrypoints.openai.serving_models import OpenAIServingModels
from vllm.entrypoints.serve.disagg.protocol import (
    GenerateRequest,
    GenerateResponse,
    GenerateResponseChoice,
)
from vllm.inputs.data import TokensPrompt
from vllm.logger import init_logger
from vllm.logprobs import Logprob
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.utils.collection_utils import as_list

logger = init_logger(__name__)


class ServingTokens(OpenAIServing):
    """Provides Tokens IN <> Tokens OUT functionality to vLLM API."""

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        request_logger: RequestLogger | None,
        force_no_detokenize: bool = False,
        return_tokens_as_token_ids: bool = False,
        log_error_stack: bool = False,
        enable_prompt_tokens_details: bool = False,
        enable_log_outputs: bool = False,
    ):
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            return_tokens_as_token_ids=return_tokens_as_token_ids,
            log_error_stack=log_error_stack,
        )
        self.enable_prompt_tokens_details = enable_prompt_tokens_details
        self.enable_log_outputs = enable_log_outputs
        self.force_no_detokenize = force_no_detokenize
        if force_no_detokenize:
            logger.info(
                "Tokens-only mode is enabled, skipping detokenization "
                "step for incoming requests."
            )

    async def serve_tokens(
        self,
        request: GenerateRequest,
        raw_request: Request | None = None,
    ) -> GenerateResponse | ErrorResponse:
        """Handle a token-in/token-out generation request."""
        error_check_ret = await self._check_model(request)
        if error_check_ret is not None:
            logger.error("Error with model %s", error_check_ret)
            return error_check_ret

        # If the engine is dead, raise the engine's DEAD_ERROR.
        # This is required for the streaming case, where we return a
        # success status before we actually start generating text :).
        if self.engine_client.errored:
            raise self.engine_client.dead_error

        lora_request = None
        lora_request = self._maybe_get_adapters(request, supports_default_mm_loras=True)

        model_name = self.models.model_name(lora_request)
        request_id = (
            f"generate-tokens-{self._base_request_id(raw_request, request.request_id)}"
        )

        request_metadata = RequestResponseMetadata(request_id=request_id)
        if raw_request:
            raw_request.state.request_metadata = request_metadata

        # TODO(NickLucche): Change to EngineCoreRequest once Renderer work is
        # completed
        engine_prompt = TokensPrompt(prompt_token_ids=request.token_ids)
        if request.features is not None:
            engine_prompt["multi_modal_data"] = None
        if hasattr(request, "cache_salt") and request.cache_salt is not None:
            engine_prompt["cache_salt"] = request.cache_salt

        # Schedule the request and get the result generator.
        result_generator: AsyncGenerator[RequestOutput, None] | None = None
        try:
            sampling_params = request.sampling_params
            if self.force_no_detokenize:
                sampling_params.detokenize = False

            self._log_inputs(
                request_id,
                TokensPrompt(prompt_token_ids=request.token_ids),
                params=sampling_params,
                lora_request=lora_request,
            )

            trace_headers = (
                None
                if raw_request is None
                else await self._get_trace_headers(raw_request.headers)
            )

            result_generator = self.engine_client.generate(
                engine_prompt,
                sampling_params,
                request_id,
                lora_request=lora_request,
                trace_headers=trace_headers,
                priority=request.priority,
            )
        except ValueError as e:
            return self.create_error_response(str(e))

        # TODO(NickLucche): Implement streaming response
        try:
            assert result_generator is not None
            return await self.serve_tokens_full_generator(
                request, result_generator, request_id, model_name, request_metadata
            )
        except ValueError as e:
            return self.create_error_response(str(e))

    async def serve_tokens_full_generator(
        self,
        request: GenerateRequest,
        result_generator: AsyncGenerator[RequestOutput, None],
        request_id: str,
        model_name: str,
        request_metadata: RequestResponseMetadata,
    ) -> ErrorResponse | GenerateResponse:
        """Drain the result generator and build a full (non-streaming) response."""
        created_time = int(time.time())
        final_res: RequestOutput | None = None
        sampling_params: SamplingParams = request.sampling_params

        try:
            async for res in result_generator:
                final_res = res
        except asyncio.CancelledError:
            return self.create_error_response("Client disconnected")
        except ValueError as e:
            return self.create_error_response(str(e))

        assert final_res is not None

        choices: list[GenerateResponseChoice] = []
        num_generated_tokens = 0
        for output in final_res.outputs:
            token_ids = output.token_ids
            out_logprobs = output.logprobs

            # This is top_logprobs in completions API
            if sampling_params.logprobs:
                assert out_logprobs is not None, "Did not output logprobs"
                logprobs = self._create_tokens_logprobs(
                    token_ids=token_ids,
                    top_logprobs=out_logprobs,
                    num_output_top_logprobs=sampling_params.logprobs,
                )
            else:
                logprobs = None

            choice_data = GenerateResponseChoice(
                index=output.index,
                logprobs=logprobs,
                finish_reason=output.finish_reason if output.finish_reason else "stop",
                token_ids=as_list(output.token_ids),
            )
            choices.append(choice_data)
            num_generated_tokens += len(output.token_ids)

        assert final_res.prompt_token_ids is not None
        num_prompt_tokens = len(final_res.prompt_token_ids)
        if final_res.encoder_prompt_token_ids is not None:
            num_prompt_tokens += len(final_res.encoder_prompt_token_ids)

        usage = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            completion_tokens=num_generated_tokens,
            total_tokens=num_prompt_tokens + num_generated_tokens,
        )
        if self.enable_prompt_tokens_details and final_res.num_cached_tokens:
            # This info is not available at the /coordinator level
            usage.prompt_tokens_details = PromptTokenUsageInfo(
                cached_tokens=final_res.num_cached_tokens
            )

        request_metadata.final_usage_info = usage

        response = GenerateResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=choices,
            usage=usage,
            prompt_logprobs=clamp_prompt_logprobs(final_res.prompt_logprobs),
            kv_transfer_params=final_res.kv_transfer_params,
        )

        # Log complete response if output logging is enabled
        if self.enable_log_outputs and self.request_logger:
            for choice in choices:
                # Get the corresponding output token IDs
                output_token_ids = None
                if choice.index < len(final_res.outputs):
                    output_token_ids = final_res.outputs[choice.index].token_ids

                if output_token_ids:
                    # Log token_ids only.
                    self.request_logger.log_outputs(
                        request_id=request_id,
                        outputs="",
                        output_token_ids=output_token_ids,
                        finish_reason=choice.finish_reason,
                        is_streaming=False,
                        delta=False,
                    )

        return response

    def _create_tokens_logprobs(
        self,
        token_ids: GenericSequence[int],
        top_logprobs: GenericSequence[dict[int, Logprob] | None],
        num_output_top_logprobs: int | None = None,
    ) -> ChatCompletionLogProbs:
        """Create OpenAI-style logprobs."""
        logprobs_content: list[ChatCompletionLogProbsContent] = []

        for i, token_id in enumerate(token_ids):
            token = f"token_id:{token_id}"
            step_top_logprobs = top_logprobs[i]
            if step_top_logprobs is None or step_top_logprobs.get(token_id) is None:
                logprobs_content.append(
                    ChatCompletionLogProbsContent(
                        token=token,
                    )
                )
            else:
                step_token = step_top_logprobs[token_id]
                logprobs_content.append(
                    ChatCompletionLogProbsContent(
                        token=token,
                        # Clamp to keep values JSON-representable.
                        logprob=max(step_token.logprob, -9999.0),
                        top_logprobs=[
                            ChatCompletionLogProb(
                                token=token,
                                logprob=max(p[1].logprob, -9999.0),
                            )
                            for i, p in enumerate(step_top_logprobs.items())
                            if num_output_top_logprobs and i < num_output_top_logprobs
                        ],
                    )
                )

        return ChatCompletionLogProbs(content=logprobs_content)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/disagg/__init__.py
vllm/entrypoints/serve/disagg/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/entrypoints/serve/disagg/api_router.py
vllm/entrypoints/serve/disagg/api_router.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import asyncio
import json
from http import HTTPStatus

from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request, Response
from fastapi.responses import JSONResponse, StreamingResponse

from vllm.engine.protocol import EngineClient
from vllm.entrypoints.openai.api_server import validate_json_request
from vllm.entrypoints.openai.protocol import (
    ErrorResponse,
)
from vllm.entrypoints.serve.disagg.protocol import (
    GenerateRequest,
    GenerateResponse,
)
from vllm.entrypoints.serve.disagg.serving import (
    ServingTokens,
)
from vllm.entrypoints.serve.tokenize.serving import OpenAIServingTokenization
from vllm.entrypoints.utils import (
    load_aware_call,
    with_cancellation,
)
from vllm.logger import init_logger

logger = init_logger(__name__)


def tokenization(request: Request) -> OpenAIServingTokenization:
    return request.app.state.openai_serving_tokenization


def generate_tokens(request: Request) -> ServingTokens | None:
    return request.app.state.serving_tokens


def engine_client(request: Request) -> EngineClient:
    return request.app.state.engine_client


router = APIRouter()


@router.post(
    "/inference/v1/generate",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def generate(request: GenerateRequest, raw_request: Request):
    """Token-in/token-out generation endpoint."""
    handler = generate_tokens(raw_request)
    if handler is None:
        return tokenization(raw_request).create_error_response(
            message="The model does not support generate tokens API"
        )
    try:
        generator = await handler.serve_tokens(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )
    elif isinstance(generator, GenerateResponse):
        return JSONResponse(content=generator.model_dump())

    # Fall through to a streaming response for async generators.
    return StreamingResponse(content=generator, media_type="text/event-stream")


def attach_router(app: FastAPI):
    if getattr(app.state.args, "tokens_only", False):

        @router.post("/abort_requests")
        async def abort_requests(raw_request: Request):
            """
            Abort one or more requests.
            To be used in a Disaggregated Everything setup.
            """
            try:
                body = await raw_request.json()
            except json.JSONDecodeError as e:
                raise HTTPException(
                    status_code=HTTPStatus.BAD_REQUEST.value,
                    detail=f"JSON decode error: {e}",
                ) from e
            request_ids = body.get("request_ids")
            if request_ids is None:
                raise HTTPException(
                    status_code=HTTPStatus.BAD_REQUEST.value,
                    detail="Missing 'request_ids' in request body",
                )
            # Abort requests in background
            asyncio.create_task(engine_client(raw_request).abort(request_ids))
            return Response(status_code=200)

    app.include_router(router)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false