Add files using upload-large-folder tool
Browse files- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/__pycache__/multiproc_executor.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/__pycache__/ray_distributed_executor.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/loggers.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/prometheus.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/ray_wrappers.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/reader.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/stats.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/pool/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/pool/__pycache__/metadata.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__init__.py +185 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/builtin.py +289 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/interface.py +86 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/state.py +149 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/bad_words.py +39 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/logprobs.py +26 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/penalties.py +43 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/topk_topp_sampler.py +298 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/__init__.py +0 -0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/__pycache__/multiproc_executor.cpython-312.pyc
ADDED
|
Binary file (25.5 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/executor/__pycache__/ray_distributed_executor.cpython-312.pyc
ADDED
|
Binary file (5.3 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (185 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/loggers.cpython-312.pyc
ADDED
|
Binary file (27.1 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/prometheus.cpython-312.pyc
ADDED
|
Binary file (3.49 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/ray_wrappers.cpython-312.pyc
ADDED
|
Binary file (6.39 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/reader.cpython-312.pyc
ADDED
|
Binary file (8.82 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/metrics/__pycache__/stats.cpython-312.pyc
ADDED
|
Binary file (11.3 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/pool/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (182 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/pool/__pycache__/metadata.cpython-312.pyc
ADDED
|
Binary file (1.2 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__init__.py
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
import importlib
|
| 4 |
+
import itertools
|
| 5 |
+
from collections.abc import Sequence
|
| 6 |
+
from typing import TYPE_CHECKING, Optional, Union
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
from vllm.logger import init_logger
|
| 11 |
+
from vllm.v1.sample.logits_processor.builtin import (LogitBiasLogitsProcessor,
|
| 12 |
+
MinPLogitsProcessor,
|
| 13 |
+
MinTokensLogitsProcessor)
|
| 14 |
+
from vllm.v1.sample.logits_processor.interface import (BatchUpdate,
|
| 15 |
+
LogitsProcessor,
|
| 16 |
+
MoveDirectionality)
|
| 17 |
+
from vllm.v1.sample.logits_processor.state import (BatchUpdateBuilder,
|
| 18 |
+
LogitsProcessors)
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
from vllm.config import VllmConfig
|
| 22 |
+
|
| 23 |
+
logger = init_logger(__name__)
|
| 24 |
+
|
| 25 |
+
# Error message when the user tries to initialize vLLM with a pooling model
|
| 26 |
+
# and custom logitsproces
|
| 27 |
+
STR_POOLING_REJECTS_LOGITSPROCS = ("Pooling models do not support custom"
|
| 28 |
+
" logits processors.")
|
| 29 |
+
|
| 30 |
+
LOGITSPROCS_GROUP = 'vllm.logits_processors'
|
| 31 |
+
|
| 32 |
+
BUILTIN_LOGITS_PROCESSORS: list[type[LogitsProcessor]] = [
|
| 33 |
+
MinTokensLogitsProcessor,
|
| 34 |
+
LogitBiasLogitsProcessor,
|
| 35 |
+
MinPLogitsProcessor,
|
| 36 |
+
]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _load_logitsprocs_plugins() -> list[type[LogitsProcessor]]:
|
| 40 |
+
"""Load all installed logit processor plugins"""
|
| 41 |
+
|
| 42 |
+
import sys
|
| 43 |
+
|
| 44 |
+
if sys.version_info < (3, 10):
|
| 45 |
+
from importlib_metadata import entry_points
|
| 46 |
+
else:
|
| 47 |
+
from importlib.metadata import entry_points
|
| 48 |
+
|
| 49 |
+
installed_logitsprocs_plugins = entry_points(group=LOGITSPROCS_GROUP)
|
| 50 |
+
if len(installed_logitsprocs_plugins) == 0:
|
| 51 |
+
logger.debug("No logitsprocs plugins installed (group %s).",
|
| 52 |
+
LOGITSPROCS_GROUP)
|
| 53 |
+
return []
|
| 54 |
+
|
| 55 |
+
# Load logitsprocs plugins
|
| 56 |
+
logger.debug("Loading installed logitsprocs plugins (group %s):",
|
| 57 |
+
LOGITSPROCS_GROUP)
|
| 58 |
+
classes: list[type[LogitsProcessor]] = []
|
| 59 |
+
for entrypoint in installed_logitsprocs_plugins:
|
| 60 |
+
try:
|
| 61 |
+
logger.debug("- Loading logitproc plugin entrypoint=%s target=%s",
|
| 62 |
+
entrypoint.name, entrypoint.value)
|
| 63 |
+
classes.append(entrypoint.load())
|
| 64 |
+
except Exception as e:
|
| 65 |
+
raise RuntimeError(
|
| 66 |
+
f"Failed to load LogitsProcessor plugin {entrypoint}") from e
|
| 67 |
+
return classes
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _load_logitsprocs_by_fqcns(
|
| 71 |
+
logits_processors: Optional[Sequence[Union[str, type[LogitsProcessor]]]]
|
| 72 |
+
) -> list[type[LogitsProcessor]]:
|
| 73 |
+
"""Load logit processor types, identifying them by fully-qualified class
|
| 74 |
+
names (FQCNs).
|
| 75 |
+
|
| 76 |
+
Effectively, a mixed list of logitproc types and FQCN strings is converted
|
| 77 |
+
into a list of entirely logitproc types, by loading from the FQCNs.
|
| 78 |
+
|
| 79 |
+
FQCN syntax is <module>:<type> i.e. x.y.z:CustomLogitProc
|
| 80 |
+
|
| 81 |
+
Already-loaded logitproc types must be subclasses of LogitsProcessor
|
| 82 |
+
|
| 83 |
+
Args:
|
| 84 |
+
logits_processors: Potentially mixed list of logitsprocs types and FQCN
|
| 85 |
+
strings for logitproc types
|
| 86 |
+
|
| 87 |
+
Returns:
|
| 88 |
+
List of logitproc types
|
| 89 |
+
|
| 90 |
+
"""
|
| 91 |
+
if not logits_processors:
|
| 92 |
+
return []
|
| 93 |
+
|
| 94 |
+
logger.debug(
|
| 95 |
+
"%s additional custom logits processors specified, checking whether "
|
| 96 |
+
"they need to be loaded.", len(logits_processors))
|
| 97 |
+
|
| 98 |
+
classes: list[type[LogitsProcessor]] = []
|
| 99 |
+
for ldx, logitproc in enumerate(logits_processors):
|
| 100 |
+
if isinstance(logitproc, type):
|
| 101 |
+
logger.debug(" - Already-loaded logit processor: %s",
|
| 102 |
+
logitproc.__name__)
|
| 103 |
+
if not issubclass(logitproc, LogitsProcessor):
|
| 104 |
+
raise ValueError(
|
| 105 |
+
f"{logitproc.__name__} is not a subclass of LogitsProcessor"
|
| 106 |
+
)
|
| 107 |
+
classes.append(logitproc)
|
| 108 |
+
continue
|
| 109 |
+
|
| 110 |
+
logger.debug("- Loading logits processor %s", logitproc)
|
| 111 |
+
module_path, qualname = logitproc.split(":")
|
| 112 |
+
|
| 113 |
+
try:
|
| 114 |
+
# Load module
|
| 115 |
+
module = importlib.import_module(module_path)
|
| 116 |
+
except Exception as e:
|
| 117 |
+
raise RuntimeError(
|
| 118 |
+
f"Failed to load {ldx}th LogitsProcessor plugin {logitproc}"
|
| 119 |
+
) from e
|
| 120 |
+
|
| 121 |
+
# Walk down dotted name to get logitproc class
|
| 122 |
+
obj = module
|
| 123 |
+
for attr in qualname.split("."):
|
| 124 |
+
obj = getattr(obj, attr)
|
| 125 |
+
if not isinstance(obj, type):
|
| 126 |
+
raise ValueError("Loaded logit processor must be a type.")
|
| 127 |
+
if not issubclass(obj, LogitsProcessor):
|
| 128 |
+
raise ValueError(
|
| 129 |
+
f"{obj.__name__} must be a subclass of LogitsProcessor")
|
| 130 |
+
classes.append(obj)
|
| 131 |
+
|
| 132 |
+
return classes
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _load_custom_logitsprocs(
|
| 136 |
+
logits_processors: Optional[Sequence[Union[str, type[LogitsProcessor]]]],
|
| 137 |
+
) -> list[type[LogitsProcessor]]:
|
| 138 |
+
"""Load all custom logits processors.
|
| 139 |
+
|
| 140 |
+
* First load all installed logitproc plugins
|
| 141 |
+
* Second load custom logitsprocs pass by the user at initialization time
|
| 142 |
+
|
| 143 |
+
Args:
|
| 144 |
+
logits_processors: potentially mixed list of logitproc types and
|
| 145 |
+
logitproc type fully-qualified names (FQCNs)
|
| 146 |
+
which need to be loaded
|
| 147 |
+
|
| 148 |
+
Returns:
|
| 149 |
+
A list of all loaded logitproc types
|
| 150 |
+
"""
|
| 151 |
+
from vllm.platforms import current_platform
|
| 152 |
+
if current_platform.is_tpu():
|
| 153 |
+
# No logitsprocs specified by caller
|
| 154 |
+
# TODO(andy) - vLLM V1 on TPU does not support custom logitsprocs
|
| 155 |
+
return []
|
| 156 |
+
|
| 157 |
+
return (_load_logitsprocs_plugins() +
|
| 158 |
+
_load_logitsprocs_by_fqcns(logits_processors))
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def build_logitsprocs(
|
| 162 |
+
vllm_config: "VllmConfig",
|
| 163 |
+
device: torch.device,
|
| 164 |
+
is_pin_memory: bool,
|
| 165 |
+
is_pooling_model: bool,
|
| 166 |
+
custom_logitsprocs: Sequence[Union[str, type[LogitsProcessor]]] = (),
|
| 167 |
+
) -> LogitsProcessors:
|
| 168 |
+
if is_pooling_model:
|
| 169 |
+
if custom_logitsprocs:
|
| 170 |
+
raise ValueError(STR_POOLING_REJECTS_LOGITSPROCS)
|
| 171 |
+
logger.debug("Skipping logits processor loading because pooling models"
|
| 172 |
+
" do not support logits processors.")
|
| 173 |
+
return LogitsProcessors()
|
| 174 |
+
custom_logitsprocs_classes = _load_custom_logitsprocs(custom_logitsprocs)
|
| 175 |
+
return LogitsProcessors(
|
| 176 |
+
ctor(vllm_config, device, is_pin_memory) for ctor in itertools.chain(
|
| 177 |
+
BUILTIN_LOGITS_PROCESSORS, custom_logitsprocs_classes))
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
__all__ = [
|
| 181 |
+
"LogitsProcessor", "LogitBiasLogitsProcessor", "MinPLogitsProcessor",
|
| 182 |
+
"MinTokensLogitsProcessor", "BatchUpdate", "BatchUpdateBuilder",
|
| 183 |
+
"MoveDirectionality", "LogitsProcessors", "build_logitsprocs",
|
| 184 |
+
"STR_POOLING_REJECTS_LOGITSPROCS", "LOGITSPROCS_GROUP"
|
| 185 |
+
]
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/builtin.py
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from collections.abc import Sequence
|
| 4 |
+
from typing import TYPE_CHECKING, Optional
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from vllm.v1.sample.logits_processor.interface import (BatchUpdate,
|
| 9 |
+
LogitsProcessor,
|
| 10 |
+
MoveDirectionality)
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
|
| 13 |
+
from vllm.config import VllmConfig
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class MinPLogitsProcessor(LogitsProcessor):
|
| 17 |
+
|
| 18 |
+
def __init__(self, vllm_config: "VllmConfig", device: torch.device,
|
| 19 |
+
is_pin_memory: bool):
|
| 20 |
+
max_num_reqs = vllm_config.scheduler_config.max_num_seqs
|
| 21 |
+
self.min_p_count: int = 0
|
| 22 |
+
|
| 23 |
+
self.min_p_cpu_tensor = torch.zeros((max_num_reqs, ),
|
| 24 |
+
dtype=torch.float32,
|
| 25 |
+
device="cpu",
|
| 26 |
+
pin_memory=is_pin_memory)
|
| 27 |
+
self.min_p_cpu = self.min_p_cpu_tensor.numpy()
|
| 28 |
+
|
| 29 |
+
self.use_double_tensor = torch.device(device).type != "cpu"
|
| 30 |
+
|
| 31 |
+
if self.use_double_tensor:
|
| 32 |
+
# Pre-allocated device tensor
|
| 33 |
+
self.min_p_device: torch.Tensor = torch.empty((max_num_reqs, ),
|
| 34 |
+
dtype=torch.float32,
|
| 35 |
+
device=device)
|
| 36 |
+
else:
|
| 37 |
+
self.min_p_device = self.min_p_cpu_tensor
|
| 38 |
+
# Current slice of the device tensor
|
| 39 |
+
self.min_p: torch.Tensor = self.min_p_device[:0]
|
| 40 |
+
|
| 41 |
+
def is_argmax_invariant(self) -> bool:
|
| 42 |
+
"""Min-p never impacts greedy sampling"""
|
| 43 |
+
return True
|
| 44 |
+
|
| 45 |
+
def get_min_p_by_index(self, index: int) -> float:
|
| 46 |
+
return float(self.min_p_cpu[index])
|
| 47 |
+
|
| 48 |
+
def update_state(self, batch_update: Optional[BatchUpdate]):
|
| 49 |
+
if not batch_update:
|
| 50 |
+
return
|
| 51 |
+
|
| 52 |
+
needs_update = False
|
| 53 |
+
# Process added requests.
|
| 54 |
+
for index, params, _, _ in batch_update.added:
|
| 55 |
+
min_p = params.min_p
|
| 56 |
+
if self.min_p_cpu[index] != min_p:
|
| 57 |
+
needs_update = True
|
| 58 |
+
self.min_p_cpu[index] = min_p
|
| 59 |
+
if min_p:
|
| 60 |
+
self.min_p_count += 1
|
| 61 |
+
|
| 62 |
+
if self.min_p_count:
|
| 63 |
+
# Process removed requests.
|
| 64 |
+
needs_update |= bool(batch_update.removed)
|
| 65 |
+
for index in batch_update.removed:
|
| 66 |
+
if self.min_p_cpu[index]:
|
| 67 |
+
self.min_p_count -= 1
|
| 68 |
+
|
| 69 |
+
# Process moved requests, unidirectional (a->b) and swap (a<->b)
|
| 70 |
+
for adx, bdx, direct in batch_update.moved:
|
| 71 |
+
change = (min_p_a :=
|
| 72 |
+
self.min_p_cpu[adx]) != (min_p_b :=
|
| 73 |
+
self.min_p_cpu[bdx])
|
| 74 |
+
needs_update |= change
|
| 75 |
+
if change:
|
| 76 |
+
self.min_p_cpu[bdx] = min_p_a
|
| 77 |
+
if direct == MoveDirectionality.SWAP:
|
| 78 |
+
self.min_p_cpu[adx] = min_p_b
|
| 79 |
+
|
| 80 |
+
# Update tensors if needed.
|
| 81 |
+
size = batch_update.batch_size
|
| 82 |
+
if self.min_p_count and (needs_update or self.min_p.shape[0] != size):
|
| 83 |
+
self.min_p = self.min_p_device[:size]
|
| 84 |
+
if self.use_double_tensor:
|
| 85 |
+
self.min_p.copy_(self.min_p_cpu_tensor[:size],
|
| 86 |
+
non_blocking=True)
|
| 87 |
+
self.min_p.unsqueeze_(1)
|
| 88 |
+
|
| 89 |
+
def apply(self, logits: torch.Tensor) -> torch.Tensor:
|
| 90 |
+
if not self.min_p_count:
|
| 91 |
+
return logits
|
| 92 |
+
|
| 93 |
+
# Convert logits to probability distribution
|
| 94 |
+
probability_values = torch.nn.functional.softmax(logits, dim=-1)
|
| 95 |
+
# Calculate maximum probabilities per sequence
|
| 96 |
+
max_probabilities = torch.amax(probability_values,
|
| 97 |
+
dim=-1,
|
| 98 |
+
keepdim=True)
|
| 99 |
+
# Adjust min_p
|
| 100 |
+
adjusted_min_p = max_probabilities.mul_(self.min_p)
|
| 101 |
+
# Identify valid tokens using threshold comparison
|
| 102 |
+
invalid_token_mask = probability_values < adjusted_min_p
|
| 103 |
+
# Apply mask using boolean indexing
|
| 104 |
+
logits[invalid_token_mask] = -float('inf')
|
| 105 |
+
return logits
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
class LogitBiasLogitsProcessor(LogitsProcessor):
|
| 109 |
+
|
| 110 |
+
def __init__(self, _, device: torch.device, is_pin_memory: bool):
|
| 111 |
+
self.device = device
|
| 112 |
+
self.pin_memory = is_pin_memory
|
| 113 |
+
self.biases: dict[int, dict[int, float]] = {}
|
| 114 |
+
|
| 115 |
+
self.bias_tensor: torch.Tensor = torch.tensor(())
|
| 116 |
+
self.logits_slice = (self._device_tensor([], torch.int32),
|
| 117 |
+
self._device_tensor([], torch.int32))
|
| 118 |
+
|
| 119 |
+
def is_argmax_invariant(self) -> bool:
|
| 120 |
+
"""Logit bias can rebalance token probabilities and change the
|
| 121 |
+
outcome of argmax in greedy sampling."""
|
| 122 |
+
return False
|
| 123 |
+
|
| 124 |
+
def update_state(self, batch_update: Optional[BatchUpdate]):
|
| 125 |
+
if not batch_update:
|
| 126 |
+
return
|
| 127 |
+
|
| 128 |
+
needs_update: bool = False
|
| 129 |
+
# Process added requests.
|
| 130 |
+
for index, params, _, _ in batch_update.added:
|
| 131 |
+
if lb := params.logit_bias:
|
| 132 |
+
self.biases[index] = lb
|
| 133 |
+
needs_update = True
|
| 134 |
+
else:
|
| 135 |
+
# Drop biases metadata at batch index
|
| 136 |
+
if self.biases.pop(index, None) is not None:
|
| 137 |
+
# If a new request replaces an old request which
|
| 138 |
+
# specified biases, we should update processor tensors
|
| 139 |
+
needs_update = True
|
| 140 |
+
|
| 141 |
+
if self.biases:
|
| 142 |
+
# Process removed requests.
|
| 143 |
+
for index in batch_update.removed:
|
| 144 |
+
if self.biases.pop(index, None):
|
| 145 |
+
needs_update = True
|
| 146 |
+
|
| 147 |
+
# Process moved requests, unidirectional (a->b) and swap (a<->b)
|
| 148 |
+
for a_index, b_index, direct in batch_update.moved:
|
| 149 |
+
if direct == MoveDirectionality.UNIDIRECTIONAL:
|
| 150 |
+
if (a_entry := self.biases.pop(a_index, None)) is None:
|
| 151 |
+
if self.biases.pop(b_index, None) is not None:
|
| 152 |
+
needs_update = True
|
| 153 |
+
else:
|
| 154 |
+
self.biases[b_index] = a_entry
|
| 155 |
+
needs_update = True
|
| 156 |
+
else:
|
| 157 |
+
a_entry = self.biases.pop(a_index, None)
|
| 158 |
+
if (b_entry := self.biases.pop(b_index, None)) is not None:
|
| 159 |
+
self.biases[a_index] = b_entry
|
| 160 |
+
needs_update = True
|
| 161 |
+
if a_entry is not None:
|
| 162 |
+
self.biases[b_index] = a_entry
|
| 163 |
+
needs_update = True
|
| 164 |
+
|
| 165 |
+
# Update tensors if needed.
|
| 166 |
+
if needs_update:
|
| 167 |
+
reqs, tok_ids, biases = [], [], []
|
| 168 |
+
for req, lb in self.biases.items():
|
| 169 |
+
reqs.extend([req] * len(lb))
|
| 170 |
+
tok_ids.extend(lb.keys())
|
| 171 |
+
biases.extend(lb.values())
|
| 172 |
+
|
| 173 |
+
self.bias_tensor = self._device_tensor(biases, torch.float32)
|
| 174 |
+
self.logits_slice = (self._device_tensor(reqs, torch.int32),
|
| 175 |
+
self._device_tensor(tok_ids, torch.int32))
|
| 176 |
+
|
| 177 |
+
def _device_tensor(self, data: list, dtype: torch.dtype) -> torch.Tensor:
|
| 178 |
+
return (torch.tensor(data,
|
| 179 |
+
device="cpu",
|
| 180 |
+
dtype=dtype,
|
| 181 |
+
pin_memory=self.pin_memory).to(device=self.device,
|
| 182 |
+
non_blocking=True))
|
| 183 |
+
|
| 184 |
+
def apply(self, logits: torch.Tensor) -> torch.Tensor:
|
| 185 |
+
if self.biases:
|
| 186 |
+
logits[self.logits_slice] += self.bias_tensor
|
| 187 |
+
return logits
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class MinTokensLogitsProcessor(LogitsProcessor):
|
| 191 |
+
|
| 192 |
+
def __init__(self, vllm_config: "VllmConfig", device: torch.device,
|
| 193 |
+
is_pin_memory: bool):
|
| 194 |
+
# index -> (min_toks, output_token_ids, stop_token_ids)
|
| 195 |
+
self.device = device
|
| 196 |
+
self.pin_memory = is_pin_memory
|
| 197 |
+
self.min_toks: dict[int, tuple[int, Sequence[int], set[int]]] = {}
|
| 198 |
+
|
| 199 |
+
# (req_idx_tensor,eos_tok_id_tensor)
|
| 200 |
+
self.logits_slice: tuple[torch.Tensor,
|
| 201 |
+
torch.Tensor] = (self._device_tensor(
|
| 202 |
+
[], torch.int32),
|
| 203 |
+
self._device_tensor(
|
| 204 |
+
[], torch.int32))
|
| 205 |
+
|
| 206 |
+
def is_argmax_invariant(self) -> bool:
|
| 207 |
+
"""By censoring stop tokens, min-tokens can change the outcome
|
| 208 |
+
of the argmax operation in greedy sampling."""
|
| 209 |
+
return False
|
| 210 |
+
|
| 211 |
+
def update_state(self, batch_update: Optional[BatchUpdate]):
|
| 212 |
+
needs_update = False
|
| 213 |
+
|
| 214 |
+
if batch_update:
|
| 215 |
+
# Process added requests.
|
| 216 |
+
for index, params, _, output_tok_ids in batch_update.added:
|
| 217 |
+
if ((min_tokens := params.min_tokens)
|
| 218 |
+
and len(output_tok_ids) < min_tokens):
|
| 219 |
+
# Replace request metadata at batch index
|
| 220 |
+
self.min_toks[index] = (min_tokens, output_tok_ids,
|
| 221 |
+
params.all_stop_token_ids)
|
| 222 |
+
needs_update = True
|
| 223 |
+
else:
|
| 224 |
+
# Drop min_toks metadata at batch index
|
| 225 |
+
if self.min_toks.pop(index, None) is not None:
|
| 226 |
+
# If a new request replaces an old request which
|
| 227 |
+
# specified min_toks, we should update processor tensors
|
| 228 |
+
needs_update = True
|
| 229 |
+
|
| 230 |
+
if self.min_toks:
|
| 231 |
+
# Process removed requests.
|
| 232 |
+
for index in batch_update.removed:
|
| 233 |
+
if self.min_toks.pop(index, None):
|
| 234 |
+
needs_update = True
|
| 235 |
+
|
| 236 |
+
# Process moved requests, unidirectional (a->b) and
|
| 237 |
+
# swapped (a<->b)
|
| 238 |
+
for a_index, b_index, direct in batch_update.moved:
|
| 239 |
+
if direct == MoveDirectionality.UNIDIRECTIONAL:
|
| 240 |
+
if (a_entry := self.min_toks.pop(a_index,
|
| 241 |
+
None)) is None:
|
| 242 |
+
if self.min_toks.pop(b_index, None) is not None:
|
| 243 |
+
needs_update = True
|
| 244 |
+
else:
|
| 245 |
+
self.min_toks[b_index] = a_entry
|
| 246 |
+
needs_update = True
|
| 247 |
+
else:
|
| 248 |
+
a_entry = self.min_toks.pop(a_index, None)
|
| 249 |
+
if (b_entry := self.min_toks.pop(b_index,
|
| 250 |
+
None)) is not None:
|
| 251 |
+
self.min_toks[a_index] = b_entry
|
| 252 |
+
needs_update = True
|
| 253 |
+
if a_entry is not None:
|
| 254 |
+
self.min_toks[b_index] = a_entry
|
| 255 |
+
needs_update = True
|
| 256 |
+
|
| 257 |
+
if self.min_toks:
|
| 258 |
+
# Check for any requests that have attained their min tokens.
|
| 259 |
+
to_remove = tuple(index for index, (min_toks, out_tok_ids,
|
| 260 |
+
_) in self.min_toks.items()
|
| 261 |
+
if len(out_tok_ids) >= min_toks)
|
| 262 |
+
if to_remove:
|
| 263 |
+
needs_update = True
|
| 264 |
+
for index in to_remove:
|
| 265 |
+
del self.min_toks[index]
|
| 266 |
+
|
| 267 |
+
# Update tensors if needed.
|
| 268 |
+
if needs_update:
|
| 269 |
+
reqs: list[int] = []
|
| 270 |
+
tok_ids: list[int] = []
|
| 271 |
+
for req, (_, _, stop_tok_ids) in self.min_toks.items():
|
| 272 |
+
reqs.extend([req] * len(stop_tok_ids))
|
| 273 |
+
tok_ids.extend(stop_tok_ids)
|
| 274 |
+
|
| 275 |
+
self.logits_slice = (self._device_tensor(reqs, torch.int32),
|
| 276 |
+
self._device_tensor(tok_ids, torch.int32))
|
| 277 |
+
|
| 278 |
+
def _device_tensor(self, data: list, dtype: torch.dtype) -> torch.Tensor:
|
| 279 |
+
return (torch.tensor(data,
|
| 280 |
+
device="cpu",
|
| 281 |
+
dtype=dtype,
|
| 282 |
+
pin_memory=self.pin_memory).to(device=self.device,
|
| 283 |
+
non_blocking=True))
|
| 284 |
+
|
| 285 |
+
def apply(self, logits: torch.Tensor) -> torch.Tensor:
|
| 286 |
+
if self.min_toks:
|
| 287 |
+
# Inhibit EOS token for requests which have not reached min length
|
| 288 |
+
logits[self.logits_slice] = -float("inf")
|
| 289 |
+
return logits
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/interface.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from abc import ABC, abstractmethod
|
| 4 |
+
from collections.abc import Sequence
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from enum import Enum, auto
|
| 7 |
+
from typing import TYPE_CHECKING, Optional
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from vllm import SamplingParams
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from vllm.config import VllmConfig
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class MoveDirectionality(Enum):
|
| 18 |
+
# One-way i1->i2 req move within batch
|
| 19 |
+
UNIDIRECTIONAL = auto()
|
| 20 |
+
# Two-way i1<->i2 req swap within batch
|
| 21 |
+
SWAP = auto()
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# (index, params, prompt_tok_ids, output_tok_ids) tuples for new
|
| 25 |
+
# requests added to the batch.
|
| 26 |
+
AddedRequest = tuple[int, SamplingParams, list[int], list[int]]
|
| 27 |
+
|
| 28 |
+
# (index 1, index 2, directionality) tuples representing
|
| 29 |
+
# one-way moves or two-way swaps of requests in batch
|
| 30 |
+
MovedRequest = tuple[int, int, MoveDirectionality]
|
| 31 |
+
|
| 32 |
+
# Batch indices of any removed requests.
|
| 33 |
+
RemovedRequest = int
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass(frozen=True)
|
| 37 |
+
class BatchUpdate:
|
| 38 |
+
"""Persistent batch state change info for logitsprocs"""
|
| 39 |
+
batch_size: int # Current num reqs in batch
|
| 40 |
+
|
| 41 |
+
# Metadata for requests added to, removed from, and moved
|
| 42 |
+
# within the persistent batch.
|
| 43 |
+
#
|
| 44 |
+
# Key assumption: the `output_tok_ids` list (which is an element of each
|
| 45 |
+
# tuple in `added`) is a reference to the request's running output tokens
|
| 46 |
+
# list; via this reference, the logits processors always see the latest
|
| 47 |
+
# list of generated output tokens
|
| 48 |
+
removed: Sequence[RemovedRequest]
|
| 49 |
+
moved: Sequence[MovedRequest]
|
| 50 |
+
added: Sequence[AddedRequest]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class LogitsProcessor(ABC):
|
| 54 |
+
|
| 55 |
+
@abstractmethod
|
| 56 |
+
def __init__(self, vllm_config: "VllmConfig", device: torch.device,
|
| 57 |
+
is_pin_memory: bool) -> None:
|
| 58 |
+
raise NotImplementedError
|
| 59 |
+
|
| 60 |
+
@abstractmethod
|
| 61 |
+
def apply(self, logits: torch.Tensor) -> torch.Tensor:
|
| 62 |
+
raise NotImplementedError
|
| 63 |
+
|
| 64 |
+
@abstractmethod
|
| 65 |
+
def is_argmax_invariant(self) -> bool:
|
| 66 |
+
"""True if logits processor has no impact on the
|
| 67 |
+
argmax computation in greedy sampling.
|
| 68 |
+
NOTE: may or may not have the same value for all
|
| 69 |
+
instances of a given LogitsProcessor subclass,
|
| 70 |
+
depending on subclass implementation.
|
| 71 |
+
"""
|
| 72 |
+
raise NotImplementedError
|
| 73 |
+
|
| 74 |
+
@abstractmethod
|
| 75 |
+
def update_state(
|
| 76 |
+
self,
|
| 77 |
+
batch_update: Optional["BatchUpdate"],
|
| 78 |
+
) -> None:
|
| 79 |
+
"""Called when there are new output tokens, prior
|
| 80 |
+
to each forward pass.
|
| 81 |
+
|
| 82 |
+
Args:
|
| 83 |
+
batch_update is non-None iff there have been
|
| 84 |
+
changes to the batch makeup.
|
| 85 |
+
"""
|
| 86 |
+
raise NotImplementedError
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/state.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from collections.abc import Iterator
|
| 4 |
+
from itertools import chain
|
| 5 |
+
from typing import TYPE_CHECKING, Optional
|
| 6 |
+
|
| 7 |
+
from vllm.v1.sample.logits_processor.interface import (AddedRequest,
|
| 8 |
+
BatchUpdate,
|
| 9 |
+
MovedRequest,
|
| 10 |
+
RemovedRequest)
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
|
| 13 |
+
from vllm.v1.sample.logits_processor.interface import LogitsProcessor
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class BatchUpdateBuilder:
    """Helps track persistent batch state changes and build
    a batch update data structure for logitsprocs

    Assumptions:
    * All information about requests removed from persistent batch
      during a step is aggregated in self._removed through calls to
      self.removed_append() at the beginning of a step. This must happen
      before the first time that self.removed, self.pop_removed()
      or self.peek_removed() are invoked in a given step
    * After the first time that self.removed, self.pop_removed()
      or self.peek_removed() are read in a step, no new removals
      are registered using self.removed_append()
    * Elements of self._removed are never directly modified, added or
      removed (i.e. modification is only via self.removed_append() and
      self.pop_removed())

    Guarantees under above assumptions:
    * self.removed is always sorted in descending order
    * self.pop_removed() and self.peek_removed() both return
      the lowest removed request index in the current step
    """

    # Removed-request indices registered this step (lazily sorted).
    _removed: list[RemovedRequest]
    # Guard flag: once True, the sorted view has been observed and no
    # further removals may be registered this step.
    _is_removed_sorted: bool
    # Requests moved/swapped between batch slots this step.
    moved: list[MovedRequest]
    # Requests added to the batch this step.
    added: list[AddedRequest]

    def __init__(
        self,
        removed: Optional[list[RemovedRequest]] = None,
        moved: Optional[list[MovedRequest]] = None,
        added: Optional[list[AddedRequest]] = None,
    ) -> None:
        self._removed = removed or []
        self.moved = moved or []
        self.added = added or []
        self._is_removed_sorted = False

    def _ensure_removed_sorted(self) -> None:
        """Sort removed request indices in
        descending order.

        Idempotent after first call in a
        given step, until reset.
        """
        if not self._is_removed_sorted:
            # Descending order means the lowest index sits at the list
            # tail, so peek/pop are O(1).
            self._removed.sort(reverse=True)
            self._is_removed_sorted = True

    @property
    def removed(self) -> list[RemovedRequest]:
        """Removed request indices sorted in
        descending order"""
        self._ensure_removed_sorted()
        return self._removed

    def removed_append(self, index: int) -> None:
        """Register the removal of a request from the persistent batch.

        Must not be called after the first time self.removed,
        self.pop_removed() or self.peek_removed() are invoked.

        Args:
            index: request index
        """
        if self._is_removed_sorted:
            raise RuntimeError("Cannot register new removed request after"
                               " self.removed has been read.")
        self._removed.append(index)

    def has_removed(self) -> bool:
        # True iff any removals were registered this step.
        return bool(self._removed)

    def peek_removed(self) -> Optional[int]:
        """Return lowest removed request index"""
        if self.has_removed():
            self._ensure_removed_sorted()
            # Tail of the descending-sorted list == lowest index.
            return self._removed[-1]
        return None

    def pop_removed(self) -> Optional[int]:
        """Pop lowest removed request index"""
        if self.has_removed():
            self._ensure_removed_sorted()
            return self._removed.pop()
        return None

    def _is_update(self) -> bool:
        """True if there is a batch state change"""
        return any((self._removed, self.moved, self.added))

    def get_and_reset(self, batch_size: int) -> Optional[BatchUpdate]:
        """Generate a logitsprocs batch update data structure and reset
        internal batch update builder state.

        Args:
            batch_size: current persistent batch size

        Returns:
            Frozen logitsprocs batch update instance; `None` if no updates
        """
        # Reset removal-sorting logic
        self._is_removed_sorted = False
        if not self._is_update():
            # No update; short-circuit
            return None
        # Build batch state update
        batch_update = BatchUpdate(
            batch_size=batch_size,
            removed=self._removed,
            moved=self.moved,
            added=self.added,
        )
        # Hand ownership of the lists to the BatchUpdate; start fresh
        # rather than clearing in place.
        self._removed = []
        self.moved = []
        self.added = []
        return batch_update
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class LogitsProcessors:
    """Container for initialized logits processors.

    Processors are bucketed by whether they report themselves as
    argmax-invariant (i.e. they cannot change which token has the
    highest logit) via ``is_argmax_invariant()``.
    """

    def __init__(
            self,
            logitsprocs: Optional[Iterator["LogitsProcessor"]] = None) -> None:
        self.argmax_invariant: list["LogitsProcessor"] = []
        self.non_argmax_invariant: list["LogitsProcessor"] = []
        for proc in (logitsprocs or ()):
            bucket = (self.argmax_invariant
                      if proc.is_argmax_invariant() else
                      self.non_argmax_invariant)
            bucket.append(proc)

    @property
    def all(self) -> Iterator["LogitsProcessor"]:
        """Iterator over every logits processor, both buckets."""
        return chain(self.argmax_invariant, self.non_argmax_invariant)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/bad_words.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
# Sentinel logit used to forbid a token: -inf maps to zero probability
# after softmax.
_SMALLEST_LOGIT = float("-inf")
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _apply_bad_words_single_batch(
|
| 10 |
+
logits: torch.Tensor,
|
| 11 |
+
bad_words_token_ids: list[list[int]],
|
| 12 |
+
past_tokens_ids: list[int],
|
| 13 |
+
) -> None:
|
| 14 |
+
for bad_word_ids in bad_words_token_ids:
|
| 15 |
+
if len(bad_word_ids) > len(past_tokens_ids) + 1:
|
| 16 |
+
continue
|
| 17 |
+
|
| 18 |
+
prefix_length = len(bad_word_ids) - 1
|
| 19 |
+
last_token_id = bad_word_ids[-1]
|
| 20 |
+
if prefix_length > 0:
|
| 21 |
+
actual_prefix = past_tokens_ids[-prefix_length:]
|
| 22 |
+
else:
|
| 23 |
+
actual_prefix = []
|
| 24 |
+
expected_prefix = bad_word_ids[:prefix_length]
|
| 25 |
+
|
| 26 |
+
assert len(actual_prefix) == len(expected_prefix)
|
| 27 |
+
|
| 28 |
+
if actual_prefix == expected_prefix:
|
| 29 |
+
logits[last_token_id] = _SMALLEST_LOGIT
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def apply_bad_words(
    logits: torch.Tensor,
    bad_words_token_ids: dict[int, list[list[int]]],
    past_tokens_ids: list[list[int]],
) -> None:
    """Suppress bad-word completions for every request that has them.

    For each request (keyed by batch row index), any bad-word sequence
    whose prefix matches the tail of that request's generated tokens has
    its final token's logit set to -inf. ``logits`` is modified in place.
    """
    for row_idx, sequences in bad_words_token_ids.items():
        row = logits[row_idx]
        history = past_tokens_ids[row_idx]
        for seq in sequences:
            # Too long to complete on this step.
            if len(seq) > len(history) + 1:
                continue
            prefix = seq[:-1]
            if not prefix or history[-len(prefix):] == prefix:
                row[seq[-1]] = float("-inf")
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/logprobs.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
"""Some utilities for logprobs, including logits."""
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from vllm.platforms import current_platform
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@torch.compile(dynamic=True, backend=current_platform.simple_compile_backend)
def batched_count_greater_than(x: torch.Tensor,
                               values: torch.Tensor) -> torch.Tensor:
    """
    Counts, per row of x, the elements that are greater than OR EQUAL to
    the corresponding value in values. NOTE: despite the name, the
    comparison is `>=`, so ties are counted (useful for 1-based ranks).
    Use torch.compile to generate an optimized kernel for
    this function. otherwise, it will create additional copies of the input
    tensors and cause memory issues.

    Args:
        x (torch.Tensor): A 2D tensor of shape (batch_size, n_elements).
        values (torch.Tensor): A 2D tensor of shape (batch_size, 1).

    Returns:
        torch.Tensor: A 1D tensor of shape (batch_size,) with the counts.
    """
    return (x >= values).sum(-1)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/penalties.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from vllm.model_executor.layers.utils import apply_penalties
|
| 7 |
+
from vllm.utils import is_pin_memory_available, make_tensor_with_pad
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def apply_all_penalties(
    logits: torch.Tensor,
    prompt_token_ids: torch.Tensor,
    presence_penalties: torch.Tensor,
    frequency_penalties: torch.Tensor,
    repetition_penalties: torch.Tensor,
    output_token_ids: list[list[int]],
) -> torch.Tensor:
    """Apply presence, frequency and repetition penalties to the logits.

    The ragged per-request output-token lists are first padded into one
    tensor on the logits device; the actual penalty math is delegated to
    the shared ``apply_penalties`` helper.
    """
    _, vocab_size = logits.shape
    padded_output_tokens = _convert_to_tensors(output_token_ids, vocab_size,
                                               logits.device)
    return apply_penalties(
        logits,
        prompt_token_ids,
        padded_output_tokens,
        presence_penalties,
        frequency_penalties,
        repetition_penalties,
    )
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _convert_to_tensors(output_token_ids: list[list[int]], vocab_size: int,
                        device: torch.device) -> torch.Tensor:
    """Pad the ragged per-request token-id lists into one int64 tensor.

    ``vocab_size`` is used as the pad value because no real token can
    have that id, so padded slots never collide with actual tokens.
    """
    pinned = is_pin_memory_available()
    cpu_tensor = make_tensor_with_pad(
        output_token_ids,
        pad=vocab_size,
        device="cpu",
        dtype=torch.int64,
        pin_memory=pinned,
    )
    # Async host-to-device copy; pinned memory (when available) makes the
    # non_blocking transfer safe and fast.
    return cpu_tensor.to(device, non_blocking=True)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/topk_topp_sampler.py
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
from packaging import version
|
| 9 |
+
|
| 10 |
+
from vllm import envs
|
| 11 |
+
from vllm.logger import init_logger
|
| 12 |
+
from vllm.platforms import current_platform
|
| 13 |
+
|
| 14 |
+
logger = init_logger(__name__)
|
| 15 |
+
|
| 16 |
+
# FlashInfer is an optional dependency: probe for it once at import time
# so the sampler can fall back to the PyTorch-native path when missing.
try:
    import flashinfer.sampling
    is_flashinfer_available = True
except ImportError:
    is_flashinfer_available = False
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class TopKTopPSampler(nn.Module):
    """
    Module that performs optional top-k and top-p filtering followed by
    weighted random sampling of logits.

    Implementations may update the logits tensor in-place.

    The best available implementation for the current platform is chosen
    once in __init__ by rebinding the instance's `forward` attribute.
    """

    def __init__(self):
        super().__init__()
        if current_platform.is_cuda():
            if is_flashinfer_available:
                flashinfer_version = flashinfer.__version__
                # Older FlashInfer builds lack the sampling API used below.
                if version.parse(flashinfer_version) < version.parse("0.2.3"):
                    logger.warning_once(
                        "FlashInfer version >= 0.2.3 required. "
                        "Falling back to default sampling implementation.")
                    self.forward = self.forward_native
                elif envs.VLLM_USE_FLASHINFER_SAMPLER is not False:
                    # NOTE(woosuk): The V0 sampler doesn't use FlashInfer for
                    # sampling unless VLLM_USE_FLASHINFER_SAMPLER=1 (i.e., by
                    # default it is unused). For backward compatibility, we set
                    # `VLLM_USE_FLASHINFER_SAMPLER` as None by default and
                    # interpret it differently in V0 and V1 samplers: In V0,
                    # None means False, while in V1, None means True. This is
                    # why we use the condition
                    # `envs.VLLM_USE_FLASHINFER_SAMPLER is not False` here.
                    logger.info_once(
                        "Using FlashInfer for top-p & top-k sampling.")
                    self.forward = self.forward_cuda
                else:
                    logger.warning_once(
                        "FlashInfer is available, but it is not enabled. "
                        "Falling back to the PyTorch-native implementation of "
                        "top-p & top-k sampling. For the best performance, "
                        "please set VLLM_USE_FLASHINFER_SAMPLER=1.")
                    self.forward = self.forward_native
            else:
                logger.warning_once(
                    "FlashInfer is not available. Falling back to the PyTorch-"
                    "native implementation of top-p & top-k sampling. For the "
                    "best performance, please install FlashInfer.")
                self.forward = self.forward_native
        elif current_platform.is_tpu():
            self.forward = self.forward_tpu
        else:
            self.forward = self.forward_native

    def forward_native(
        self,
        logits: torch.Tensor,
        generators: dict[int, torch.Generator],
        k: Optional[torch.Tensor],
        p: Optional[torch.Tensor],
    ) -> torch.Tensor:
        """
        PyTorch-native implementation of top-k and top-p sampling.

        The logits tensor may be updated in-place.

        Args:
            logits: (num_reqs, vocab) logits to sample from.
            generators: per-request seeded RNGs, keyed by row index.
            k: per-request top-k values, or None to disable top-k.
            p: per-request top-p values, or None to disable top-p.
        """
        logits = apply_top_k_top_p(logits, k, p)
        probs = logits.softmax(dim=-1, dtype=torch.float32)
        return random_sample(probs, generators)

    def forward_cuda(
        self,
        logits: torch.Tensor,
        generators: dict[int, torch.Generator],
        k: Optional[torch.Tensor],
        p: Optional[torch.Tensor],
    ) -> torch.Tensor:
        """More optimized implementation for top-k and top-p sampling."""
        if k is None and p is None:
            # We prefer `random_sample` over `flashinfer_sample` when sorting is
            # not needed. This is because `random_sample` does not require
            # CPU-GPU synchronization while `flashinfer_sample` does.
            probs = logits.softmax(dim=-1, dtype=torch.float32)
            return random_sample(probs, generators)
        if generators:
            logger.warning_once("FlashInfer 0.2.3+ does not support "
                                "per-request generators. Falling back to "
                                "PyTorch-native implementation.")
            return self.forward_native(logits, generators, k, p)
        # flashinfer sampling functions expect contiguous logits.
        # In flex_attn/triton_attn fp32 inference, logits can be non-contiguous
        # because of slicing operation in logits_processor.
        return flashinfer_sample(logits.contiguous(), k, p, generators)

    def forward_tpu(
        self,
        logits: torch.Tensor,
        generators: dict[int, torch.Generator],
        k: Optional[torch.Tensor],
        p: Optional[torch.Tensor],
    ) -> torch.Tensor:
        # TPU path: scatter-free top-k/top-p masking, then sampling.
        logits = apply_top_k_top_p_tpu(logits, k, p)
        probs = logits.softmax(dim=-1, dtype=torch.float32)
        return random_sample(probs, generators)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def apply_top_k_top_p_tpu(
    logits: torch.Tensor,
    k: torch.Tensor,
    p: torch.Tensor,
) -> torch.Tensor:
    """
    Apply top-k and top-p filtering, optimized for TPU (in place).

    torch.scatter is extremely slow on TPU, so instead of masking in the
    sorted domain and scattering back, this finds a per-row "cut-off"
    probability and thresholds the original (unsorted) probabilities
    against it; the surviving elements form the top-k / top-p set.

    Note: ties at the cut-off are not broken — every tied element is
    kept, and tied tokens then compete with equal probability during the
    final sampling step.
    """
    probs = logits.softmax(dim=-1)
    ascending_probs, _ = probs.sort(dim=-1, descending=False)

    if k is not None:
        # Position of each row's k-th largest prob in the ascending sort.
        cutoff_idx = ascending_probs.size(1) - k.to(torch.long)  # (batch,)
        cutoff_idx = cutoff_idx.unsqueeze(dim=1)
        k_cutoff = ascending_probs.gather(-1, cutoff_idx)

        # Rows whose k equals the vocab size must pass through unchanged.
        keep_all = (k == logits.shape[1]).unsqueeze(dim=1)
        k_cutoff.masked_fill_(keep_all, -float("inf"))

        logits.masked_fill_(probs < k_cutoff, -float("inf"))

    if p is not None:
        cumulative = torch.cumsum(ascending_probs, dim=-1)
        below_nucleus = cumulative <= 1 - p.unsqueeze(dim=1)
        below_nucleus[:, -1] = False  # always keep at least one token

        drop_count = below_nucleus.sum(dim=-1).unsqueeze(1)
        p_cutoff = ascending_probs.gather(-1, drop_count)
        logits.masked_fill_(probs < p_cutoff, -float("inf"))

    return logits
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def apply_top_k_top_p(
    logits: torch.Tensor,
    k: Optional[torch.Tensor],
    p: Optional[torch.Tensor],
) -> torch.Tensor:
    """Apply top-k and top-p masks to the logits.

    If a top-p is used, this function must sort the logits tensor, which
    can be slow for large batches; the top-k-only path avoids the sort.

    The logits tensor may be updated in-place.
    """
    if p is None:
        # No nucleus filtering: either nothing to do, or the sort-free
        # top-k path suffices.
        return logits if k is None else apply_top_k_only(logits, k)

    sorted_logits, sort_indices = logits.sort(dim=-1, descending=False)

    if k is not None:
        # Value of each row's k-th largest logit, looked up by its
        # position in the ascending sort.
        kth_pos = sorted_logits.size(1) - k.to(torch.long)  # shape: B
        kth_value = sorted_logits.gather(1, kth_pos.unsqueeze(dim=1))
        sorted_logits.masked_fill_(sorted_logits < kth_value, -float("inf"))

    # Top-p: drop the low-probability head of the ascending distribution.
    cumulative_probs = sorted_logits.softmax(dim=-1).cumsum_(dim=-1)
    below_nucleus = cumulative_probs <= 1 - p.unsqueeze(dim=1)
    below_nucleus[:, -1] = False  # always keep at least one token
    sorted_logits.masked_fill_(below_nucleus, -float("inf"))

    # Undo the sort: write each masked value back to its original column.
    return sorted_logits.scatter(dim=-1, index=sort_indices, src=sorted_logits)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def apply_top_k_only(
    logits: torch.Tensor,
    k: torch.Tensor,
) -> torch.Tensor:
    """
    Apply a per-row top-k mask without sorting the entire vocab.

    Only the largest max(k) logits per row are extracted via topk; each
    row then keeps every logit >= its own k-th largest value. Rows whose
    k equals the vocab size pass through untouched.

    The logits tensor may be updated in-place; the caller's ``k`` tensor
    is never modified.
    """
    vocab_size = logits.shape[1]
    keep_all_rows = k == vocab_size
    # Give "disabled" rows a harmless k of 1 so the gather below stays in
    # bounds (masked_fill returns a copy, leaving the caller's k intact).
    effective_k = k.masked_fill(keep_all_rows, 1)
    largest_k = effective_k.max()
    # topk.values has shape [batch_size, largest_k]; convert each row's k
    # to a 0-based column index into that slice.
    kth_col = effective_k.sub_(1).unsqueeze(1)
    thresholds = logits.topk(largest_k, dim=1).values.gather(1, kth_col.long())
    # Disabled rows get a -inf threshold, so nothing in them is masked.
    thresholds.masked_fill_(keep_all_rows.unsqueeze(1), -float("inf"))
    logits.masked_fill_(logits < thresholds, -float("inf"))
    return logits
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def random_sample(
    probs: torch.Tensor,
    generators: dict[int, torch.Generator],
) -> torch.Tensor:
    """Randomly sample a token id per row of ``probs``.

    Used instead of torch.multinomial because torch.multinomial causes
    CPU-GPU synchronization: dividing the probabilities by i.i.d.
    exponential noise and taking the argmax draws from the same
    categorical distribution. ``probs`` is modified in place.

    Args:
        probs: (num_reqs, vocab) probabilities.
        generators: per-request seeded RNGs keyed by row index; rows
            without an entry use the global RNG.
    """
    noise = torch.empty_like(probs)
    # Common case: no per-request seeds. Fill everything from the global
    # RNG in one shot, then overwrite only the seeded rows; skip the bulk
    # fill entirely when every row has its own generator.
    if len(generators) != probs.shape[0]:
        noise.exponential_()
    if generators:
        # TODO: the per-row loop is slow; batch it if it ever shows up
        # in profiles.
        for row, gen in generators.items():
            noise[row].exponential_(generator=gen)
    return probs.div_(noise).argmax(dim=-1).view(-1)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def flashinfer_sample(
    logits: torch.Tensor,
    k: Optional[torch.Tensor],
    p: Optional[torch.Tensor],
    generators: dict[int, torch.Generator],
) -> torch.Tensor:
    """Sample from the logits using FlashInfer.

    Statistically, this function is equivalent to the `random_sample` function.
    However, this function is faster because it avoids sorting the logits tensor
    via rejection sampling.

    NOTE: The outputs of this function do not necessarily match the outputs of
    the `random_sample` function. It only guarantees that the outputs are
    statistically equivalent.

    NOTE: This function includes CPU-GPU synchronization, while `random_sample`
    does not. Call this function at the end of the forward pass to minimize
    the synchronization overhead.

    NOTE: `generators` is accepted for signature compatibility but unused
    here; the caller falls back to the native path when per-request
    generators are present.
    """
    # At least one of top-k / top-p must be active on this path.
    assert not (k is None and p is None)
    if k is None:
        # Top-p only.
        probs = logits.softmax(dim=-1, dtype=torch.float32)
        next_token_ids = flashinfer.sampling.top_p_sampling_from_probs(
            probs, p, deterministic=True)
    elif p is None:
        # Top-k only.
        probs = logits.softmax(dim=-1, dtype=torch.float32)
        next_token_ids = flashinfer.sampling.top_k_sampling_from_probs(
            probs, k, deterministic=True)
    else:
        # Both top-k and top-p.
        next_token_ids = flashinfer.sampling.top_k_top_p_sampling_from_logits(
            logits, k, p, deterministic=True)

    return next_token_ids.view(-1)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/__init__.py
ADDED
|
File without changes
|