Add files using upload-large-folder tool
Browse files- .gitattributes +1 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/__init__.cpython-312.pyc +3 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/deep_gemm.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/flashinfer.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/jsontree.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/tensor_schema.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/block_pool.py +313 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/encoder_cache_manager.py +254 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/kv_cache_coordinator.py +393 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/kv_cache_manager.py +364 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/kv_cache_utils.py +1154 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/single_type_kv_cache_manager.py +567 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/__init__.py +201 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/async_llm.py +688 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/coordinator.py +357 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/core.py +1216 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/core_client.py +1344 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/detokenizer.py +297 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/exceptions.py +17 -0
.gitattributes
CHANGED
|
@@ -4617,3 +4617,4 @@ tool_server/.venv/lib/python3.12/site-packages/torch/_dynamo/__pycache__/trace_r
|
|
| 4617 |
tool_server/.venv/lib/python3.12/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
|
| 4618 |
tool_server/.venv/lib/python3.12/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
|
| 4619 |
tool_server/.venv/lib/python3.12/site-packages/vllm/third_party/__pycache__/pynvml.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 4617 |
tool_server/.venv/lib/python3.12/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
|
| 4618 |
tool_server/.venv/lib/python3.12/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
|
| 4619 |
tool_server/.venv/lib/python3.12/site-packages/vllm/third_party/__pycache__/pynvml.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
|
| 4620 |
+
tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/__init__.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
|
tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/__init__.cpython-312.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:26ac70c0e77847f727e23f716716d4f44c94f8630c94b1592357d8cb6c849696
|
| 3 |
+
size 152048
|
tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/deep_gemm.cpython-312.pyc
ADDED
|
Binary file (8.89 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/flashinfer.cpython-312.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/jsontree.cpython-312.pyc
ADDED
|
Binary file (3.29 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/utils/__pycache__/tensor_schema.cpython-312.pyc
ADDED
|
Binary file (9.86 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/block_pool.py
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
from collections.abc import Iterable
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
from vllm.distributed.kv_events import (AllBlocksCleared, BlockRemoved,
|
| 8 |
+
BlockStored, KVCacheEvent)
|
| 9 |
+
from vllm.logger import init_logger
|
| 10 |
+
from vllm.v1.core.kv_cache_utils import (BlockHash, BlockHashWithGroupId,
|
| 11 |
+
FreeKVCacheBlockQueue, KVCacheBlock)
|
| 12 |
+
from vllm.v1.request import Request
|
| 13 |
+
|
| 14 |
+
logger = init_logger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class BlockPool:
|
| 18 |
+
"""BlockPool that manages KVCacheBlocks.
|
| 19 |
+
It provides methods to allocate, free and cache the kv cache blocks. The
|
| 20 |
+
free_block_queue stores the free blocks in eviction order to enable
|
| 21 |
+
allocation, free, and cache eviction. The cached_block_hash_to_block
|
| 22 |
+
maps between block hash and cached block to support finding cached blocks
|
| 23 |
+
by their block hash.
|
| 24 |
+
|
| 25 |
+
Args:
|
| 26 |
+
num_gpu_blocks: The number of blocks in the pool.
|
| 27 |
+
enable_caching: Whether to enable prefix caching.
|
| 28 |
+
enable_kv_cache_events: Whether to enable kv cache events.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
def __init__(
|
| 32 |
+
self,
|
| 33 |
+
num_gpu_blocks: int,
|
| 34 |
+
enable_caching: bool,
|
| 35 |
+
enable_kv_cache_events: bool = False,
|
| 36 |
+
):
|
| 37 |
+
assert isinstance(num_gpu_blocks, int) and num_gpu_blocks > 0
|
| 38 |
+
self.num_gpu_blocks = num_gpu_blocks
|
| 39 |
+
self.enable_caching = enable_caching
|
| 40 |
+
# All kv-cache blocks.
|
| 41 |
+
self.blocks: list[KVCacheBlock] = [
|
| 42 |
+
KVCacheBlock(idx) for idx in range(num_gpu_blocks)
|
| 43 |
+
]
|
| 44 |
+
# Free block queue that constructs and manipulates a doubly linked
|
| 45 |
+
# list of free blocks (including eviction candidates when caching is
|
| 46 |
+
# enabled).
|
| 47 |
+
self.free_block_queue = FreeKVCacheBlockQueue(self.blocks)
|
| 48 |
+
|
| 49 |
+
# {block_hash: {block ID: block}}. A cached block is
|
| 50 |
+
# a full block with a block hash that can be used for prefix caching.
|
| 51 |
+
# The cached block may be used by running requests or in the
|
| 52 |
+
# free_block_queue that could potentially be evicted.
|
| 53 |
+
# NOTE: We currently don't de-duplicate the blocks in the cache,
|
| 54 |
+
# meaning that if a block becomes full and is cached, we don't check
|
| 55 |
+
# if there is already an identical block in the cache. This is because
|
| 56 |
+
# we want to make sure the allocated block IDs won't change so that
|
| 57 |
+
# block tables are append-only.
|
| 58 |
+
self.cached_block_hash_to_block: dict[BlockHashWithGroupId, dict[
|
| 59 |
+
int, KVCacheBlock]] = defaultdict(dict)
|
| 60 |
+
|
| 61 |
+
# To represent a placeholder block with block_id=0.
|
| 62 |
+
# The ref_cnt of null_block is not maintained, needs special care to
|
| 63 |
+
# avoid freeing it.
|
| 64 |
+
self.null_block = self.free_block_queue.popleft()
|
| 65 |
+
self.null_block.is_null = True
|
| 66 |
+
|
| 67 |
+
self.enable_kv_cache_events = enable_kv_cache_events
|
| 68 |
+
self.kv_event_queue: list[KVCacheEvent] = []
|
| 69 |
+
|
| 70 |
+
def get_cached_block(
|
| 71 |
+
self, block_hash: BlockHash,
|
| 72 |
+
kv_cache_group_ids: list[int]) -> Optional[list[KVCacheBlock]]:
|
| 73 |
+
"""Get the cached block by the block hash for each group in
|
| 74 |
+
`kv_cache_group_ids`, or None if cache miss for any group.
|
| 75 |
+
If there are duplicated blocks, we return the first block in the cache.
|
| 76 |
+
|
| 77 |
+
Args:
|
| 78 |
+
block_hash: The hash value of the block.
|
| 79 |
+
kv_cache_group_ids: The ids of the KV cache groups.
|
| 80 |
+
|
| 81 |
+
Returns:
|
| 82 |
+
The cached blocks if exists, or None.
|
| 83 |
+
"""
|
| 84 |
+
cached_blocks = []
|
| 85 |
+
for group_id in kv_cache_group_ids:
|
| 86 |
+
cached_blocks_one_group = self.cached_block_hash_to_block.get(
|
| 87 |
+
BlockHashWithGroupId(block_hash, group_id))
|
| 88 |
+
if not cached_blocks_one_group:
|
| 89 |
+
return None
|
| 90 |
+
first_block = next(iter(cached_blocks_one_group.values()))
|
| 91 |
+
cached_blocks.append(first_block)
|
| 92 |
+
return cached_blocks
|
| 93 |
+
|
| 94 |
+
def cache_full_blocks(
|
| 95 |
+
self,
|
| 96 |
+
request: Request,
|
| 97 |
+
blocks: list[KVCacheBlock],
|
| 98 |
+
num_cached_blocks: int,
|
| 99 |
+
num_full_blocks: int,
|
| 100 |
+
block_size: int,
|
| 101 |
+
kv_cache_group_id: int,
|
| 102 |
+
) -> None:
|
| 103 |
+
"""Cache a list of full blocks for prefix caching.
|
| 104 |
+
This function takes a list of blocks that will have their block hash
|
| 105 |
+
metadata to be updated and cached. Given a request, it updates the
|
| 106 |
+
metadata for each block and caching it in the
|
| 107 |
+
`cached_block_hash_to_block`.
|
| 108 |
+
The block hashes values are computed by the Request object immediately
|
| 109 |
+
when it is created and when new tokens are appended.
|
| 110 |
+
|
| 111 |
+
Args:
|
| 112 |
+
request: The request to cache the blocks.
|
| 113 |
+
blocks: All blocks in the request.
|
| 114 |
+
num_cached_blocks: The number of blocks that are already cached.
|
| 115 |
+
num_full_blocks: The number of blocks that are full and should
|
| 116 |
+
be cached after this function.
|
| 117 |
+
block_size: Number of tokens in each block.
|
| 118 |
+
kv_cache_group_id: The id of the KV cache group.
|
| 119 |
+
"""
|
| 120 |
+
if num_cached_blocks == num_full_blocks:
|
| 121 |
+
return
|
| 122 |
+
new_full_blocks = blocks[num_cached_blocks:num_full_blocks]
|
| 123 |
+
assert len(request.block_hashes) >= num_full_blocks
|
| 124 |
+
new_block_hashes = request.block_hashes[num_cached_blocks:]
|
| 125 |
+
|
| 126 |
+
new_hashes: Optional[list[int]] = ([] if self.enable_kv_cache_events
|
| 127 |
+
else None)
|
| 128 |
+
for i, blk in enumerate(new_full_blocks):
|
| 129 |
+
assert blk.block_hash is None
|
| 130 |
+
block_hash = new_block_hashes[i]
|
| 131 |
+
|
| 132 |
+
# Update and added the full block to the cache.
|
| 133 |
+
block_hash_with_group_id = BlockHashWithGroupId(
|
| 134 |
+
block_hash, kv_cache_group_id)
|
| 135 |
+
blk.block_hash = block_hash_with_group_id
|
| 136 |
+
self.cached_block_hash_to_block[block_hash_with_group_id][
|
| 137 |
+
blk.block_id] = blk
|
| 138 |
+
if new_hashes is not None:
|
| 139 |
+
new_hashes.append(block_hash.hash_value)
|
| 140 |
+
|
| 141 |
+
if self.enable_kv_cache_events:
|
| 142 |
+
if num_cached_blocks == 0:
|
| 143 |
+
parent_block_hash = None
|
| 144 |
+
else:
|
| 145 |
+
parent_block = blocks[num_cached_blocks - 1]
|
| 146 |
+
assert parent_block.block_hash is not None
|
| 147 |
+
parent_block_hash = parent_block.block_hash.get_hash_value()
|
| 148 |
+
|
| 149 |
+
self.kv_event_queue.append(
|
| 150 |
+
BlockStored(
|
| 151 |
+
block_hashes=new_hashes,
|
| 152 |
+
parent_block_hash=parent_block_hash,
|
| 153 |
+
token_ids=request.
|
| 154 |
+
all_token_ids[num_cached_blocks *
|
| 155 |
+
block_size:num_full_blocks * block_size],
|
| 156 |
+
block_size=block_size,
|
| 157 |
+
lora_id=request.lora_request.id
|
| 158 |
+
if request.lora_request else None,
|
| 159 |
+
))
|
| 160 |
+
|
| 161 |
+
def get_new_blocks(self, num_blocks: int) -> list[KVCacheBlock]:
|
| 162 |
+
"""Get new blocks from the free block pool.
|
| 163 |
+
|
| 164 |
+
Note that we do not check block cache in this function.
|
| 165 |
+
|
| 166 |
+
Args:
|
| 167 |
+
num_blocks: The number of blocks to allocate.
|
| 168 |
+
|
| 169 |
+
Returns:
|
| 170 |
+
A list of new block.
|
| 171 |
+
"""
|
| 172 |
+
if num_blocks > self.get_num_free_blocks():
|
| 173 |
+
raise ValueError(
|
| 174 |
+
f"Cannot get {num_blocks} free blocks from the pool")
|
| 175 |
+
|
| 176 |
+
ret: list[KVCacheBlock] = self.free_block_queue.popleft_n(num_blocks)
|
| 177 |
+
|
| 178 |
+
# In order to only iterate the list once, we duplicated code a bit
|
| 179 |
+
if self.enable_caching:
|
| 180 |
+
for block in ret:
|
| 181 |
+
self._maybe_evict_cached_block(block)
|
| 182 |
+
assert block.ref_cnt == 0
|
| 183 |
+
block.ref_cnt += 1
|
| 184 |
+
else:
|
| 185 |
+
for block in ret:
|
| 186 |
+
assert block.ref_cnt == 0
|
| 187 |
+
block.ref_cnt += 1
|
| 188 |
+
return ret
|
| 189 |
+
|
| 190 |
+
def _maybe_evict_cached_block(self, block: KVCacheBlock) -> bool:
|
| 191 |
+
"""
|
| 192 |
+
If a block is cached in `cached_block_hash_to_block`, we reset its hash
|
| 193 |
+
metadata and evict it from the cache.
|
| 194 |
+
|
| 195 |
+
Args:
|
| 196 |
+
block: The block to evict.
|
| 197 |
+
|
| 198 |
+
Returns:
|
| 199 |
+
True if the block is evicted, False otherwise.
|
| 200 |
+
"""
|
| 201 |
+
block_hash = block.block_hash
|
| 202 |
+
if block_hash is None:
|
| 203 |
+
# The block doesn't have hash, eviction is not needed
|
| 204 |
+
return False
|
| 205 |
+
blocks_by_id = self.cached_block_hash_to_block.get(block_hash)
|
| 206 |
+
if blocks_by_id is None:
|
| 207 |
+
# block_hash not found in cached_block_hash_to_block,
|
| 208 |
+
# eviction is not needed
|
| 209 |
+
return False
|
| 210 |
+
block.reset_hash()
|
| 211 |
+
blocks_by_id.pop(block.block_id, None)
|
| 212 |
+
if len(blocks_by_id) == 0:
|
| 213 |
+
del self.cached_block_hash_to_block[block_hash]
|
| 214 |
+
|
| 215 |
+
if self.enable_kv_cache_events:
|
| 216 |
+
# FIXME (Chen): Not sure whether we should return `hash_value`
|
| 217 |
+
# or `(hash_value, group_id)` here. But it's fine now because
|
| 218 |
+
# we disable hybrid kv cache manager when kv cache event is
|
| 219 |
+
# enabled, so there is only one group.
|
| 220 |
+
self.kv_event_queue.append(
|
| 221 |
+
BlockRemoved(block_hashes=[block_hash.get_hash_value()]))
|
| 222 |
+
return True
|
| 223 |
+
|
| 224 |
+
def touch(self, blocks: tuple[list[KVCacheBlock], ...]) -> None:
|
| 225 |
+
"""Touch a block increases its reference count by 1, and may remove
|
| 226 |
+
the block from the free queue. This is used when a block is hit by
|
| 227 |
+
another request with the same prefix.
|
| 228 |
+
|
| 229 |
+
Args:
|
| 230 |
+
blocks: A list of blocks to touch.
|
| 231 |
+
"""
|
| 232 |
+
for blocks_per_group in blocks:
|
| 233 |
+
for block in blocks_per_group:
|
| 234 |
+
# ref_cnt=0 means this block is in the free list (i.e. eviction
|
| 235 |
+
# candidate), so remove it.
|
| 236 |
+
if block.ref_cnt == 0 and not block.is_null:
|
| 237 |
+
self.free_block_queue.remove(block)
|
| 238 |
+
block.ref_cnt += 1
|
| 239 |
+
|
| 240 |
+
def free_blocks(self, ordered_blocks: Iterable[KVCacheBlock]) -> None:
|
| 241 |
+
"""Free a list of blocks. The blocks should be ordered by their
|
| 242 |
+
eviction priority, where the first block will be evicted first.
|
| 243 |
+
|
| 244 |
+
Args:
|
| 245 |
+
ordered_blocks: A list of blocks to free ordered by their eviction
|
| 246 |
+
priority.
|
| 247 |
+
"""
|
| 248 |
+
# Materialize the iterable to allow multiple passes.
|
| 249 |
+
blocks_list = list(ordered_blocks)
|
| 250 |
+
for block in blocks_list:
|
| 251 |
+
block.ref_cnt -= 1
|
| 252 |
+
self.free_block_queue.append_n([
|
| 253 |
+
block for block in blocks_list
|
| 254 |
+
if block.ref_cnt == 0 and not block.is_null
|
| 255 |
+
])
|
| 256 |
+
|
| 257 |
+
def reset_prefix_cache(self) -> bool:
|
| 258 |
+
"""Reset prefix cache. This function may be used in RLHF
|
| 259 |
+
flows to invalid prefix caching after the weights are updated,
|
| 260 |
+
or used for resetting prefix caching status for benchmarking.
|
| 261 |
+
|
| 262 |
+
Returns:
|
| 263 |
+
bool: True if the prefix cache is successfully reset,
|
| 264 |
+
False otherwise.
|
| 265 |
+
"""
|
| 266 |
+
num_used_blocks = self.num_gpu_blocks - self.get_num_free_blocks()
|
| 267 |
+
if num_used_blocks != 1: # The null block is always marked as used
|
| 268 |
+
logger.warning(
|
| 269 |
+
"Failed to reset prefix cache because some "
|
| 270 |
+
"blocks (%d) are not freed yet", num_used_blocks - 1)
|
| 271 |
+
return False
|
| 272 |
+
|
| 273 |
+
# Remove all hashes so that no new blocks will hit.
|
| 274 |
+
self.cached_block_hash_to_block = defaultdict(dict)
|
| 275 |
+
|
| 276 |
+
# Remove all hashes from all blocks.
|
| 277 |
+
for block in self.blocks:
|
| 278 |
+
block.reset_hash()
|
| 279 |
+
|
| 280 |
+
logger.info("Successfully reset prefix cache")
|
| 281 |
+
|
| 282 |
+
if self.enable_kv_cache_events:
|
| 283 |
+
self.kv_event_queue.append(AllBlocksCleared())
|
| 284 |
+
|
| 285 |
+
return True
|
| 286 |
+
|
| 287 |
+
def get_num_free_blocks(self) -> int:
|
| 288 |
+
"""Get the number of free blocks in the pool.
|
| 289 |
+
|
| 290 |
+
Returns:
|
| 291 |
+
The number of free blocks.
|
| 292 |
+
"""
|
| 293 |
+
return self.free_block_queue.num_free_blocks
|
| 294 |
+
|
| 295 |
+
def get_usage(self) -> float:
|
| 296 |
+
"""Get the KV cache usage.
|
| 297 |
+
|
| 298 |
+
Returns:
|
| 299 |
+
The KV cache usage (between 0.0 and 1.0).
|
| 300 |
+
"""
|
| 301 |
+
return 1.0 - (self.get_num_free_blocks() / self.num_gpu_blocks)
|
| 302 |
+
|
| 303 |
+
def take_events(self) -> list[KVCacheEvent]:
|
| 304 |
+
"""Atomically takes all events and clears the queue.
|
| 305 |
+
|
| 306 |
+
Returns:
|
| 307 |
+
A list of KV cache events.
|
| 308 |
+
"""
|
| 309 |
+
if not self.enable_kv_cache_events:
|
| 310 |
+
return []
|
| 311 |
+
events = self.kv_event_queue
|
| 312 |
+
self.kv_event_queue = []
|
| 313 |
+
return events
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/encoder_cache_manager.py
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from typing import TYPE_CHECKING
|
| 5 |
+
|
| 6 |
+
from vllm.logger import init_logger
|
| 7 |
+
from vllm.multimodal import MultiModalRegistry
|
| 8 |
+
from vllm.v1.request import Request
|
| 9 |
+
|
| 10 |
+
if TYPE_CHECKING:
|
| 11 |
+
from vllm.config import ModelConfig, SchedulerConfig
|
| 12 |
+
|
| 13 |
+
logger = init_logger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class EncoderCacheManager:
|
| 17 |
+
"""Manages caching of encoder outputs for multimodal models in vLLM V1.
|
| 18 |
+
|
| 19 |
+
The EncoderCacheManager handles the lifecycle of multimodal encoder outputs
|
| 20 |
+
(such as vision embeddings from images) during request processing. It
|
| 21 |
+
provides memory-aware caching to avoid recomputing encoder outputs when the
|
| 22 |
+
same multimodal inputs appear in different stages of request processing.
|
| 23 |
+
|
| 24 |
+
This manager is particularly important for:
|
| 25 |
+
- Vision-language models (e.g., LLaVA) where image encoder outputs are
|
| 26 |
+
cached
|
| 27 |
+
- Any multimodal model where encoder computation is expensive and
|
| 28 |
+
cacheable
|
| 29 |
+
|
| 30 |
+
The cache operates at the granularity of individual multimodal input items
|
| 31 |
+
within requests, allowing for fine-grained memory management and enabling
|
| 32 |
+
chunked processing of multimodal inputs.
|
| 33 |
+
|
| 34 |
+
Note that no caching is shared between requests at this time. If the same
|
| 35 |
+
input is used across multiple requests, it will be reprocessed for each
|
| 36 |
+
request.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
cache_size: Limit the size of the cache, measured by the number of
|
| 40 |
+
tokens from the input sequence.
|
| 41 |
+
|
| 42 |
+
Attributes:
|
| 43 |
+
cache_size: Total cache capacity in encoder tokens
|
| 44 |
+
num_free_slots: Current available cache capacity in encoder tokens
|
| 45 |
+
cached: Mapping from request_id to set of cached input_ids for that
|
| 46 |
+
request
|
| 47 |
+
freed: List of (request_id, input_id) pairs that were recently freed.
|
| 48 |
+
This is cleared after every call to get_freed_ids().
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
def __init__(self, cache_size: int):
|
| 52 |
+
self.cache_size = cache_size
|
| 53 |
+
self.num_free_slots = cache_size
|
| 54 |
+
# req_id -> cached input ids
|
| 55 |
+
self.cached: dict[str, set[int]] = {}
|
| 56 |
+
# list of [req_id, input_id]
|
| 57 |
+
self.freed: list[tuple[str, int]] = []
|
| 58 |
+
|
| 59 |
+
def has_cache(self, request: Request, input_id: int) -> bool:
|
| 60 |
+
"""Check if encoder output for a specific multimodal input is cached.
|
| 61 |
+
|
| 62 |
+
Args:
|
| 63 |
+
request: The request containing the multimodal input
|
| 64 |
+
input_id: Index of the multimodal input within the request
|
| 65 |
+
|
| 66 |
+
Returns:
|
| 67 |
+
True if the encoder output for this input is already cached
|
| 68 |
+
"""
|
| 69 |
+
req_id = request.request_id
|
| 70 |
+
return req_id in self.cached and input_id in self.cached[req_id]
|
| 71 |
+
|
| 72 |
+
def can_allocate(self, request: Request, input_id: int) -> bool:
|
| 73 |
+
"""Check if there's sufficient cache space for a multimodal input.
|
| 74 |
+
|
| 75 |
+
Args:
|
| 76 |
+
request: The request containing the multimodal input
|
| 77 |
+
input_id: Index of the multimodal input within the request
|
| 78 |
+
|
| 79 |
+
Returns:
|
| 80 |
+
True if there's enough free cache space to store the encoder output
|
| 81 |
+
for this multimodal input
|
| 82 |
+
"""
|
| 83 |
+
num_tokens = request.get_num_encoder_tokens(input_id)
|
| 84 |
+
return num_tokens <= self.num_free_slots
|
| 85 |
+
|
| 86 |
+
def allocate(self, request: Request, input_id: int) -> None:
|
| 87 |
+
"""Allocate cache space for a multimodal input's encoder output.
|
| 88 |
+
|
| 89 |
+
This method reserves cache space for storing the encoder output of
|
| 90 |
+
the specified multimodal input. The actual encoder output storage
|
| 91 |
+
happens in the model runner, but this method ensures the cache
|
| 92 |
+
manager tracks the allocation.
|
| 93 |
+
|
| 94 |
+
Args:
|
| 95 |
+
request: The request containing the multimodal input
|
| 96 |
+
input_id: Index of the multimodal input within the request
|
| 97 |
+
|
| 98 |
+
Note:
|
| 99 |
+
This method assumes can_allocate() returned True for the same
|
| 100 |
+
request and input_id. It will reduce available cache space.
|
| 101 |
+
"""
|
| 102 |
+
req_id = request.request_id
|
| 103 |
+
if req_id not in self.cached:
|
| 104 |
+
self.cached[req_id] = set()
|
| 105 |
+
self.cached[req_id].add(input_id)
|
| 106 |
+
self.num_free_slots -= request.get_num_encoder_tokens(input_id)
|
| 107 |
+
|
| 108 |
+
def get_cached_input_ids(self, request: Request) -> set[int]:
|
| 109 |
+
"""Get all cached multimodal input IDs for a request.
|
| 110 |
+
|
| 111 |
+
Args:
|
| 112 |
+
request: The request to query
|
| 113 |
+
|
| 114 |
+
Returns:
|
| 115 |
+
Set of input_ids that have cached encoder outputs for this request.
|
| 116 |
+
Returns empty set if no inputs are cached for this request.
|
| 117 |
+
"""
|
| 118 |
+
return self.cached.get(request.request_id, set())
|
| 119 |
+
|
| 120 |
+
def free_encoder_input(self, request: Request, input_id: int) -> None:
|
| 121 |
+
"""Free cache space for a single multimodal input's encoder output.
|
| 122 |
+
|
| 123 |
+
This method is called when:
|
| 124 |
+
- The encoder output has been fully consumed by the decoder and is
|
| 125 |
+
no longer needed (e.g., in vision-language models after image
|
| 126 |
+
tokens are processed)
|
| 127 |
+
- A request is being cancelled or aborted
|
| 128 |
+
|
| 129 |
+
Args:
|
| 130 |
+
request: The request containing the multimodal input
|
| 131 |
+
input_id: Index of the multimodal input to free from cache
|
| 132 |
+
"""
|
| 133 |
+
req_id = request.request_id
|
| 134 |
+
if req_id not in self.cached:
|
| 135 |
+
return
|
| 136 |
+
|
| 137 |
+
self.cached[req_id].discard(input_id)
|
| 138 |
+
if len(self.cached[req_id]) == 0:
|
| 139 |
+
del self.cached[req_id]
|
| 140 |
+
self.num_free_slots += request.get_num_encoder_tokens(input_id)
|
| 141 |
+
self.freed.append((req_id, input_id))
|
| 142 |
+
|
| 143 |
+
def free(self, request: Request) -> None:
|
| 144 |
+
"""Free all cached encoder outputs for a request.
|
| 145 |
+
|
| 146 |
+
This method is typically called when a request is finished, cancelled,
|
| 147 |
+
or aborted, and all its encoder outputs should be freed from cache.
|
| 148 |
+
|
| 149 |
+
Args:
|
| 150 |
+
request: The request whose encoder outputs should be freed
|
| 151 |
+
"""
|
| 152 |
+
input_ids = self.get_cached_input_ids(request).copy()
|
| 153 |
+
for input_id in input_ids:
|
| 154 |
+
self.free_encoder_input(request, input_id)
|
| 155 |
+
|
| 156 |
+
def get_freed_ids(self) -> list[tuple[str, int]]:
|
| 157 |
+
"""Get and clear the list of recently freed encoder cache entries.
|
| 158 |
+
|
| 159 |
+
This method returns all encoder cache entries that were freed since
|
| 160 |
+
the last call to this method. It's used by the scheduler to notify
|
| 161 |
+
workers about which encoder outputs can be removed from their caches.
|
| 162 |
+
|
| 163 |
+
Returns:
|
| 164 |
+
List of (request_id, input_id) tuples that were freed since the
|
| 165 |
+
last call. The internal freed list is cleared after this call.
|
| 166 |
+
"""
|
| 167 |
+
freed = self.freed
|
| 168 |
+
self.freed = []
|
| 169 |
+
return freed
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def compute_encoder_budget(
|
| 173 |
+
model_config: "ModelConfig",
|
| 174 |
+
scheduler_config: "SchedulerConfig",
|
| 175 |
+
mm_registry: MultiModalRegistry,
|
| 176 |
+
) -> tuple[int, int]:
|
| 177 |
+
"""Compute the encoder cache budget based on the model and scheduler
|
| 178 |
+
configurations.
|
| 179 |
+
|
| 180 |
+
Args:
|
| 181 |
+
model_config: Model configuration.
|
| 182 |
+
scheduler_config: Scheduler configuration.
|
| 183 |
+
mm_registry: Provides information about the token cost.
|
| 184 |
+
|
| 185 |
+
Returns:
|
| 186 |
+
- Compute budget for encoder execution, in unit of number of tokens
|
| 187 |
+
in the input sequence.
|
| 188 |
+
- Space budget for encoder cache size, in unit of number of tokens
|
| 189 |
+
in the input sequence.
|
| 190 |
+
"""
|
| 191 |
+
|
| 192 |
+
if not mm_registry.supports_multimodal_inputs(model_config):
|
| 193 |
+
return 0, 0
|
| 194 |
+
|
| 195 |
+
# TODO: handle encoder-decoder models once we support them.
|
| 196 |
+
(
|
| 197 |
+
encoder_compute_budget,
|
| 198 |
+
encoder_cache_size,
|
| 199 |
+
) = _compute_encoder_budget_multimodal(
|
| 200 |
+
model_config,
|
| 201 |
+
scheduler_config,
|
| 202 |
+
mm_registry,
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
return encoder_compute_budget, encoder_cache_size
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def _compute_encoder_budget_multimodal(
|
| 209 |
+
model_config: "ModelConfig",
|
| 210 |
+
scheduler_config: "SchedulerConfig",
|
| 211 |
+
mm_registry: MultiModalRegistry,
|
| 212 |
+
) -> tuple[int, int]:
|
| 213 |
+
"""Compute the encoder cache budget based on the model and scheduler
|
| 214 |
+
configurations for a multimodal model.
|
| 215 |
+
|
| 216 |
+
Args:
|
| 217 |
+
model_config: Model configuration.
|
| 218 |
+
scheduler_config: Scheduler configuration.
|
| 219 |
+
mm_registry: Provides information about the token cost.
|
| 220 |
+
|
| 221 |
+
Returns:
|
| 222 |
+
- Compute budget for encoder execution, in unit of number of tokens
|
| 223 |
+
in the input sequence.
|
| 224 |
+
- Space budget for encoder cache size, in unit of number of tokens
|
| 225 |
+
in the input sequence.
|
| 226 |
+
"""
|
| 227 |
+
|
| 228 |
+
max_tokens_by_modality_dict = mm_registry \
|
| 229 |
+
.get_max_tokens_per_item_by_nonzero_modality(model_config)
|
| 230 |
+
|
| 231 |
+
if not max_tokens_by_modality_dict:
|
| 232 |
+
logger.warning(
|
| 233 |
+
"All non-text modalities supported by the model have been "
|
| 234 |
+
"explicitly disabled via limit_mm_per_prompt. Encoder cache will "
|
| 235 |
+
"not be initialized.")
|
| 236 |
+
return 0, 0
|
| 237 |
+
|
| 238 |
+
_, max_tokens_per_mm_item = max(max_tokens_by_modality_dict.items(),
|
| 239 |
+
key=lambda item: item[1])
|
| 240 |
+
|
| 241 |
+
if (scheduler_config.disable_chunked_mm_input and max_tokens_per_mm_item
|
| 242 |
+
> scheduler_config.max_num_batched_tokens):
|
| 243 |
+
raise ValueError(
|
| 244 |
+
"Chunked MM input disabled but max_tokens_per_mm_item "
|
| 245 |
+
f"({max_tokens_per_mm_item}) is larger than max_num_batched_tokens"
|
| 246 |
+
f" ({scheduler_config.max_num_batched_tokens}). Please increase "
|
| 247 |
+
"max_num_batched_tokens.")
|
| 248 |
+
|
| 249 |
+
encoder_compute_budget = max(scheduler_config.max_num_encoder_input_tokens,
|
| 250 |
+
max_tokens_per_mm_item)
|
| 251 |
+
encoder_cache_size = max(scheduler_config.encoder_cache_size,
|
| 252 |
+
max_tokens_per_mm_item)
|
| 253 |
+
|
| 254 |
+
return encoder_compute_budget, encoder_cache_size
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/kv_cache_coordinator.py
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from abc import ABC, abstractmethod
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
from vllm.v1.core.block_pool import BlockPool
|
| 7 |
+
from vllm.v1.core.kv_cache_utils import BlockHash, KVCacheBlock
|
| 8 |
+
from vllm.v1.core.single_type_kv_cache_manager import (
|
| 9 |
+
FullAttentionManager, get_manager_for_kv_cache_spec)
|
| 10 |
+
from vllm.v1.kv_cache_interface import (FullAttentionSpec, KVCacheConfig,
|
| 11 |
+
KVCacheSpec)
|
| 12 |
+
from vllm.v1.request import Request
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class KVCacheCoordinator(ABC):
    """
    Coordinate the KV cache of different KV cache groups.

    Holds one single-type manager per KV cache group (built via
    `get_manager_for_kv_cache_spec`) and fans each operation out to all of
    them, sharing a single `BlockPool` across the managers.
    """

    def __init__(
        self,
        kv_cache_config: KVCacheConfig,
        max_model_len: int,
        use_eagle: bool,
        enable_caching: bool,
        enable_kv_cache_events: bool,
    ):
        self.kv_cache_config = kv_cache_config
        self.max_model_len = max_model_len
        self.enable_caching = enable_caching

        # Single block pool shared by all single-type managers.
        self.block_pool = BlockPool(kv_cache_config.num_blocks, enable_caching,
                                    enable_kv_cache_events)

        # Needs special handling for find_longest_cache_hit if eagle is enabled
        self.use_eagle = use_eagle
        # One manager per KV cache group, in group-id order.
        self.single_type_managers = tuple(
            get_manager_for_kv_cache_spec(
                kv_cache_spec=kv_cache_group.kv_cache_spec,
                block_pool=self.block_pool,
                kv_cache_group_id=i,
            ) for i, kv_cache_group in enumerate(
                self.kv_cache_config.kv_cache_groups))

    def get_num_blocks_to_allocate(
            self, request_id: str, num_tokens: int,
            new_computed_blocks: tuple[list[KVCacheBlock], ...]) -> int:
        """
        Get the number of blocks needed to be allocated for the request,
        summed over all KV cache groups.

        Args:
            request_id: The request ID.
            num_tokens: The total number of tokens that need a slot (including
                tokens that are already allocated).
            new_computed_blocks: The new computed blocks just hitting the
                prefix caching, one list per KV cache group.

        Returns:
            The number of blocks.
        """
        num_blocks_to_allocate = 0
        for i, manager in enumerate(self.single_type_managers):
            num_blocks_to_allocate += manager.get_num_blocks_to_allocate(
                request_id, num_tokens, new_computed_blocks[i])
        return num_blocks_to_allocate

    def save_new_computed_blocks(
            self, request_id: str,
            new_computed_blocks: tuple[list[KVCacheBlock], ...]) -> None:
        """
        Add the new computed blocks to the request.

        Args:
            request_id: The request ID.
            new_computed_blocks: The new computed blocks just hitting the
                prefix cache, one list per KV cache group.
        """
        for i, manager in enumerate(self.single_type_managers):
            manager.save_new_computed_blocks(request_id,
                                             new_computed_blocks[i])

    def allocate_new_blocks(self, request_id: str,
                            num_tokens: int) -> tuple[list[KVCacheBlock], ...]:
        """
        Allocate new blocks for the request to give it at least `num_tokens`
        token slots.

        Args:
            request_id: The request ID.
            num_tokens: The total number of tokens that need a slot (including
                tokens that are already allocated).

        Returns:
            The new allocated blocks, one list per KV cache group.
        """
        return tuple(
            manager.allocate_new_blocks(request_id, num_tokens)
            for manager in self.single_type_managers)

    def cache_blocks(self, request: Request, num_computed_tokens: int) -> None:
        """
        Cache the blocks for the request.

        Args:
            request: The request.
            num_computed_tokens: The total number of tokens that need to be
                cached (including tokens that are already cached).
        """
        for manager in self.single_type_managers:
            manager.cache_blocks(request, num_computed_tokens)

    def free(self, request_id: str) -> None:
        """
        Free the blocks for the request in every KV cache group.

        Args:
            request_id: The request ID.
        """
        for manager in self.single_type_managers:
            manager.free(request_id)

    def get_num_common_prefix_blocks(self, request_id: str,
                                     num_running_requests: int) -> list[int]:
        """
        Get the number of common prefix blocks for all requests in the RUNNING
        state for each kv cache group.

        Args:
            request_id: The request ID.
            num_running_requests: The total number of requests in the RUNNING
                state.

        Returns:
            list[int]: The number of common prefix blocks for all requests in
                the RUNNING state for each kv cache group.
        """
        num_blocks_per_group = [
            manager.get_num_common_prefix_blocks(request_id,
                                                 num_running_requests)
            for manager in self.single_type_managers
        ]
        return num_blocks_per_group

    def remove_skipped_blocks(self, request_id: str,
                              num_computed_tokens: int) -> None:
        """
        Remove the blocks that are no longer needed from `blocks` and replace
        the removed blocks with null_block.

        Args:
            request_id: The request ID.
            num_computed_tokens: The number of tokens that have been computed.
        """
        for manager in self.single_type_managers:
            manager.remove_skipped_blocks(request_id, num_computed_tokens)

    def get_blocks(self, request_id: str) -> tuple[list[KVCacheBlock], ...]:
        """
        Get the blocks for the request, one list per KV cache group.
        """
        # `.get(...) or []` yields an empty list when the request is unknown
        # to a manager (get returns None).
        return tuple(
            manager.req_to_blocks.get(request_id) or []
            for manager in self.single_type_managers)

    @abstractmethod
    def find_longest_cache_hit(
        self,
        block_hashes: list[BlockHash],
        max_cache_hit_length: int,
    ) -> tuple[tuple[list[KVCacheBlock], ...], int]:
        """
        Find the longest prefix-cache hit (per KV cache group) that is not
        longer than `max_cache_hit_length` tokens. Returns the hit blocks per
        group and the hit length in tokens.
        """
        pass
| 173 |
+
|
| 174 |
+
class KVCacheCoordinatorNoPrefixCache(KVCacheCoordinator):
    """
    KV cache coordinator to use if prefix caching is disabled or unsupported.
    In contrast to UnitaryKVCacheCoordinator and HybridKVCacheCoordinator,
    supports arbitrary numbers of KV cache groups (including 0 groups).
    Does not implement any features related to prefix caching.
    """

    def __init__(self, kv_cache_config: KVCacheConfig, max_model_len: int,
                 use_eagle: bool, enable_kv_cache_events: bool):
        # Prefix caching is always off for this coordinator, so the
        # `enable_caching` argument of the base class is hard-wired to False.
        super().__init__(kv_cache_config, max_model_len, use_eagle, False,
                         enable_kv_cache_events)
        self.num_single_type_manager = len(self.single_type_managers)

    def get_num_common_prefix_blocks(self, request_id: str,
                                     num_running_requests: int) -> list[int]:
        """Without prefix caching, no blocks are shared across requests."""
        return [0 for _ in range(self.num_single_type_manager)]

    def find_longest_cache_hit(
        self,
        block_hashes: list[BlockHash],
        max_cache_hit_length: int,
    ) -> tuple[tuple[list[KVCacheBlock], ...], int]:
        """Always report a zero-length cache hit (there is no prefix cache)."""
        empty_hit: tuple[list[KVCacheBlock], ...] = tuple(
            [] for _ in range(self.num_single_type_manager))
        return empty_hit, 0
| 201 |
+
|
| 202 |
+
class UnitaryKVCacheCoordinator(KVCacheCoordinator):
    """
    KV cache coordinator for models with only one KV cache group. This is the
    case for models with only one KV cache type, e.g., all attention layers use
    full attention or all attention layers use sliding window attention.
    """

    def __init__(self, kv_cache_config: KVCacheConfig, max_model_len: int,
                 use_eagle: bool, enable_caching: bool,
                 enable_kv_cache_events: bool):
        super().__init__(kv_cache_config, max_model_len, use_eagle,
                         enable_caching, enable_kv_cache_events)
        groups = self.kv_cache_config.kv_cache_groups
        # Cache the spec and block size of the single group for fast access.
        self.kv_cache_spec = groups[0].kv_cache_spec
        self.block_size = self.kv_cache_spec.block_size
        assert len(groups) == 1, (
            "UnitaryKVCacheCoordinator assumes only one kv cache group")

    def find_longest_cache_hit(
        self,
        block_hashes: list[BlockHash],
        max_cache_hit_length: int,
    ) -> tuple[tuple[list[KVCacheBlock], ...], int]:
        """Delegate to the single manager; hit length is #blocks * block_size."""
        manager = self.single_type_managers[0]
        hit_blocks = manager.find_longest_cache_hit(
            block_hashes=block_hashes,
            max_length=max_cache_hit_length,
            kv_cache_group_ids=[0],
            block_pool=self.block_pool,
            kv_cache_spec=self.kv_cache_spec,
            use_eagle=self.use_eagle,
        )
        num_hit_tokens = len(hit_blocks[0]) * self.block_size
        return hit_blocks, num_hit_tokens
+
|
| 235 |
+
|
| 236 |
+
class HybridKVCacheCoordinator(KVCacheCoordinator):
    """
    KV cache coordinator for hybrid models with multiple KV cache types, and
    thus multiple kv cache groups.
    To simplify `find_longest_cache_hit`, it only supports the combination of
    two types of KV cache groups, and one of them must be full attention.
    May extend to more general cases in the future.
    """

    def __init__(self, kv_cache_config: KVCacheConfig, max_model_len: int,
                 use_eagle: bool, enable_caching: bool,
                 enable_kv_cache_events: bool):
        super().__init__(kv_cache_config, max_model_len, use_eagle,
                         enable_caching, enable_kv_cache_events)
        self.verify_and_split_kv_cache_groups()

    def verify_and_split_kv_cache_groups(self) -> None:
        """
        Verifies that the model has exactly two types of KV cache groups, and
        one of them is full attention. Then, split the kv cache groups into full
        attention groups and other groups.

        Sets up the attributes used by `find_longest_cache_hit`:
        `full_attention_group_ids` / `other_group_ids`, the two specs and
        block sizes, the two manager classes, and `full_attn_first` (whether
        all full-attention group ids come before all other group ids).
        """
        full_attention_spec: Optional[FullAttentionSpec] = None
        other_spec: Optional[KVCacheSpec] = None
        self.full_attention_group_ids: list[int] = []
        self.other_group_ids: list[int] = []
        for i, g in enumerate(self.kv_cache_config.kv_cache_groups):
            if isinstance(g.kv_cache_spec, FullAttentionSpec):
                if full_attention_spec is None:
                    full_attention_spec = g.kv_cache_spec
                else:
                    # All full-attention groups must share one spec.
                    assert full_attention_spec == g.kv_cache_spec, (
                        "HybridKVCacheCoordinator assumes exactly one type of "
                        "full attention groups now.")
                self.full_attention_group_ids.append(i)
            else:
                if other_spec is None:
                    other_spec = g.kv_cache_spec
                else:
                    # All non-full-attention groups must share one spec.
                    assert other_spec == g.kv_cache_spec, (
                        "HybridKVCacheCoordinator assumes "
                        "exactly one other type of groups now.")
                self.other_group_ids.append(i)

        assert full_attention_spec is not None, (
            "HybridKVCacheCoordinator assumes exactly one type of full "
            "attention groups now.")
        assert other_spec is not None, (
            "HybridKVCacheCoordinator assumes exactly one type of other "
            "groups now.")

        self.full_attention_manager_cls = FullAttentionManager
        # Class of the manager handling the non-full-attention groups.
        self.other_attention_cls = self.single_type_managers[
            self.other_group_ids[0]].__class__
        self.full_attention_spec = full_attention_spec
        self.other_spec = other_spec
        self.full_attention_block_size = self.full_attention_spec.block_size
        self.other_block_size = self.other_spec.block_size

        if self.enable_caching:
            # this requirement is only needed for the prefix caching logic
            # (`divisible` holds the remainder; it must be zero).
            divisible = self.other_block_size % self.full_attention_block_size
            assert divisible == 0, (
                "KVCacheCoordinator assumes the block_size of full "
                "attention layers is divisible by other layers now.")

        if max(self.full_attention_group_ids) < min(self.other_group_ids):
            self.full_attn_first = True
        elif max(self.other_group_ids) < min(self.full_attention_group_ids):
            self.full_attn_first = False
        else:
            raise ValueError(
                "HybridKVCacheCoordinator assumes the full "
                "attention group ids and other attention group ids "
                "do not interleave, either full attention group ids "
                "are before other attention group ids or vice versa."
                "This is for simplifying merging hit_blocks_full_attn and "
                "hit_blocks_other_attn to hit_blocks.")

    def find_longest_cache_hit(
        self,
        block_hashes: list[BlockHash],
        max_cache_hit_length: int,
    ) -> tuple[tuple[list[KVCacheBlock], ...], int]:
        """
        Find the longest cache hit for the request.

        Args:
            block_hashes: The block hashes of the request.
            max_cache_hit_length: The maximum length of the cache hit.

        Returns:
            A tuple containing:
                - A list of the cache hit blocks for each single type manager.
                - The number of tokens of the longest cache hit.
        """
        # First, find the longest cache hit for full attention.
        hit_blocks_full_attn = (
            self.full_attention_manager_cls.find_longest_cache_hit(
                block_hashes=block_hashes,
                max_length=max_cache_hit_length,
                kv_cache_group_ids=self.full_attention_group_ids,
                block_pool=self.block_pool,
                kv_cache_spec=self.full_attention_spec,
                use_eagle=self.use_eagle,
            ))
        hit_length = len(
            hit_blocks_full_attn[0]) * self.full_attention_block_size

        # Next, find the cache hit for the other attention WITHIN
        # the cache hit of full attention.
        hit_blocks_other_attn = (
            self.other_attention_cls.find_longest_cache_hit(
                block_hashes=block_hashes,
                max_length=hit_length,
                kv_cache_group_ids=self.other_group_ids,
                block_pool=self.block_pool,
                kv_cache_spec=self.other_spec,
                use_eagle=self.use_eagle,
            ))
        hit_length = len(hit_blocks_other_attn[0]) * self.other_block_size

        # NOTE: the prefix cache hit length must be a multiple of block_size as
        # we don't support partial block cache hit yet. The cache hit length
        # of other attention is ensured to be a multiple of the block size of
        # full attention layers in current implementation, because hit_length is
        # a multiple of other attention's block size, and other attention's
        # block size is a multiple of full attention's block size (verified in
        # `verify_and_split_kv_cache_groups`).
        assert hit_length % self.full_attention_block_size == 0

        # Truncate the full attention cache hit to the length of the
        # cache hit of the other attention (in-place, so the lists returned
        # by the full-attention manager are trimmed directly).
        for group_hit_blocks in hit_blocks_full_attn:
            del group_hit_blocks[hit_length // self.full_attention_block_size:]

        # Merge the hit blocks of full attention and other attention,
        # preserving group-id order (the two ranges do not interleave).
        if self.full_attn_first:
            hit_blocks = hit_blocks_full_attn + hit_blocks_other_attn
        else:
            hit_blocks = hit_blocks_other_attn + hit_blocks_full_attn
        return hit_blocks, hit_length
+
|
| 379 |
+
|
| 380 |
+
def get_kv_cache_coordinator(
        kv_cache_config: KVCacheConfig, max_model_len: int, use_eagle: bool,
        enable_caching: bool,
        enable_kv_cache_events: bool) -> KVCacheCoordinator:
    """
    Factory for the coordinator matching the configuration: no-prefix-cache
    when caching is off, unitary for a single KV cache group, hybrid otherwise.
    """
    if not enable_caching:
        return KVCacheCoordinatorNoPrefixCache(kv_cache_config, max_model_len,
                                               use_eagle,
                                               enable_kv_cache_events)
    num_groups = len(kv_cache_config.kv_cache_groups)
    coordinator_cls = (UnitaryKVCacheCoordinator
                      if num_groups == 1 else HybridKVCacheCoordinator)
    return coordinator_cls(kv_cache_config, max_model_len, use_eagle,
                           enable_caching, enable_kv_cache_events)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/kv_cache_manager.py
ADDED
|
@@ -0,0 +1,364 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
from vllm.distributed.kv_events import KVCacheEvent
|
| 8 |
+
from vllm.logger import init_logger
|
| 9 |
+
from vllm.v1.core.kv_cache_coordinator import get_kv_cache_coordinator
|
| 10 |
+
from vllm.v1.core.kv_cache_utils import KVCacheBlock
|
| 11 |
+
from vllm.v1.kv_cache_interface import KVCacheConfig
|
| 12 |
+
from vllm.v1.metrics.stats import PrefixCacheStats
|
| 13 |
+
from vllm.v1.request import Request, RequestStatus
|
| 14 |
+
|
| 15 |
+
logger = init_logger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclass
class KVCacheBlocks:
    """
    The allocation result of KVCacheManager, work as the interface between
    Scheduler and KVCacheManager, to hide KVCacheManager's internal data
    structure from the Scheduler.
    """
    # blocks[i][j] refers to the i-th kv_cache_group and the j-th block of
    # tokens. We don't use block of tokens as the outer dimension because it
    # assumes all kv_cache_groups have the same number of blocks, which is
    # true for now but will be broken if we want to give different block_size
    # to different kv_cache_groups in the future.
    blocks: tuple[list[KVCacheBlock], ...]

    def __add__(self, other: "KVCacheBlocks") -> "KVCacheBlocks":
        """Adds two KVCacheBlocks instances."""
        merged = tuple(mine + theirs
                       for mine, theirs in zip(self.blocks, other.blocks))
        return KVCacheBlocks(merged)

    def get_block_ids(self) -> tuple[list[int], ...]:
        """
        Converts the KVCacheBlocks instance to block_ids.

        Returns:
            tuple[list[int], ...]: A tuple of lists where
            * the outer tuple corresponds to KV cache groups
            * each inner list contains the block_ids of the blocks in that group
        """
        return tuple([block.block_id for block in group]
                     for group in self.blocks)

    def get_unhashed_block_ids(self) -> list[int]:
        """Get block_ids of unhashed blocks from KVCacheBlocks instance."""
        assert len(self.blocks) == 1, "Only one group is supported"
        (group, ) = self.blocks
        return [b.block_id for b in group if b.block_hash is None]

    def new_empty(self) -> "KVCacheBlocks":
        """Creates a new KVCacheBlocks instance with no blocks."""
        return KVCacheBlocks(tuple([] for _ in self.blocks))
+
|
| 64 |
+
class KVCacheManager:
|
| 65 |
+
|
| 66 |
+
def __init__(
|
| 67 |
+
self,
|
| 68 |
+
kv_cache_config: KVCacheConfig,
|
| 69 |
+
max_model_len: int,
|
| 70 |
+
enable_caching: bool = True,
|
| 71 |
+
use_eagle: bool = False,
|
| 72 |
+
log_stats: bool = False,
|
| 73 |
+
enable_kv_cache_events: bool = False,
|
| 74 |
+
) -> None:
|
| 75 |
+
self.max_model_len = max_model_len
|
| 76 |
+
|
| 77 |
+
self.enable_caching = enable_caching
|
| 78 |
+
self.use_eagle = use_eagle
|
| 79 |
+
self.log_stats = log_stats
|
| 80 |
+
# FIXME: make prefix cache stats conditional on log_stats
|
| 81 |
+
self.prefix_cache_stats = PrefixCacheStats() if log_stats else None
|
| 82 |
+
|
| 83 |
+
self.block_size: Optional[int] = None
|
| 84 |
+
if self.enable_caching:
|
| 85 |
+
assert len(
|
| 86 |
+
set(g.kv_cache_spec.block_size
|
| 87 |
+
for g in kv_cache_config.kv_cache_groups)
|
| 88 |
+
) == 1, "Only one block size is supported for now"
|
| 89 |
+
self.block_size = kv_cache_config.kv_cache_groups[
|
| 90 |
+
0].kv_cache_spec.block_size
|
| 91 |
+
|
| 92 |
+
self.coordinator = get_kv_cache_coordinator(
|
| 93 |
+
kv_cache_config=kv_cache_config,
|
| 94 |
+
max_model_len=self.max_model_len,
|
| 95 |
+
use_eagle=self.use_eagle,
|
| 96 |
+
enable_caching=self.enable_caching,
|
| 97 |
+
enable_kv_cache_events=enable_kv_cache_events,
|
| 98 |
+
)
|
| 99 |
+
self.num_kv_cache_groups = len(kv_cache_config.kv_cache_groups)
|
| 100 |
+
self.block_pool = self.coordinator.block_pool
|
| 101 |
+
self.kv_cache_config = kv_cache_config
|
| 102 |
+
|
| 103 |
+
@property
|
| 104 |
+
def usage(self) -> float:
|
| 105 |
+
"""Get the KV cache usage.
|
| 106 |
+
|
| 107 |
+
Returns:
|
| 108 |
+
The KV cache usage (between 0.0 and 1.0).
|
| 109 |
+
"""
|
| 110 |
+
return self.block_pool.get_usage()
|
| 111 |
+
|
| 112 |
+
def make_prefix_cache_stats(self) -> Optional[PrefixCacheStats]:
|
| 113 |
+
"""Get (and reset) the prefix cache stats.
|
| 114 |
+
|
| 115 |
+
Returns:
|
| 116 |
+
The current prefix caching stats, or None if logging is disabled.
|
| 117 |
+
"""
|
| 118 |
+
if not self.log_stats:
|
| 119 |
+
return None
|
| 120 |
+
stats = self.prefix_cache_stats
|
| 121 |
+
self.prefix_cache_stats = PrefixCacheStats()
|
| 122 |
+
return stats
|
| 123 |
+
|
| 124 |
+
def get_computed_blocks(self,
|
| 125 |
+
request: Request) -> tuple[KVCacheBlocks, int]:
|
| 126 |
+
"""Get the computed (cached) blocks for the request.
|
| 127 |
+
Note that the computed blocks must be full.
|
| 128 |
+
|
| 129 |
+
Args:
|
| 130 |
+
request: The request to get the computed blocks.
|
| 131 |
+
|
| 132 |
+
Returns:
|
| 133 |
+
A tuple containing:
|
| 134 |
+
- A list of blocks that are computed for the request.
|
| 135 |
+
- The number of computed tokens.
|
| 136 |
+
"""
|
| 137 |
+
# Prefix caching is disabled or
|
| 138 |
+
# When the request requires prompt logprobs, we skip prefix caching.
|
| 139 |
+
if (not self.enable_caching
|
| 140 |
+
or (request.sampling_params is not None
|
| 141 |
+
and request.sampling_params.prompt_logprobs is not None)):
|
| 142 |
+
return self.create_empty_block_list(), 0
|
| 143 |
+
|
| 144 |
+
# NOTE: When all tokens hit the cache, we must recompute the last token
|
| 145 |
+
# to obtain logits. Thus, set max_cache_hit_length to prompt_length - 1.
|
| 146 |
+
# This can trigger recomputation of an entire block, rather than just
|
| 147 |
+
# the single last token, because allocate_slots() requires
|
| 148 |
+
# num_computed_tokens to be block-size aligned. Removing this limitation
|
| 149 |
+
# could slightly improve performance in the future.
|
| 150 |
+
max_cache_hit_length = request.num_tokens - 1
|
| 151 |
+
computed_blocks, num_new_computed_tokens = (
|
| 152 |
+
self.coordinator.find_longest_cache_hit(request.block_hashes,
|
| 153 |
+
max_cache_hit_length))
|
| 154 |
+
|
| 155 |
+
if self.log_stats:
|
| 156 |
+
assert self.prefix_cache_stats is not None
|
| 157 |
+
self.prefix_cache_stats.requests += 1
|
| 158 |
+
self.prefix_cache_stats.queries += request.num_tokens
|
| 159 |
+
self.prefix_cache_stats.hits += num_new_computed_tokens
|
| 160 |
+
|
| 161 |
+
return KVCacheBlocks(computed_blocks), num_new_computed_tokens
|
| 162 |
+
|
| 163 |
+
def allocate_slots(
|
| 164 |
+
self,
|
| 165 |
+
request: Request,
|
| 166 |
+
num_new_tokens: int,
|
| 167 |
+
num_new_computed_tokens: int = 0,
|
| 168 |
+
new_computed_blocks: Optional[KVCacheBlocks] = None,
|
| 169 |
+
num_lookahead_tokens: int = 0,
|
| 170 |
+
delay_cache_blocks: bool = False,
|
| 171 |
+
) -> Optional[KVCacheBlocks]:
|
| 172 |
+
"""Add slots for a request with new tokens to append.
|
| 173 |
+
|
| 174 |
+
Args:
|
| 175 |
+
request: The request to allocate slots.
|
| 176 |
+
num_new_tokens: The number of tokens to allocate, including external
|
| 177 |
+
tokens. Note that this does not include tokens that have
|
| 178 |
+
already been computed locally (i.e. new_computed_blocks).
|
| 179 |
+
num_new_computed_tokens: The number of new computed tokens just
|
| 180 |
+
hitting the prefix caching, excluding external tokens.
|
| 181 |
+
new_computed_blocks: The cached blocks for the above new computed
|
| 182 |
+
tokens.
|
| 183 |
+
num_lookahead_tokens: The number of speculative tokens to allocate.
|
| 184 |
+
This is used by spec decode proposers with kv-cache such
|
| 185 |
+
as eagle.
|
| 186 |
+
delay_cache_blocks: Whether to skip caching the blocks. This is
|
| 187 |
+
used by P/D when allocating blocks used in a KV transfer
|
| 188 |
+
which will complete in a future step.
|
| 189 |
+
|
| 190 |
+
Blocks layout:
|
| 191 |
+
```
|
| 192 |
+
-----------------------------------------------------------------------
|
| 193 |
+
| < computed > | < new computed > | < new > | < pre-allocated > |
|
| 194 |
+
-----------------------------------------------------------------------
|
| 195 |
+
| < required > |
|
| 196 |
+
--------------------------------------------------
|
| 197 |
+
| < full > |
|
| 198 |
+
------------------------------------------------
|
| 199 |
+
| <new full> |
|
| 200 |
+
--------------
|
| 201 |
+
```
|
| 202 |
+
The following *_blocks are illustrated in this layout.
|
| 203 |
+
|
| 204 |
+
Returns:
|
| 205 |
+
A list of new allocated blocks.
|
| 206 |
+
"""
|
| 207 |
+
if num_new_tokens == 0:
|
| 208 |
+
raise ValueError("num_new_tokens must be greater than 0")
|
| 209 |
+
|
| 210 |
+
if new_computed_blocks is not None:
|
| 211 |
+
new_computed_block_list = new_computed_blocks.blocks
|
| 212 |
+
else:
|
| 213 |
+
new_computed_block_list = tuple(
|
| 214 |
+
[] for _ in range(len(self.kv_cache_config.kv_cache_groups)))
|
| 215 |
+
|
| 216 |
+
# Free the blocks that are skipped during the attention computation
|
| 217 |
+
# (e.g., tokens outside the sliding window).
|
| 218 |
+
# We can do this even if we cannot schedule this request due to
|
| 219 |
+
# insufficient free blocks.
|
| 220 |
+
# Should call this function before allocating new blocks to reduce
|
| 221 |
+
# the number of evicted blocks.
|
| 222 |
+
self.coordinator.remove_skipped_blocks(request.request_id,
|
| 223 |
+
request.num_computed_tokens)
|
| 224 |
+
|
| 225 |
+
# The number of computed tokens is the number of computed tokens plus
|
| 226 |
+
# the new prefix caching hits
|
| 227 |
+
num_computed_tokens = (request.num_computed_tokens +
|
| 228 |
+
num_new_computed_tokens)
|
| 229 |
+
num_tokens_need_slot = min(
|
| 230 |
+
num_computed_tokens + num_new_tokens + num_lookahead_tokens,
|
| 231 |
+
self.max_model_len)
|
| 232 |
+
|
| 233 |
+
num_blocks_to_allocate = self.coordinator.get_num_blocks_to_allocate(
|
| 234 |
+
request_id=request.request_id,
|
| 235 |
+
num_tokens=num_tokens_need_slot,
|
| 236 |
+
new_computed_blocks=new_computed_block_list,
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
if num_blocks_to_allocate > self.block_pool.get_num_free_blocks():
|
| 240 |
+
# Cannot allocate new blocks
|
| 241 |
+
return None
|
| 242 |
+
|
| 243 |
+
# Touch the computed blocks to make sure they won't be evicted.
|
| 244 |
+
if self.enable_caching:
|
| 245 |
+
self.block_pool.touch(new_computed_block_list)
|
| 246 |
+
else:
|
| 247 |
+
assert not any(new_computed_block_list), (
|
| 248 |
+
"Computed blocks should be empty when "
|
| 249 |
+
"prefix caching is disabled")
|
| 250 |
+
|
| 251 |
+
# Append the new computed blocks to the request blocks until now to
|
| 252 |
+
# avoid the case where the new blocks cannot be allocated.
|
| 253 |
+
self.coordinator.save_new_computed_blocks(request.request_id,
|
| 254 |
+
new_computed_block_list)
|
| 255 |
+
|
| 256 |
+
new_blocks = self.coordinator.allocate_new_blocks(
|
| 257 |
+
request.request_id, num_tokens_need_slot)
|
| 258 |
+
|
| 259 |
+
# P/D: delay caching blocks if we have to recv from
|
| 260 |
+
# remote. Update state for locally cached blocks.
|
| 261 |
+
if not self.enable_caching or delay_cache_blocks:
|
| 262 |
+
return KVCacheBlocks(new_blocks)
|
| 263 |
+
|
| 264 |
+
# NOTE(woosuk): We want to commit (cache) up to num_computed_tokens +
|
| 265 |
+
# num_new_tokens, but must exclude "non-committable" tokens (e.g.,
|
| 266 |
+
# draft tokens that could be rejected). Therefore, we cap the number
|
| 267 |
+
# at `request.num_tokens`, ensuring only "finalized" tokens are cached.
|
| 268 |
+
num_tokens_to_cache = min(num_computed_tokens + num_new_tokens,
|
| 269 |
+
request.num_tokens)
|
| 270 |
+
self.coordinator.cache_blocks(request, num_tokens_to_cache)
|
| 271 |
+
|
| 272 |
+
return KVCacheBlocks(new_blocks)
|
| 273 |
+
|
| 274 |
+
    def free(self, request: Request) -> None:
        """Free the blocks allocated for the request.

        We free the blocks in reverse order so that the tail blocks are evicted
        first when caching is enabled.

        Args:
            request: The request to free the blocks.
        """
        self.coordinator.free(request.request_id)
|
| 283 |
+
|
| 284 |
+
def reset_prefix_cache(self) -> bool:
|
| 285 |
+
"""Reset prefix cache. This function may be used in RLHF
|
| 286 |
+
flows to invalidate prefix caching after the weights are updated,
|
| 287 |
+
or used for resetting prefix caching status for benchmarking.
|
| 288 |
+
|
| 289 |
+
Returns:
|
| 290 |
+
bool: True if the prefix cache is successfully reset,
|
| 291 |
+
False otherwise.
|
| 292 |
+
"""
|
| 293 |
+
if not self.block_pool.reset_prefix_cache():
|
| 294 |
+
return False
|
| 295 |
+
if self.log_stats:
|
| 296 |
+
assert self.prefix_cache_stats is not None
|
| 297 |
+
self.prefix_cache_stats.reset = True
|
| 298 |
+
return True
|
| 299 |
+
|
| 300 |
+
def get_num_common_prefix_blocks(
|
| 301 |
+
self,
|
| 302 |
+
request: Request,
|
| 303 |
+
num_running_requests: int,
|
| 304 |
+
) -> list[int]:
|
| 305 |
+
"""Calculate the number of common prefix blocks shared by all requests
|
| 306 |
+
in the RUNNING state for each kv cache group.
|
| 307 |
+
|
| 308 |
+
The function determines this by selecting any request and iterating
|
| 309 |
+
through its blocks. A block is considered a common prefix block if its
|
| 310 |
+
`ref_cnt` equals the total number of requests in the RUNNING state.
|
| 311 |
+
|
| 312 |
+
NOTE(woosuk): The number of requests in the RUNNING state is **greater
|
| 313 |
+
than or equal to** the number of requests scheduled in the current step.
|
| 314 |
+
This is because the RUNNING state only indicates that:
|
| 315 |
+
1. The request has not yet finished, and
|
| 316 |
+
2. The request holds its blocks unfreed.
|
| 317 |
+
|
| 318 |
+
While all scheduled requests must be in the RUNNING state, the inverse
|
| 319 |
+
is not necessarily true. There may be RUNNING requests that are not
|
| 320 |
+
scheduled in the current step.
|
| 321 |
+
|
| 322 |
+
This can result in an edge case where the number of common prefix blocks
|
| 323 |
+
is 0, even though all scheduled requests share a common prefix. This
|
| 324 |
+
occurs because there may be unscheduled RUNNING requests that do not
|
| 325 |
+
share the common prefix. Currently, this case cannot be easily detected,
|
| 326 |
+
so the function returns 0 in such cases.
|
| 327 |
+
|
| 328 |
+
Args:
|
| 329 |
+
request: Any request in the RUNNING state, used to identify the
|
| 330 |
+
common prefix blocks.
|
| 331 |
+
num_running_requests: The total number of requests in the RUNNING
|
| 332 |
+
state. This can be different from the number of scheduled
|
| 333 |
+
requests in the current step.
|
| 334 |
+
|
| 335 |
+
Returns:
|
| 336 |
+
list[int]: The number of common prefix blocks for each kv cache
|
| 337 |
+
group.
|
| 338 |
+
"""
|
| 339 |
+
assert request.status == RequestStatus.RUNNING
|
| 340 |
+
return self.coordinator.get_num_common_prefix_blocks(
|
| 341 |
+
request.request_id, num_running_requests)
|
| 342 |
+
|
| 343 |
+
def take_events(self) -> list[KVCacheEvent]:
|
| 344 |
+
"""Take the KV cache events from the block pool.
|
| 345 |
+
|
| 346 |
+
Returns:
|
| 347 |
+
A list of KV cache events.
|
| 348 |
+
"""
|
| 349 |
+
return self.block_pool.take_events()
|
| 350 |
+
|
| 351 |
+
def get_block_ids(self, request_id: str) -> tuple[list[int], ...]:
|
| 352 |
+
"""Get the block ids of a request."""
|
| 353 |
+
return KVCacheBlocks(
|
| 354 |
+
self.coordinator.get_blocks(request_id)).get_block_ids()
|
| 355 |
+
|
| 356 |
+
def cache_blocks(self, request: Request, num_computed_tokens: int) -> None:
|
| 357 |
+
"""Cache the blocks for the request, if enabled."""
|
| 358 |
+
if self.enable_caching:
|
| 359 |
+
self.coordinator.cache_blocks(request, num_computed_tokens)
|
| 360 |
+
|
| 361 |
+
def create_empty_block_list(self) -> KVCacheBlocks:
|
| 362 |
+
"""Creates a new KVCacheBlocks instance with no blocks."""
|
| 363 |
+
return KVCacheBlocks(tuple([]
|
| 364 |
+
for _ in range(self.num_kv_cache_groups)))
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/kv_cache_utils.py
ADDED
|
@@ -0,0 +1,1154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
"""KV-Cache Utilities."""
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
from collections import defaultdict, deque
|
| 7 |
+
from collections.abc import Iterable, Sequence
|
| 8 |
+
from dataclasses import astuple, dataclass
|
| 9 |
+
from typing import Any, Callable, NamedTuple, Optional
|
| 10 |
+
|
| 11 |
+
from vllm.config import VllmConfig
|
| 12 |
+
from vllm.logger import init_logger
|
| 13 |
+
from vllm.utils import GiB_bytes, cdiv, sha256_cbor_64bit
|
| 14 |
+
from vllm.v1.kv_cache_interface import (ChunkedLocalAttentionSpec,
|
| 15 |
+
FullAttentionSpec, KVCacheConfig,
|
| 16 |
+
KVCacheGroupSpec, KVCacheSpec,
|
| 17 |
+
KVCacheTensor, SlidingWindowSpec)
|
| 18 |
+
from vllm.v1.metrics.stats import PrefixCacheStats
|
| 19 |
+
from vllm.v1.request import Request
|
| 20 |
+
|
| 21 |
+
logger = init_logger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class BlockHash(NamedTuple):
    """Hash value of a block (int), the token IDs in the block, and extra keys.

    We keep a tuple of token IDs and extra keys to reduce the likelihood of
    hash collisions when the hash value is the same. By using SHA256 however,
    hash collisions are practically impossible.
    """
    # Hash value of the block in an integer.
    hash_value: int
    # Token IDs in the block.
    token_ids: tuple[int, ...]
    # Extra keys for the block (e.g., mm hashes, LoRA ID, cache salt);
    # None when the request needs no extra keys.
    extra_keys: Optional[Any] = None
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class BlockHashWithGroupId(NamedTuple):
    """A content block hash paired with the KV cache group it belongs to."""
    # The hash value for the contents (e.g., token_ids) of a block without group
    # ID. The value is the same for blocks representing the same tokens but for
    # different groups.
    block_hash: BlockHash
    # The KV cache group ID.
    group_id: int

    def get_hash_value(self) -> int:
        """Return the raw integer hash of the underlying BlockHash."""
        return self.block_hash.hash_value
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# The hash seed for the first block of any prefix block sequence.
|
| 51 |
+
#
|
| 52 |
+
# We use a random value to avoid hash collisions or PYTHONHASHSEED environment
|
| 53 |
+
# variable if set such that processes can share the seed if needed.
|
| 54 |
+
# This aligns with the behavior of Python's hash() function, which also uses
|
| 55 |
+
# a random seed if PYTHONHASHSEED is not set.
|
| 56 |
+
#
|
| 57 |
+
# The function `init_none_hash` initializes this variable globally.
|
| 58 |
+
NONE_HASH: int
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def init_none_hash(hash_fn: Callable):
    """Initialize the module-level NONE_HASH seed.

    NONE_HASH seeds the hash of the first block of any prefix block
    sequence. When PYTHONHASHSEED is set, the seed is derived from it via
    `hash_fn` so cooperating processes can share it; otherwise a
    cryptographically random value is used.

    Args:
        hash_fn: The block hash function in use (e.g. sha256_cbor_64bit).
    """
    global NONE_HASH

    hash_seed = os.getenv("PYTHONHASHSEED")
    if hash_seed is None and hash_fn is sha256_cbor_64bit:
        # Fix: the original message concatenated "...hash function." and
        # "Consider..." without a separating space.
        logger.warning(
            "PYTHONHASHSEED is not set. This will lead to non-reproducible "
            "block-hashes when using sha256_cbor_64bit as the hash function. "
            "Consider setting PYTHONHASHSEED to a fixed value for "
            "reproducibility.")

    NONE_HASH = (int.from_bytes(os.urandom(32), byteorder="big")
                 if hash_seed is None else hash_fn(hash_seed))
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class PrefixCachingMetrics:
    """Metrics for prefix caching with a hit rate of the max recent N requests.

    Args:
        max_recent_requests: The number of the max recent requests to
            aggregate. Defaults to 1000.
    """

    def __init__(self, max_recent_requests: int = 1000):
        self.max_recent_requests = max_recent_requests
        # Running totals across the sliding window.
        self.aggregated_requests = 0
        self.aggregated_query_total = 0
        self.aggregated_query_hit = 0
        # Sliding window of (requests, queries, hits) tuples, oldest first.
        self.query_queue: deque[tuple[int, int, int]] = deque()

    def observe(self, stats: PrefixCacheStats):
        """Observe the prefix caching for a set of requests.

        Called with information gathered while new requests are being
        scheduled and are looking for computed blocks. When the aggregated
        request count exceeds `max_recent_requests`, the oldest batch of
        stats is dropped from the window.

        Args:
            stats: The prefix cache stats.
        """
        # A reset_prefix_cache happened before this update: clear the
        # window before folding in the current stats.
        if stats.reset:
            self.reset()

        self.query_queue.append((stats.requests, stats.queries, stats.hits))
        self.aggregated_requests += stats.requests
        self.aggregated_query_total += stats.queries
        self.aggregated_query_hit += stats.hits

        # Evict the oldest batch once the window grows past the cap.
        if self.aggregated_requests > self.max_recent_requests:
            evicted_requests, evicted_queries, evicted_hits = (
                self.query_queue.popleft())
            self.aggregated_requests -= evicted_requests
            self.aggregated_query_total -= evicted_queries
            self.aggregated_query_hit -= evicted_hits

    def reset(self):
        """Reset the metrics."""
        self.aggregated_requests = 0
        self.aggregated_query_total = 0
        self.aggregated_query_hit = 0
        self.query_queue.clear()

    @property
    def hit_rate(self) -> float:
        """Calculate the hit rate for the past N requests."""
        total_queries = self.aggregated_query_total
        if not total_queries:
            return 0.0
        return self.aggregated_query_hit / total_queries
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
@dataclass
class KVCacheBlock:
    """KV-cache block metadata."""
    # Block ID, ranging from 0 to num_gpu_blocks - 1.
    block_id: int
    # Reference count: number of requests currently using this block.
    ref_cnt: int = 0
    # The hash of the block composed of (block hash, tuple of token IDs).
    # It is only available when the block is full.
    _block_hash: Optional[BlockHashWithGroupId] = None

    # Used to construct a doubly linked list for free blocks.
    # These two attributes should only be manipulated by FreeKVCacheBlockQueue.
    prev_free_block: Optional["KVCacheBlock"] = None
    next_free_block: Optional["KVCacheBlock"] = None

    # Whether the block is a null block that should never be cached.
    is_null: bool = False

    @property
    def block_hash(self) -> Optional[BlockHashWithGroupId]:
        """The block's content hash, or None if the block is not yet full."""
        return self._block_hash

    @block_hash.setter
    def block_hash(self, block_hash: BlockHashWithGroupId):
        """Set the content hash. A hash may only be set on a block whose
        hash is unset (cleared via reset_hash() on eviction)."""
        assert self.block_hash is None, (
            "The block already has a hash. This should not happen.")
        self._block_hash = block_hash

    def reset_hash(self):
        """Reset the block hash when the block is evicted."""
        self._block_hash = None

    def __repr__(self) -> str:
        # Use block_id instead of KVCacheBlock object to avoid calling __repr__
        # on KVCacheBlock object recursively.
        prev_block_id = (self.prev_free_block.block_id
                         if self.prev_free_block else None)
        next_block_id = (self.next_free_block.block_id
                         if self.next_free_block else None)
        return (f"KVCacheBlock(block_id={self.block_id}, "
                f"ref_cnt={self.ref_cnt}, "
                f"_block_hash={self._block_hash}, "
                f"prev_free_block={prev_block_id}, "
                f"next_free_block={next_block_id})")
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class FreeKVCacheBlockQueue:
    """This class organizes a list of KVCacheBlock objects to a doubly linked
    list of free blocks. We implement this class instead of using Python
    builtin deque to support removing a block in the middle of the queue
    in O(1) time. To close the performance gap to the builtin deque which is
    implemented in C++, this class does not allocate any Python objects when
    manipulating the linked list. Instead, this class manipulates the
    prev_free_block and next_free_block attributes of the given blocks.

    The queue is ordered by block ID in the beginning. When a block is allocated
    and then freed, it will be appended back with the eviction order:
    1. The least recent used block is at the front (LRU).
    2. If two blocks have the same last accessed time (allocated by the
       same sequence), the one with more hash tokens (the tail of a block
       chain) is at the front.
    Note that we maintain this order by reversing the block order when free
    blocks of a request. This operation is outside of this class.

    Args:
        blocks: A list of KVCacheBlock objects.
    """

    def __init__(self, blocks: list[KVCacheBlock]) -> None:
        # Number of blocks currently in the free list.
        self.num_free_blocks = len(blocks)

        # Initialize doubly links of consecutive blocks
        for i in range(self.num_free_blocks):
            if i > 0:
                blocks[i].prev_free_block = blocks[i - 1]
            if i < self.num_free_blocks - 1:
                blocks[i].next_free_block = blocks[i + 1]

        # Create a fake head and a tail block for the doubly linked list to
        # reduce branching in the code
        #
        # The implementation guarantees that the fake head and tail are
        # NEVER popped, so we can safely assume each real block in the
        # queue has valid prev and next blocks.
        self.fake_free_list_head = KVCacheBlock(block_id=-1)
        self.fake_free_list_tail = KVCacheBlock(block_id=-1)
        if self.num_free_blocks > 0:
            # Connect fake_head and fake_tail to the first and last block
            # respectively.
            self.fake_free_list_head.next_free_block = blocks[0]
            blocks[0].prev_free_block = self.fake_free_list_head
            self.fake_free_list_tail.prev_free_block = blocks[-1]
            blocks[-1].next_free_block = self.fake_free_list_tail
        else:
            # For empty list, simply connect the fake head and tail.
            self.fake_free_list_head.next_free_block = self.fake_free_list_tail
            self.fake_free_list_tail.prev_free_block = self.fake_free_list_head

    def popleft(self) -> KVCacheBlock:
        """Pop the first free block and reduce num_free_blocks by 1.

        Returns:
            The first free block.

        Raises:
            ValueError: If the free list is empty.
        """
        if (self.fake_free_list_head.next_free_block
                is self.fake_free_list_tail
                or self.fake_free_list_head.next_free_block is None):
            assert self.num_free_blocks == 0, (
                f"num_free_blocks ({self.num_free_blocks}) is out of sync "
                "with the free list.")
            raise ValueError("No free blocks available")

        first_block: KVCacheBlock = self.fake_free_list_head.next_free_block

        if first_block.next_free_block is None:
            # This should not happen if the block is from the free list.
            # It indicates a bug in the caller's logic.
            raise RuntimeError("Invalid block found in popleft() "
                               "which doesn't have a valid next_free_block")

        # Connect fake_head and the next block of first_block (i.e. second block
        # or fake tail).
        self.fake_free_list_head.next_free_block = first_block.next_free_block
        first_block.next_free_block.prev_free_block = self.fake_free_list_head

        # Remove the block from the linked list.
        first_block.prev_free_block = first_block.next_free_block = None

        self.num_free_blocks -= 1
        return first_block

    def popleft_n(self, n: int) -> list[KVCacheBlock]:
        """Pop the first n free blocks and reduce num_free_blocks by n.

        Args:
            n: The number of blocks to pop. Must not exceed num_free_blocks.

        Returns:
            A list of n free blocks.
        """
        if n == 0:
            return []
        assert self.num_free_blocks >= n
        self.num_free_blocks -= n

        curr_block = self.fake_free_list_head.next_free_block
        # Pop n blocks from the head of the list
        ret = []
        for _ in range(n):
            assert curr_block is not None
            ret.append(curr_block)
            last_block = curr_block
            curr_block = curr_block.next_free_block
            # Reset prev_free_block and next_free_block of all popped blocks
            last_block.prev_free_block = None
            last_block.next_free_block = None

        if curr_block is not None:
            # The queue is not empty, connect the fake head to
            # the new first block.
            self.fake_free_list_head.next_free_block = curr_block
            curr_block.prev_free_block = self.fake_free_list_head
        return ret

    def remove(self, block: KVCacheBlock) -> None:
        """Remove a block in the free list and reduce num_free_blocks by 1.

        Args:
            block: The block to remove.

        Raises:
            RuntimeError: If the block is not linked into the free list.
        """
        if block.prev_free_block is None or block.next_free_block is None:
            # This should not happen if the block is from the free list.
            # It indicates a bug in the caller's logic.
            raise RuntimeError(f"remove() called on an invalid block: {block}")

        # Link the previous block to the next block.
        block.prev_free_block.next_free_block = block.next_free_block
        # Link the next block to the previous block.
        block.next_free_block.prev_free_block = block.prev_free_block

        # Remove the block from the linked list.
        block.prev_free_block = block.next_free_block = None
        self.num_free_blocks -= 1

    def append(self, block: KVCacheBlock) -> None:
        """Put a block back into the free list and increase
        num_free_blocks by 1.

        Args:
            block: The block to append.
        """
        if self.fake_free_list_tail.prev_free_block is None:
            raise RuntimeError(
                "prev_free_block of fake_free_list_tail should always exist")
        last_block: KVCacheBlock = self.fake_free_list_tail.prev_free_block

        # Connect the new block after the last block.
        last_block.next_free_block = block
        block.prev_free_block = last_block

        # Connect the fake tail after the new block.
        block.next_free_block = self.fake_free_list_tail
        self.fake_free_list_tail.prev_free_block = block

        self.num_free_blocks += 1

    def append_n(self, blocks: list[KVCacheBlock]) -> None:
        """Put a list of blocks back into the free list

        Args:
            blocks: The blocks to append, in eviction order (front first).
        """
        if len(blocks) == 0:
            return
        self.num_free_blocks += len(blocks)

        last_block = self.fake_free_list_tail.prev_free_block
        assert last_block is not None, (
            "prev_free_block of fake_free_list_tail should always exist")
        # Add inter-connections between consecutive blocks
        for block in blocks:
            block.prev_free_block = last_block
            last_block.next_free_block = block
            last_block = block

        # Connect the last block of <blocks> to the fake tail
        last_block.next_free_block = self.fake_free_list_tail
        self.fake_free_list_tail.prev_free_block = last_block

    def get_all_free_blocks(self) -> list[KVCacheBlock]:
        """Get all free blocks in the free list. Mainly used for testing.

        Returns:
            A list of free blocks.
        """
        ret = []
        if self.fake_free_list_head.next_free_block is None:
            raise RuntimeError(
                "next_free_block of fake_free_list_head should always exist")
        # Start from the first block
        curr_block: KVCacheBlock = self.fake_free_list_head.next_free_block
        # As long as next_free_block is available, we haven't reached to
        # the fake tail yet.
        while curr_block.next_free_block is not None:
            ret.append(curr_block)
            curr_block = curr_block.next_free_block
        return ret
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def need_extra_keys(request: Request) -> bool:
    """Check whether the blocks allocated to this request need extra hash keys.

    Args:
        request (Request): The request.

    Returns:
        bool: Whether blocks allocated to this request need extra hash keys.
    """
    # Extra keys are required when any of the following holds:
    # - multimodal requests must include the MM hash,
    # - LoRA requests must include the LoRA ID,
    # - requests with a provided cache salt must include the salt.
    has_mm_inputs = bool(request.mm_hashes)
    has_lora = request.lora_request is not None
    has_cache_salt = request.cache_salt is not None
    return has_mm_inputs or has_lora or has_cache_salt
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
def _gen_mm_extra_hash_keys(request: Request, start_token_idx: int,
                            end_token_idx: int,
                            start_mm_idx: int) -> tuple[list[Any], int]:
    """Generate extra keys related to MultiModal request for block hash
    computation. For multi-modal inputs, the extra keys are
    (mm_hash, start_offset) that indicate a mm input contained in the
    block and its starting offset in the block tokens.

    Args:
        request: The request object.
        start_token_idx: The start token index of the block.
        end_token_idx: The end token index of the block.
        start_mm_idx: The start multi-modal index of the block.

    Returns:
        A tuple of extra keys and the next multi-modal index.

    Raises:
        ValueError: If the number of mm positions and mm hashes differ.
    """
    extra_keys: list[Any] = []

    mm_positions, mm_hashes = request.mm_positions, request.mm_hashes
    if not mm_positions:
        # No multimodal inputs at all: nothing to add.
        return extra_keys, start_mm_idx

    if mm_positions and len(mm_positions) != len(mm_hashes):
        raise ValueError(
            "The number of multi-modal positions and hashes must match. This "
            "is likely because you did not enable MM hashing. "
            "Please set `mm_processor_cache_gb > 0`.")

    # Note that we assume mm_positions is sorted by offset.
    # We do not need to check all mm inputs if the start token index is out of
    # range. This usually happens in the late prefill phase and decoding phase.
    if mm_positions[-1].offset + mm_positions[-1].length < start_token_idx:
        return extra_keys, start_mm_idx

    # Support start_mm_idx == -1 to indicate the last mm input.
    if start_mm_idx < 0:
        assert -start_mm_idx <= len(mm_positions)
        start_mm_idx = len(mm_positions) + start_mm_idx

    # Walk mm inputs starting from start_mm_idx, collecting the hash of
    # every mm input that overlaps the [start_token_idx, end_token_idx)
    # token range of this block.
    curr_mm_idx = start_mm_idx
    while mm_positions and curr_mm_idx < len(mm_positions):
        assert mm_hashes[curr_mm_idx] is not None
        offset = mm_positions[curr_mm_idx].offset
        length = mm_positions[curr_mm_idx].length
        if end_token_idx > offset:
            if start_token_idx > offset + length:
                # This block has passed the current mm input.
                curr_mm_idx += 1
                continue

            # The block contains the current mm input.
            extra_keys.append(mm_hashes[curr_mm_idx])

            if end_token_idx >= offset + length:
                # If this block contains the end of the current mm input,
                # move to the next mm input as this block may also contain
                # the next mm input.
                curr_mm_idx += 1
            else:
                # Otherwise this block is done with mm inputs.
                break
        else:
            # This block has not reached the current mm input.
            break
    return extra_keys, curr_mm_idx
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
def _gen_lora_extra_hash_keys(request: Request) -> list[int]:
    """Generate extra keys related to LoRA for block hash computation.

    Args:
        request: The request object.

    Returns:
        A single-element list holding the request's LoRA integer id when
        the request carries a LoRA adapter; an empty list otherwise.
    """
    lora_req = request.lora_request
    return [lora_req.lora_int_id] if lora_req else []
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def generate_block_hash_extra_keys(
        request: Request, start_token_idx: int, end_token_idx: int,
        start_mm_idx: int) -> tuple[Optional[tuple[Any, ...]], int]:
    """Generate extra keys for the block hash. The extra keys can come from
    the multi-modal inputs and request specific metadata (e.g., LoRA ID).

    Args:
        request: The request object.
        start_token_idx: The start token index of the block.
        end_token_idx: The end token index of the block.
        start_mm_idx: The start multi-modal index of the block.

    Returns:
        A tuple of extra keys (or None if there are none) and the next
        multi-modal index.
    """
    mm_keys, new_start_mm_idx = _gen_mm_extra_hash_keys(
        request, start_token_idx, end_token_idx, start_mm_idx)
    lora_keys = _gen_lora_extra_hash_keys(request)
    # The cache salt only contributes to the very first block of a request.
    salt_keys: list[str] = []
    if start_token_idx == 0 and request.cache_salt:
        salt_keys.append(request.cache_salt)

    # Key order matters for hashing: LoRA, then multi-modal, then salt.
    combined: list[Any] = [*lora_keys, *mm_keys, *salt_keys]
    return (tuple(combined) if combined else None), new_start_mm_idx
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
def hash_block_tokens(
        hash_function: Callable,
        parent_block_hash: Optional[int],
        curr_block_token_ids: Sequence[int],
        extra_keys: Optional[tuple[Any, ...]] = None) -> BlockHash:
    """Computes a hash value corresponding to the contents of a block and
    the contents of the preceding block(s). The hash value is used for
    prefix caching.

    Args:
        hash_function: The hash function applied to the composed key.
        parent_block_hash: The hash of the parent block. None
            if this is the first block.
        curr_block_token_ids: A list of token ids in the current
            block. The current block is assumed to be full.
        extra_keys: Extra keys for the block.

    Returns:
        The hash value of the block and the token ids in the block.
        The entire tuple is used as the hash key of the block.
    """
    # Fall back to the global NONE_HASH sentinel for the first block.
    # (A falsy parent hash value is treated the same as None here.)
    effective_parent = parent_block_hash if parent_block_hash else NONE_HASH

    token_ids = tuple(curr_block_token_ids)
    hash_value = hash_function((effective_parent, token_ids, extra_keys))
    return BlockHash(hash_value, token_ids, extra_keys)
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def get_request_block_hasher(
        block_size: int,
        caching_hash_fn: Callable[[Any],
                                  int]) -> Callable[[Request], list[BlockHash]]:
    """
    Returns a function which computes the list of un-computed block hashes
    of a request.

    Each request holds a list of its block hashes (request.block_hashes).
    When a request is created, it calls the below function to compute
    the hashes of all full blocks of the request's initial tokens.
    The hashes are then stored in request.block_hashes.
    Later, whenever new tokens are appended to the request, it calls
    the below function again to compute any new full blocks of tokens.
    The returned new hashes are appended to request.block_hashes.
    """

    def request_block_hasher(request: Request) -> list[BlockHash]:
        # Resume right after the last already-hashed full block.
        start_token_idx = len(request.block_hashes) * block_size
        num_tokens = request.num_tokens

        curr_mm_idx = 0
        if start_token_idx > 0:
            # Set curr_mm_idx = -1 to indicate the last mm input.
            # Note that since we reach to this branch only when the block is
            # completed with generated tokens, we only need to consider the
            # last mm input.
            curr_mm_idx = -1

        # Chain each new hash onto the most recent existing block hash so a
        # block's hash covers the entire prefix, not just its own tokens.
        prev_block_hash_value = request.block_hashes[-1].hash_value \
            if request.block_hashes else None
        new_block_hashes: list[BlockHash] = []
        while True:
            end_token_idx = start_token_idx + block_size
            if end_token_idx > num_tokens:
                # We only hash full blocks
                break

            # MM and LoRA requests need extra keys for block-hash computation.
            extra_keys, curr_mm_idx = generate_block_hash_extra_keys(
                request, start_token_idx, end_token_idx, curr_mm_idx)

            # Compute the hash of the current block
            block_tokens = request.all_token_ids[start_token_idx:end_token_idx]
            block_hash = hash_block_tokens(caching_hash_fn,
                                           prev_block_hash_value, block_tokens,
                                           extra_keys)

            new_block_hashes.append(block_hash)
            start_token_idx += block_size
            prev_block_hash_value = block_hash.hash_value

        return new_block_hashes

    return request_block_hasher
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
def max_memory_usage_bytes(vllm_config: VllmConfig,
                           kv_cache_specs: Iterable[KVCacheSpec]) -> int:
    """
    Get the maximum memory usage in bytes for the given KV cache specs.
    """
    total = 0
    for spec in kv_cache_specs:
        total += spec.max_memory_usage_bytes(vllm_config)
    return total
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
def estimate_max_model_len(vllm_config: VllmConfig,
                           kv_cache_spec: dict[str, KVCacheSpec],
                           available_memory: int) -> int:
    """
    Estimates the maximum model length that can fit in the available memory
    using binary search.

    Args:
        vllm_config: The global VllmConfig
        kv_cache_spec: The kv cache spec of each attention layer in the model
        available_memory: Memory available for KV cache in bytes.

    Returns:
        The estimated maximum model length that can fit in the available
        memory, or 0 if even a model length of 1 does not fit.
    """
    # BUGFIX: fits_in_memory() must temporarily overwrite max_model_len
    # because the per-spec memory estimate reads it from the config, but the
    # original code returned with the config still set to an arbitrary
    # binary-search probe value. Save it here and restore it on exit.
    original_max_model_len = vllm_config.model_config.max_model_len

    # Define a function to check if a given model length fits in memory
    def fits_in_memory(model_len: int) -> bool:
        # Modify the max_model_len for this calculation
        vllm_config.model_config.max_model_len = model_len
        # Calculate memory needed for the given model length
        memory_needed = max_memory_usage_bytes(vllm_config,
                                               kv_cache_spec.values())
        return memory_needed <= available_memory

    try:
        # Binary search for the maximum model length
        left, right = 1, original_max_model_len

        # If even the smallest model length doesn't fit, return 0
        if not fits_in_memory(left):
            return 0

        # Binary search for the maximum model length that fits
        result = 1
        while left <= right:
            mid = (left + right) // 2
            if fits_in_memory(mid):
                result = mid
                left = mid + 1
            else:
                right = mid - 1
        return result
    finally:
        # Undo the probe writes performed by fits_in_memory().
        vllm_config.model_config.max_model_len = original_max_model_len
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
def check_enough_kv_cache_memory(vllm_config: VllmConfig,
                                 kv_cache_spec: dict[str, KVCacheSpec],
                                 available_memory: int):
    """
    Checks whether `available_memory` is enough for the KV cache to hold at
    least one request with the model's max_model_len.

    Args:
        vllm_config: The global VllmConfig
        kv_cache_spec: The kv cache spec of each attention layer in the model
        available_memory: Memory available for KV cache in bytes.

    Raises:
        ValueError: If there is not enough memory available for the KV cache.
    """

    # No need to check for available memory if the kv_cache_spec is empty
    # (attention-free models register no KV cache specs).
    if not kv_cache_spec:
        return

    if available_memory <= 0:
        raise ValueError("No available memory for the cache blocks. "
                         "Try increasing `gpu_memory_utilization` when "
                         "initializing the engine.")

    max_model_len = vllm_config.model_config.max_model_len
    needed_memory = max_memory_usage_bytes(vllm_config, kv_cache_spec.values())

    if needed_memory > available_memory:
        # Estimate the maximum model length that can fit in the available
        # memory, to give the user an actionable suggestion.
        estimated_max_len = estimate_max_model_len(vllm_config, kv_cache_spec,
                                                   available_memory)
        estimated_msg = ""
        if estimated_max_len > 0:
            estimated_msg = (
                "Based on the available memory, "
                f"the estimated maximum model length is {estimated_max_len}.")

        # BUGFIX: the original message read "the models's max seq len" and
        # had an unmatched "(" before the needed-memory figure.
        raise ValueError(
            f"To serve at least one request with the model's max seq len "
            f"({max_model_len}), {needed_memory/GiB_bytes:.2f} GiB KV "
            f"cache is needed, which is larger than the available KV cache "
            f"memory ({available_memory/GiB_bytes:.2f} GiB). "
            f"{estimated_msg} "
            f"Try increasing `gpu_memory_utilization` or decreasing "
            f"`max_model_len` when initializing the engine.")
|
| 707 |
+
|
| 708 |
+
|
| 709 |
+
def create_kv_cache_group_specs(
        kv_cache_spec: dict[str, KVCacheSpec],
        grouped_layer_names: list[list[str]]) -> list[KVCacheGroupSpec]:
    """
    Create KVCacheGroupSpec object for each kv cache group layer.
    The layers in the same group should share the same KVCacheSpec.

    Args:
        kv_cache_spec:
            A mapping from each layer name to its corresponding KVCacheSpec.
        grouped_layer_names:
            A list of kv cache groups, where each element is a list of layer
            names that belong to the same group and should share the same
            KVCacheSpec.
    Returns:
        A list of KVCacheGroupSpec objects, one for each group.
    """
    groups: list[KVCacheGroupSpec] = []
    for group_layer_names in grouped_layer_names:
        specs = [kv_cache_spec[name] for name in group_layer_names]
        # merge() also asserts that all specs in the group are compatible.
        merged_spec = specs[0].merge(specs)
        groups.append(KVCacheGroupSpec(group_layer_names, merged_spec))
    return groups
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
def is_kv_cache_type_uniform(kv_cache_spec: dict[str, KVCacheSpec]) -> bool:
    """
    Whether all layers in the given KVCacheSpec have the same KV cache spec.
    Note that we regard FullAttentionSpec with and without sliding window as
    the same type.

    Args:
        kv_cache_spec: The kv cache spec of each attention layer in the model

    Returns:
        True if all layers have the same type, False otherwise.
    """
    specs = list(kv_cache_spec.values())
    try:
        # merge() asserts spec compatibility; an AssertionError means the
        # specs cannot be unified into a single type.
        specs[0].merge(specs)
    except AssertionError:
        return False
    return True
|
| 757 |
+
|
| 758 |
+
|
| 759 |
+
def get_max_concurrency_for_kv_cache_config(
        vllm_config: VllmConfig, kv_cache_config: KVCacheConfig) -> float:
    """
    Get the maximum concurrency for the given KV cache configuration.
    """
    groups = kv_cache_config.kv_cache_groups
    num_layer_per_group = max(len(g.layer_names) for g in groups)
    # Worst-case memory footprint of a single max-length request.
    max_memory_usage_per_request = num_layer_per_group * max_memory_usage_bytes(
        vllm_config, (g.kv_cache_spec for g in groups))
    memory_per_block = (groups[0].kv_cache_spec.page_size_bytes *
                        num_layer_per_group)
    num_block_per_request = cdiv(max_memory_usage_per_request,
                                 memory_per_block)
    return kv_cache_config.num_blocks / num_block_per_request
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
def get_num_blocks(vllm_config: VllmConfig, num_layers: int,
                   available_memory: int, page_size: int) -> int:
    """
    Get the number of kv cache blocks.

    Args:
        vllm_config: The global VllmConfig
        num_layers: The number of layers
        available_memory: Memory available for KV cache in bytes.
        page_size: The page size of the KV cache.
    """
    # Split the memory evenly across layers; clamp to a non-negative count.
    num_blocks = max(int(available_memory // page_size // num_layers), 0)
    override = vllm_config.cache_config.num_gpu_blocks_override
    if override is not None:
        logger.info(
            "Overriding num_gpu_blocks=%d with "
            "num_gpu_blocks_override=%d", num_blocks, override)
        num_blocks = override
    return num_blocks
|
| 798 |
+
|
| 799 |
+
|
| 800 |
+
def get_uniform_page_size(kv_cache_spec: dict[str, KVCacheSpec]) -> int:
    """
    Get the page size of the KV cache.
    """
    # All layers must agree on a single page size.
    unique_sizes = {layer.page_size_bytes for layer in kv_cache_spec.values()}
    assert len(unique_sizes) == 1
    return unique_sizes.pop()
|
| 807 |
+
|
| 808 |
+
|
| 809 |
+
def _get_kv_cache_config_uniform_type(vllm_config: VllmConfig,
                                      kv_cache_spec: dict[str, KVCacheSpec],
                                      available_memory: int) -> KVCacheConfig:
    """
    Generates the KV cache configuration for a model with one type of KV cache.
    Divide the available memory equally among all layers.

    Args:
        vllm_config: The global VllmConfig
        kv_cache_spec: The kv cache spec of each attention layer in the model
        available_memory: Memory available for KV cache in bytes.

    Returns:
        The generated KVCacheConfig
    """
    page_size = get_uniform_page_size(kv_cache_spec)
    num_blocks = get_num_blocks(vllm_config, len(kv_cache_spec),
                                available_memory, page_size)
    per_layer_size = page_size * num_blocks

    # All layers share the same KV cache spec, so a single kv cache group
    # covers every layer, while each layer keeps its own backing tensor.
    grouped_layer_names = [list(kv_cache_spec.keys())]
    kv_cache_tensors = [
        KVCacheTensor(size=per_layer_size, shared_by=[layer_name])
        for layer_name in kv_cache_spec
    ]

    kv_cache_config = KVCacheConfig(
        num_blocks=num_blocks,
        kv_cache_tensors=kv_cache_tensors,
        kv_cache_groups=create_kv_cache_group_specs(kv_cache_spec,
                                                    grouped_layer_names),
    )

    # Log the cache capacity and the worst-case concurrency it supports.
    num_tokens = num_blocks * vllm_config.cache_config.block_size
    logger.info("GPU KV cache size: %s tokens", f"{num_tokens:,}")
    max_concurrency = get_max_concurrency_for_kv_cache_config(
        vllm_config, kv_cache_config)
    logger.info("Maximum concurrency for %s tokens per request: %.2fx",
                f"{vllm_config.model_config.max_model_len:,}",
                max_concurrency)
    return kv_cache_config
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
def is_kv_cache_page_size_uniform(
        kv_cache_spec: dict[str, KVCacheSpec]) -> bool:
    """
    Whether all layers in the given KVCacheSpec have the same page size.
    Args:
        kv_cache_spec: The KVCacheSpec of each attention layer in the model

    Returns:
        True if all layers have the same page size, False otherwise.
    """
    distinct_sizes = {spec.page_size_bytes for spec in kv_cache_spec.values()}
    return len(distinct_sizes) == 1
|
| 871 |
+
|
| 872 |
+
|
| 873 |
+
def is_kv_cache_type_attention_free(
        kv_cache_spec: dict[str, KVCacheSpec]) -> bool:
    """Return True if the model is attention free.

    Attention-free models register no KV cache specs, so their
    kv_cache_spec dict is empty.
    """
    return not kv_cache_spec
|
| 878 |
+
|
| 879 |
+
|
| 880 |
+
def _get_kv_cache_config_uniform_page_size(
        vllm_config: VllmConfig, kv_cache_spec: dict[str, KVCacheSpec],
        available_memory: int) -> KVCacheConfig:
    """
    Generates the KV cache configuration for hybrid models with multiple
    attention types but still with a uniform page size (physical memory per
    block per layer) for all layers.

    Detailed explanation about kv cache management of hybrid models:
    The layers in the models are repeated with some patterns, e.g., a model
    with 10 full attention layers and 20 sliding window attention layers can be
    regarded as repeating the pattern (1 * full, 2 * sw) 10 times.
    The KVCacheManager allocates different block tables for each of the 3 layers
    in the pattern, and repeats each of them 10 times to generate the
    block_table for the 30 layers in the model.
    Therefore, we can group the layers in the model into 3 kv_cache_groups, each
    of which contains 10 layers in the model.
    The KVCacheManager allocates the block_table for each group based on its
    kv_cache spec, and the model runner applies the block table to each layer
    in the group.
    For example:
    1. A model only uses full attention. The pattern is
    (num_hidden_layers * full), so there is only one group and the block table
    is shared by all layers. It is already handled by
    `_get_kv_cache_config_uniform_type`.
    2. A model with 10 full attention layers and 20 sliding window
    attention layers. There are 3 layers in the pattern (1 * full, 2 * sw), so
    there are 3 kv_cache_groups, each of which represents 10 layers.

    To simplify the implementation, we make the following assumptions:
    1. Physical memory per block: Must be the same across all KV cache groups.
    Breaking this assumption is non-trivial due to memory fragmentation concerns
    when allocating blocks of different sizes.
    2. Tokens per block (block_size): Currently, we directly use
    `CacheConfig.block_size` for all layers. It can be extended to vary by KV
    cache group, but within each KV cache group, all layers must share the same
    block size.
    3. Physical memory per token per layer: This property is decided by model
    config. Currently we only support models that have the same physical memory
    per token per layer for all layers. Can be relaxed with a simple extension,
    but still need to keep physical memory per block the same for all groups.
    4. Number of layers per group: Currently assumed the same for all layers.
    Can be relaxed with a simple extension, but still need to keep physical
    memory per block the same for all groups.
    5. Attention type within groups: All layers in a group must share the same
    attention type. One exception is that, when
    `--disable-hybrid-kv-cache-manager` is true, the single group for full
    attention layers may also include attention layers using sliding window or
    LLaMA 4 local attention. See `unify_hybrid_kv_cache_specs` for more details.
    6. Support for multiple attention types: The design for most components is
    general to an arbitrary number of attention types. But
    `find_longest_cache_hit` only supports one attention type or two
    types of full-attention plus exactly one another type. The general
    implementation of this function is feasible but we don't know how to
    implement it cleanly yet.

    As we assume tokens per block, physical memory per token per layer, and
    number of layers per group are the same now, we can ensure that physical
    memory per block is the same for all groups.

    Args:
        vllm_config: The global VllmConfig
        kv_cache_spec: The KVCacheSpec of each attention layer in the model
        available_memory: Memory available for KV cache in bytes.
    Returns:
        The generated KVCacheConfig
    """
    # Group all layers by kv_cache_spec.
    # E.g., 2 full attention layers and 3 sliding window attention layers,
    # -> (full.0, full.1), (sw.0, sw.1, sw.2).
    # NOTE: this relies on KVCacheSpec being hashable and equal specs
    # hashing identically.
    same_type_layers: dict[KVCacheSpec, list[str]] = defaultdict(list)
    for layer_name, layer_spec in kv_cache_spec.items():
        same_type_layers[layer_spec].append(layer_name)

    # Split each group into smaller groups, to make the number of layers in each
    # group identical. Add padding to the last group of each type if necessary.
    # E.g., (full.0, full.1), (sw.0, sw.1, sw.2)
    # split to 3 groups with 2 layers each:
    # (full.0, full.1), (sw.0, sw.1), (sw.2, padding).
    # FIXME(Chen): At the moment of writing this code (2025-06-02), all
    # open-source hybrid model follows a n:1 pattern between different attention
    # types (e.g., Gemma3 5:1 between sw and full, LLaMA4 3:1 between local and
    # full), so we can use the "1" in the n:1 pattern as the group size, which
    # is the minimum number of layers among all attention types. Need a better
    # strategy if we want to support more complex patterns (e.g., 20 full + 30
    # sw, where the group size should be 10).
    group_size = min([len(layers) for layers in same_type_layers.values()])
    grouped_layers = []
    for layers in same_type_layers.values():
        num_padding_layers = group_size - len(layers) % group_size
        if num_padding_layers != group_size:
            logger.warning(
                "Add %d padding layers, may waste at most %.2f%% KV cache memory",  # noqa
                num_padding_layers,
                num_padding_layers / len(layers) * 100,
            )
        for i in range(0, len(layers), group_size):
            grouped_layers.append(layers[i:i + group_size])
    kv_cache_groups = create_kv_cache_group_specs(kv_cache_spec,
                                                  grouped_layers)

    # Determine how model runners should initialize the KV cache tensors.
    # We will have group_size memory pools, each is shared by one layer from
    # each group. As layers of different groups have different block table,
    # they will use different parts of the shared Tensor.
    # The memory layout in the example will be:
    # full.0, sw.0, sw.2: share a Tensor with size=available_memory//2
    # full.1, sw.1: share another Tensor with size=available_memory//2
    page_size = get_uniform_page_size(kv_cache_spec)
    num_blocks = get_num_blocks(vllm_config, group_size, available_memory,
                                page_size)
    per_memory_pool_size = page_size * num_blocks
    kv_cache_tensors = []
    for i in range(group_size):
        shared_by = []
        for j in range(len(kv_cache_groups)):
            # Padded (short) groups simply do not contribute a layer to
            # this memory pool.
            if i < len(grouped_layers[j]):
                shared_by.append(grouped_layers[j][i])
        kv_cache_tensors.append(
            KVCacheTensor(size=per_memory_pool_size, shared_by=shared_by))

    kv_cache_config = KVCacheConfig(
        num_blocks=num_blocks,
        kv_cache_tensors=kv_cache_tensors,
        kv_cache_groups=kv_cache_groups,
    )

    min_block_size = min(
        [group.kv_cache_spec.block_size for group in kv_cache_groups])

    # Print the KV cache size and maximum concurrency.
    # NOTE(review): the token count divides by the number of groups, i.e. it
    # reports capacity per group, using the smallest block size as a
    # conservative figure.
    num_tokens = num_blocks // len(grouped_layers) * min_block_size
    num_tokens_str = f"{num_tokens:,}"
    logger.info("GPU KV cache size: %s tokens", num_tokens_str)
    max_model_len_str = f"{vllm_config.model_config.max_model_len:,}"
    max_concurrency = get_max_concurrency_for_kv_cache_config(
        vllm_config, kv_cache_config)
    logger.info("Maximum concurrency for %s tokens per request: %.2fx",
                max_model_len_str, max_concurrency)
    return kv_cache_config
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
def _get_kv_cache_config_attention_free() -> KVCacheConfig:
    """Build the trivial KV cache config for attention-free models.

    A single placeholder block with no tensors and no groups keeps the
    KVCache manager functional when there is nothing to cache.
    """
    return KVCacheConfig(num_blocks=1, kv_cache_tensors=[], kv_cache_groups=[])
|
| 1024 |
+
|
| 1025 |
+
|
| 1026 |
+
def unify_hybrid_kv_cache_specs(kv_cache_spec: dict[str, KVCacheSpec]):
    """
    This function tries to convert the KV cache specs to one type if the model
    is a hybrid model with multiple type of KV cache. It will convert all
    SlidingWindowSpec to FullAttentionSpec if both types are present.

    Args:
        kv_cache_spec: The kv cache spec of each attention layer in the model.
            Modified in place.

    Raises:
        ValueError: If the specs still cannot be unified after conversion.
    """

    # Already uniform: nothing to convert.
    if is_kv_cache_type_uniform(kv_cache_spec):
        return

    logger.warning(
        "Hybrid KV cache manager is disabled for this hybrid model, "
        "This means we do not enable any optimizations for saving KV cache "
        "memory (e.g., dropping the KV cache outside the sliding window). "
        "The compute of layers like sliding window is still saved.")

    has_full_attention = any(
        isinstance(spec, FullAttentionSpec) for spec in kv_cache_spec.values())
    has_sliding_window = any(
        isinstance(spec, SlidingWindowSpec) for spec in kv_cache_spec.values())
    has_chunked_local_attention = any(
        isinstance(spec, ChunkedLocalAttentionSpec)
        for spec in kv_cache_spec.values())
    if has_full_attention and (has_sliding_window
                               or has_chunked_local_attention):
        # Promote each non-full spec to a FullAttentionSpec, carrying over
        # the original window/chunk parameter onto the promoted spec.
        for layer_name, spec in kv_cache_spec.items():
            if isinstance(spec, SlidingWindowSpec):
                kv_cache_spec[layer_name] = FullAttentionSpec(
                    block_size=spec.block_size,
                    num_kv_heads=spec.num_kv_heads,
                    head_size=spec.head_size,
                    dtype=spec.dtype,
                    use_mla=spec.use_mla,
                    sliding_window=spec.sliding_window,
                )
            elif isinstance(spec, ChunkedLocalAttentionSpec):
                kv_cache_spec[layer_name] = FullAttentionSpec(
                    block_size=spec.block_size,
                    num_kv_heads=spec.num_kv_heads,
                    head_size=spec.head_size,
                    dtype=spec.dtype,
                    use_mla=spec.use_mla,
                    attention_chunk_size=spec.attention_chunk_size,
                )

    if not is_kv_cache_type_uniform(kv_cache_spec):
        raise ValueError("Hybrid KV cache manager is disabled but failed to "
                         "convert the KV cache specs to one unified type.")
|
| 1077 |
+
|
| 1078 |
+
|
| 1079 |
+
def get_kv_cache_config(
    vllm_config: VllmConfig,
    kv_cache_spec: dict[str, KVCacheSpec],
    available_memory: int,
) -> KVCacheConfig:
    """
    Generates the KV cache configuration for a model.

    Args:
        vllm_config: The global VllmConfig
        kv_cache_spec: The kv cache spec of each attention layer in the model
        available_memory: Memory available for KV cache in bytes.

    Returns:
        The generated KVCacheConfig
    """
    check_enough_kv_cache_memory(vllm_config, kv_cache_spec, available_memory)

    if vllm_config.scheduler_config.disable_hybrid_kv_cache_manager:
        # Collapse hybrid attention types into one unified spec type.
        unify_hybrid_kv_cache_specs(kv_cache_spec)

    if is_kv_cache_type_attention_free(kv_cache_spec):
        # Attention-free model: a config with 0 kv_cache groups and a single
        # placeholder block keeps the KVCache manager functional.
        return _get_kv_cache_config_attention_free()

    if is_kv_cache_type_uniform(kv_cache_spec):
        # All layers share one KV cache spec (the common case): allocate the
        # same amount of memory for each layer.
        return _get_kv_cache_config_uniform_type(vllm_config, kv_cache_spec,
                                                 available_memory)

    if is_kv_cache_page_size_uniform(kv_cache_spec):
        # Multiple attention types, but identical physical memory per block
        # per layer: split the layers into equal-sized groups so every group
        # has the same total page size.
        return _get_kv_cache_config_uniform_page_size(vllm_config,
                                                      kv_cache_spec,
                                                      available_memory)

    raise NotImplementedError
|
| 1119 |
+
|
| 1120 |
+
|
| 1121 |
+
def unify_kv_cache_configs(kv_cache_configs: list[KVCacheConfig]):
    """
    Make the KV cache configurations for each worker consistent, so that all
    workers can be controlled by the same KVCacheManager.
    This function verifies that the layer group of each worker are the same,
    and changes the num_blocks of each worker to the smallest among all
    workers.

    Args:
        kv_cache_configs: The KV cache configurations for each worker. Will be
            in-place modified to make them consistent.

    Returns:
        The same (in-place modified) list of configs.
    """

    # Sort the kv cache groups by their KV cache spec.
    # This can avoid the inconsistency caused by the order of groups.
    for kv_cache_config in kv_cache_configs:
        kv_cache_config.kv_cache_groups.sort(key=lambda x: (type(
            x.kv_cache_spec).__name__, astuple(x.kv_cache_spec)))

    # Verify that the groups of each rank are the same.
    # BUGFIX: strict=True also catches ranks that report a different *number*
    # of groups, which a plain zip would silently truncate past.
    for kv_cache_config in kv_cache_configs[1:]:
        for group_rank_0, group_rank_i in zip(
                kv_cache_configs[0].kv_cache_groups,
                kv_cache_config.kv_cache_groups,
                strict=True):
            assert group_rank_0.kv_cache_spec == group_rank_i.kv_cache_spec

    # Change the num_blocks of each rank to the smallest among all ranks. We
    # do not need to shrink the tensor size because it is valid to only use the
    # first `num_blocks` blocks of the tensor.
    min_num_blocks = min(kv_cache_config.num_blocks
                         for kv_cache_config in kv_cache_configs)
    for kv_cache_config in kv_cache_configs:
        kv_cache_config.num_blocks = min_num_blocks

    return kv_cache_configs
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/core/single_type_kv_cache_manager.py
ADDED
|
@@ -0,0 +1,567 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
import itertools
|
| 4 |
+
from abc import ABC, abstractmethod
|
| 5 |
+
from collections import defaultdict
|
| 6 |
+
|
| 7 |
+
from vllm.utils import cdiv
|
| 8 |
+
from vllm.v1.core.block_pool import BlockPool
|
| 9 |
+
from vllm.v1.core.kv_cache_utils import BlockHash, KVCacheBlock
|
| 10 |
+
from vllm.v1.kv_cache_interface import (ChunkedLocalAttentionSpec,
|
| 11 |
+
FullAttentionSpec, KVCacheSpec,
|
| 12 |
+
MambaSpec, SlidingWindowSpec)
|
| 13 |
+
from vllm.v1.request import Request
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class SingleTypeKVCacheManager(ABC):
    """Abstract base for managers that handle the KV cache bookkeeping of one
    specific type of attention layer (full, sliding-window, chunked-local,
    mamba).
    """

    def __init__(
        self,
        kv_cache_spec: KVCacheSpec,
        block_pool: BlockPool,
        kv_cache_group_id: int,
    ) -> None:
        """Initializes the SingleTypeKVCacheManager.

        Args:
            kv_cache_spec: The kv_cache_spec for this manager.
            block_pool: The block pool.
            kv_cache_group_id: The id of the kv cache group of this manager.
        """
        self.block_size = kv_cache_spec.block_size
        self.kv_cache_spec = kv_cache_spec
        self.block_pool = block_pool

        # request ID -> blocks allocated for that request, so the blocks can
        # be freed once the request finishes.
        self.req_to_blocks: defaultdict[str,
                                        list[KVCacheBlock]] = defaultdict(list)

        # request ID -> number of blocks already cached for that request.
        # Only RUNNING requests are tracked; preempted ones are dropped.
        self.num_cached_block: dict[str, int] = {}

        self.kv_cache_group_id = kv_cache_group_id
        self._null_block = block_pool.null_block

    def get_num_blocks_to_allocate(
            self, request_id: str, num_tokens: int,
            new_computed_blocks: list[KVCacheBlock]) -> int:
        """Get the number of blocks needed to be allocated for the request.

        Args:
            request_id: The request ID.
            num_tokens: The total number of tokens that need a slot (including
                tokens that are already allocated).
            new_computed_blocks: The new computed blocks just hitting the
                prefix caching.

        Returns:
            The number of blocks.
        """
        num_required_blocks = cdiv(num_tokens, self.block_size)
        num_already_held = (len(new_computed_blocks) +
                            len(self.req_to_blocks[request_id]))
        # A computed block that is currently an eviction candidate (in the
        # free queue with ref_cnt == 0) turns back into a live computed block
        # when this request is allocated, so count it as an allocation too.
        num_evictable = sum(1 for blk in new_computed_blocks
                            if blk.ref_cnt == 0 and not blk.is_null)
        return num_required_blocks - num_already_held + num_evictable

    def save_new_computed_blocks(
            self, request_id: str,
            new_computed_blocks: list[KVCacheBlock]) -> None:
        """Add the new computed blocks to the request.

        Args:
            request_id: The request ID.
            new_computed_blocks: The new computed blocks just hitting the
                prefix cache.
        """
        if request_id in self.num_cached_block:
            # A running request: it must not bring in new computed blocks.
            assert len(new_computed_blocks) == 0
            return
        # A new request: it must not own any blocks yet.
        req_blocks = self.req_to_blocks[request_id]
        assert len(req_blocks) == 0
        req_blocks.extend(new_computed_blocks)
        self.num_cached_block[request_id] = len(new_computed_blocks)

    def allocate_new_blocks(self, request_id: str,
                            num_tokens: int) -> list[KVCacheBlock]:
        """Allocate new blocks for the request to give it at least
        `num_tokens` token slots.

        Args:
            request_id: The request ID.
            num_tokens: The total number of tokens that need a slot (including
                tokens that are already allocated).

        Returns:
            The new allocated blocks.
        """
        req_blocks = self.req_to_blocks[request_id]
        shortfall = cdiv(num_tokens, self.block_size) - len(req_blocks)
        if shortfall <= 0:
            return []
        fresh_blocks = self.block_pool.get_new_blocks(shortfall)
        req_blocks.extend(fresh_blocks)
        return fresh_blocks

    def cache_blocks(self, request: Request, num_tokens: int) -> None:
        """Cache the blocks for the request.

        Args:
            request: The request.
            num_tokens: The total number of tokens that need to be cached
                (including tokens that are already cached).
        """
        num_full_blocks = num_tokens // self.block_size
        self.block_pool.cache_full_blocks(
            request=request,
            blocks=self.req_to_blocks[request.request_id],
            num_cached_blocks=self.num_cached_block[request.request_id],
            num_full_blocks=num_full_blocks,
            block_size=self.block_size,
            kv_cache_group_id=self.kv_cache_group_id,
        )
        self.num_cached_block[request.request_id] = num_full_blocks

    def free(self, request_id: str) -> None:
        """Free the blocks for the request.

        Args:
            request_id: The request ID.
        """
        # Default to [] in case a request is freed (aborted) before alloc.
        req_blocks = self.req_to_blocks.pop(request_id, [])
        # Free in reverse order so the tail blocks are freed first.
        self.block_pool.free_blocks(reversed(req_blocks))
        self.num_cached_block.pop(request_id, None)

    @abstractmethod
    def get_num_common_prefix_blocks(self, request_id: str,
                                     num_running_requests: int) -> int:
        """Get the number of common prefix blocks for all requests in the
        RUNNING state.

        Args:
            request_id: The request ID.
            num_running_requests: The total number of requests in the RUNNING
                state.

        Returns:
            The number of common prefix blocks for all requests in the RUNNING
            state.
        """
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def find_longest_cache_hit(
        cls,
        block_hashes: list[BlockHash],
        max_length: int,
        kv_cache_group_ids: list[int],
        block_pool: BlockPool,
        kv_cache_spec: KVCacheSpec,
        use_eagle: bool,
    ) -> tuple[list[KVCacheBlock], ...]:
        """Get the longest cache-hit prefix of the blocks that is not longer
        than `max_length`. The prefix must be a common hit for all kv cache
        groups in `kv_cache_group_ids`; return an empty list if there is no
        hit. If eagle is enabled, the last matched block is dropped so it gets
        recomputed to produce hidden states for the eagle drafting head.
        Must be customized per attention type.

        Args:
            block_hashes: The block hashes of the request.
            max_length: The maximum length of the cache hit prefix.
            kv_cache_group_ids: The ids of the kv cache groups.
            block_pool: The block pool.
            kv_cache_spec: The kv cache spec.
            use_eagle: Whether to use eagle.

        Returns:
            A tuple of length `len(kv_cache_group_ids)`; the i-th element is
            the list of cached blocks for the i-th group, with skipped blocks
            replaced by the null block. For example, a sliding window manager
            with block size 4, sliding window 8 and one group could return
            ([NULL, NULL, KVCacheBlock(7), KVCacheBlock(8)]).
        """
        raise NotImplementedError

    @abstractmethod
    def remove_skipped_blocks(self, request_id: str,
                              num_computed_tokens: int) -> None:
        """Remove blocks that are no longer needed and free them, replacing
        each removed block with the null block. Must be customized per
        attention type.

        Args:
            request_id: The request ID.
            num_computed_tokens: The number of tokens that have been computed.
        """
        raise NotImplementedError
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class FullAttentionManager(SingleTypeKVCacheManager):
    """Manager for full-attention (and chunked-local-attention) KV cache
    groups: no block is ever skipped, so eviction never happens mid-request.
    """

    @classmethod
    def find_longest_cache_hit(
        cls,
        block_hashes: list[BlockHash],
        max_length: int,
        kv_cache_group_ids: list[int],
        block_pool: BlockPool,
        kv_cache_spec: KVCacheSpec,
        use_eagle: bool,
    ) -> tuple[list[KVCacheBlock], ...]:
        assert isinstance(
            kv_cache_spec, (FullAttentionSpec, ChunkedLocalAttentionSpec)
        ), "FullAttentionManager can only be used for full attention " \
            "and chunked local attention groups"
        hit_blocks: tuple[list[KVCacheBlock], ...] = tuple(
            [] for _ in range(len(kv_cache_group_ids)))
        max_num_blocks = max_length // kv_cache_spec.block_size
        # block_hashes form a chain: once one hash misses the cache, every
        # later block is guaranteed uncomputed, so stop at the first miss.
        for block_hash in itertools.islice(block_hashes, max_num_blocks):
            cached = block_pool.get_cached_block(block_hash,
                                                 kv_cache_group_ids)
            if not cached:
                break
            for group_hits, cached_block in zip(hit_blocks, cached):
                group_hits.append(cached_block)
        if use_eagle and hit_blocks[0]:
            # Drop the last hit so eagle recomputes it for hidden states.
            for group_hits in hit_blocks:
                group_hits.pop()
        return hit_blocks

    def remove_skipped_blocks(self, request_id: str,
                              num_computed_tokens: int) -> None:
        # Full attention keeps every block in the window: nothing to remove.
        pass

    def get_num_common_prefix_blocks(self, request_id: str,
                                     num_running_requests: int) -> int:
        # A leading block is common to all running requests exactly when its
        # ref_cnt equals the number of running requests.
        num_common = 0
        for block in self.req_to_blocks[request_id]:
            if block.ref_cnt != num_running_requests:
                break
            num_common += 1
        return num_common
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class SlidingWindowManager(SingleTypeKVCacheManager):
    """Manager for sliding-window attention KV cache groups: blocks that
    slide out of the window are freed and replaced by the null block.
    """

    def __init__(self, kv_cache_spec: SlidingWindowSpec, block_pool: BlockPool,
                 **kwargs) -> None:
        super().__init__(kv_cache_spec, block_pool, **kwargs)
        self.sliding_window = kv_cache_spec.sliding_window
        # Out-of-window positions are replaced with this shared null block.
        self._null_block = block_pool.null_block

    @classmethod
    def find_longest_cache_hit(
        cls,
        block_hashes: list[BlockHash],
        max_length: int,
        kv_cache_group_ids: list[int],
        block_pool: BlockPool,
        kv_cache_spec: KVCacheSpec,
        use_eagle: bool,
    ) -> tuple[list[KVCacheBlock], ...]:
        assert isinstance(kv_cache_spec, SlidingWindowSpec), (
            "SlidingWindowManager can only be used for sliding window groups")

        # Number of contiguous cached blocks required for a prefix hit;
        # -1 because the input token itself also lies inside the window.
        required_contiguous = cdiv(kv_cache_spec.sliding_window - 1,
                                   kv_cache_spec.block_size)
        if use_eagle:
            # Eagle must recompute the last matched block, so demand one more
            # contiguous block here and drop the final match at the end.
            required_contiguous += 1

        # TODO: on a miss, jump back by required_contiguous to bring the
        # complexity from O(max_num_blocks) down to
        # O(max_num_blocks / required_contiguous + required_contiguous),
        # which helps low cache-hit-rate scenarios.
        max_num_blocks = max_length // kv_cache_spec.block_size
        hit_blocks = tuple([block_pool.null_block] * max_num_blocks
                           for _ in range(len(kv_cache_group_ids)))
        contiguous_run = 0
        found = False
        # Scan right to left and stop as soon as a long-enough run appears.
        for idx in range(max_num_blocks - 1, -1, -1):
            cached = block_pool.get_cached_block(block_hashes[idx],
                                                 kv_cache_group_ids)
            if not cached:
                contiguous_run = 0
                continue
            for group_hits, cached_block in zip(hit_blocks, cached):
                group_hits[idx] = cached_block
            contiguous_run += 1
            if contiguous_run >= required_contiguous:
                # Trim trailing blocks past the matched run, e.g.
                # [NULL, NULL, 8, 3, NULL, 9] -> [NULL, NULL, 8, 3]
                # when required_contiguous == 2.
                for group_hits in hit_blocks:
                    del group_hits[idx + contiguous_run:]
                found = True
                break
        if not found:
            # A short leading run still counts as a hit even when
            # contiguous_run < required_contiguous.
            for group_hits in hit_blocks:
                del group_hits[contiguous_run:]
        if use_eagle and hit_blocks[0]:
            for group_hits in hit_blocks:
                group_hits.pop()
        return hit_blocks

    def remove_skipped_blocks(self, request_id: str,
                              num_computed_tokens: int) -> None:
        # Free the blocks that have slid out of the attention window and are
        # skipped during attention computation.
        last_useful_token = num_computed_tokens - self.sliding_window + 1
        last_useful_block = last_useful_token // self.block_size
        blocks = self.req_to_blocks[request_id]
        freed: list[KVCacheBlock] = []
        for idx in range(last_useful_block - 1, -1, -1):
            if blocks[idx] == self._null_block:
                # Already nulled: every earlier block was nulled by a
                # previous call to this function, so stop here.
                break
            freed.append(blocks[idx])
            blocks[idx] = self._null_block
        self.block_pool.free_blocks(freed)

    def get_num_common_prefix_blocks(self, request_id: str,
                                     num_running_requests: int) -> int:
        """
        NOTE(Chen): The prefix blocks are null blocks for sliding window
        layers, so counting ref_cnt like FullAttentionManager would be wrong.
        Return 0 for correctness; cascade attention + sliding window is left
        for future work.
        """
        return 0
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
class ChunkedLocalAttentionManager(SingleTypeKVCacheManager):
    """Manager for chunked local attention groups: only the current chunk's
    blocks matter, everything before the chunk boundary is treated as
    computed and nulled out.
    """

    def __init__(self, kv_cache_spec: ChunkedLocalAttentionSpec,
                 block_pool: BlockPool, **kwargs) -> None:
        super().__init__(kv_cache_spec, block_pool, **kwargs)
        self.attention_chunk_size = kv_cache_spec.attention_chunk_size
        # Positions outside the current chunk are replaced with this block.
        self._null_block = block_pool.null_block

    @classmethod
    def find_longest_cache_hit(
        cls,
        block_hashes: list[BlockHash],
        max_length: int,
        kv_cache_group_ids: list[int],
        block_pool: BlockPool,
        kv_cache_spec: KVCacheSpec,
        use_eagle: bool,
    ) -> tuple[list[KVCacheBlock], ...]:
        """Find the longest common cache-hit prefix, at most `max_length`
        tokens, for all groups in `kv_cache_group_ids`.

        Blocks wholly outside the local window are marked computed and set to
        the null block. Examples:

        1. Chunk size 8, block size 4, max length 15: for the next token at
           index 15 (zero-indexed), tokens 8-14 are in the window (need a
           lookup) while tokens 0-7 are outside it and already count as
           computed. We then check the complete block 3 (tokens 8-11): on a
           hit we return [null, null, block 3], otherwise [null, null].

        2. Chunk size 8, block size 4, max length 16: for the next token at
           index 16, tokens 0-15 are all outside the window, so we return
           four blocks [null, null, null, null].

        Args:
            block_hashes: The block hashes of the request.
            max_length: The maximum length of the cache hit prefix.
            kv_cache_group_ids: The ids of the kv cache groups.
            block_pool: The block pool.
            kv_cache_spec: The kv cache spec.
            use_eagle: Whether to use eagle.

        Returns:
            A list of cached blocks.
        """
        assert isinstance(kv_cache_spec, ChunkedLocalAttentionSpec), (
            "ChunkedLocalAttentionManager can only be used for " +
            "chunked local attention groups")
        assert use_eagle is False, ("Hybrid KV cache is not supported for " +
                                    "eagle + chunked local attention.")
        max_num_blocks = max_length // kv_cache_spec.block_size
        if max_length > 0:
            # Start of the chunk containing the next token.
            window_start = (max_length // kv_cache_spec.attention_chunk_size *
                            kv_cache_spec.attention_chunk_size)
        else:
            window_start = 0
        # Everything before the window start is computed-by-definition and
        # represented with null blocks; inside the window we look up the
        # cache: [null] ... [null] [hit block 1] [hit block 2] ...
        start_block_idx = window_start // kv_cache_spec.block_size
        hit_blocks: tuple[list[KVCacheBlock], ...] = tuple(
            [block_pool.null_block] * start_block_idx
            for _ in range(len(kv_cache_group_ids)))
        for idx in range(start_block_idx, max_num_blocks):
            cached = block_pool.get_cached_block(block_hashes[idx],
                                                 kv_cache_group_ids)
            if not cached:
                break
            for group_hits, cached_block in zip(hit_blocks, cached):
                group_hits.append(cached_block)
        return hit_blocks

    def remove_skipped_blocks(self, request_id: str,
                              num_computed_tokens: int) -> None:
        # Free blocks that fell out of the current chunked-attention window
        # and are skipped during attention computation.
        #
        # [chunk 0][chunk 1]window_start ... current
        # The number of completed chunks determines the window start offset:
        # e.g. with chunk size 1024, 1024 computed tokens put the next token
        # in the second chunk, so the start is 1024; for 1023 it is 0.
        num_cached_block = self.num_cached_block.get(request_id, 0)
        window_start = (num_computed_tokens // self.attention_chunk_size *
                        self.attention_chunk_size)
        # With block size 128: 0 -> block 0, 1024 (= 128 * 8) -> block 8,
        # 372 (= 128 * 2 + 116) -> block 2.
        first_useful_block_idx = window_start // self.block_size
        if num_cached_block > 0:
            # Keep the last cached block: it is needed to derive the previous
            # hash key.
            first_useful_block_idx = min(first_useful_block_idx,
                                         num_cached_block - 1)
        blocks = self.req_to_blocks[request_id]
        freed: list[KVCacheBlock] = []
        for idx in range(first_useful_block_idx - 1, -1, -1):
            if blocks[idx] == self._null_block:
                # Already nulled: earlier blocks were nulled by a previous
                # call to this function.
                break
            freed.append(blocks[idx])
            blocks[idx] = self._null_block
        self.block_pool.free_blocks(freed)

    def get_num_common_prefix_blocks(self, request_id: str,
                                     num_running_requests: int) -> int:
        """Cascade attention is not supported by chunked local attention."""
        return 0
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
class MambaManager(SingleTypeKVCacheManager):
    """Manager for mamba (state-space) layer groups: a single state block per
    request, no prefix caching.
    """

    @classmethod
    def find_longest_cache_hit(
        cls,
        block_hashes: list[BlockHash],
        max_length: int,
        kv_cache_group_ids: list[int],
        block_pool: BlockPool,
        kv_cache_spec: KVCacheSpec,
        use_eagle: bool,
    ) -> tuple[list[KVCacheBlock], ...]:
        assert isinstance(
            kv_cache_spec,
            MambaSpec), ("MambaManager can only be used for mamba groups")
        # Prefix caching is not supported for mamba yet: always report a miss
        # by returning an empty hit list per group.
        return tuple([] for _ in range(len(kv_cache_group_ids)))

    def remove_skipped_blocks(self, request_id: str,
                              num_computed_tokens: int) -> None:
        # Each request always holds exactly one block, so nothing is skipped.
        pass

    def get_num_common_prefix_blocks(self, request_id: str,
                                     num_running_requests: int) -> int:
        # Cascade attention does not apply to mamba state blocks.
        return 0

    def allocate_new_blocks(self, request_id: str,
                            num_tokens: int) -> list[KVCacheBlock]:
        new_blocks = super().allocate_new_blocks(request_id, num_tokens)
        assert len(self.req_to_blocks[request_id]) == 1, (
            "MambaManager should only allocate 1 block for each request.")
        return new_blocks
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
# Dispatch table: KV cache spec type -> manager implementation for it.
spec_manager_map: dict[type[KVCacheSpec], type[SingleTypeKVCacheManager]] = {
    FullAttentionSpec: FullAttentionManager,
    SlidingWindowSpec: SlidingWindowManager,
    ChunkedLocalAttentionSpec: ChunkedLocalAttentionManager,
    MambaSpec: MambaManager,
}


def get_manager_for_kv_cache_spec(kv_cache_spec: KVCacheSpec,
                                  **kwargs) -> SingleTypeKVCacheManager:
    """Instantiate the manager registered for ``type(kv_cache_spec)``,
    forwarding ``kwargs`` to the manager's constructor."""
    return spec_manager_map[type(kv_cache_spec)](kv_cache_spec, **kwargs)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/__init__.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import enum
|
| 5 |
+
import time
|
| 6 |
+
from collections.abc import Sequence
|
| 7 |
+
from typing import Any, Optional, Union
|
| 8 |
+
|
| 9 |
+
import msgspec
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from vllm.lora.request import LoRARequest
|
| 13 |
+
from vllm.multimodal.inputs import MultiModalKwargsItem, PlaceholderRange
|
| 14 |
+
from vllm.pooling_params import PoolingParams
|
| 15 |
+
from vllm.sampling_params import SamplingParams
|
| 16 |
+
from vllm.v1.metrics.stats import SchedulerStats
|
| 17 |
+
from vllm.v1.outputs import LogprobsLists, LogprobsTensors
|
| 18 |
+
|
| 19 |
+
# These are possible values of RequestOutput.finish_reason,
# so form part of the external API.
FINISH_REASON_STRINGS = ("stop", "length", "abort")


class FinishReason(enum.IntEnum):
    """Why a request finished: stop, length, or abort.

    Encoded as an int rather than a string for more compact serialization.

    stop - a stop string was emitted
    length - max_tokens was consumed, or max_model_len was reached
    abort - aborted for another reason
    """
    STOP = 0
    LENGTH = 1
    ABORT = 2

    def __str__(self):
        # Map the int value back to its public string form.
        return FINISH_REASON_STRINGS[self.value]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class EngineCoreRequest(
        msgspec.Struct,
        array_like=True,  # type: ignore[call-arg]
        omit_defaults=True,  # type: ignore[call-arg]
        gc=False):  # type: ignore[call-arg]
    """A single inference request as sent from the front-end to the
    engine core process.

    Encoded with msgspec; ``array_like=True`` serializes instances as a
    positional array, so the field order below is part of the wire format
    and must not be reordered.
    """

    # Unique identifier for this request.
    request_id: str
    # Token ids of the already-tokenized prompt.
    prompt_token_ids: list[int]
    # Multi-modal input items, if any. None entries are permitted per item
    # -- presumably items resolved elsewhere (e.g. via mm_hashes); confirm
    # against the processor/cache code.
    mm_kwargs: Optional[Sequence[Optional[MultiModalKwargsItem]]]
    # Content hashes for the multi-modal items, if any.
    mm_hashes: Optional[list[str]]
    # Placeholder ranges marking where mm items sit within the prompt.
    mm_placeholders: Optional[list[PlaceholderRange]]
    # NOTE(review): sampling_params and pooling_params appear mutually
    # exclusive (generation vs pooling request), but that is not enforced
    # here -- confirm at call sites.
    sampling_params: Optional[SamplingParams]
    pooling_params: Optional[PoolingParams]
    # End-of-sequence token id, if known.
    eos_token_id: Optional[int]
    # Arrival time supplied by the front-end (clock/units chosen by caller).
    arrival_time: float
    # LoRA adapter to apply for this request, if any.
    lora_request: Optional[LoRARequest]
    # Optional salt mixed into prefix-cache keys.
    cache_salt: Optional[str]
    # Data-parallel rank this request is pinned to, if any.
    data_parallel_rank: Optional[int]

    # Index of the client, used to ensure outputs are sent back to the same
    # client for this request when scaling out the front-end.
    client_index: int = 0

    # Used in DP case to indicate which wave of requests this is expected to
    # belong to, to cover a race condition where the request is sent before
    # a wave finished notification is received.
    current_wave: int = 0
    # Scheduling priority (semantics defined by the scheduler).
    priority: int = 0
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class EngineCoreEventType(enum.IntEnum):
    """Kinds of per-request lifecycle events emitted by the engine core."""

    # The request entered the engine core's queue.
    QUEUED = 1
    # The request was scheduled for execution.
    SCHEDULED = 2
    # A running request was preempted.
    PREEMPTED = 3
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class EngineCoreEvent(msgspec.Struct):
    """A timestamped engine core event associated with a request.

    The timestamp is a monotonic timestamp and is used by the engine
    frontend to calculate intervals between engine core events. These
    timestamps should not be compared with timestamps from other processes.
    """
    # What happened (queued / scheduled / preempted).
    type: EngineCoreEventType
    # time.monotonic() value captured when the event occurred.
    timestamp: float

    @classmethod
    def new_event(cls,
                  event_type: EngineCoreEventType,
                  timestamp: Optional[float] = None) -> "EngineCoreEvent":
        """Create an event, stamping it with time.monotonic() when no
        explicit timestamp is supplied."""
        timestamp = time.monotonic() if timestamp is None else timestamp
        return cls(event_type, timestamp)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class EngineCoreOutput(
        msgspec.Struct,
        array_like=True,  # type: ignore[call-arg]
        omit_defaults=True,  # type: ignore[call-arg]
        gc=False):  # type: ignore[call-arg]
    """Per-request output produced by the engine core for one step.

    Serialized positionally (``array_like=True``), so the field order
    below is part of the wire format and must not be reordered.
    """

    # Id of the request this output belongs to.
    request_id: str
    # Tokens generated since the last output for this request.
    new_token_ids: list[int]

    # Sample logprobs for the new tokens, when requested.
    new_logprobs: Optional[LogprobsLists] = None
    # Prompt logprobs, when requested.
    new_prompt_logprobs_tensors: Optional[LogprobsTensors] = None

    # Pooling result, for pooling (non-generative) requests.
    pooling_output: Optional[torch.Tensor] = None

    # Set once the request has finished; None while still running.
    finish_reason: Optional[FinishReason] = None
    # The stop token id or stop string that ended generation, if any.
    stop_reason: Union[int, str, None] = None
    # Lifecycle events recorded since the last output, if event logging
    # is enabled.
    events: Optional[list[EngineCoreEvent]] = None
    # KV-transfer metadata -- assumes disaggregated-serving handoff;
    # schema determined by the KV connector, confirm there.
    kv_transfer_params: Optional[dict[str, Any]] = None

    # The number of tokens with prefix cache hits.
    num_cached_tokens: int = 0

    @property
    def finished(self) -> bool:
        """Whether the request has completed (a finish reason is set)."""
        return self.finish_reason is not None
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class UtilityResult:
    """Wraps the return value of a utility call so serialization and
    deserialization code can recognize and special-case it."""

    def __init__(self, r: Any = None):
        # The wrapped result value; may be any object, defaults to None.
        self.result = r
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class UtilityOutput(
        msgspec.Struct,
        array_like=True,  # type: ignore[call-arg]
        gc=False):  # type: ignore[call-arg]
    """Result of a utility (RPC-style) call made to the engine core.

    Serialized positionally (``array_like=True``); do not reorder fields.
    """

    # Correlates this output with the originating utility call.
    call_id: int

    # Non-None implies the call failed, result should be None.
    failure_message: Optional[str] = None
    # The wrapped return value of the call, on success.
    result: Optional[UtilityResult] = None
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class EngineCoreOutputs(
        msgspec.Struct,
        array_like=True,  # type: ignore[call-arg]
        omit_defaults=True,  # type: ignore[call-arg]
        gc=False):  # type: ignore[call-arg]
    """Batch of per-request outputs sent from the engine core back to the
    front-end, plus step-level metadata.

    Serialized positionally (``array_like=True``); field order is part of
    the wire format.
    """

    #NOTE(Nick): We could consider ways to make this more compact,
    # e.g. columnwise layout

    # Index of the engine (data-parallel rank) that produced this batch.
    engine_index: int = 0

    # [num_reqs]
    # NOTE: the mutable default is safe with msgspec Structs -- msgspec
    # copies builtin mutable defaults per instance (see msgspec docs),
    # unlike plain Python default arguments.
    outputs: list[EngineCoreOutput] = []
    # Scheduler statistics for this step, when stats logging is enabled.
    scheduler_stats: Optional[SchedulerStats] = None
    # Creation time; filled in by __post_init__ when left at 0.0.
    timestamp: float = 0.0

    # Result of a utility call, if this message carries one.
    utility_output: Optional[UtilityOutput] = None
    # Ids of requests newly observed as finished, if any.
    finished_requests: Optional[set[str]] = None

    # In DP case, used to signal that the current wave of requests
    # has finished and the engines are paused.
    wave_complete: Optional[int] = None
    # In DP case, used to signal that a request was received for an
    # "old" wave, so the next wave needs to be started in other engines.
    start_wave: Optional[int] = None

    def __post_init__(self):
        # Stamp the creation time unless the sender already provided one.
        if self.timestamp == 0.0:
            self.timestamp = time.monotonic()
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
class EngineCoreRequestType(enum.Enum):
    """Message-type tags for engine-core IPC.

    Each value is a single byte, so a tag can be written straight to a
    socket without a separate encoding step.
    """

    ADD = b'\x00'
    ABORT = b'\x01'
    START_DP_WAVE = b'\x02'
    UTILITY = b'\x03'
    # Sentinel used within EngineCoreProc.
    EXECUTOR_FAILED = b'\x04'
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class ReconfigureDistributedRequest(msgspec.Struct):
    """Request to reconfigure the data-parallel setup of a running engine.

    NOTE(review): the rank fields presumably may also carry the negative
    sentinels defined by ReconfigureRankType -- confirm at call sites.
    """
    # Target data-parallel world size.
    new_data_parallel_size: int
    # New global data-parallel rank for this engine.
    new_data_parallel_rank: int
    # New node-local data-parallel rank for this engine.
    new_data_parallel_rank_local: int
    # Master address and port for rebuilding the DP process group.
    new_data_parallel_master_ip: str
    new_data_parallel_master_port: int
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
class ReconfigureRankType(enum.IntEnum):
    """Sentinel rank values used in distributed reconfiguration requests.

    Negative so they can never collide with a real (non-negative) rank.
    """

    # Leave this engine's current rank unchanged.
    KEEP_CURRENT_RANK = -1
    # Shut down this engine's rank as part of the resize.
    SHUTDOWN_CURRENT_RANK = -2
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/async_llm.py
ADDED
|
@@ -0,0 +1,688 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
import asyncio
|
| 4 |
+
import time
|
| 5 |
+
from collections.abc import AsyncGenerator, Iterable, Mapping
|
| 6 |
+
from copy import copy
|
| 7 |
+
from typing import Any, Optional, Union
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
import vllm.envs as envs
|
| 12 |
+
from vllm.config import ModelConfig, VllmConfig
|
| 13 |
+
from vllm.engine.arg_utils import AsyncEngineArgs
|
| 14 |
+
from vllm.engine.protocol import EngineClient
|
| 15 |
+
from vllm.envs import VLLM_V1_OUTPUT_PROC_CHUNK_SIZE
|
| 16 |
+
from vllm.inputs import PromptType
|
| 17 |
+
from vllm.inputs.preprocess import InputPreprocessor
|
| 18 |
+
from vllm.logger import init_logger
|
| 19 |
+
from vllm.lora.request import LoRARequest
|
| 20 |
+
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
|
| 21 |
+
from vllm.outputs import PoolingRequestOutput, RequestOutput
|
| 22 |
+
from vllm.pooling_params import PoolingParams
|
| 23 |
+
from vllm.sampling_params import SamplingParams
|
| 24 |
+
from vllm.tasks import SupportedTask
|
| 25 |
+
from vllm.transformers_utils.config import (
|
| 26 |
+
maybe_register_config_serialize_by_value)
|
| 27 |
+
from vllm.transformers_utils.tokenizer import AnyTokenizer
|
| 28 |
+
from vllm.transformers_utils.tokenizer_group import init_tokenizer_from_configs
|
| 29 |
+
from vllm.usage.usage_lib import UsageContext
|
| 30 |
+
from vllm.utils import (Device, as_list, cancel_task_threadsafe, cdiv,
|
| 31 |
+
deprecate_kwargs)
|
| 32 |
+
from vllm.v1.engine import EngineCoreRequest
|
| 33 |
+
from vllm.v1.engine.core_client import EngineCoreClient
|
| 34 |
+
from vllm.v1.engine.exceptions import EngineDeadError, EngineGenerateError
|
| 35 |
+
from vllm.v1.engine.output_processor import (OutputProcessor,
|
| 36 |
+
RequestOutputCollector)
|
| 37 |
+
from vllm.v1.engine.parallel_sampling import ParentRequest
|
| 38 |
+
from vllm.v1.engine.processor import Processor
|
| 39 |
+
from vllm.v1.executor.abstract import Executor
|
| 40 |
+
from vllm.v1.metrics.loggers import StatLoggerFactory, StatLoggerManager
|
| 41 |
+
from vllm.v1.metrics.prometheus import shutdown_prometheus
|
| 42 |
+
from vllm.v1.metrics.stats import IterationStats
|
| 43 |
+
|
| 44 |
+
logger = init_logger(__name__)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class AsyncLLM(EngineClient):
|
| 48 |
+
|
| 49 |
+
def __init__(
|
| 50 |
+
self,
|
| 51 |
+
vllm_config: VllmConfig,
|
| 52 |
+
executor_class: type[Executor],
|
| 53 |
+
log_stats: bool,
|
| 54 |
+
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
|
| 55 |
+
mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
|
| 56 |
+
use_cached_outputs: bool = False,
|
| 57 |
+
log_requests: bool = True,
|
| 58 |
+
start_engine_loop: bool = True,
|
| 59 |
+
stat_loggers: Optional[list[StatLoggerFactory]] = None,
|
| 60 |
+
client_addresses: Optional[dict[str, str]] = None,
|
| 61 |
+
client_count: int = 1,
|
| 62 |
+
client_index: int = 0,
|
| 63 |
+
) -> None:
|
| 64 |
+
"""
|
| 65 |
+
Create an AsyncLLM.
|
| 66 |
+
|
| 67 |
+
Args:
|
| 68 |
+
vllm_config: global configuration.
|
| 69 |
+
executor_class: an Executor impl, e.g. MultiprocExecutor.
|
| 70 |
+
log_stats: Whether to log stats.
|
| 71 |
+
usage_context: Usage context of the LLM.
|
| 72 |
+
mm_registry: Multi-modal registry.
|
| 73 |
+
use_cached_outputs: Whether to use cached outputs.
|
| 74 |
+
log_requests: Whether to log requests.
|
| 75 |
+
start_engine_loop: Whether to start the engine loop.
|
| 76 |
+
stat_loggers: customized stat loggers for the engine.
|
| 77 |
+
If not provided, default stat loggers will be used.
|
| 78 |
+
PLEASE BE AWARE THAT STAT LOGGER IS NOT STABLE
|
| 79 |
+
IN V1, AND ITS BASE CLASS INTERFACE MIGHT CHANGE.
|
| 80 |
+
|
| 81 |
+
Returns:
|
| 82 |
+
None
|
| 83 |
+
"""
|
| 84 |
+
if not envs.VLLM_USE_V1:
|
| 85 |
+
raise ValueError(
|
| 86 |
+
"Using V1 AsyncLLMEngine, but envs.VLLM_USE_V1=False. "
|
| 87 |
+
"This should not happen. As a workaround, try using "
|
| 88 |
+
"AsyncLLMEngine.from_vllm_config(...) or explicitly set "
|
| 89 |
+
"VLLM_USE_V1=0 or 1 and report this issue on Github.")
|
| 90 |
+
|
| 91 |
+
# Ensure we can serialize custom transformer configs
|
| 92 |
+
maybe_register_config_serialize_by_value()
|
| 93 |
+
|
| 94 |
+
self.model_config = vllm_config.model_config
|
| 95 |
+
self.vllm_config = vllm_config
|
| 96 |
+
self.log_requests = log_requests
|
| 97 |
+
self.log_stats = log_stats
|
| 98 |
+
|
| 99 |
+
if self.model_config.skip_tokenizer_init:
|
| 100 |
+
self.tokenizer = None
|
| 101 |
+
else:
|
| 102 |
+
# Tokenizer (+ ensure liveness if running in another process).
|
| 103 |
+
self.tokenizer = init_tokenizer_from_configs(
|
| 104 |
+
model_config=vllm_config.model_config,
|
| 105 |
+
scheduler_config=vllm_config.scheduler_config,
|
| 106 |
+
lora_config=vllm_config.lora_config)
|
| 107 |
+
|
| 108 |
+
# Processor (converts Inputs --> EngineCoreRequests).
|
| 109 |
+
self.processor = Processor(
|
| 110 |
+
vllm_config=vllm_config,
|
| 111 |
+
tokenizer=self.tokenizer,
|
| 112 |
+
mm_registry=mm_registry,
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
# OutputProcessor (converts EngineCoreOutputs --> RequestOutput).
|
| 116 |
+
self.output_processor = OutputProcessor(self.tokenizer,
|
| 117 |
+
log_stats=self.log_stats)
|
| 118 |
+
|
| 119 |
+
# EngineCore (starts the engine in background process).
|
| 120 |
+
self.engine_core = EngineCoreClient.make_async_mp_client(
|
| 121 |
+
vllm_config=vllm_config,
|
| 122 |
+
executor_class=executor_class,
|
| 123 |
+
log_stats=self.log_stats,
|
| 124 |
+
client_addresses=client_addresses,
|
| 125 |
+
client_count=client_count,
|
| 126 |
+
client_index=client_index,
|
| 127 |
+
)
|
| 128 |
+
|
| 129 |
+
# Loggers.
|
| 130 |
+
self.logger_manager: Optional[StatLoggerManager] = None
|
| 131 |
+
if self.log_stats:
|
| 132 |
+
self.logger_manager = StatLoggerManager(
|
| 133 |
+
vllm_config=vllm_config,
|
| 134 |
+
engine_idxs=self.engine_core.engine_ranks_managed,
|
| 135 |
+
custom_stat_loggers=stat_loggers,
|
| 136 |
+
)
|
| 137 |
+
self.logger_manager.log_engine_initialized()
|
| 138 |
+
|
| 139 |
+
self.output_handler: Optional[asyncio.Task] = None
|
| 140 |
+
try:
|
| 141 |
+
# Start output handler eagerly if we are in the asyncio eventloop.
|
| 142 |
+
asyncio.get_running_loop()
|
| 143 |
+
self._run_output_handler()
|
| 144 |
+
except RuntimeError:
|
| 145 |
+
pass
|
| 146 |
+
|
| 147 |
+
@classmethod
|
| 148 |
+
@deprecate_kwargs(
|
| 149 |
+
"disable_log_requests",
|
| 150 |
+
additional_message=("This argument will have no effect. "
|
| 151 |
+
"Use `enable_log_requests` instead."),
|
| 152 |
+
)
|
| 153 |
+
def from_vllm_config(
|
| 154 |
+
cls,
|
| 155 |
+
vllm_config: VllmConfig,
|
| 156 |
+
start_engine_loop: bool = True,
|
| 157 |
+
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
|
| 158 |
+
stat_loggers: Optional[list[StatLoggerFactory]] = None,
|
| 159 |
+
enable_log_requests: bool = False,
|
| 160 |
+
disable_log_stats: bool = False,
|
| 161 |
+
client_addresses: Optional[dict[str, str]] = None,
|
| 162 |
+
client_count: int = 1,
|
| 163 |
+
client_index: int = 0,
|
| 164 |
+
disable_log_requests: bool = True, # Deprecated, will be removed
|
| 165 |
+
) -> "AsyncLLM":
|
| 166 |
+
if not envs.VLLM_USE_V1:
|
| 167 |
+
raise ValueError(
|
| 168 |
+
"Using V1 AsyncLLMEngine, but envs.VLLM_USE_V1=False. "
|
| 169 |
+
"This should not happen. As a workaround, try using "
|
| 170 |
+
"AsyncLLMEngine.from_vllm_config(...) or explicitly set "
|
| 171 |
+
"VLLM_USE_V1=0 or 1 and report this issue on Github.")
|
| 172 |
+
|
| 173 |
+
# Create the LLMEngine.
|
| 174 |
+
return cls(
|
| 175 |
+
vllm_config=vllm_config,
|
| 176 |
+
executor_class=Executor.get_class(vllm_config),
|
| 177 |
+
start_engine_loop=start_engine_loop,
|
| 178 |
+
stat_loggers=stat_loggers,
|
| 179 |
+
log_requests=enable_log_requests,
|
| 180 |
+
log_stats=not disable_log_stats,
|
| 181 |
+
usage_context=usage_context,
|
| 182 |
+
client_addresses=client_addresses,
|
| 183 |
+
client_count=client_count,
|
| 184 |
+
client_index=client_index,
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
@classmethod
|
| 188 |
+
def from_engine_args(
|
| 189 |
+
cls,
|
| 190 |
+
engine_args: AsyncEngineArgs,
|
| 191 |
+
start_engine_loop: bool = True,
|
| 192 |
+
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
|
| 193 |
+
stat_loggers: Optional[list[StatLoggerFactory]] = None,
|
| 194 |
+
) -> "AsyncLLM":
|
| 195 |
+
"""Create an AsyncLLM from the EngineArgs."""
|
| 196 |
+
|
| 197 |
+
# Create the engine configs.
|
| 198 |
+
vllm_config = engine_args.create_engine_config(usage_context)
|
| 199 |
+
executor_class = Executor.get_class(vllm_config)
|
| 200 |
+
|
| 201 |
+
# Create the AsyncLLM.
|
| 202 |
+
return cls(
|
| 203 |
+
vllm_config=vllm_config,
|
| 204 |
+
executor_class=executor_class,
|
| 205 |
+
log_requests=engine_args.enable_log_requests,
|
| 206 |
+
log_stats=not engine_args.disable_log_stats,
|
| 207 |
+
start_engine_loop=start_engine_loop,
|
| 208 |
+
usage_context=usage_context,
|
| 209 |
+
stat_loggers=stat_loggers,
|
| 210 |
+
)
|
| 211 |
+
|
| 212 |
+
def __del__(self):
|
| 213 |
+
self.shutdown()
|
| 214 |
+
|
| 215 |
+
def shutdown(self):
|
| 216 |
+
"""Shutdown, cleaning up the background proc and IPC."""
|
| 217 |
+
|
| 218 |
+
shutdown_prometheus()
|
| 219 |
+
|
| 220 |
+
if engine_core := getattr(self, "engine_core", None):
|
| 221 |
+
engine_core.shutdown()
|
| 222 |
+
|
| 223 |
+
cancel_task_threadsafe(getattr(self, "output_handler", None))
|
| 224 |
+
|
| 225 |
+
async def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
|
| 226 |
+
return await self.engine_core.get_supported_tasks_async()
|
| 227 |
+
|
| 228 |
+
async def add_request(
|
| 229 |
+
self,
|
| 230 |
+
request_id: str,
|
| 231 |
+
prompt: PromptType,
|
| 232 |
+
params: Union[SamplingParams, PoolingParams],
|
| 233 |
+
arrival_time: Optional[float] = None,
|
| 234 |
+
lora_request: Optional[LoRARequest] = None,
|
| 235 |
+
tokenization_kwargs: Optional[dict[str, Any]] = None,
|
| 236 |
+
trace_headers: Optional[Mapping[str, str]] = None,
|
| 237 |
+
priority: int = 0,
|
| 238 |
+
data_parallel_rank: Optional[int] = None,
|
| 239 |
+
) -> RequestOutputCollector:
|
| 240 |
+
"""Add new request to the AsyncLLM."""
|
| 241 |
+
|
| 242 |
+
if self.errored:
|
| 243 |
+
raise EngineDeadError()
|
| 244 |
+
|
| 245 |
+
is_pooling = isinstance(params, PoolingParams)
|
| 246 |
+
|
| 247 |
+
# Create a new output collector for the request.
|
| 248 |
+
queue = RequestOutputCollector(output_kind=params.output_kind)
|
| 249 |
+
|
| 250 |
+
# Convert Input --> Request.
|
| 251 |
+
prompt_str, request = self.processor.process_inputs(
|
| 252 |
+
request_id, prompt, params, arrival_time, lora_request,
|
| 253 |
+
tokenization_kwargs, trace_headers, priority, data_parallel_rank)
|
| 254 |
+
|
| 255 |
+
if is_pooling or params.n == 1:
|
| 256 |
+
await self._add_request(request, prompt_str, None, 0, queue)
|
| 257 |
+
return queue
|
| 258 |
+
|
| 259 |
+
# Fan out child requests (for n>1).
|
| 260 |
+
parent_request = ParentRequest(request_id, params)
|
| 261 |
+
for idx in range(params.n):
|
| 262 |
+
request_id, params = parent_request.get_child_info(idx)
|
| 263 |
+
child_request = request if idx == params.n - 1 else copy(request)
|
| 264 |
+
child_request.request_id = request_id
|
| 265 |
+
child_request.sampling_params = params
|
| 266 |
+
await self._add_request(child_request, prompt_str, parent_request,
|
| 267 |
+
idx, queue)
|
| 268 |
+
return queue
|
| 269 |
+
|
| 270 |
+
async def _add_request(self, request: EngineCoreRequest,
|
| 271 |
+
prompt: Optional[str],
|
| 272 |
+
parent_req: Optional[ParentRequest], index: int,
|
| 273 |
+
queue: RequestOutputCollector):
|
| 274 |
+
|
| 275 |
+
# Add the request to OutputProcessor (this process).
|
| 276 |
+
self.output_processor.add_request(request, prompt, parent_req, index,
|
| 277 |
+
queue)
|
| 278 |
+
|
| 279 |
+
# Add the EngineCoreRequest to EngineCore (separate process).
|
| 280 |
+
await self.engine_core.add_request_async(request)
|
| 281 |
+
|
| 282 |
+
if self.log_requests:
|
| 283 |
+
logger.info("Added request %s.", request.request_id)
|
| 284 |
+
|
| 285 |
+
# TODO: we should support multiple prompts in one call, as you
|
| 286 |
+
# can do with LLM.generate. So that for multi-prompt completion
|
| 287 |
+
# requests we don't need to send multiple messages to core proc,
|
| 288 |
+
# and so we don't need multiple streams which then get
|
| 289 |
+
# re-multiplexed in the API server anyhow.
|
| 290 |
+
async def generate(
|
| 291 |
+
self,
|
| 292 |
+
prompt: PromptType,
|
| 293 |
+
sampling_params: SamplingParams,
|
| 294 |
+
request_id: str,
|
| 295 |
+
lora_request: Optional[LoRARequest] = None,
|
| 296 |
+
trace_headers: Optional[Mapping[str, str]] = None,
|
| 297 |
+
priority: int = 0,
|
| 298 |
+
data_parallel_rank: Optional[int] = None,
|
| 299 |
+
) -> AsyncGenerator[RequestOutput, None]:
|
| 300 |
+
"""
|
| 301 |
+
Main function called by the API server to kick off a request
|
| 302 |
+
* 1) Making an AsyncStream corresponding to the Request.
|
| 303 |
+
* 2) Processing the Input.
|
| 304 |
+
* 3) Adding the Request to the Detokenizer.
|
| 305 |
+
* 4) Adding the Request to the EngineCore (separate process).
|
| 306 |
+
|
| 307 |
+
A separate output_handler loop runs in a background AsyncIO task,
|
| 308 |
+
pulling outputs from EngineCore and putting them into the
|
| 309 |
+
per-request AsyncStream.
|
| 310 |
+
|
| 311 |
+
The caller of generate() iterates the returned AsyncGenerator,
|
| 312 |
+
returning the RequestOutput back to the caller.
|
| 313 |
+
"""
|
| 314 |
+
|
| 315 |
+
try:
|
| 316 |
+
# We start the output_handler on the first call to generate() so
|
| 317 |
+
# we can call __init__ before the event loop, which enables us
|
| 318 |
+
# to handle startup failure gracefully in the OpenAI server.
|
| 319 |
+
self._run_output_handler()
|
| 320 |
+
|
| 321 |
+
q = await self.add_request(
|
| 322 |
+
request_id,
|
| 323 |
+
prompt,
|
| 324 |
+
sampling_params,
|
| 325 |
+
lora_request=lora_request,
|
| 326 |
+
trace_headers=trace_headers,
|
| 327 |
+
priority=priority,
|
| 328 |
+
data_parallel_rank=data_parallel_rank,
|
| 329 |
+
)
|
| 330 |
+
|
| 331 |
+
# The output_handler task pushes items into the queue.
|
| 332 |
+
# This task pulls from the queue and yields to caller.
|
| 333 |
+
finished = False
|
| 334 |
+
while not finished:
|
| 335 |
+
# Note: drain queue without await if possible (avoids
|
| 336 |
+
# task switching under load which helps performance).
|
| 337 |
+
out = q.get_nowait() or await q.get()
|
| 338 |
+
|
| 339 |
+
# Note: both OutputProcessor and EngineCore handle their
|
| 340 |
+
# own request cleanup based on finished.
|
| 341 |
+
finished = out.finished
|
| 342 |
+
yield out
|
| 343 |
+
|
| 344 |
+
# If the request is disconnected by the client, generate()
|
| 345 |
+
# is cancelled or the generator is garbage collected. So,
|
| 346 |
+
# we abort the request if we end up here.
|
| 347 |
+
except (asyncio.CancelledError, GeneratorExit):
|
| 348 |
+
await self.abort(request_id)
|
| 349 |
+
if self.log_requests:
|
| 350 |
+
logger.info("Request %s aborted.", request_id)
|
| 351 |
+
raise
|
| 352 |
+
|
| 353 |
+
# Engine is dead. Do not abort since we shut down.
|
| 354 |
+
except EngineDeadError:
|
| 355 |
+
if self.log_requests:
|
| 356 |
+
logger.info("Request %s failed (engine dead).", request_id)
|
| 357 |
+
raise
|
| 358 |
+
|
| 359 |
+
# Request validation error.
|
| 360 |
+
except ValueError:
|
| 361 |
+
if self.log_requests:
|
| 362 |
+
logger.info("Request %s failed (bad request).", request_id)
|
| 363 |
+
raise
|
| 364 |
+
|
| 365 |
+
# Unexpected error in the generate() task (possibly recoverable).
|
| 366 |
+
except Exception as e:
|
| 367 |
+
await self.abort(request_id)
|
| 368 |
+
if self.log_requests:
|
| 369 |
+
logger.info("Request %s failed.", request_id)
|
| 370 |
+
raise EngineGenerateError() from e
|
| 371 |
+
|
| 372 |
+
def _run_output_handler(self):
|
| 373 |
+
"""Background loop: pulls from EngineCore and pushes to AsyncStreams."""
|
| 374 |
+
|
| 375 |
+
if self.output_handler is not None:
|
| 376 |
+
return
|
| 377 |
+
|
| 378 |
+
# Ensure that the task doesn't have a circular ref back to the AsyncLLM
|
| 379 |
+
# object, or else it won't be garbage collected and cleaned up properly.
|
| 380 |
+
engine_core = self.engine_core
|
| 381 |
+
output_processor = self.output_processor
|
| 382 |
+
log_stats = self.log_stats
|
| 383 |
+
logger_manager = self.logger_manager
|
| 384 |
+
|
| 385 |
+
async def output_handler():
|
| 386 |
+
try:
|
| 387 |
+
while True:
|
| 388 |
+
# 1) Pull EngineCoreOutputs from the EngineCore.
|
| 389 |
+
outputs = await engine_core.get_output_async()
|
| 390 |
+
num_outputs = len(outputs.outputs)
|
| 391 |
+
|
| 392 |
+
iteration_stats = IterationStats() if (
|
| 393 |
+
log_stats and num_outputs) else None
|
| 394 |
+
|
| 395 |
+
# Split outputs into chunks of at most
|
| 396 |
+
# VLLM_V1_OUTPUT_PROC_CHUNK_SIZE, so that we don't block the
|
| 397 |
+
# event loop for too long.
|
| 398 |
+
if num_outputs <= VLLM_V1_OUTPUT_PROC_CHUNK_SIZE:
|
| 399 |
+
slices = (outputs.outputs, )
|
| 400 |
+
else:
|
| 401 |
+
slices = np.array_split(
|
| 402 |
+
outputs.outputs,
|
| 403 |
+
cdiv(num_outputs, VLLM_V1_OUTPUT_PROC_CHUNK_SIZE))
|
| 404 |
+
|
| 405 |
+
for i, outputs_slice in enumerate(slices):
|
| 406 |
+
# 2) Process EngineCoreOutputs.
|
| 407 |
+
processed_outputs = output_processor.process_outputs(
|
| 408 |
+
outputs_slice, outputs.timestamp, iteration_stats)
|
| 409 |
+
# NOTE: RequestOutputs are pushed to their queues.
|
| 410 |
+
assert not processed_outputs.request_outputs
|
| 411 |
+
|
| 412 |
+
# Allow other asyncio tasks to run between chunks
|
| 413 |
+
if i + 1 < len(slices):
|
| 414 |
+
await asyncio.sleep(0)
|
| 415 |
+
|
| 416 |
+
# 3) Abort any reqs that finished due to stop strings.
|
| 417 |
+
await engine_core.abort_requests_async(
|
| 418 |
+
processed_outputs.reqs_to_abort)
|
| 419 |
+
|
| 420 |
+
# 4) Logging.
|
| 421 |
+
# TODO(rob): make into a coroutine and launch it in
|
| 422 |
+
# background thread once Prometheus overhead is non-trivial.
|
| 423 |
+
if logger_manager:
|
| 424 |
+
logger_manager.record(
|
| 425 |
+
engine_idx=outputs.engine_index,
|
| 426 |
+
scheduler_stats=outputs.scheduler_stats,
|
| 427 |
+
iteration_stats=iteration_stats,
|
| 428 |
+
)
|
| 429 |
+
except Exception as e:
|
| 430 |
+
logger.exception("AsyncLLM output_handler failed.")
|
| 431 |
+
output_processor.propagate_error(e)
|
| 432 |
+
|
| 433 |
+
self.output_handler = asyncio.create_task(output_handler())
|
| 434 |
+
|
| 435 |
+
async def abort(self, request_id: Union[str, Iterable[str]]) -> None:
|
| 436 |
+
"""Abort RequestId in OutputProcessor and EngineCore."""
|
| 437 |
+
|
| 438 |
+
request_ids = (request_id, ) if isinstance(
|
| 439 |
+
request_id, str) else as_list(request_id)
|
| 440 |
+
all_request_ids = self.output_processor.abort_requests(request_ids)
|
| 441 |
+
await self.engine_core.abort_requests_async(all_request_ids)
|
| 442 |
+
|
| 443 |
+
if self.log_requests:
|
| 444 |
+
logger.info("Aborted request(s) %s.", ",".join(request_ids))
|
| 445 |
+
|
| 446 |
+
async def encode(
|
| 447 |
+
self,
|
| 448 |
+
prompt: PromptType,
|
| 449 |
+
pooling_params: PoolingParams,
|
| 450 |
+
request_id: str,
|
| 451 |
+
lora_request: Optional[LoRARequest] = None,
|
| 452 |
+
trace_headers: Optional[Mapping[str, str]] = None,
|
| 453 |
+
priority: int = 0,
|
| 454 |
+
tokenization_kwargs: Optional[dict[str, Any]] = None,
|
| 455 |
+
) -> AsyncGenerator[PoolingRequestOutput, None]:
|
| 456 |
+
"""
|
| 457 |
+
Main function called by the API server to kick off a request
|
| 458 |
+
* 1) Making an AsyncStream corresponding to the Request.
|
| 459 |
+
* 2) Processing the Input.
|
| 460 |
+
* 3) Adding the Request to the EngineCore (separate process).
|
| 461 |
+
|
| 462 |
+
A separate output_handler loop runs in a background AsyncIO task,
|
| 463 |
+
pulling outputs from EngineCore and putting them into the
|
| 464 |
+
per-request AsyncStream.
|
| 465 |
+
|
| 466 |
+
The caller of generate() iterates the returned AsyncGenerator,
|
| 467 |
+
returning the RequestOutput back to the caller.
|
| 468 |
+
"""
|
| 469 |
+
|
| 470 |
+
try:
|
| 471 |
+
# We start the output_handler on the first call to generate() so
|
| 472 |
+
# we can call __init__ before the event loop, which enables us
|
| 473 |
+
# to handle startup failure gracefully in the OpenAI server.
|
| 474 |
+
self._run_output_handler()
|
| 475 |
+
|
| 476 |
+
q = await self.add_request(
|
| 477 |
+
request_id,
|
| 478 |
+
prompt,
|
| 479 |
+
pooling_params,
|
| 480 |
+
lora_request=lora_request,
|
| 481 |
+
trace_headers=trace_headers,
|
| 482 |
+
priority=priority,
|
| 483 |
+
tokenization_kwargs=tokenization_kwargs,
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
# The output_handler task pushes items into the queue.
|
| 487 |
+
# This task pulls from the queue and yields to caller.
|
| 488 |
+
finished = False
|
| 489 |
+
while not finished:
|
| 490 |
+
# Note: drain queue without await if possible (avoids
|
| 491 |
+
# task switching under load which helps performance).
|
| 492 |
+
out = q.get_nowait() or await q.get()
|
| 493 |
+
assert isinstance(out, PoolingRequestOutput)
|
| 494 |
+
# Note: both OutputProcessor and EngineCore handle their
|
| 495 |
+
# own request cleanup based on finished.
|
| 496 |
+
finished = out.finished
|
| 497 |
+
yield out
|
| 498 |
+
|
| 499 |
+
# If the request is disconnected by the client, generate()
|
| 500 |
+
# is cancelled. So, we abort the request if we end up here.
|
| 501 |
+
except asyncio.CancelledError:
|
| 502 |
+
await self.abort(request_id)
|
| 503 |
+
if self.log_requests:
|
| 504 |
+
logger.info("Request %s aborted.", request_id)
|
| 505 |
+
raise
|
| 506 |
+
|
| 507 |
+
# Engine is dead. Do not abort since we shut down.
|
| 508 |
+
except EngineDeadError:
|
| 509 |
+
if self.log_requests:
|
| 510 |
+
logger.info("Request %s failed (engine dead).", request_id)
|
| 511 |
+
raise
|
| 512 |
+
|
| 513 |
+
# Request validation error.
|
| 514 |
+
except ValueError:
|
| 515 |
+
if self.log_requests:
|
| 516 |
+
logger.info("Request %s failed (bad request).", request_id)
|
| 517 |
+
raise
|
| 518 |
+
|
| 519 |
+
# Unexpected error in the generate() task (possibly recoverable).
|
| 520 |
+
except Exception as e:
|
| 521 |
+
await self.abort(request_id)
|
| 522 |
+
if self.log_requests:
|
| 523 |
+
logger.info("Request %s failed.", request_id)
|
| 524 |
+
raise EngineGenerateError() from e
|
| 525 |
+
|
| 526 |
+
    async def get_vllm_config(self) -> VllmConfig:
        """Return the full vLLM configuration used by this engine."""
        return self.vllm_config
|
| 528 |
+
|
| 529 |
+
    async def get_model_config(self) -> ModelConfig:
        """Return the model configuration used by this engine."""
        return self.model_config
|
| 531 |
+
|
| 532 |
+
    async def get_decoding_config(self):
        """Not supported on the V1 engine; always raises ValueError."""
        raise ValueError("Not Supported on V1 yet.")
|
| 534 |
+
|
| 535 |
+
    async def get_input_preprocessor(self) -> InputPreprocessor:
        """Return the input preprocessor owned by the request processor."""
        return self.processor.input_preprocessor
|
| 537 |
+
|
| 538 |
+
async def get_tokenizer(
|
| 539 |
+
self,
|
| 540 |
+
lora_request: Optional[LoRARequest] = None,
|
| 541 |
+
) -> AnyTokenizer:
|
| 542 |
+
if self.tokenizer is None:
|
| 543 |
+
raise ValueError("Unable to get tokenizer because "
|
| 544 |
+
"skip_tokenizer_init is True")
|
| 545 |
+
|
| 546 |
+
return self.tokenizer.get_lora_tokenizer(lora_request)
|
| 547 |
+
|
| 548 |
+
    async def is_tracing_enabled(self) -> bool:
        """Tracing is not supported here; always returns False."""
        return False
|
| 550 |
+
|
| 551 |
+
async def do_log_stats(
|
| 552 |
+
self,
|
| 553 |
+
scheduler_outputs=None,
|
| 554 |
+
model_output=None,
|
| 555 |
+
) -> None:
|
| 556 |
+
if self.logger_manager:
|
| 557 |
+
self.logger_manager.log()
|
| 558 |
+
|
| 559 |
+
    async def check_health(self) -> None:
        """Raise the engine's dead error if it has failed; no-op otherwise."""
        logger.debug("Called check_health.")
        if self.errored:
            raise self.dead_error
|
| 563 |
+
|
| 564 |
+
    async def start_profile(self) -> None:
        """Ask the engine core to start profiling."""
        await self.engine_core.profile_async(True)
|
| 566 |
+
|
| 567 |
+
    async def stop_profile(self) -> None:
        """Ask the engine core to stop profiling."""
        await self.engine_core.profile_async(False)
|
| 569 |
+
|
| 570 |
+
    async def reset_mm_cache(self) -> None:
        """Reset all multi-modal caches: the processor cache, the MM input
        cache client, and the engine-core-side cache."""
        self.processor.mm_registry.reset_processor_cache(self.model_config)
        self.processor.mm_input_cache_client.reset()
        await self.engine_core.reset_mm_cache_async()
|
| 574 |
+
|
| 575 |
+
    async def reset_prefix_cache(self,
                                 device: Optional[Device] = None) -> None:
        """Reset the prefix cache on the engine core.

        Raises:
            ValueError: if ``device`` is ``Device.CPU`` (not supported).
        """
        if device == Device.CPU:
            raise ValueError("Not supported on CPU.")
        await self.engine_core.reset_prefix_cache_async()
|
| 580 |
+
|
| 581 |
+
    async def sleep(self, level: int = 1) -> None:
        """Put the engine to sleep at the given level.

        The prefix cache is reset first — presumably because cached KV
        blocks are not valid across a sleep/wake cycle (TODO confirm).
        """
        await self.reset_prefix_cache()
        await self.engine_core.sleep_async(level)
|
| 584 |
+
|
| 585 |
+
    async def wake_up(self, tags: Optional[list[str]] = None) -> None:
        """Wake the engine from sleep, optionally limited to tagged groups."""
        await self.engine_core.wake_up_async(tags)
|
| 587 |
+
|
| 588 |
+
    async def is_sleeping(self) -> bool:
        """Return whether the engine core is currently asleep."""
        return await self.engine_core.is_sleeping_async()
|
| 590 |
+
|
| 591 |
+
    async def add_lora(self, lora_request: LoRARequest) -> bool:
        """Load a new LoRA adapter into the engine for future requests."""
        return await self.engine_core.add_lora_async(lora_request)
|
| 594 |
+
|
| 595 |
+
    async def remove_lora(self, lora_id: int) -> bool:
        """Remove an already loaded LoRA adapter."""
        return await self.engine_core.remove_lora_async(lora_id)
|
| 598 |
+
|
| 599 |
+
    async def list_loras(self) -> set[int]:
        """List the IDs of all registered LoRA adapters."""
        return await self.engine_core.list_loras_async()
|
| 602 |
+
|
| 603 |
+
    async def pin_lora(self, lora_id: int) -> bool:
        """Prevent an adapter from being evicted."""
        return await self.engine_core.pin_lora_async(lora_id)
|
| 606 |
+
|
| 607 |
+
    async def collective_rpc(self,
                             method: str,
                             timeout: Optional[float] = None,
                             args: tuple = (),
                             kwargs: Optional[dict] = None):
        """Forward a collective RPC call through the engine core client.

        Args:
            method: Name of the method to invoke.
            timeout: Optional timeout in seconds for the call.
            args: Positional arguments forwarded to the method.
            kwargs: Keyword arguments forwarded to the method.
        """
        return await self.engine_core.collective_rpc_async(
            method, timeout, args, kwargs)
|
| 617 |
+
|
| 618 |
+
async def wait_for_requests_to_drain(self, drain_timeout: int = 300):
|
| 619 |
+
"""Wait for all requests to be drained."""
|
| 620 |
+
start_time = time.time()
|
| 621 |
+
while time.time() - start_time < drain_timeout:
|
| 622 |
+
if not self.engine_core.dp_engines_running():
|
| 623 |
+
logger.info("Engines are idle, requests have been drained")
|
| 624 |
+
return
|
| 625 |
+
|
| 626 |
+
logger.info(
|
| 627 |
+
"Engines are still running, waiting for requests to drain...")
|
| 628 |
+
await asyncio.sleep(1) # Wait 1 second before checking again
|
| 629 |
+
|
| 630 |
+
raise TimeoutError(f"Timeout reached after {drain_timeout} seconds "
|
| 631 |
+
"waiting for requests to drain.")
|
| 632 |
+
|
| 633 |
+
    async def scale_elastic_ep(self,
                               new_data_parallel_size: int,
                               drain_timeout: int = 300):
        """
        Scale up or down the data parallel size by adding or removing
        engine cores.
        Args:
            new_data_parallel_size: The new number of data parallel workers
            drain_timeout:
                Maximum time to wait for requests to drain (seconds)
        """
        old_data_parallel_size = \
            self.vllm_config.parallel_config.data_parallel_size
        # No-op if the requested size matches the current size.
        if old_data_parallel_size == new_data_parallel_size:
            logger.info("Data parallel size is already %s, skipping scale",
                        new_data_parallel_size)
            return
        # Engines must be idle before reconfiguring; raises TimeoutError
        # if they do not drain in time.
        logger.info(
            "Waiting for requests to drain before "
            "scaling up to %s engines...", new_data_parallel_size)
        await self.wait_for_requests_to_drain(drain_timeout)
        logger.info(
            "Requests have been drained, proceeding with scale "
            "to %s engines", new_data_parallel_size)
        await self.engine_core.scale_elastic_ep(new_data_parallel_size)
        # Only update the recorded size after the core reconfiguration
        # succeeds.
        self.vllm_config.parallel_config.data_parallel_size = \
            new_data_parallel_size

        # recreate stat loggers
        if new_data_parallel_size > old_data_parallel_size and self.log_stats:
            # TODO(rob): fix this after talking with Ray team.
            # This resets all the prometheus metrics since we
            # unregister during initialization. Need to understand
            # the intended behavior here better.
            self.logger_manager = StatLoggerManager(
                vllm_config=self.vllm_config,
                engine_idxs=list(range(new_data_parallel_size)),
                custom_stat_loggers=None,
            )
|
| 672 |
+
|
| 673 |
+
    @property
    def is_running(self) -> bool:
        """True while the output handler task has not completed."""
        # Is None before the loop is started.
        return self.output_handler is None or not self.output_handler.done()
|
| 677 |
+
|
| 678 |
+
    @property
    def is_stopped(self) -> bool:
        """Alias for ``errored``: the engine cannot make further progress."""
        return self.errored
|
| 681 |
+
|
| 682 |
+
    @property
    def errored(self) -> bool:
        """True if the engine core died or the output handler has stopped."""
        return self.engine_core.resources.engine_dead or not self.is_running
|
| 685 |
+
|
| 686 |
+
    @property
    def dead_error(self) -> BaseException:
        """Exception to raise on behalf of a dead engine."""
        return EngineDeadError()
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/coordinator.py
ADDED
|
@@ -0,0 +1,357 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
import copy
|
| 4 |
+
import multiprocessing
|
| 5 |
+
import time
|
| 6 |
+
import weakref
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
import msgspec.msgpack
|
| 10 |
+
import zmq
|
| 11 |
+
|
| 12 |
+
from vllm.config import ParallelConfig
|
| 13 |
+
from vllm.logger import init_logger
|
| 14 |
+
from vllm.utils import get_mp_context, make_zmq_socket, set_process_title
|
| 15 |
+
from vllm.v1.engine import EngineCoreOutputs, EngineCoreRequestType
|
| 16 |
+
from vllm.v1.serial_utils import MsgpackDecoder
|
| 17 |
+
from vllm.v1.utils import get_engine_client_zmq_addr, shutdown
|
| 18 |
+
|
| 19 |
+
logger = init_logger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class DPCoordinator:
    """Coordinator process used for data-parallel deployments (DP>1).

    Intermediates between multiple DP engine rank processes and one or more
    front-end API server processes.

    * Collects stats from each DP engine (currently just waiting and running
      queue lengths), and publishes these to all front-ends for use in
      load-balancing decisions.

    * Keeps track of the current DP "request wave" number and running state
      of the engines. This is received from the DP rank 0 engine and published
      to the front-end processes along with the current load stats.

    The engines alternate between a global running/paused state. The global
    "request wave" number is a count of the number of times that the workers
    collectively move from a running state to a paused state. This transition
    is synchronized via the all-reduce operation performed in the
    DPEngineCoreProc._has_global_unfinished_reqs method.

    * Broadcasts the START_DP_WAVE message to engines to move them from paused
      to running state when one engine receives a new request. This can happen
      in two cases:
      1) A front-end sending a new request while the engines are paused will
         concurrently notify the coordinator.
      2) An engine receiving a request for a stale request wave while in
         paused state will notify the coordinator.

    Engines will move into running state when receiving a new request or
    START_DP_WAVE message.

    Note that when deployed in External LB mode, no stats will be published by
    the engines and thus updates will only be sent to front-ends when the
    request wave / running state changes.
    """

    def __init__(self, parallel_config: ParallelConfig):

        dp_size = parallel_config.data_parallel_size
        assert dp_size > 1, "Coordinator only used for data parallel"

        host = parallel_config.data_parallel_master_ip
        external_lb = parallel_config.data_parallel_external_lb
        hybrid_lb = parallel_config.data_parallel_hybrid_lb

        # Assume coordinator is colocated with front-end procs when not in
        # either external or hybrid DP LB mode.
        local_only = not (external_lb or hybrid_lb)
        # Address on which stats are published to the front-end processes.
        front_publish_address = get_engine_client_zmq_addr(
            local_only=local_only, host=host)

        # Engine-facing sockets can be local (IPC) only when all DP ranks
        # are colocated on this node.
        local_only_eng = dp_size == parallel_config.data_parallel_size_local
        back_publish_address = get_engine_client_zmq_addr(local_only_eng, host)
        back_output_address = get_engine_client_zmq_addr(local_only_eng, host)

        # Run the coordinator loop in a separate daemon process.
        context = get_mp_context()
        self.proc: multiprocessing.Process = context.Process(
            target=DPCoordinatorProc.run_coordinator,
            name="VLLM_DP_Coordinator",
            kwargs={
                "engine_count": parallel_config.data_parallel_size,
                "front_publish_address": front_publish_address,
                "back_output_address": back_output_address,
                "back_publish_address": back_publish_address,
            },
            daemon=True)
        self.proc.start()

        self.stats_publish_address = front_publish_address
        self.coord_in_address = back_publish_address
        self.coord_out_address = back_output_address
        # Ensure the child process is shut down when this object is
        # garbage collected.
        self._finalizer = weakref.finalize(self, shutdown, [self.proc])

    def get_stats_publish_address(self) -> str:
        """Return the address front-ends subscribe to for stats updates."""
        return self.stats_publish_address

    def get_engine_socket_addresses(self) -> tuple[str, str]:
        """Returns tuple of ZMQ input address, output address."""
        return self.coord_in_address, self.coord_out_address

    def close(self):
        """Terminate the coordinator process (idempotent)."""
        self._finalizer()
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class EngineState:
    """Per-engine load state tracked by the coordinator."""

    def __init__(self):
        # request_counts[0] = waiting queue length,
        # request_counts[1] = running queue length.
        self.request_counts = [0, 0]  # [waiting, running]
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class DPCoordinatorProc:
    """Implementation that runs inside the coordinator child process.

    Owns the ZMQ sockets connecting the DP engines and front-end API
    servers; see ``DPCoordinator`` for the protocol description.
    """

    def __init__(self,
                 engine_count: int,
                 min_stats_update_interval_ms: int = 100):
        set_process_title("DPCoordinator")
        self.ctx = zmq.Context()

        # One load-state entry per DP engine rank.
        self.engines = [EngineState() for _ in range(engine_count)]

        # Minimum interval between stats publications to the front-ends.
        self.stats_update_interval_ms = min_stats_update_interval_ms

    @staticmethod
    def run_coordinator(
        engine_count: int,
        front_publish_address: str,
        back_output_address: str,
        back_publish_address: str,
        min_stats_update_interval_ms: int = 100,
    ):
        """Process entry point: construct the coordinator and run its
        socket-processing loop until interrupted."""
        coordinator = DPCoordinatorProc(
            engine_count=engine_count,
            min_stats_update_interval_ms=min_stats_update_interval_ms)
        try:
            coordinator.process_input_socket(
                front_publish_address,
                back_output_address,
                back_publish_address,
            )
        except KeyboardInterrupt:
            logger.info("DP Coordinator process exiting")

    def process_input_socket(self, front_publish_address: str,
                             back_output_address: str,
                             back_publish_address: str):
        """Main event loop: relay engine stats/wave state to front-ends and
        broadcast START_DP_WAVE messages back to the engines.

        Blocks forever (until the process is killed or an error occurs).
        """

        decoder = MsgpackDecoder(EngineCoreOutputs)

        # For tracking request wave progression.
        current_wave = 0
        engines_running = False

        # For tracking request counts for internal load-balancing.
        stats_changed = False
        last_stats_step = -1
        last_stats_wave = -1
        last_step_counts: Optional[list[list[int]]] = None

        with make_zmq_socket(
                path=front_publish_address,  # IPC
                ctx=self.ctx,
                socket_type=zmq.XPUB,
                bind=True,
        ) as publish_front, make_zmq_socket(
                path=back_output_address,  # IPC or TCP
                ctx=self.ctx,
                socket_type=zmq.PULL,
                bind=True,
        ) as output_back, make_zmq_socket(
                path=back_publish_address,  # IPC or TCP
                ctx=self.ctx,
                socket_type=zmq.XPUB,
                bind=True,
        ) as publish_back:

            # Wait until all engines subscribe.
            # (XPUB delivers a b'\x01'-prefixed message per subscription.)
            for _ in self.engines:
                if publish_back.recv() != b'\x01':
                    logger.error(
                        "DP Coordinator received unexpected message while "
                        "waiting for engines to subscribe")
                    return
            # Send ready message to engines.
            publish_back.send(b"READY")

            logger.info("All engine subscriptions received by DP coordinator")

            poller = zmq.Poller()
            poller.register(publish_front, zmq.POLLIN)
            poller.register(output_back, zmq.POLLIN)
            last_publish_time = 0
            while True:
                elapsed = int(time.time() * 1000) - last_publish_time
                # Send at stats_update_interval_ms interval if the stats have
                # changed, or otherwise every 5 seconds.
                wait_for = (self.stats_update_interval_ms
                            if stats_changed else 5000)

                # Wait at least 50ms to ensure we've received all stats for
                # the current step.
                min_timeout = 50 if last_step_counts is None else 0

                events = poller.poll(timeout=max(min_timeout, wait_for -
                                                 elapsed))
                if not events:
                    # Poller timeout - publish current stats to front-ends.
                    if last_step_counts is not None:
                        engine_req_counts_list = last_step_counts
                        last_step_counts = None
                    else:
                        engine_req_counts_list = self._get_engine_counts()
                        stats_changed = False

                    to_publish = (engine_req_counts_list, current_wave,
                                  engines_running)
                    publish_front.send(msgspec.msgpack.encode(to_publish))
                    last_publish_time = int(time.time() * 1000)
                    continue

                events = dict(events)
                wave_state_changed = False

                if publish_front in events:
                    buffer = publish_front.recv()
                    if buffer in (b'\x01', b'\x00'):
                        # Ignore subscription messages.
                        continue

                    decoded = msgspec.msgpack.decode(buffer)
                    if isinstance(decoded, (list, tuple)) and len(
                            decoded) == 2 and decoded[0] == "SCALE_ELASTIC_EP":
                        # Handle scale up notification
                        new_engine_count = decoded[1]
                        current_count = len(self.engines)
                        if new_engine_count > current_count:
                            for _ in range(new_engine_count - current_count):
                                self.engines.append(EngineState())
                            # NOTE(yongji): handle the case
                            # where newly started engines have current_wave = 0
                            # if existing engines just finished a wave
                            # and engine_running isn't updated yet at
                            # CoordinatorProc requests routed to newly started
                            # engines may not wake up existing engines, as long
                            # as 0 < request.wave < existing engines'
                            # current_wave
                            # we note that 0 is the wave number for the new
                            # engine
                            engines_running = False
                            logger.info(
                                "DPCoordinator scaled up from %s to %s "
                                "engines", current_count, new_engine_count)
                        else:
                            # Scale down: drop the trailing engine states.
                            self.engines = self.engines[:new_engine_count]
                            logger.info(
                                "DPCoordinator scaled down from %s to %s "
                                "engines", current_count, new_engine_count)
                        continue  # Skip normal engine notification processing

                    # We received a message on the front-end XPUB socket,
                    # from an API server sending a new request while the
                    # engines are paused, so that we can wake the other
                    # engines.
                    engine_to_exclude, wave = decoded
                    if not engines_running:
                        if wave < current_wave:
                            # If the wave number is stale, ensure the message
                            # is handled by all the engines.
                            engine_to_exclude = None

                        engines_running = True
                        wave_state_changed = True
                        self._send_start_wave(publish_back, current_wave,
                                              engine_to_exclude)

                if output_back in events:
                    # We received a message from one of the engines.

                    buffer = output_back.recv()
                    outputs: EngineCoreOutputs = decoder.decode(buffer)

                    # Engines only send control/stats messages here, never
                    # request outputs.
                    assert not outputs.outputs
                    assert outputs.utility_output is None

                    eng_index = outputs.engine_index
                    scheduler_stats = outputs.scheduler_stats
                    if scheduler_stats:
                        # 1. Updated request load stats - update our local
                        # state with these.
                        stats = self.engines[eng_index].request_counts
                        stats_step = scheduler_stats.step_counter
                        stats_wave = scheduler_stats.current_wave
                        # Only advance the (wave, step) watermark for
                        # monotonically newer stats; snapshot the previous
                        # counts so the timeout path can publish them first.
                        if (stats_wave > last_stats_wave
                                or stats_wave == last_stats_wave
                                and stats_step > last_stats_step):
                            if stats_changed:
                                last_step_counts = self._get_engine_counts(
                                    do_copy=True)
                            last_stats_step = stats_step
                            last_stats_wave = stats_wave
                        elif stats_wave != last_stats_wave or (
                                stats_step != last_stats_step):
                            logger.warning(
                                "Received stats for out-of-order "
                                "step (%d, %d) from engine %d (expected "
                                "> (%d, %d))", stats_wave, stats_step,
                                eng_index, last_stats_wave, last_stats_step)
                        stats[0] = scheduler_stats.num_waiting_reqs
                        stats[1] = scheduler_stats.num_running_reqs
                        stats_changed = True

                    if (wave := outputs.wave_complete) is not None:
                        # 2. Notification from rank 0 engine that we've
                        # moved into the global paused state
                        # (engines_running==False).
                        if current_wave <= wave:
                            new_wave = wave + 1
                            logger.debug("Moving DP wave from %d to %d.",
                                         current_wave, new_wave)
                            current_wave = new_wave
                        engines_running = False
                        wave_state_changed = True
                    elif (wave := outputs.start_wave) is not None and (
                            wave > current_wave or
                            (wave == current_wave and not engines_running)):
                        # 3. The engine received request for a non-current wave
                        # so we must ensure that other engines progress to the
                        # next wave (race condition handling).
                        logger.debug(
                            "Starting wave %d after notification of "
                            "stale wave request from engine.", wave)
                        current_wave = wave
                        engines_running = True
                        wave_state_changed = True
                        self._send_start_wave(publish_back, wave, eng_index)

                if wave_state_changed:
                    # Push the new wave/running state to the front-ends
                    # immediately (stats payload omitted).
                    message = (None, current_wave, engines_running)
                    publish_front.send(msgspec.msgpack.encode(message))

    @staticmethod
    def _send_start_wave(socket: zmq.Socket, wave: int,
                         exclude_engine_index: Optional[int]):
        """Broadcast the START_DP_WAVE message to all the engines.
        It includes the current wave number and index of engine which
        has already received a request with this wave number and so doesn't
        require additional notification.
        """
        wave_encoded = msgspec.msgpack.encode((wave, exclude_engine_index))
        socket.send_multipart(
            (EngineCoreRequestType.START_DP_WAVE.value, wave_encoded))

    def _get_engine_counts(self, do_copy=False) -> list[list[int]]:
        """Return list of [waiting, running] count lists for each engine."""
        if do_copy:
            # Copy so the snapshot is not mutated by later stats updates.
            return [copy.copy(e.request_counts) for e in self.engines]
        return [e.request_counts for e in self.engines]
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/core.py
ADDED
|
@@ -0,0 +1,1216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
import os
|
| 4 |
+
import queue
|
| 5 |
+
import signal
|
| 6 |
+
import threading
|
| 7 |
+
import time
|
| 8 |
+
from collections import deque
|
| 9 |
+
from collections.abc import Generator
|
| 10 |
+
from concurrent.futures import Future
|
| 11 |
+
from contextlib import ExitStack, contextmanager
|
| 12 |
+
from inspect import isclass, signature
|
| 13 |
+
from logging import DEBUG
|
| 14 |
+
from typing import Any, Callable, Optional, TypeVar, Union
|
| 15 |
+
|
| 16 |
+
import msgspec
|
| 17 |
+
import zmq
|
| 18 |
+
|
| 19 |
+
from vllm.config import ParallelConfig, VllmConfig
|
| 20 |
+
from vllm.distributed import stateless_destroy_torch_distributed_process_group
|
| 21 |
+
from vllm.logger import init_logger
|
| 22 |
+
from vllm.logging_utils.dump_input import dump_engine_exception
|
| 23 |
+
from vllm.lora.request import LoRARequest
|
| 24 |
+
from vllm.multimodal import MULTIMODAL_REGISTRY
|
| 25 |
+
from vllm.tasks import POOLING_TASKS, SupportedTask
|
| 26 |
+
from vllm.transformers_utils.config import (
|
| 27 |
+
maybe_register_config_serialize_by_value)
|
| 28 |
+
from vllm.utils import (decorate_logs, get_hash_fn_by_name, make_zmq_socket,
|
| 29 |
+
resolve_obj_by_qualname, set_process_title)
|
| 30 |
+
from vllm.v1.core.kv_cache_utils import (BlockHash, get_kv_cache_config,
|
| 31 |
+
get_request_block_hasher,
|
| 32 |
+
init_none_hash,
|
| 33 |
+
unify_kv_cache_configs)
|
| 34 |
+
from vllm.v1.core.sched.interface import SchedulerInterface
|
| 35 |
+
from vllm.v1.core.sched.output import SchedulerOutput
|
| 36 |
+
from vllm.v1.core.sched.scheduler import Scheduler as V1Scheduler
|
| 37 |
+
from vllm.v1.engine import (EngineCoreOutputs, EngineCoreRequest,
|
| 38 |
+
EngineCoreRequestType,
|
| 39 |
+
ReconfigureDistributedRequest, ReconfigureRankType,
|
| 40 |
+
UtilityOutput, UtilityResult)
|
| 41 |
+
from vllm.v1.engine.mm_input_cache import MultiModalInputCacheServer
|
| 42 |
+
from vllm.v1.engine.utils import EngineHandshakeMetadata, EngineZmqAddresses
|
| 43 |
+
from vllm.v1.executor.abstract import Executor
|
| 44 |
+
from vllm.v1.kv_cache_interface import KVCacheConfig
|
| 45 |
+
from vllm.v1.metrics.stats import SchedulerStats
|
| 46 |
+
from vllm.v1.outputs import ModelRunnerOutput
|
| 47 |
+
from vllm.v1.request import Request, RequestStatus
|
| 48 |
+
from vllm.v1.serial_utils import MsgpackDecoder, MsgpackEncoder
|
| 49 |
+
from vllm.v1.structured_output import StructuredOutputManager
|
| 50 |
+
from vllm.version import __version__ as VLLM_VERSION
|
| 51 |
+
|
| 52 |
+
logger = init_logger(__name__)
|
| 53 |
+
|
| 54 |
+
POLLING_TIMEOUT_S = 2.5
|
| 55 |
+
HANDSHAKE_TIMEOUT_MINS = 5
|
| 56 |
+
|
| 57 |
+
_R = TypeVar('_R') # Return type for collective_rpc
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class EngineCore:
|
| 61 |
+
"""Inner loop of vLLM's Engine."""
|
| 62 |
+
|
| 63 |
+
def __init__(self,
|
| 64 |
+
vllm_config: VllmConfig,
|
| 65 |
+
executor_class: type[Executor],
|
| 66 |
+
log_stats: bool,
|
| 67 |
+
executor_fail_callback: Optional[Callable] = None):
|
| 68 |
+
|
| 69 |
+
# plugins need to be loaded at the engine/scheduler level too
|
| 70 |
+
from vllm.plugins import load_general_plugins
|
| 71 |
+
load_general_plugins()
|
| 72 |
+
|
| 73 |
+
self.vllm_config = vllm_config
|
| 74 |
+
logger.info("Initializing a V1 LLM engine (v%s) with config: %s",
|
| 75 |
+
VLLM_VERSION, vllm_config)
|
| 76 |
+
|
| 77 |
+
self.log_stats = log_stats
|
| 78 |
+
|
| 79 |
+
# Setup Model.
|
| 80 |
+
self.model_executor = executor_class(vllm_config)
|
| 81 |
+
if executor_fail_callback is not None:
|
| 82 |
+
self.model_executor.register_failure_callback(
|
| 83 |
+
executor_fail_callback)
|
| 84 |
+
|
| 85 |
+
self.available_gpu_memory_for_kv_cache = -1
|
| 86 |
+
|
| 87 |
+
# Setup KV Caches and update CacheConfig after profiling.
|
| 88 |
+
num_gpu_blocks, num_cpu_blocks, kv_cache_config = \
|
| 89 |
+
self._initialize_kv_caches(vllm_config)
|
| 90 |
+
|
| 91 |
+
vllm_config.cache_config.num_gpu_blocks = num_gpu_blocks
|
| 92 |
+
vllm_config.cache_config.num_cpu_blocks = num_cpu_blocks
|
| 93 |
+
self.collective_rpc("initialize_cache",
|
| 94 |
+
args=(num_gpu_blocks, num_cpu_blocks))
|
| 95 |
+
|
| 96 |
+
self.structured_output_manager = StructuredOutputManager(vllm_config)
|
| 97 |
+
|
| 98 |
+
# Setup scheduler.
|
| 99 |
+
if isinstance(vllm_config.scheduler_config.scheduler_cls, str):
|
| 100 |
+
Scheduler = resolve_obj_by_qualname(
|
| 101 |
+
vllm_config.scheduler_config.scheduler_cls)
|
| 102 |
+
else:
|
| 103 |
+
Scheduler = vllm_config.scheduler_config.scheduler_cls
|
| 104 |
+
|
| 105 |
+
# This warning can be removed once the V1 Scheduler interface is
|
| 106 |
+
# finalized and we can maintain support for scheduler classes that
|
| 107 |
+
# implement it
|
| 108 |
+
if Scheduler is not V1Scheduler:
|
| 109 |
+
logger.warning(
|
| 110 |
+
"Using configured V1 scheduler class %s. "
|
| 111 |
+
"This scheduler interface is not public and "
|
| 112 |
+
"compatibility may not be maintained.",
|
| 113 |
+
vllm_config.scheduler_config.scheduler_cls)
|
| 114 |
+
|
| 115 |
+
if len(kv_cache_config.kv_cache_groups) == 0:
|
| 116 |
+
# Encoder models without KV cache don't support
|
| 117 |
+
# chunked prefill. But do SSM models?
|
| 118 |
+
logger.info("Disabling chunked prefill for model without KVCache")
|
| 119 |
+
vllm_config.scheduler_config.chunked_prefill_enabled = False
|
| 120 |
+
|
| 121 |
+
self.scheduler: SchedulerInterface = Scheduler(
|
| 122 |
+
vllm_config=vllm_config,
|
| 123 |
+
kv_cache_config=kv_cache_config,
|
| 124 |
+
structured_output_manager=self.structured_output_manager,
|
| 125 |
+
include_finished_set=vllm_config.parallel_config.data_parallel_size
|
| 126 |
+
> 1,
|
| 127 |
+
log_stats=self.log_stats,
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
self.mm_input_cache_server = MultiModalInputCacheServer(
|
| 131 |
+
vllm_config.model_config, MULTIMODAL_REGISTRY)
|
| 132 |
+
|
| 133 |
+
# Setup batch queue for pipeline parallelism.
|
| 134 |
+
# Batch queue for scheduled batches. This enables us to asynchronously
|
| 135 |
+
# schedule and execute batches, and is required by pipeline parallelism
|
| 136 |
+
# to eliminate pipeline bubbles.
|
| 137 |
+
self.batch_queue_size = self.model_executor.max_concurrent_batches
|
| 138 |
+
self.batch_queue: Optional[queue.Queue[tuple[Future[ModelRunnerOutput],
|
| 139 |
+
SchedulerOutput]]] = None
|
| 140 |
+
if self.batch_queue_size > 1:
|
| 141 |
+
logger.info("Batch queue is enabled with size %d",
|
| 142 |
+
self.batch_queue_size)
|
| 143 |
+
self.batch_queue = queue.Queue(self.batch_queue_size)
|
| 144 |
+
|
| 145 |
+
self.request_block_hasher: Optional[Callable[[Request],
|
| 146 |
+
list[BlockHash]]] = None
|
| 147 |
+
if (self.vllm_config.cache_config.enable_prefix_caching
|
| 148 |
+
or self.scheduler.get_kv_connector() is not None):
|
| 149 |
+
|
| 150 |
+
block_size = vllm_config.cache_config.block_size
|
| 151 |
+
caching_hash_fn = get_hash_fn_by_name(
|
| 152 |
+
vllm_config.cache_config.prefix_caching_hash_algo)
|
| 153 |
+
init_none_hash(caching_hash_fn)
|
| 154 |
+
|
| 155 |
+
self.request_block_hasher = get_request_block_hasher(
|
| 156 |
+
block_size, caching_hash_fn)
|
| 157 |
+
|
| 158 |
+
def _initialize_kv_caches(
|
| 159 |
+
self, vllm_config: VllmConfig) -> tuple[int, int, KVCacheConfig]:
|
| 160 |
+
start = time.time()
|
| 161 |
+
|
| 162 |
+
# Get all kv cache needed by the model
|
| 163 |
+
kv_cache_specs = self.model_executor.get_kv_cache_specs()
|
| 164 |
+
|
| 165 |
+
has_kv_cache = any(kv_cache_spec for kv_cache_spec in kv_cache_specs)
|
| 166 |
+
if has_kv_cache:
|
| 167 |
+
if os.environ.get("VLLM_ELASTIC_EP_SCALE_UP_LAUNCH") == "1":
|
| 168 |
+
dp_group = getattr(self, "dp_group", None)
|
| 169 |
+
assert dp_group is not None
|
| 170 |
+
self.available_gpu_memory_for_kv_cache = \
|
| 171 |
+
ParallelConfig.sync_kv_cache_memory_size(dp_group, -1)
|
| 172 |
+
available_gpu_memory = [
|
| 173 |
+
self.available_gpu_memory_for_kv_cache
|
| 174 |
+
] * len(kv_cache_specs)
|
| 175 |
+
else:
|
| 176 |
+
# Profiles the peak memory usage of the model to determine how
|
| 177 |
+
# much memory can be allocated for kv cache.
|
| 178 |
+
available_gpu_memory = (
|
| 179 |
+
self.model_executor.determine_available_memory())
|
| 180 |
+
self.available_gpu_memory_for_kv_cache = \
|
| 181 |
+
available_gpu_memory[0]
|
| 182 |
+
else:
|
| 183 |
+
# Attention free models don't need memory for kv cache
|
| 184 |
+
available_gpu_memory = [0] * len(kv_cache_specs)
|
| 185 |
+
|
| 186 |
+
assert len(kv_cache_specs) == len(available_gpu_memory)
|
| 187 |
+
# Get the kv cache tensor size
|
| 188 |
+
kv_cache_configs = [
|
| 189 |
+
get_kv_cache_config(vllm_config, kv_cache_spec_one_worker,
|
| 190 |
+
available_gpu_memory_one_worker)
|
| 191 |
+
for kv_cache_spec_one_worker, available_gpu_memory_one_worker in
|
| 192 |
+
zip(kv_cache_specs, available_gpu_memory)
|
| 193 |
+
]
|
| 194 |
+
|
| 195 |
+
# Since we use a shared centralized controller, we need the
|
| 196 |
+
# `kv_cache_config` to be consistent across all workers to make sure
|
| 197 |
+
# all the memory operators can be applied to all workers.
|
| 198 |
+
unify_kv_cache_configs(kv_cache_configs)
|
| 199 |
+
|
| 200 |
+
# All workers have the same kv_cache_config except layer names, so use
|
| 201 |
+
# an arbitrary one to initialize the scheduler.
|
| 202 |
+
assert all([
|
| 203 |
+
cfg.num_blocks == kv_cache_configs[0].num_blocks
|
| 204 |
+
for cfg in kv_cache_configs
|
| 205 |
+
])
|
| 206 |
+
num_gpu_blocks = kv_cache_configs[0].num_blocks
|
| 207 |
+
num_cpu_blocks = 0
|
| 208 |
+
scheduler_kv_cache_config = kv_cache_configs[0]
|
| 209 |
+
|
| 210 |
+
# Initialize kv cache and warmup the execution
|
| 211 |
+
self.model_executor.initialize_from_config(kv_cache_configs)
|
| 212 |
+
|
| 213 |
+
elapsed = time.time() - start
|
| 214 |
+
logger.info(("init engine (profile, create kv cache, "
|
| 215 |
+
"warmup model) took %.2f seconds"), elapsed)
|
| 216 |
+
return num_gpu_blocks, num_cpu_blocks, scheduler_kv_cache_config
|
| 217 |
+
|
| 218 |
+
def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
|
| 219 |
+
return self.model_executor.supported_tasks
|
| 220 |
+
|
| 221 |
+
def add_request(self, request: Request, request_wave: int = 0):
|
| 222 |
+
"""Add request to the scheduler.
|
| 223 |
+
|
| 224 |
+
`request_wave`: indicate which wave of requests this is expected to
|
| 225 |
+
belong to in DP case
|
| 226 |
+
"""
|
| 227 |
+
# Validate the request_id type.
|
| 228 |
+
if not isinstance(request.request_id, str):
|
| 229 |
+
raise TypeError(
|
| 230 |
+
f"request_id must be a string, got {type(request.request_id)}")
|
| 231 |
+
|
| 232 |
+
if pooling_params := request.pooling_params:
|
| 233 |
+
supported_pooling_tasks = [
|
| 234 |
+
task for task in self.get_supported_tasks()
|
| 235 |
+
if task in POOLING_TASKS
|
| 236 |
+
]
|
| 237 |
+
|
| 238 |
+
if pooling_params.task not in supported_pooling_tasks:
|
| 239 |
+
raise ValueError(f"Unsupported task: {pooling_params.task!r} "
|
| 240 |
+
f"Supported tasks: {supported_pooling_tasks}")
|
| 241 |
+
|
| 242 |
+
if request.kv_transfer_params is not None and (
|
| 243 |
+
not self.scheduler.get_kv_connector()):
|
| 244 |
+
logger.warning("Got kv_transfer_params, but no KVConnector found. "
|
| 245 |
+
"Disabling KVTransfer for this request.")
|
| 246 |
+
|
| 247 |
+
self.scheduler.add_request(request)
|
| 248 |
+
|
| 249 |
+
def abort_requests(self, request_ids: list[str]):
|
| 250 |
+
"""Abort requests from the scheduler."""
|
| 251 |
+
|
| 252 |
+
# TODO: The scheduler doesn't really need to know the
|
| 253 |
+
# specific finish reason, TBD whether we propagate that
|
| 254 |
+
# (i.e. client-aborted vs stop criteria met).
|
| 255 |
+
self.scheduler.finish_requests(request_ids,
|
| 256 |
+
RequestStatus.FINISHED_ABORTED)
|
| 257 |
+
|
| 258 |
+
def execute_model_with_error_logging(
|
| 259 |
+
self,
|
| 260 |
+
model_fn: Callable[[SchedulerOutput], ModelRunnerOutput],
|
| 261 |
+
scheduler_output: SchedulerOutput,
|
| 262 |
+
) -> ModelRunnerOutput:
|
| 263 |
+
"""Execute the model and log detailed info on failure."""
|
| 264 |
+
try:
|
| 265 |
+
return model_fn(scheduler_output)
|
| 266 |
+
except Exception as err:
|
| 267 |
+
# We do not want to catch BaseException here since we're only
|
| 268 |
+
# interested in dumping info when the exception is due to an
|
| 269 |
+
# error from execute_model itself.
|
| 270 |
+
|
| 271 |
+
# NOTE: This method is exception-free
|
| 272 |
+
dump_engine_exception(self.vllm_config, scheduler_output,
|
| 273 |
+
self.scheduler.make_stats())
|
| 274 |
+
raise err
|
| 275 |
+
|
| 276 |
+
def step(self) -> tuple[dict[int, EngineCoreOutputs], bool]:
|
| 277 |
+
"""Schedule, execute, and make output.
|
| 278 |
+
|
| 279 |
+
Returns tuple of outputs and a flag indicating whether the model
|
| 280 |
+
was executed.
|
| 281 |
+
"""
|
| 282 |
+
|
| 283 |
+
# Check for any requests remaining in the scheduler - unfinished,
|
| 284 |
+
# or finished and not yet removed from the batch.
|
| 285 |
+
if not self.scheduler.has_requests():
|
| 286 |
+
return {}, False
|
| 287 |
+
scheduler_output = self.scheduler.schedule()
|
| 288 |
+
model_output = self.execute_model_with_error_logging(
|
| 289 |
+
self.model_executor.execute_model, # type: ignore
|
| 290 |
+
scheduler_output)
|
| 291 |
+
engine_core_outputs = self.scheduler.update_from_output(
|
| 292 |
+
scheduler_output, model_output) # type: ignore
|
| 293 |
+
|
| 294 |
+
return (engine_core_outputs,
|
| 295 |
+
scheduler_output.total_num_scheduled_tokens > 0)
|
| 296 |
+
|
| 297 |
+
def step_with_batch_queue(
|
| 298 |
+
self) -> tuple[Optional[dict[int, EngineCoreOutputs]], bool]:
|
| 299 |
+
"""Schedule and execute batches with the batch queue.
|
| 300 |
+
Note that if nothing to output in this step, None is returned.
|
| 301 |
+
|
| 302 |
+
The execution flow is as follows:
|
| 303 |
+
1. Try to schedule a new batch if the batch queue is not full.
|
| 304 |
+
If a new batch is scheduled, directly return an empty engine core
|
| 305 |
+
output. In other words, fulfilling the batch queue has a higher priority
|
| 306 |
+
than getting model outputs.
|
| 307 |
+
2. If there is no new scheduled batch, meaning that the batch queue
|
| 308 |
+
is full or no other requests can be scheduled, we block until the first
|
| 309 |
+
batch in the job queue is finished.
|
| 310 |
+
3. Update the scheduler from the output.
|
| 311 |
+
"""
|
| 312 |
+
assert self.batch_queue is not None
|
| 313 |
+
|
| 314 |
+
engine_core_outputs = None
|
| 315 |
+
scheduler_output = None
|
| 316 |
+
# Try to schedule a new batch if the batch queue is not full, but
|
| 317 |
+
# the scheduler may return an empty batch if all requests are scheduled.
|
| 318 |
+
# Note that this is not blocking.
|
| 319 |
+
if not self.batch_queue.full():
|
| 320 |
+
scheduler_output = self.scheduler.schedule()
|
| 321 |
+
if scheduler_output.total_num_scheduled_tokens > 0:
|
| 322 |
+
future = self.model_executor.execute_model(scheduler_output)
|
| 323 |
+
self.batch_queue.put_nowait(
|
| 324 |
+
(future, scheduler_output)) # type: ignore
|
| 325 |
+
|
| 326 |
+
scheduled_batch = (scheduler_output is not None
|
| 327 |
+
and scheduler_output.total_num_scheduled_tokens > 0)
|
| 328 |
+
|
| 329 |
+
# If no more requests can be scheduled and the job queue is not empty,
|
| 330 |
+
# block until the first batch in the job queue is finished.
|
| 331 |
+
# TODO(comaniac): Ideally we should peek the first batch in the
|
| 332 |
+
# job queue to check if it's finished before scheduling a new batch,
|
| 333 |
+
# but peeking the first element in a queue is not thread-safe,
|
| 334 |
+
# so we need more work.
|
| 335 |
+
if not scheduled_batch and not self.batch_queue.empty():
|
| 336 |
+
future, scheduler_output = self.batch_queue.get_nowait()
|
| 337 |
+
|
| 338 |
+
# Blocking until the first result is available.
|
| 339 |
+
model_output = self.execute_model_with_error_logging(
|
| 340 |
+
lambda _: future.result(), scheduler_output)
|
| 341 |
+
|
| 342 |
+
self.batch_queue.task_done()
|
| 343 |
+
engine_core_outputs = (self.scheduler.update_from_output(
|
| 344 |
+
scheduler_output, model_output))
|
| 345 |
+
|
| 346 |
+
return engine_core_outputs, scheduled_batch
|
| 347 |
+
|
| 348 |
+
def shutdown(self):
|
| 349 |
+
self.structured_output_manager.clear_backend()
|
| 350 |
+
if self.model_executor:
|
| 351 |
+
self.model_executor.shutdown()
|
| 352 |
+
if self.scheduler:
|
| 353 |
+
self.scheduler.shutdown()
|
| 354 |
+
|
| 355 |
+
def profile(self, is_start: bool = True):
|
| 356 |
+
self.model_executor.profile(is_start)
|
| 357 |
+
|
| 358 |
+
def reset_mm_cache(self):
|
| 359 |
+
# NOTE: Since this is mainly for debugging, we don't attempt to
|
| 360 |
+
# re-sync the internal caches (P0 processor, P0 mirror, P1 mirror)
|
| 361 |
+
if self.scheduler.has_unfinished_requests():
|
| 362 |
+
logger.warning("Resetting the multi-modal cache when requests are "
|
| 363 |
+
"in progress may lead to desynced internal caches.")
|
| 364 |
+
|
| 365 |
+
self.mm_input_cache_server.reset()
|
| 366 |
+
|
| 367 |
+
def reset_prefix_cache(self):
|
| 368 |
+
self.scheduler.reset_prefix_cache()
|
| 369 |
+
|
| 370 |
+
def sleep(self, level: int = 1):
|
| 371 |
+
self.model_executor.sleep(level)
|
| 372 |
+
|
| 373 |
+
def wake_up(self, tags: Optional[list[str]] = None):
|
| 374 |
+
self.model_executor.wake_up(tags)
|
| 375 |
+
|
| 376 |
+
def is_sleeping(self) -> bool:
|
| 377 |
+
return self.model_executor.is_sleeping
|
| 378 |
+
|
| 379 |
+
def execute_dummy_batch(self):
|
| 380 |
+
self.model_executor.collective_rpc("execute_dummy_batch")
|
| 381 |
+
|
| 382 |
+
def add_lora(self, lora_request: LoRARequest) -> bool:
|
| 383 |
+
return self.model_executor.add_lora(lora_request)
|
| 384 |
+
|
| 385 |
+
def remove_lora(self, lora_id: int) -> bool:
|
| 386 |
+
return self.model_executor.remove_lora(lora_id)
|
| 387 |
+
|
| 388 |
+
def list_loras(self) -> set[int]:
|
| 389 |
+
return self.model_executor.list_loras()
|
| 390 |
+
|
| 391 |
+
def pin_lora(self, lora_id: int) -> bool:
|
| 392 |
+
return self.model_executor.pin_lora(lora_id)
|
| 393 |
+
|
| 394 |
+
def save_sharded_state(
|
| 395 |
+
self,
|
| 396 |
+
path: str,
|
| 397 |
+
pattern: Optional[str] = None,
|
| 398 |
+
max_size: Optional[int] = None,
|
| 399 |
+
) -> None:
|
| 400 |
+
self.model_executor.save_sharded_state(path=path,
|
| 401 |
+
pattern=pattern,
|
| 402 |
+
max_size=max_size)
|
| 403 |
+
|
| 404 |
+
def collective_rpc(self,
|
| 405 |
+
method: Union[str, Callable[..., _R]],
|
| 406 |
+
timeout: Optional[float] = None,
|
| 407 |
+
args: tuple = (),
|
| 408 |
+
kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
|
| 409 |
+
return self.model_executor.collective_rpc(method, timeout, args,
|
| 410 |
+
kwargs)
|
| 411 |
+
|
| 412 |
+
def save_tensorized_model(
|
| 413 |
+
self,
|
| 414 |
+
tensorizer_config,
|
| 415 |
+
) -> None:
|
| 416 |
+
self.model_executor.save_tensorized_model(
|
| 417 |
+
tensorizer_config=tensorizer_config, )
|
| 418 |
+
|
| 419 |
+
def preprocess_add_request(
|
| 420 |
+
self, request: EngineCoreRequest) -> tuple[Request, int]:
|
| 421 |
+
"""Preprocess the request.
|
| 422 |
+
|
| 423 |
+
This function could be directly used in input processing thread to allow
|
| 424 |
+
request initialization running in parallel with Model forward
|
| 425 |
+
"""
|
| 426 |
+
if request.mm_hashes is not None:
|
| 427 |
+
assert request.mm_kwargs is not None
|
| 428 |
+
|
| 429 |
+
# Note on thread safety: no race condition.
|
| 430 |
+
# `mm_input_cache_server` is reset at the end of LLMEngine init,
|
| 431 |
+
# and will only accessed in the input processing thread afterwards.
|
| 432 |
+
request.mm_kwargs = self.mm_input_cache_server.get_and_update(
|
| 433 |
+
request.mm_kwargs, request.mm_hashes)
|
| 434 |
+
|
| 435 |
+
req = Request.from_engine_core_request(request,
|
| 436 |
+
self.request_block_hasher)
|
| 437 |
+
if req.use_structured_output:
|
| 438 |
+
# Note on thread safety: no race condition.
|
| 439 |
+
# `grammar_init` is only invoked in input processing thread. For
|
| 440 |
+
# `structured_output_manager`, each request is independent and
|
| 441 |
+
# grammar compilation is async. Scheduler always checks grammar
|
| 442 |
+
# compilation status before scheduling request.
|
| 443 |
+
self.structured_output_manager.grammar_init(req)
|
| 444 |
+
return req, request.current_wave
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
class EngineCoreProc(EngineCore):
|
| 448 |
+
"""ZMQ-wrapper for running EngineCore in background process."""
|
| 449 |
+
|
| 450 |
+
ENGINE_CORE_DEAD = b'ENGINE_CORE_DEAD'
|
| 451 |
+
|
| 452 |
+
def __init__(
|
| 453 |
+
self,
|
| 454 |
+
vllm_config: VllmConfig,
|
| 455 |
+
local_client: bool,
|
| 456 |
+
handshake_address: str,
|
| 457 |
+
executor_class: type[Executor],
|
| 458 |
+
log_stats: bool,
|
| 459 |
+
client_handshake_address: Optional[str] = None,
|
| 460 |
+
engine_index: int = 0,
|
| 461 |
+
):
|
| 462 |
+
self.input_queue = queue.Queue[tuple[EngineCoreRequestType, Any]]()
|
| 463 |
+
self.output_queue = queue.Queue[Union[tuple[int, EngineCoreOutputs],
|
| 464 |
+
bytes]]()
|
| 465 |
+
executor_fail_callback = lambda: self.input_queue.put_nowait(
|
| 466 |
+
(EngineCoreRequestType.EXECUTOR_FAILED, b''))
|
| 467 |
+
|
| 468 |
+
self.engine_index = engine_index
|
| 469 |
+
identity = self.engine_index.to_bytes(length=2, byteorder="little")
|
| 470 |
+
self.engines_running = False
|
| 471 |
+
|
| 472 |
+
with self._perform_handshakes(handshake_address, identity,
|
| 473 |
+
local_client, vllm_config,
|
| 474 |
+
client_handshake_address) as addresses:
|
| 475 |
+
self.client_count = len(addresses.outputs)
|
| 476 |
+
|
| 477 |
+
# Set up data parallel environment.
|
| 478 |
+
self.has_coordinator = addresses.coordinator_output is not None
|
| 479 |
+
self.frontend_stats_publish_address = (
|
| 480 |
+
addresses.frontend_stats_publish_address)
|
| 481 |
+
logger.debug("Has DP Coordinator: %s, stats publish address: %s",
|
| 482 |
+
self.has_coordinator,
|
| 483 |
+
self.frontend_stats_publish_address)
|
| 484 |
+
# Only publish request queue stats to coordinator for "internal"
|
| 485 |
+
# and "hybrid" LB modes .
|
| 486 |
+
self.publish_dp_lb_stats = (
|
| 487 |
+
self.has_coordinator
|
| 488 |
+
and not vllm_config.parallel_config.data_parallel_external_lb)
|
| 489 |
+
|
| 490 |
+
self._init_data_parallel(vllm_config)
|
| 491 |
+
|
| 492 |
+
super().__init__(vllm_config, executor_class, log_stats,
|
| 493 |
+
executor_fail_callback)
|
| 494 |
+
|
| 495 |
+
# Background Threads and Queues for IO. These enable us to
|
| 496 |
+
# overlap ZMQ socket IO with GPU since they release the GIL,
|
| 497 |
+
# and to overlap some serialization/deserialization with the
|
| 498 |
+
# model forward pass.
|
| 499 |
+
# Threads handle Socket <-> Queues and core_busy_loop uses Queue.
|
| 500 |
+
ready_event = threading.Event()
|
| 501 |
+
input_thread = threading.Thread(target=self.process_input_sockets,
|
| 502 |
+
args=(addresses.inputs,
|
| 503 |
+
addresses.coordinator_input,
|
| 504 |
+
identity, ready_event),
|
| 505 |
+
daemon=True)
|
| 506 |
+
input_thread.start()
|
| 507 |
+
|
| 508 |
+
self.output_thread = threading.Thread(
|
| 509 |
+
target=self.process_output_sockets,
|
| 510 |
+
args=(addresses.outputs, addresses.coordinator_output,
|
| 511 |
+
self.engine_index),
|
| 512 |
+
daemon=True)
|
| 513 |
+
self.output_thread.start()
|
| 514 |
+
|
| 515 |
+
# Don't complete handshake until DP coordinator ready message is
|
| 516 |
+
# received.
|
| 517 |
+
while not ready_event.wait(timeout=10):
|
| 518 |
+
if not input_thread.is_alive():
|
| 519 |
+
raise RuntimeError(
|
| 520 |
+
"Input socket thread died during startup")
|
| 521 |
+
assert addresses.coordinator_input is not None
|
| 522 |
+
logger.info("Waiting for READY message from DP Coordinator...")
|
| 523 |
+
|
| 524 |
+
self.step_fn = (self.step if self.batch_queue is None else
|
| 525 |
+
self.step_with_batch_queue)
|
| 526 |
+
|
| 527 |
+
@contextmanager
|
| 528 |
+
def _perform_handshakes(
|
| 529 |
+
self,
|
| 530 |
+
handshake_address: str,
|
| 531 |
+
identity: bytes,
|
| 532 |
+
local_client: bool,
|
| 533 |
+
vllm_config: VllmConfig,
|
| 534 |
+
client_handshake_address: Optional[str],
|
| 535 |
+
) -> Generator[EngineZmqAddresses, None, None]:
|
| 536 |
+
"""
|
| 537 |
+
Perform startup handshakes.
|
| 538 |
+
|
| 539 |
+
For DP=1 or offline mode, this is with the colocated front-end process.
|
| 540 |
+
|
| 541 |
+
For DP>1 with internal load-balancing this is with the shared front-end
|
| 542 |
+
process which may reside on a different node.
|
| 543 |
+
|
| 544 |
+
For DP>1 with external or hybrid load-balancing, two handshakes are
|
| 545 |
+
performed:
|
| 546 |
+
- With the rank 0 front-end process which retrieves the
|
| 547 |
+
DP Coordinator ZMQ addresses and DP process group address.
|
| 548 |
+
- With the colocated front-end process which retrieves the
|
| 549 |
+
client input/output socket addresses.
|
| 550 |
+
with the exception of the rank 0 and colocated engines themselves which
|
| 551 |
+
don't require the second handshake.
|
| 552 |
+
|
| 553 |
+
Here, "front-end" process can mean the process containing the engine
|
| 554 |
+
core client (which is the API server process in the case the API
|
| 555 |
+
server is not scaled out), OR the launcher process running the
|
| 556 |
+
run_multi_api_server() function in serve.py.
|
| 557 |
+
"""
|
| 558 |
+
input_ctx = zmq.Context()
|
| 559 |
+
is_local = local_client and client_handshake_address is None
|
| 560 |
+
headless = not local_client
|
| 561 |
+
handshake = self._perform_handshake(input_ctx, handshake_address,
|
| 562 |
+
identity, is_local, headless,
|
| 563 |
+
vllm_config,
|
| 564 |
+
vllm_config.parallel_config)
|
| 565 |
+
if client_handshake_address is None:
|
| 566 |
+
with handshake as addresses:
|
| 567 |
+
yield addresses
|
| 568 |
+
else:
|
| 569 |
+
assert local_client
|
| 570 |
+
local_handshake = self._perform_handshake(
|
| 571 |
+
input_ctx, client_handshake_address, identity, True, False,
|
| 572 |
+
vllm_config)
|
| 573 |
+
with handshake as addresses, local_handshake as client_addresses:
|
| 574 |
+
addresses.inputs = client_addresses.inputs
|
| 575 |
+
addresses.outputs = client_addresses.outputs
|
| 576 |
+
yield addresses
|
| 577 |
+
|
| 578 |
+
# Update config which may have changed from the handshake
|
| 579 |
+
vllm_config.__post_init__()
|
| 580 |
+
|
| 581 |
+
@contextmanager
|
| 582 |
+
def _perform_handshake(
|
| 583 |
+
self,
|
| 584 |
+
ctx: zmq.Context,
|
| 585 |
+
handshake_address: str,
|
| 586 |
+
identity: bytes,
|
| 587 |
+
local_client: bool,
|
| 588 |
+
headless: bool,
|
| 589 |
+
vllm_config: VllmConfig,
|
| 590 |
+
parallel_config_to_update: Optional[ParallelConfig] = None,
|
| 591 |
+
) -> Generator[EngineZmqAddresses, None, None]:
|
| 592 |
+
with make_zmq_socket(ctx,
|
| 593 |
+
handshake_address,
|
| 594 |
+
zmq.DEALER,
|
| 595 |
+
identity=identity,
|
| 596 |
+
linger=5000,
|
| 597 |
+
bind=False) as handshake_socket:
|
| 598 |
+
# Register engine with front-end.
|
| 599 |
+
addresses = self.startup_handshake(handshake_socket, local_client,
|
| 600 |
+
headless,
|
| 601 |
+
parallel_config_to_update)
|
| 602 |
+
yield addresses
|
| 603 |
+
|
| 604 |
+
# Send ready message.
|
| 605 |
+
num_gpu_blocks = vllm_config.cache_config.num_gpu_blocks
|
| 606 |
+
# We pass back the coordinator stats update address here for the
|
| 607 |
+
# external LB case for our colocated front-end to use (coordinator
|
| 608 |
+
# only runs with rank 0).
|
| 609 |
+
dp_stats_address = self.frontend_stats_publish_address
|
| 610 |
+
handshake_socket.send(
|
| 611 |
+
msgspec.msgpack.encode({
|
| 612 |
+
"status": "READY",
|
| 613 |
+
"local": local_client,
|
| 614 |
+
"headless": headless,
|
| 615 |
+
"num_gpu_blocks": num_gpu_blocks,
|
| 616 |
+
"dp_stats_address": dp_stats_address,
|
| 617 |
+
}))
|
| 618 |
+
|
| 619 |
+
@staticmethod
|
| 620 |
+
def startup_handshake(
|
| 621 |
+
handshake_socket: zmq.Socket,
|
| 622 |
+
local_client: bool,
|
| 623 |
+
headless: bool,
|
| 624 |
+
parallel_config: Optional[ParallelConfig] = None,
|
| 625 |
+
) -> EngineZmqAddresses:
|
| 626 |
+
|
| 627 |
+
# Send registration message.
|
| 628 |
+
handshake_socket.send(
|
| 629 |
+
msgspec.msgpack.encode({
|
| 630 |
+
"status": "HELLO",
|
| 631 |
+
"local": local_client,
|
| 632 |
+
"headless": headless,
|
| 633 |
+
}))
|
| 634 |
+
|
| 635 |
+
# Receive initialization message.
|
| 636 |
+
logger.info("Waiting for init message from front-end.")
|
| 637 |
+
if not handshake_socket.poll(timeout=HANDSHAKE_TIMEOUT_MINS * 60_000):
|
| 638 |
+
raise RuntimeError("Did not receive response from front-end "
|
| 639 |
+
f"process within {HANDSHAKE_TIMEOUT_MINS} "
|
| 640 |
+
f"minutes")
|
| 641 |
+
init_bytes = handshake_socket.recv()
|
| 642 |
+
init_message: EngineHandshakeMetadata = msgspec.msgpack.decode(
|
| 643 |
+
init_bytes, type=EngineHandshakeMetadata)
|
| 644 |
+
logger.debug("Received init message: %s", init_message)
|
| 645 |
+
|
| 646 |
+
if parallel_config is not None:
|
| 647 |
+
for key, value in init_message.parallel_config.items():
|
| 648 |
+
setattr(parallel_config, key, value)
|
| 649 |
+
|
| 650 |
+
return init_message.addresses
|
| 651 |
+
|
| 652 |
+
@staticmethod
|
| 653 |
+
def run_engine_core(*args,
|
| 654 |
+
dp_rank: int = 0,
|
| 655 |
+
local_dp_rank: int = 0,
|
| 656 |
+
**kwargs):
|
| 657 |
+
"""Launch EngineCore busy loop in background process."""
|
| 658 |
+
|
| 659 |
+
# Signal handler used for graceful termination.
|
| 660 |
+
# SystemExit exception is only raised once to allow this and worker
|
| 661 |
+
# processes to terminate without error
|
| 662 |
+
shutdown_requested = False
|
| 663 |
+
|
| 664 |
+
# Ensure we can serialize transformer config after spawning
|
| 665 |
+
maybe_register_config_serialize_by_value()
|
| 666 |
+
|
| 667 |
+
def signal_handler(signum, frame):
|
| 668 |
+
nonlocal shutdown_requested
|
| 669 |
+
if not shutdown_requested:
|
| 670 |
+
shutdown_requested = True
|
| 671 |
+
raise SystemExit()
|
| 672 |
+
|
| 673 |
+
# Either SIGTERM or SIGINT will terminate the engine_core
|
| 674 |
+
signal.signal(signal.SIGTERM, signal_handler)
|
| 675 |
+
signal.signal(signal.SIGINT, signal_handler)
|
| 676 |
+
|
| 677 |
+
engine_core: Optional[EngineCoreProc] = None
|
| 678 |
+
try:
|
| 679 |
+
parallel_config: ParallelConfig = kwargs[
|
| 680 |
+
"vllm_config"].parallel_config
|
| 681 |
+
if parallel_config.data_parallel_size > 1 or dp_rank > 0:
|
| 682 |
+
set_process_title("DPEngineCore", str(dp_rank))
|
| 683 |
+
decorate_logs()
|
| 684 |
+
# Set data parallel rank for this engine process.
|
| 685 |
+
parallel_config.data_parallel_rank = dp_rank
|
| 686 |
+
parallel_config.data_parallel_rank_local = local_dp_rank
|
| 687 |
+
engine_core = DPEngineCoreProc(*args, **kwargs)
|
| 688 |
+
else:
|
| 689 |
+
set_process_title("EngineCore")
|
| 690 |
+
decorate_logs()
|
| 691 |
+
engine_core = EngineCoreProc(*args, **kwargs)
|
| 692 |
+
|
| 693 |
+
engine_core.run_busy_loop()
|
| 694 |
+
|
| 695 |
+
except SystemExit:
|
| 696 |
+
logger.debug("EngineCore exiting.")
|
| 697 |
+
raise
|
| 698 |
+
except Exception as e:
|
| 699 |
+
if engine_core is None:
|
| 700 |
+
logger.exception("EngineCore failed to start.")
|
| 701 |
+
else:
|
| 702 |
+
logger.exception("EngineCore encountered a fatal error.")
|
| 703 |
+
engine_core._send_engine_dead()
|
| 704 |
+
raise e
|
| 705 |
+
finally:
|
| 706 |
+
if engine_core is not None:
|
| 707 |
+
engine_core.shutdown()
|
| 708 |
+
|
| 709 |
+
def _init_data_parallel(self, vllm_config: VllmConfig):
|
| 710 |
+
pass
|
| 711 |
+
|
| 712 |
+
def run_busy_loop(self):
|
| 713 |
+
"""Core busy loop of the EngineCore."""
|
| 714 |
+
|
| 715 |
+
# Loop until process is sent a SIGINT or SIGTERM
|
| 716 |
+
while True:
|
| 717 |
+
# 1) Poll the input queue until there is work to do.
|
| 718 |
+
self._process_input_queue()
|
| 719 |
+
# 2) Step the engine core and return the outputs.
|
| 720 |
+
self._process_engine_step()
|
| 721 |
+
|
| 722 |
+
def _process_input_queue(self):
|
| 723 |
+
"""Exits when an engine step needs to be performed."""
|
| 724 |
+
|
| 725 |
+
waited = False
|
| 726 |
+
while not self.engines_running and not self.scheduler.has_requests():
|
| 727 |
+
if logger.isEnabledFor(DEBUG) and self.input_queue.empty():
|
| 728 |
+
logger.debug("EngineCore waiting for work.")
|
| 729 |
+
waited = True
|
| 730 |
+
req = self.input_queue.get()
|
| 731 |
+
self._handle_client_request(*req)
|
| 732 |
+
|
| 733 |
+
if waited:
|
| 734 |
+
logger.debug("EngineCore loop active.")
|
| 735 |
+
|
| 736 |
+
# Handle any more client requests.
|
| 737 |
+
while not self.input_queue.empty():
|
| 738 |
+
req = self.input_queue.get_nowait()
|
| 739 |
+
self._handle_client_request(*req)
|
| 740 |
+
|
| 741 |
+
def _process_engine_step(self) -> bool:
|
| 742 |
+
"""Called only when there are unfinished local requests."""
|
| 743 |
+
|
| 744 |
+
# Step the engine core.
|
| 745 |
+
outputs, model_executed = self.step_fn()
|
| 746 |
+
# Put EngineCoreOutputs into the output queue.
|
| 747 |
+
for output in (outputs.items() if outputs else ()):
|
| 748 |
+
self.output_queue.put_nowait(output)
|
| 749 |
+
|
| 750 |
+
return model_executed
|
| 751 |
+
|
| 752 |
+
def _handle_client_request(self, request_type: EngineCoreRequestType,
                           request: Any) -> None:
    """Dispatch request from client.

    The request payload shape depends on the request type:
    ADD -> (EngineCoreRequest, wave); ABORT -> request ids;
    UTILITY -> (client_idx, call_id, method_name, args).
    """

    if request_type == EngineCoreRequestType.ADD:
        # New generation request plus the DP wave it belongs to.
        req, request_wave = request
        self.add_request(req, request_wave)
    elif request_type == EngineCoreRequestType.ABORT:
        self.abort_requests(request)
    elif request_type == EngineCoreRequestType.UTILITY:
        # Generic RPC: invoke a method on this object by name and send
        # the result (or failure message) back to the calling client.
        client_idx, call_id, method_name, args = request
        output = UtilityOutput(call_id)
        try:
            method = getattr(self, method_name)
            # Args may arrive as plain msgpack data; coerce them to the
            # method's declared msgspec types first.
            result = method(*self._convert_msgspec_args(method, args))
            output.result = UtilityResult(result)
        except BaseException as e:
            # BaseException so even SystemExit-ish failures are reported
            # back to the client rather than lost.
            logger.exception("Invocation of %s method failed", method_name)
            output.failure_message = (f"Call to {method_name} method"
                                      f" failed: {str(e)}")
        self.output_queue.put_nowait(
            (client_idx, EngineCoreOutputs(utility_output=output)))
    elif request_type == EngineCoreRequestType.EXECUTOR_FAILED:
        # Propagate executor death; caught by run_engine_core's handler.
        raise RuntimeError("Executor failed.")
    else:
        # Unknown type: log and drop rather than crash the engine.
        logger.error("Unrecognized input request type encountered: %s",
                     request_type)
|
| 779 |
+
|
| 780 |
+
@staticmethod
|
| 781 |
+
def _convert_msgspec_args(method, args):
|
| 782 |
+
"""If a provided arg type doesn't match corresponding target method
|
| 783 |
+
arg type, try converting to msgspec object."""
|
| 784 |
+
if not args:
|
| 785 |
+
return args
|
| 786 |
+
arg_types = signature(method).parameters.values()
|
| 787 |
+
assert len(args) <= len(arg_types)
|
| 788 |
+
return tuple(
|
| 789 |
+
msgspec.convert(v, type=p.annotation) if isclass(p.annotation)
|
| 790 |
+
and issubclass(p.annotation, msgspec.Struct)
|
| 791 |
+
and not isinstance(v, p.annotation) else v
|
| 792 |
+
for v, p in zip(args, arg_types))
|
| 793 |
+
|
| 794 |
+
def _send_engine_dead(self):
    """Notify the EngineCoreClient that this engine core has died."""

    # Enqueue the sentinel; the output IO thread forwards it to every
    # client socket and then exits its loop.
    self.output_queue.put_nowait(EngineCoreProc.ENGINE_CORE_DEAD)

    # Give the IO thread a bounded window to flush the message before
    # we proceed with shutdown.
    self.output_thread.join(timeout=5.0)
    if self.output_thread.is_alive():
        logger.fatal("vLLM shutdown signal from EngineCore failed "
                     "to send. Please report this issue.")
|
| 805 |
+
|
| 806 |
+
def process_input_sockets(self, input_addresses: list[str],
                          coord_input_address: Optional[str],
                          identity: bytes, ready_event: threading.Event):
    """Input socket IO thread.

    Connects DEALER sockets to each front-end input address (and an
    XSUB socket to the DP coordinator, if any), then loops forever
    decoding incoming requests and pushing them onto self.input_queue
    for the core busy loop.
    """

    # Msgpack serialization decoding: a typed decoder for ADD requests,
    # a generic one for everything else.
    add_request_decoder = MsgpackDecoder(EngineCoreRequest)
    generic_decoder = MsgpackDecoder()

    with ExitStack() as stack, zmq.Context() as ctx:
        input_sockets = [
            stack.enter_context(
                make_zmq_socket(ctx,
                                input_address,
                                zmq.DEALER,
                                identity=identity,
                                bind=False))
            for input_address in input_addresses
        ]
        if coord_input_address is None:
            coord_socket = None
        else:
            coord_socket = stack.enter_context(
                make_zmq_socket(ctx,
                                coord_input_address,
                                zmq.XSUB,
                                identity=identity,
                                bind=False))
            # Send subscription message to coordinator
            # (XSUB subscribe frame: \x01 + empty topic = subscribe all).
            coord_socket.send(b'\x01')

        # Register sockets with poller.
        poller = zmq.Poller()
        for input_socket in input_sockets:
            # Send initial message to each input socket - this is required
            # before the front-end ROUTER socket can send input messages
            # back to us.
            input_socket.send(b'')
            poller.register(input_socket, zmq.POLLIN)

        if coord_socket is not None:
            # Wait for ready message from coordinator.
            assert coord_socket.recv() == b"READY"
            poller.register(coord_socket, zmq.POLLIN)

        # Signal the constructor that socket setup is complete; drop the
        # reference so the Event can be garbage collected.
        ready_event.set()
        del ready_event
        while True:
            for input_socket, _ in poller.poll():
                # Multipart layout: (RequestType, RequestData...).
                type_frame, *data_frames = input_socket.recv_multipart(
                    copy=False)
                request_type = EngineCoreRequestType(
                    bytes(type_frame.buffer))

                # Deserialize the request data.
                if request_type == EngineCoreRequestType.ADD:
                    request = add_request_decoder.decode(data_frames)
                    # Pre-process on this IO thread to keep work off the
                    # core busy loop.
                    request = self.preprocess_add_request(request)
                else:
                    request = generic_decoder.decode(data_frames)

                # Push to input queue for core busy loop.
                self.input_queue.put_nowait((request_type, request))
|
| 870 |
+
|
| 871 |
+
def process_output_sockets(self, output_paths: list[str],
                           coord_output_path: Optional[str],
                           engine_index: int):
    """Output socket IO thread.

    Pulls (client_index, EngineCoreOutputs) pairs from self.output_queue
    and pushes the encoded outputs to the corresponding client PUSH
    socket (client_index == -1 routes to the DP coordinator). Exits when
    the ENGINE_CORE_DEAD sentinel is dequeued, after broadcasting it to
    all clients.
    """

    # Msgpack serialization encoding.
    encoder = MsgpackEncoder()
    # Send buffers to reuse.
    reuse_buffers: list[bytearray] = []
    # Keep references to outputs and buffers until zmq is finished
    # with them (outputs may contain tensors/np arrays whose
    # backing buffers were extracted for zero-copy send).
    pending = deque[tuple[zmq.MessageTracker, Any, bytearray]]()

    # We must set linger to ensure the ENGINE_CORE_DEAD
    # message is sent prior to closing the socket.
    with ExitStack() as stack, zmq.Context() as ctx:
        sockets = [
            stack.enter_context(
                make_zmq_socket(ctx, output_path, zmq.PUSH, linger=4000))
            for output_path in output_paths
        ]
        coord_socket = stack.enter_context(
            make_zmq_socket(
                ctx, coord_output_path, zmq.PUSH, bind=False,
                linger=4000)) if coord_output_path is not None else None
        # One spare buffer per destination socket, plus one in flight.
        max_reuse_bufs = len(sockets) + 1

        while True:
            output = self.output_queue.get()
            if output == EngineCoreProc.ENGINE_CORE_DEAD:
                # Broadcast the death sentinel to every client, then
                # stop this IO thread.
                for socket in sockets:
                    socket.send(output)
                break
            assert not isinstance(output, bytes)
            client_index, outputs = output
            # Tag outputs with this engine so multi-engine clients can
            # demultiplex them.
            outputs.engine_index = engine_index

            if client_index == -1:
                # Don't reuse buffer for coordinator message
                # which will be very small.
                assert coord_socket is not None
                coord_socket.send_multipart(encoder.encode(outputs))
                continue

            # Reclaim buffers that zmq is finished with.
            while pending and pending[-1][0].done:
                reuse_buffers.append(pending.pop()[2])

            buffer = reuse_buffers.pop() if reuse_buffers else bytearray()
            buffers = encoder.encode_into(outputs, buffer)
            # track=True returns a MessageTracker so we know when zmq has
            # released the zero-copy frames.
            tracker = sockets[client_index].send_multipart(buffers,
                                                           copy=False,
                                                           track=True)
            if not tracker.done:
                # Still in flight: pin the buffer (and outputs, if extra
                # zero-copy frames reference them) until zmq is done.
                ref = outputs if len(buffers) > 1 else None
                pending.appendleft((tracker, ref, buffer))
            elif len(reuse_buffers) < max_reuse_bufs:
                # Limit the number of buffers to reuse.
                reuse_buffers.append(buffer)
|
| 931 |
+
|
| 932 |
+
|
| 933 |
+
class DPEngineCoreProc(EngineCoreProc):
    """ZMQ-wrapper for running EngineCore in background process
    in a data parallel context.

    Adds DP "wave" tracking: engines advance together through numbered
    waves of work, synchronizing idleness via an all-reduce over the
    stateless DP process group.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_client: bool,
        handshake_address: str,
        executor_class: type[Executor],
        log_stats: bool,
        client_handshake_address: Optional[str] = None,
    ):
        # Counts forward-passes of the model so that we can synchronize
        # finished with DP peers every N steps.
        self.step_counter = 0
        # Index of the current DP wave of requests.
        self.current_wave = 0
        # Last (waiting, running) request counts published for LB stats.
        self.last_counts = (0, 0)

        # Initialize the engine.
        dp_rank = vllm_config.parallel_config.data_parallel_rank
        super().__init__(vllm_config, local_client, handshake_address,
                         executor_class, log_stats, client_handshake_address,
                         dp_rank)

    def _init_data_parallel(self, vllm_config: VllmConfig):
        """Set up DP rank state and the stateless DP process group."""

        # Configure GPUs and stateless process group for data parallel.
        dp_rank = vllm_config.parallel_config.data_parallel_rank
        dp_size = vllm_config.parallel_config.data_parallel_size
        local_dp_rank = vllm_config.parallel_config.data_parallel_rank_local

        assert dp_size > 1
        assert 0 <= local_dp_rank <= dp_rank < dp_size

        if vllm_config.kv_transfer_config is not None:
            # modify the engine_id and append the local_dp_rank to it to ensure
            # that the kv_transfer_config is unique for each DP rank.
            vllm_config.kv_transfer_config.engine_id = (
                f"{vllm_config.kv_transfer_config.engine_id}_dp{local_dp_rank}"
            )
            logger.debug("Setting kv_transfer_config.engine_id to %s",
                         vllm_config.kv_transfer_config.engine_id)

        self.dp_rank = dp_rank
        self.dp_group = vllm_config.parallel_config.stateless_init_dp_group()

    def shutdown(self):
        """Shut down the engine and tear down the DP process group."""
        super().shutdown()
        # dp_group may not exist if __init__ failed before _init_data_parallel.
        if dp_group := getattr(self, "dp_group", None):
            stateless_destroy_torch_distributed_process_group(dp_group)

    def add_request(self, request: Request, request_wave: int = 0):
        """Add a request, reconciling its wave with the current one."""
        if self.has_coordinator and request_wave != self.current_wave:
            if request_wave > self.current_wave:
                # Front-end is ahead of us; jump to its wave.
                self.current_wave = request_wave
            elif not self.engines_running:
                # Request received for an already-completed wave, notify
                # front-end that we need to start the next one.
                self.output_queue.put_nowait(
                    (-1, EngineCoreOutputs(start_wave=self.current_wave)))

        super().add_request(request, request_wave)

    def _handle_client_request(self, request_type: EngineCoreRequestType,
                               request: Any) -> None:
        """Handle DP-specific START_DP_WAVE; defer the rest to the base."""
        if request_type == EngineCoreRequestType.START_DP_WAVE:
            new_wave, exclude_eng_index = request
            # The engine that triggered the wave is excluded: it has
            # already started it locally.
            if exclude_eng_index != self.engine_index and (
                    new_wave >= self.current_wave):
                self.current_wave = new_wave
                if not self.engines_running:
                    logger.debug("EngineCore starting idle loop for wave %d.",
                                 new_wave)
                    self.engines_running = True
        else:
            super()._handle_client_request(request_type, request)

    def _maybe_publish_request_counts(self):
        """Publish scheduler request counts for DP load balancing."""
        if not self.publish_dp_lb_stats:
            return

        # Publish our request counts (if they've changed).
        counts = self.scheduler.get_request_counts()
        if counts != self.last_counts:
            self.last_counts = counts
            stats = SchedulerStats(*counts,
                                   step_counter=self.step_counter,
                                   current_wave=self.current_wave)
            # client index -1 routes to the DP coordinator.
            self.output_queue.put_nowait(
                (-1, EngineCoreOutputs(scheduler_stats=stats)))

    def run_busy_loop(self):
        """Core busy loop of the EngineCore for data parallel case."""

        # Loop until process is sent a SIGINT or SIGTERM
        while True:
            # 1) Poll the input queue until there is work to do.
            self._process_input_queue()

            # 2) Step the engine core.
            executed = self._process_engine_step()
            self._maybe_publish_request_counts()

            local_unfinished_reqs = self.scheduler.has_unfinished_requests()
            if not executed:
                if not local_unfinished_reqs and not self.engines_running:
                    # All engines are idle.
                    continue

                # We are in a running state and so must execute a dummy pass
                # if the model didn't execute any ready requests.
                self.execute_dummy_batch()

            # 3) All-reduce operation to determine global unfinished reqs.
            self.engines_running = self._has_global_unfinished_reqs(
                local_unfinished_reqs)

            if not self.engines_running:
                if self.dp_rank == 0 or not self.has_coordinator:
                    # Notify client that we are pausing the loop.
                    logger.debug("Wave %d finished, pausing engine loop.",
                                 self.current_wave)
                    # In the coordinator case, dp rank 0 sends updates to the
                    # coordinator. Otherwise (offline spmd case), each rank
                    # sends the update to its colocated front-end process.
                    client_index = -1 if self.has_coordinator else 0
                    self.output_queue.put_nowait(
                        (client_index,
                         EngineCoreOutputs(wave_complete=self.current_wave)))
                # Increment wave count and reset step counter.
                self.current_wave += 1
                self.step_counter = 0

    def _has_global_unfinished_reqs(self, local_unfinished: bool) -> bool:
        """All-reduce whether any DP rank still has unfinished requests."""

        # Optimization - only perform finish-sync all-reduce every 32 steps.
        self.step_counter += 1
        if self.step_counter % 32 != 0:
            return True

        return ParallelConfig.has_unfinished_dp(self.dp_group,
                                                local_unfinished)

    def reinitialize_distributed(
            self, reconfig_request: ReconfigureDistributedRequest) -> None:
        """Rebuild the DP process group for elastic scale up/down."""
        stateless_destroy_torch_distributed_process_group(self.dp_group)
        self.shutdown()

        parallel_config = self.vllm_config.parallel_config
        old_dp_size = parallel_config.data_parallel_size
        parallel_config.data_parallel_size = \
            reconfig_request.new_data_parallel_size
        # NOTE(review): -1 appears to correspond to
        # ReconfigureRankType.KEEP_CURRENT_RANK — confirm against the enum.
        if reconfig_request.new_data_parallel_rank != -1:
            parallel_config.data_parallel_rank = \
                reconfig_request.new_data_parallel_rank
        # local rank specifies device visibility, it should not be changed
        assert reconfig_request.new_data_parallel_rank_local == \
            ReconfigureRankType.KEEP_CURRENT_RANK
        parallel_config.data_parallel_master_ip = \
            reconfig_request.new_data_parallel_master_ip
        parallel_config.data_parallel_master_port = \
            reconfig_request.new_data_parallel_master_port
        # NOTE(review): -2 appears to correspond to
        # ReconfigureRankType.SHUTDOWN_CURRENT_RANK (see below) — a rank
        # being shut down does not rejoin the new group.
        if reconfig_request.new_data_parallel_rank != -2:
            self.dp_rank = parallel_config.data_parallel_rank
            self.dp_group = parallel_config.stateless_init_dp_group()
        # Propagate the (possibly re-assigned) master port back to caller.
        reconfig_request.new_data_parallel_master_port = \
            parallel_config.data_parallel_master_port

        self.model_executor.reinitialize_distributed(reconfig_request)
        if reconfig_request.new_data_parallel_size > old_dp_size:
            assert self.available_gpu_memory_for_kv_cache > 0
            # pass available_gpu_memory_for_kv_cache from existing
            # engine-cores to new engine-cores so they can directly
            # use it in _initialize_kv_caches() rather than profiling.
            ParallelConfig.sync_kv_cache_memory_size(
                self.dp_group, self.available_gpu_memory_for_kv_cache)
            # NOTE(yongji): newly joined workers require dummy_run even
            # CUDA graph is not used
            self.model_executor.collective_rpc("compile_or_warm_up_model")
        if reconfig_request.new_data_parallel_rank == \
                ReconfigureRankType.SHUTDOWN_CURRENT_RANK:
            self.shutdown()
            logger.info("DPEngineCoreProc %s shutdown", self.dp_rank)
        else:
            logger.info("Distributed environment reinitialized for DP rank %s",
                        self.dp_rank)
|
| 1120 |
+
|
| 1121 |
+
|
| 1122 |
+
class DPEngineCoreActor(DPEngineCoreProc):
    """
    Ray actor for running EngineCore in a data parallel context.

    Unlike the MP path, all ZMQ addresses are known before actor
    creation, so the startup handshake is skipped (see
    _perform_handshakes).
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_client: bool,
        addresses: EngineZmqAddresses,
        executor_class: type[Executor],
        log_stats: bool,
        dp_rank: int = 0,
        local_dp_rank: int = 0,
    ):
        self.addresses = addresses
        vllm_config.parallel_config.data_parallel_rank = dp_rank
        vllm_config.parallel_config.data_parallel_rank_local = \
            local_dp_rank

        # Set CUDA_VISIBLE_DEVICES as early as possible in actor life cycle
        # NOTE: in MP we set CUDA_VISIBLE_DEVICES at process creation time,
        # and this cannot be done in the same way for Ray because:
        # 1) Ray manages life cycle of all ray workers (including
        # DPEngineCoreActor)
        # 2) Ray sets CUDA_VISIBLE_DEVICES based on num_gpus configuration
        # To bypass 2, we need to also set
        # RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES, but vLLM workers created
        # thereafter would have CUDA_VISIBLE_DEVICES set, which is sticky:
        # https://github.com/ray-project/ray/blob/e752fc319ddedd9779a0989b6d3613909bad75c9/python/ray/_private/worker.py#L456 # noqa: E501
        # This is problematic because when the vLLM worker (a Ray actor)
        # executes a task, it indexes into the sticky CUDA_VISIBLE_DEVICES
        # rather than directly using the GPU ID, potentially resulting in
        # index out of bounds error. See:
        # https://github.com/ray-project/ray/pull/40461/files#diff-31e8159767361e4bc259b6d9883d9c0d5e5db780fcea4a52ead4ee3ee4a59a78R1860 # noqa: E501
        # and get_accelerator_ids_for_accelerator_resource() in worker.py
        # of ray.
        self._set_cuda_visible_devices(vllm_config, local_dp_rank)

        # Empty handshake address: handshakes are bypassed for Ray.
        super().__init__(vllm_config, local_client, "", executor_class,
                         log_stats)

    def _set_cuda_visible_devices(self, vllm_config: VllmConfig,
                                  local_dp_rank: int):
        """Restrict device visibility to this DP rank's GPU slice.

        Rank i gets physical devices [i * world_size, (i + 1) * world_size).
        """
        from vllm.platforms import current_platform
        device_control_env_var = current_platform.device_control_env_var
        world_size = vllm_config.parallel_config.world_size
        # Set CUDA_VISIBLE_DEVICES or equivalent.
        try:
            os.environ[device_control_env_var] = ",".join(
                str(current_platform.device_id_to_physical_device_id(i))
                for i in range(local_dp_rank *
                               world_size, (local_dp_rank + 1) * world_size))
        except IndexError as e:
            raise Exception(
                f"Error setting {device_control_env_var}: "
                f"local range: [{local_dp_rank * world_size}, "
                f"{(local_dp_rank + 1) * world_size}) "
                f"base value: \"{os.getenv(device_control_env_var)}\"") from e

    @contextmanager
    def _perform_handshakes(self, handshake_address: str, identity: bytes,
                            local_client: bool, vllm_config: VllmConfig,
                            client_handshake_address: Optional[str]):
        """
        For Ray, we don't need to actually perform handshake.
        All addresses information is known before the actor creation.
        Therefore, we simply yield these addresses.
        """
        yield self.addresses

    def wait_for_init(self):
        """
        Wait until the engine core is initialized.

        This is just an empty method. When ray.get() on this method
        (or any other method of the actor) returns, it is guaranteed
        that actor creation (i.e., __init__) is complete.
        """
        pass

    def run(self):
        """
        Run the engine core busy loop.

        Mirrors run_engine_core's error handling: log and re-raise, and
        always shut the engine down on exit.
        """
        try:
            self.run_busy_loop()
        except SystemExit:
            logger.debug("EngineCore exiting.")
            raise
        except Exception:
            logger.exception("EngineCore encountered a fatal error.")
            raise
        finally:
            self.shutdown()
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/core_client.py
ADDED
|
@@ -0,0 +1,1344 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
import asyncio
|
| 4 |
+
import contextlib
|
| 5 |
+
import multiprocessing
|
| 6 |
+
import queue
|
| 7 |
+
import sys
|
| 8 |
+
import uuid
|
| 9 |
+
import weakref
|
| 10 |
+
from abc import ABC, abstractmethod
|
| 11 |
+
from collections import defaultdict, deque
|
| 12 |
+
from collections.abc import Awaitable, Sequence
|
| 13 |
+
from concurrent.futures import Future
|
| 14 |
+
from dataclasses import dataclass
|
| 15 |
+
from threading import Thread
|
| 16 |
+
from typing import Any, Callable, Optional, TypeVar, Union
|
| 17 |
+
|
| 18 |
+
import msgspec.msgpack
|
| 19 |
+
import zmq
|
| 20 |
+
import zmq.asyncio
|
| 21 |
+
|
| 22 |
+
from vllm.config import VllmConfig
|
| 23 |
+
from vllm.logger import init_logger
|
| 24 |
+
from vllm.lora.request import LoRARequest
|
| 25 |
+
from vllm.tasks import SupportedTask
|
| 26 |
+
from vllm.utils import (close_sockets, get_open_port, get_open_zmq_inproc_path,
|
| 27 |
+
in_loop, make_zmq_socket)
|
| 28 |
+
from vllm.v1.engine import (EngineCoreOutputs, EngineCoreRequest,
|
| 29 |
+
EngineCoreRequestType,
|
| 30 |
+
ReconfigureDistributedRequest, ReconfigureRankType,
|
| 31 |
+
UtilityOutput)
|
| 32 |
+
from vllm.v1.engine.coordinator import DPCoordinator
|
| 33 |
+
from vllm.v1.engine.core import EngineCore, EngineCoreProc
|
| 34 |
+
from vllm.v1.engine.exceptions import EngineDeadError
|
| 35 |
+
from vllm.v1.engine.utils import (CoreEngineActorManager,
|
| 36 |
+
CoreEngineProcManager, launch_core_engines)
|
| 37 |
+
from vllm.v1.executor.abstract import Executor
|
| 38 |
+
from vllm.v1.serial_utils import MsgpackDecoder, MsgpackEncoder, bytestr
|
| 39 |
+
|
| 40 |
+
logger = init_logger(__name__)
|
| 41 |
+
|
| 42 |
+
AnyFuture = Union[asyncio.Future[Any], Future[Any]]
|
| 43 |
+
|
| 44 |
+
_R = TypeVar('_R') # Return type for collective_rpc
|
| 45 |
+
|
| 46 |
+
EngineIdentity = bytes
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class EngineCoreClient(ABC):
    """
    EngineCoreClient: subclasses handle different methods for pushing
    and pulling from the EngineCore for asyncio / multiprocessing.

    Subclasses:
    * InprocClient: In process EngineCore (for V0-style LLMEngine use)
    * SyncMPClient: ZMQ + background proc EngineCore (for LLM)
    * AsyncMPClient: ZMQ + background proc EngineCore w/ asyncio (for AsyncLLM)
    """

    @staticmethod
    def make_client(
        multiprocess_mode: bool,
        asyncio_mode: bool,
        vllm_config: VllmConfig,
        executor_class: type[Executor],
        log_stats: bool,
    ) -> "EngineCoreClient":
        """Construct the client implementation matching the requested modes."""

        # TODO: support this for debugging purposes.
        if asyncio_mode and not multiprocess_mode:
            raise NotImplementedError(
                "Running EngineCore in asyncio without multiprocessing "
                "is not currently supported.")

        if multiprocess_mode and asyncio_mode:
            return EngineCoreClient.make_async_mp_client(
                vllm_config, executor_class, log_stats)

        if multiprocess_mode and not asyncio_mode:
            return SyncMPClient(vllm_config, executor_class, log_stats)

        return InprocClient(vllm_config, executor_class, log_stats)

    @staticmethod
    def make_async_mp_client(
        vllm_config: VllmConfig,
        executor_class: type[Executor],
        log_stats: bool,
        client_addresses: Optional[dict[str, str]] = None,
        client_count: int = 1,
        client_index: int = 0,
    ) -> "MPClient":
        """Construct the asyncio multiprocess client variant appropriate for
        the data-parallel configuration."""
        parallel_config = vllm_config.parallel_config
        client_args = (vllm_config, executor_class, log_stats,
                       client_addresses, client_count, client_index)
        if parallel_config.data_parallel_size > 1:
            if parallel_config.data_parallel_external_lb:
                # External load balancer - client per DP rank.
                return DPAsyncMPClient(*client_args)
            # Internal load balancer - client balances to all DP ranks.
            return DPLBAsyncMPClient(*client_args)
        return AsyncMPClient(*client_args)

    @abstractmethod
    def shutdown(self):
        ...

    # Synchronous interface: concrete subclasses override the subset
    # they support; the defaults raise NotImplementedError.

    def get_output(self) -> EngineCoreOutputs:
        raise NotImplementedError

    def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        raise NotImplementedError

    def add_request(self, request: EngineCoreRequest) -> None:
        raise NotImplementedError

    def profile(self, is_start: bool = True) -> None:
        raise NotImplementedError

    def reset_mm_cache(self) -> None:
        raise NotImplementedError

    def reset_prefix_cache(self) -> None:
        raise NotImplementedError

    def sleep(self, level: int = 1) -> None:
        raise NotImplementedError

    def wake_up(self, tags: Optional[list[str]] = None) -> None:
        raise NotImplementedError

    def is_sleeping(self) -> bool:
        raise NotImplementedError

    def execute_dummy_batch(self) -> None:
        raise NotImplementedError

    async def execute_dummy_batch_async(self) -> None:
        raise NotImplementedError

    def abort_requests(self, request_ids: list[str]) -> None:
        raise NotImplementedError

    def add_lora(self, lora_request: LoRARequest) -> bool:
        raise NotImplementedError

    def remove_lora(self, lora_id: int) -> bool:
        raise NotImplementedError

    def list_loras(self) -> set[int]:
        raise NotImplementedError

    def pin_lora(self, lora_id: int) -> bool:
        raise NotImplementedError

    def save_sharded_state(self,
                           path: str,
                           pattern: Optional[str] = None,
                           max_size: Optional[int] = None) -> None:
        raise NotImplementedError

    def collective_rpc(self,
                       method: Union[str, Callable[..., _R]],
                       timeout: Optional[float] = None,
                       args: tuple = (),
                       kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
        raise NotImplementedError

    def dp_engines_running(self) -> bool:
        """Returns True if data parallel engines are collectively in a
        running state."""
        raise NotImplementedError

    async def scale_elastic_ep(self, new_data_parallel_size: int) -> None:
        raise NotImplementedError

    # Asyncio interface: mirrors the synchronous methods above for
    # AsyncMPClient-style subclasses.

    async def get_output_async(self) -> EngineCoreOutputs:
        raise NotImplementedError

    async def get_supported_tasks_async(self) -> tuple[SupportedTask, ...]:
        raise NotImplementedError

    async def add_request_async(self, request: EngineCoreRequest) -> None:
        raise NotImplementedError

    async def profile_async(self, is_start: bool = True) -> None:
        raise NotImplementedError

    async def reset_mm_cache_async(self) -> None:
        raise NotImplementedError

    async def reset_prefix_cache_async(self) -> None:
        raise NotImplementedError

    async def sleep_async(self, level: int = 1) -> None:
        raise NotImplementedError

    async def wake_up_async(self, tags: Optional[list[str]] = None) -> None:
        raise NotImplementedError

    async def is_sleeping_async(self) -> bool:
        raise NotImplementedError

    async def abort_requests_async(self, request_ids: list[str]) -> None:
        raise NotImplementedError

    async def add_lora_async(self, lora_request: LoRARequest) -> bool:
        raise NotImplementedError

    async def remove_lora_async(self, lora_id: int) -> bool:
        raise NotImplementedError

    async def list_loras_async(self) -> set[int]:
        raise NotImplementedError

    async def pin_lora_async(self, lora_id: int) -> bool:
        raise NotImplementedError

    async def save_sharded_state_async(self,
                                       path: str,
                                       pattern: Optional[str] = None,
                                       max_size: Optional[int] = None) -> None:
        raise NotImplementedError

    async def collective_rpc_async(
            self,
            method: Union[str, Callable[..., _R]],
            timeout: Optional[float] = None,
            args: tuple = (),
            kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
        raise NotImplementedError
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
class InprocClient(EngineCoreClient):
    """
    In-process EngineCore client. Intended for use in LLMEngine for
    V0-style add_request() and step(); the EngineCore is constructed
    in this process (no busy loop).

    * pushes EngineCoreRequest directly into the EngineCore
    * pulls EngineCoreOutputs by stepping the EngineCore
    """

    def __init__(self, *args, **kwargs):
        # Own the engine directly; every method below is plain delegation.
        self.engine_core = EngineCore(*args, **kwargs)

    def get_output(self) -> EngineCoreOutputs:
        step_outputs, _ = self.engine_core.step()
        # step() keys its outputs by client index; in-process always uses 0.
        return step_outputs.get(0) or EngineCoreOutputs()

    def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        return self.engine_core.get_supported_tasks()

    def add_request(self, request: EngineCoreRequest) -> None:
        processed_req, request_wave = (
            self.engine_core.preprocess_add_request(request))
        self.engine_core.add_request(processed_req, request_wave)

    def abort_requests(self, request_ids: list[str]) -> None:
        # Skip the call entirely when there is nothing to abort.
        if request_ids:
            self.engine_core.abort_requests(request_ids)

    def shutdown(self) -> None:
        self.engine_core.shutdown()

    def profile(self, is_start: bool = True) -> None:
        self.engine_core.profile(is_start)

    def reset_mm_cache(self) -> None:
        self.engine_core.reset_mm_cache()

    def reset_prefix_cache(self) -> None:
        self.engine_core.reset_prefix_cache()

    def sleep(self, level: int = 1) -> None:
        self.engine_core.sleep(level)

    def wake_up(self, tags: Optional[list[str]] = None) -> None:
        self.engine_core.wake_up(tags)

    def is_sleeping(self) -> bool:
        return self.engine_core.is_sleeping()

    def execute_dummy_batch(self) -> None:
        self.engine_core.execute_dummy_batch()

    def add_lora(self, lora_request: LoRARequest) -> bool:
        return self.engine_core.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        return self.engine_core.remove_lora(lora_id)

    def list_loras(self) -> set[int]:
        return self.engine_core.list_loras()

    def pin_lora(self, lora_id: int) -> bool:
        return self.engine_core.pin_lora(lora_id)

    def save_sharded_state(self,
                           path: str,
                           pattern: Optional[str] = None,
                           max_size: Optional[int] = None) -> None:
        self.engine_core.save_sharded_state(path, pattern, max_size)

    def collective_rpc(self,
                       method: Union[str, Callable[..., _R]],
                       timeout: Optional[float] = None,
                       args: tuple = (),
                       kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
        return self.engine_core.collective_rpc(method, timeout, args, kwargs)

    def dp_engines_running(self) -> bool:
        # A single in-process engine never participates in DP waves.
        return False
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
@dataclass
class BackgroundResources:
    """Used as a finalizer for clean shutdown, avoiding
    circular reference back to the client object."""

    ctx: zmq.Context
    # If CoreEngineProcManager, it manages local engines;
    # if CoreEngineActorManager, it manages all engines.
    engine_manager: Optional[Union[CoreEngineProcManager,
                                   CoreEngineActorManager]] = None
    coordinator: Optional[DPCoordinator] = None
    output_socket: Optional[Union[zmq.Socket, zmq.asyncio.Socket]] = None
    input_socket: Optional[Union[zmq.Socket, zmq.asyncio.Socket]] = None
    first_req_send_socket: Optional[zmq.asyncio.Socket] = None
    first_req_rcv_socket: Optional[zmq.asyncio.Socket] = None
    stats_update_socket: Optional[zmq.asyncio.Socket] = None
    output_queue_task: Optional[asyncio.Task] = None
    stats_update_task: Optional[asyncio.Task] = None
    shutdown_path: Optional[str] = None

    # Set if any of the engines are dead. Here so that the output
    # processing threads can access it without holding a ref to the client.
    engine_dead: bool = False

    def __call__(self):
        """Clean up background resources.

        May be invoked from weakref.finalize / garbage collection on an
        arbitrary thread, so it must not assume a running event loop.
        """

        self.engine_dead = True
        if self.engine_manager is not None:
            self.engine_manager.close()
        if self.coordinator is not None:
            self.coordinator.close()

        if isinstance(self.output_socket, zmq.asyncio.Socket):
            # Async case. The sockets and tasks belong to the socket's event
            # loop, so close them on that loop when it is still usable.
            # NOTE: a bare asyncio.get_running_loop() call was removed here -
            # its result was discarded and it raises RuntimeError whenever
            # this finalizer runs outside an event loop thread, which would
            # abort the cleanup below.
            loop = self.output_socket._get_loop()
            sockets = (self.output_socket, self.input_socket,
                       self.first_req_send_socket, self.first_req_rcv_socket,
                       self.stats_update_socket)

            tasks = (self.output_queue_task, self.stats_update_task)

            def close_sockets_and_tasks():
                close_sockets(sockets)
                for task in tasks:
                    if task is not None and not task.done():
                        task.cancel()

            if in_loop(loop):
                # Already on the owning loop: clean up inline.
                close_sockets_and_tasks()
            elif not loop.is_closed():
                # Schedule cleanup on the owning loop from this thread.
                loop.call_soon_threadsafe(close_sockets_and_tasks)
            else:
                # Loop has been closed, try to clean up directly.
                del tasks
                del close_sockets_and_tasks
                close_sockets(sockets)
                del self.output_queue_task
                del self.stats_update_task
        else:
            # Sync case.

            # ZMQ context termination can hang if the sockets
            # aren't explicitly closed first.
            close_sockets((self.output_socket, self.input_socket))

            if self.shutdown_path is not None:
                # We must ensure that the sync output socket is
                # closed cleanly in its own thread.
                with self.ctx.socket(zmq.PAIR) as shutdown_sender:
                    shutdown_sender.connect(self.shutdown_path)
                    # Send shutdown signal.
                    shutdown_sender.send(b'')

    def validate_alive(self, frames: Sequence[zmq.Frame]):
        """Raise EngineDeadError if `frames` is the engine-death sentinel."""
        if len(frames) == 1 and (frames[0].buffer
                                 == EngineCoreProc.ENGINE_CORE_DEAD):
            self.engine_dead = True
            raise EngineDeadError()
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
class MPClient(EngineCoreClient):
    """
    MPClient: base client for multi-proc EngineCore.
    EngineCore runs in a background process busy loop, getting
    new EngineCoreRequests and returning EngineCoreOutputs

    * pushes EngineCoreRequests via input_socket
    * pulls EngineCoreOutputs via output_socket

    * AsyncMPClient subclass for AsyncLLM usage
    * SyncMPClient subclass for LLM usage
    """

    def __init__(
        self,
        asyncio_mode: bool,
        vllm_config: VllmConfig,
        executor_class: type[Executor],
        log_stats: bool,
        client_addresses: Optional[dict[str, str]] = None,
    ):
        self.vllm_config = vllm_config
        # Serialization setup.
        self.encoder = MsgpackEncoder()
        self.decoder = MsgpackDecoder(EngineCoreOutputs)

        # ZMQ setup.
        sync_ctx = zmq.Context(io_threads=2)
        self.ctx = zmq.asyncio.Context(sync_ctx) if asyncio_mode else sync_ctx

        # This will ensure resources created so far are closed
        # when the client is garbage collected, even if an
        # exception is raised mid-construction.
        self.resources = BackgroundResources(ctx=sync_ctx)
        self._finalizer = weakref.finalize(self, self.resources)
        success = False
        try:
            # State used for data parallel.
            self.engines_running = False

            self.stats_update_address: Optional[str] = None
            if client_addresses is not None:
                # Engines are managed externally to this client.
                input_address = client_addresses["input_address"]
                output_address = client_addresses["output_address"]
                self.stats_update_address = client_addresses.get(
                    "stats_update_address")
            else:
                # Engines are managed by this client.
                with launch_core_engines(vllm_config, executor_class,
                                         log_stats) as (engine_manager,
                                                        coordinator,
                                                        addresses):
                    self.resources.coordinator = coordinator
                    self.resources.engine_manager = engine_manager

                (input_address, ) = addresses.inputs
                (output_address, ) = addresses.outputs
                self.stats_update_address = (
                    addresses.frontend_stats_publish_address)
                if coordinator is not None:
                    assert self.stats_update_address == (
                        coordinator.get_stats_publish_address())

            # Create input and output sockets.
            self.input_socket = self.resources.input_socket = make_zmq_socket(
                self.ctx, input_address, zmq.ROUTER, bind=True)
            self.resources.output_socket = make_zmq_socket(
                self.ctx, output_address, zmq.PULL)

            parallel_config = vllm_config.parallel_config
            dp_size = parallel_config.data_parallel_size
            dp_rank = parallel_config.data_parallel_rank
            dp_local_size = parallel_config.data_parallel_size_local
            offline_mode = parallel_config.data_parallel_rank_local is not None
            # Client manages local+remote EngineCores in pure internal LB case.
            # Client manages local EngineCores in hybrid and external LB case.
            local_engines_only = (parallel_config.data_parallel_hybrid_lb
                                  or parallel_config.data_parallel_external_lb)

            num_ranks = dp_local_size if local_engines_only else dp_size
            self.engine_ranks_managed = [dp_rank] if offline_mode else list(
                range(dp_rank, dp_rank + num_ranks))
            assert parallel_config.data_parallel_size_local <= len(
                self.engine_ranks_managed)

            # ZMQ identity of each engine that this client will talk to.
            self.core_engines: list[EngineIdentity] = [
                rank.to_bytes(2, "little")
                for rank in self.engine_ranks_managed
            ]

            # Wait for ready messages from each engine on the input socket.
            identities = set(self.core_engines)
            sync_input_socket = zmq.Socket.shadow(self.input_socket)
            while identities:
                if not sync_input_socket.poll(timeout=600_000):
                    # BUGFIX: trailing space added so the implicitly
                    # concatenated message does not read "sendinitial".
                    raise TimeoutError("Timed out waiting for engines to send "
                                       "initial message on input socket.")
                identity, _ = sync_input_socket.recv_multipart()
                identities.remove(identity)

            self.core_engine: EngineIdentity = self.core_engines[0]
            self.utility_results: dict[int, AnyFuture] = {}

            # Request objects which may contain pytorch-allocated tensors
            # that we need to keep references to until zmq is done with the
            # underlying data.
            self.pending_messages = deque[tuple[zmq.MessageTracker, Any]]()

            # Start monitoring engine core processes for unexpected failures
            self.start_engine_core_monitor()

            success = True
        finally:
            if not success:
                self._finalizer()

    def shutdown(self):
        # Terminate background resources.
        self._finalizer()

    def _format_exception(self, e: Exception) -> Exception:
        """If errored, use EngineDeadError so root cause is clear."""
        return EngineDeadError(
            suppress_context=True) if self.resources.engine_dead else e

    def ensure_alive(self):
        """Raise EngineDeadError immediately if any engine has died."""
        if self.resources.engine_dead:
            raise EngineDeadError()

    def add_pending_message(self, tracker: zmq.MessageTracker, msg: Any):
        """Hold a reference to `msg` until zmq has finished sending it."""
        if not tracker.done:
            self.pending_messages.appendleft((tracker, msg))

    def free_pending_messages(self):
        """Drop references for messages zmq has finished sending."""
        while self.pending_messages and self.pending_messages[-1][0].done:
            self.pending_messages.pop()

    def dp_engines_running(self) -> bool:
        return self.engines_running

    def start_engine_core_monitor(self):
        """Start a monitor thread for engine core processes."""
        engine_manager = self.resources.engine_manager
        if (engine_manager is None or not hasattr(engine_manager, 'processes')
                or not engine_manager.processes):
            # No engine processes to monitor
            return

        engine_processes = engine_manager.processes
        self_ref = weakref.ref(self)

        # Monitor engine core process liveness. If any die unexpectedly,
        # logs an error, shuts down the client and invokes the failure
        # callback to inform the engine.
        def monitor_engine_cores():
            sentinels = [proc.sentinel for proc in engine_processes]
            died = multiprocessing.connection.wait(sentinels)
            _self = self_ref()
            if not _self or _self.resources.engine_dead:
                return
            _self.resources.engine_dead = True
            proc_name = next(proc.name for proc in engine_processes
                             if proc.sentinel == died[0])
            logger.error(
                "Engine core proc %s died unexpectedly, "
                "shutting down client.", proc_name)
            _self.shutdown()
            # Note: For MPClient, we don't have a failure callback mechanism
            # like MultiprocExecutor, but we set engine_dead flag which will
            # cause subsequent operations to raise EngineDeadError

        Thread(target=monitor_engine_cores,
               daemon=True,
               name="MPClientEngineMonitor").start()
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
def _process_utility_output(output: UtilityOutput,
                            utility_results: dict[int, AnyFuture]):
    """Set the result from a utility method in the waiting future."""
    waiter = utility_results.pop(output.call_id)
    failure_message = output.failure_message
    try:
        if failure_message is None:
            # Success path: deliver the unwrapped result.
            assert output.result is not None
            waiter.set_result(output.result.result)
        else:
            waiter.set_exception(Exception(failure_message))
    except asyncio.InvalidStateError:
        # This can happen if the future is cancelled due to the
        # original calling task being cancelled.
        if failure_message is not None:
            logger.error(
                "Cancelled call to utility method failed "
                "with error: %s", failure_message)
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
class SyncMPClient(MPClient):
    """Synchronous client for multi-proc EngineCore."""

    def __init__(self, vllm_config: VllmConfig, executor_class: type[Executor],
                 log_stats: bool):
        super().__init__(
            asyncio_mode=False,
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_stats=log_stats,
        )

        self.is_dp = self.vllm_config.parallel_config.data_parallel_size > 1
        self.outputs_queue = queue.Queue[Union[EngineCoreOutputs, Exception]]()

        # Bind everything the reader thread needs to locals so the thread
        # holds no reference to the client (which would prevent gc).
        zmq_ctx = self.ctx
        output_sock = self.resources.output_socket
        msg_decoder = self.decoder
        pending_utility = self.utility_results
        results_queue = self.outputs_queue

        shutdown_path = get_open_zmq_inproc_path()
        resources = self.resources
        resources.shutdown_path = shutdown_path

        def process_outputs_socket():
            assert isinstance(output_sock, zmq.Socket)
            term_socket = zmq_ctx.socket(zmq.PAIR)
            try:
                term_socket.bind(shutdown_path)
                poller = zmq.Poller()
                poller.register(term_socket, zmq.POLLIN)
                poller.register(output_sock, zmq.POLLIN)
                while True:
                    ready = poller.poll()
                    if not ready:
                        continue
                    if len(ready) == 2 or ready[0][0] == term_socket:
                        # Shutdown signal received: exit the thread.
                        break

                    frames = output_sock.recv_multipart(copy=False)
                    resources.validate_alive(frames)
                    engine_outputs: EngineCoreOutputs = (
                        msg_decoder.decode(frames))
                    if engine_outputs.utility_output:
                        _process_utility_output(engine_outputs.utility_output,
                                                pending_utility)
                    else:
                        results_queue.put_nowait(engine_outputs)
            except Exception as e:
                # Forward the failure to the consumer via the queue.
                results_queue.put_nowait(e)
            finally:
                # Close sockets.
                term_socket.close(linger=0)
                output_sock.close(linger=0)

        # Process outputs from engine in separate thread.
        self.output_queue_thread = Thread(target=process_outputs_socket,
                                          name="EngineCoreOutputQueueThread",
                                          daemon=True)
        self.output_queue_thread.start()

        # The thread takes on responsibility for closing the socket.
        self.resources.output_socket = None

    def get_output(self) -> EngineCoreOutputs:
        # Exceptions raised in the reader thread are forwarded through
        # the queue so we can re-raise them here and shut down the server.
        item = self.outputs_queue.get()
        if isinstance(item, Exception):
            raise self._format_exception(item) from None
        if item.wave_complete is not None:
            self.engines_running = False
        return item

    def _send_input(self, request_type: EngineCoreRequestType, request: Any):
        self.ensure_alive()
        self.free_pending_messages()
        # (Identity, RequestType, SerializedRequest)
        msg = (self.core_engine, request_type.value,
               *self.encoder.encode(request))

        if len(msg) > 3:
            # Auxiliary buffers present: keep the request alive until zmq
            # is done with the tensor-backed data.
            tracker = self.input_socket.send_multipart(msg,
                                                       copy=False,
                                                       track=True)
            self.add_pending_message(tracker, request)
        else:
            # No auxiliary buffers => no tensor backing buffers in request.
            self.input_socket.send_multipart(msg, copy=False)

    def call_utility(self, method: str, *args) -> Any:
        """Invoke a utility method on the engine and block for its result."""
        call_id = uuid.uuid1().int >> 64
        result_future: Future[Any] = Future()
        self.utility_results[call_id] = result_future
        self._send_input(EngineCoreRequestType.UTILITY,
                         (0, call_id, method, args))
        return result_future.result()

    def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        return self.call_utility("get_supported_tasks")

    def add_request(self, request: EngineCoreRequest) -> None:
        if self.is_dp:
            self.engines_running = True
        self._send_input(EngineCoreRequestType.ADD, request)

    def abort_requests(self, request_ids: list[str]) -> None:
        if request_ids and not self.resources.engine_dead:
            self._send_input(EngineCoreRequestType.ABORT, request_ids)

    def profile(self, is_start: bool = True) -> None:
        self.call_utility("profile", is_start)

    def reset_mm_cache(self) -> None:
        self.call_utility("reset_mm_cache")

    def reset_prefix_cache(self) -> None:
        self.call_utility("reset_prefix_cache")

    def add_lora(self, lora_request: LoRARequest) -> bool:
        return self.call_utility("add_lora", lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        return self.call_utility("remove_lora", lora_id)

    def list_loras(self) -> set[int]:
        return self.call_utility("list_loras")

    def pin_lora(self, lora_id: int) -> bool:
        return self.call_utility("pin_lora", lora_id)

    def sleep(self, level: int = 1) -> None:
        self.call_utility("sleep", level)

    def wake_up(self, tags: Optional[list[str]] = None) -> None:
        self.call_utility("wake_up", tags)

    def is_sleeping(self) -> bool:
        return self.call_utility("is_sleeping")

    def execute_dummy_batch(self) -> None:
        self.call_utility("execute_dummy_batch")

    def collective_rpc(self,
                       method: Union[str, Callable[..., _R]],
                       timeout: Optional[float] = None,
                       args: tuple = (),
                       kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
        return self.call_utility("collective_rpc", method, timeout, args,
                                 kwargs)

    def save_sharded_state(self,
                           path: str,
                           pattern: Optional[str] = None,
                           max_size: Optional[int] = None) -> None:
        self.call_utility("save_sharded_state", path, pattern, max_size)
|
| 755 |
+
|
| 756 |
+
|
| 757 |
+
class AsyncMPClient(MPClient):
    """Asyncio-compatible client for multi-proc EngineCore."""

    def __init__(self,
                 vllm_config: VllmConfig,
                 executor_class: type[Executor],
                 log_stats: bool,
                 client_addresses: Optional[dict[str, str]] = None,
                 client_count: int = 1,
                 client_index: int = 0):
        super().__init__(
            asyncio_mode=True,
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_stats=log_stats,
            client_addresses=client_addresses,
        )

        # Identifies this client in multi-client (API-server-scale-out)
        # deployments; stamped onto outgoing requests.
        self.client_index = client_index
        self.outputs_queue = asyncio.Queue[Union[EngineCoreOutputs,
                                                 Exception]]()
        try:
            # If we are running in an asyncio event loop, start the queue task.
            # Otherwise, it will be started lazily. If it is not started here,
            # we could miss EXECUTOR_FAILED messages from engine core if they
            # occur prior to any requests being sent.
            asyncio.get_running_loop()
            self._ensure_output_queue_task()
        except RuntimeError:
            pass

    def _ensure_output_queue_task(self):
        """Start the background task that drains the output socket into
        ``self.outputs_queue``; idempotent (no-op if already running)."""
        resources = self.resources
        if resources.output_queue_task is not None:
            return

        # Perform IO in separate task to parallelize as much as possible.
        # Avoid task having direct reference back to the client.
        decoder = self.decoder
        utility_results = self.utility_results
        outputs_queue = self.outputs_queue
        # Optional subclass hook, looked up on the class so the task does
        # not hold a strong reference to `self`.
        output_handler: Optional[Callable[[AsyncMPClient, EngineCoreOutputs],
                                          Awaitable[None]]] = getattr(
                                              self.__class__,
                                              "process_engine_outputs", None)
        _self_ref = weakref.ref(self) if output_handler else None
        output_socket = resources.output_socket
        assert output_socket is not None

        async def process_outputs_socket():
            try:
                while True:
                    frames = await output_socket.recv_multipart(copy=False)
                    resources.validate_alive(frames)
                    outputs: EngineCoreOutputs = decoder.decode(frames)
                    if outputs.utility_output:
                        # Utility (RPC) replies complete their futures
                        # directly and never enter the outputs queue.
                        _process_utility_output(outputs.utility_output,
                                                utility_results)
                        continue

                    if output_handler is not None:
                        assert _self_ref is not None
                        _self = _self_ref()
                        if not _self:
                            # Client has been garbage collected, abort.
                            return
                        await output_handler(_self, outputs)

                    if outputs.outputs or outputs.scheduler_stats:
                        outputs_queue.put_nowait(outputs)
            except Exception as e:
                # Forward the failure to consumers of the queue.
                outputs_queue.put_nowait(e)
            except asyncio.CancelledError:
                outputs_queue.put_nowait(EngineDeadError())

        resources.output_queue_task = asyncio.create_task(
            process_outputs_socket(), name="EngineCoreOutputQueueTask")

    async def get_output_async(self) -> EngineCoreOutputs:
        """Await the next batch of engine outputs; re-raises any error
        forwarded from the socket-draining task."""
        self._ensure_output_queue_task()
        # If an exception arises in process_outputs_socket task,
        # it is forwarded to the outputs_queue so we can raise it
        # from this (run_output_handler) task to shut down the server.
        assert self.outputs_queue is not None
        outputs = await self.outputs_queue.get()
        if isinstance(outputs, Exception):
            raise self._format_exception(outputs) from None
        return outputs

    def _send_input(self,
                    request_type: EngineCoreRequestType,
                    request: Any,
                    engine: Optional[EngineIdentity] = None) -> Awaitable[Any]:
        """Encode and send a typed request to `engine` (default: the sole
        core engine); returns the send awaitable."""
        if engine is None:
            engine = self.core_engine

        message = (request_type.value, *self.encoder.encode(request))
        return self._send_input_message(message, engine, request)

    def _send_input_message(self, message: tuple[bytestr,
                                                 ...], engine: EngineIdentity,
                            objects: Any) -> Awaitable[Any]:
        """
        objects is a reference to retain until zmq is finished with the
        buffers, in case they were extracted from tensors in the request.
        """
        self.ensure_alive()
        self.free_pending_messages()

        msg = (engine, ) + message
        if not objects or len(msg) <= 3:
            # No auxiliary buffers => no tensor backing buffers in request.
            return self.input_socket.send_multipart(msg, copy=False)

        future: asyncio.Future[zmq.MessageTracker]
        future = self.input_socket.send_multipart(msg, copy=False, track=True)

        def add_pending(f: asyncio.Future[zmq.MessageTracker]):
            # Keep `objects` alive until zmq reports the buffers are sent.
            with contextlib.suppress(BaseException):
                self.add_pending_message(f.result(), objects)

        future.add_done_callback(add_pending)
        return future

    async def call_utility_async(self, method: str, *args) -> Any:
        """Invoke a utility RPC on the core engine and await its result."""
        return await self._call_utility_async(method,
                                              *args,
                                              engine=self.core_engine)

    async def _call_utility_async(self, method: str, *args,
                                  engine: EngineIdentity) -> Any:
        """Send a utility RPC to a specific engine; resolves when the
        matching reply (keyed by call_id) arrives on the output socket."""
        # uuid1 timestamp bits give a unique-enough 64-bit call id.
        call_id = uuid.uuid1().int >> 64
        future = asyncio.get_running_loop().create_future()
        self.utility_results[call_id] = future
        message = (EngineCoreRequestType.UTILITY.value, *self.encoder.encode(
            (self.client_index, call_id, method, args)))
        await self._send_input_message(message, engine, args)
        self._ensure_output_queue_task()
        return await future

    async def get_supported_tasks_async(self) -> tuple[SupportedTask, ...]:
        """Return the tasks (e.g. generate/embed) the engine supports."""
        return await self.call_utility_async("get_supported_tasks")

    async def add_request_async(self, request: EngineCoreRequest) -> None:
        """Submit a new request to the engine core."""
        request.client_index = self.client_index
        await self._send_input(EngineCoreRequestType.ADD, request)
        self._ensure_output_queue_task()

    async def abort_requests_async(self, request_ids: list[str]) -> None:
        """Abort in-flight requests; skipped if the engine is already dead."""
        if request_ids and not self.resources.engine_dead:
            await self._send_input(EngineCoreRequestType.ABORT, request_ids)

    async def profile_async(self, is_start: bool = True) -> None:
        """Start (or stop, if is_start=False) engine-side profiling."""
        await self.call_utility_async("profile", is_start)

    async def reset_mm_cache_async(self) -> None:
        """Clear the multi-modal input cache."""
        await self.call_utility_async("reset_mm_cache")

    async def reset_prefix_cache_async(self) -> None:
        """Clear the KV prefix cache."""
        await self.call_utility_async("reset_prefix_cache")

    async def sleep_async(self, level: int = 1) -> None:
        """Put the engine to sleep (offload/free weights per `level`)."""
        await self.call_utility_async("sleep", level)

    async def wake_up_async(self, tags: Optional[list[str]] = None) -> None:
        """Wake the engine from sleep, optionally only for given tags."""
        await self.call_utility_async("wake_up", tags)

    async def is_sleeping_async(self) -> bool:
        """Return whether the engine is currently sleeping."""
        return await self.call_utility_async("is_sleeping")

    async def execute_dummy_batch_async(self) -> None:
        """Run one dummy batch (used to keep DP peers in lockstep)."""
        await self.call_utility_async("execute_dummy_batch")

    async def add_lora_async(self, lora_request: LoRARequest) -> bool:
        """Load a LoRA adapter; returns success."""
        return await self.call_utility_async("add_lora", lora_request)

    async def remove_lora_async(self, lora_id: int) -> bool:
        """Unload a LoRA adapter by id; returns success."""
        return await self.call_utility_async("remove_lora", lora_id)

    async def list_loras_async(self) -> set[int]:
        """Return the ids of currently-loaded LoRA adapters."""
        return await self.call_utility_async("list_loras")

    async def pin_lora_async(self, lora_id: int) -> bool:
        """Pin a LoRA adapter so it is not evicted; returns success."""
        return await self.call_utility_async("pin_lora", lora_id)

    async def save_sharded_state_async(self,
                                       path: str,
                                       pattern: Optional[str] = None,
                                       max_size: Optional[int] = None) -> None:
        """Persist sharded model state under `path` on the workers."""
        await self.call_utility_async("save_sharded_state", path, pattern,
                                      max_size)

    async def collective_rpc_async(
            self,
            method: Union[str, Callable[..., _R]],
            timeout: Optional[float] = None,
            args: tuple = (),
            kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
        """Run a collective RPC on all workers and return their results."""
        return await self.call_utility_async("collective_rpc", method, timeout,
                                             args, kwargs)
|
| 957 |
+
|
| 958 |
+
|
| 959 |
+
class DPAsyncMPClient(AsyncMPClient):
    """Asyncio-compatible client for multi-proc, multi-engine (data parallel)
    EngineCore. Assumes external load-balancing by default."""

    def __init__(self,
                 vllm_config: VllmConfig,
                 executor_class: type[Executor],
                 log_stats: bool,
                 client_addresses: Optional[dict[str, str]] = None,
                 client_count: int = 1,
                 client_index: int = 0):
        # Wave counter for the DP pause/resume protocol; stamped onto
        # outgoing requests so engines can detect stale waves.
        self.current_wave = 0

        super().__init__(vllm_config, executor_class, log_stats,
                         client_addresses, client_count, client_index)

        # List of [waiting, running] pair per engine.
        # Used only by DPLBAsyncMPClient subclass.
        self.lb_engines: list[list[int]] = [[0, 0] for _ in self.core_engines]

        # In-process PAIR socket used to nudge the stats task when the
        # first request arrives while engines are paused.
        self.first_req_sock_addr = get_open_zmq_inproc_path()
        self.first_req_send_socket = self.resources.first_req_send_socket = (
            make_zmq_socket(self.ctx,
                            self.first_req_sock_addr,
                            zmq.PAIR,
                            bind=True))
        try:
            # If we are running in an asyncio event loop, start the stats task.
            # Otherwise, it will be started lazily.
            asyncio.get_running_loop()
            self._ensure_stats_update_task()
        except RuntimeError:
            pass

    def _ensure_stats_update_task(self):
        """Start the background task that subscribes to coordinator stats
        and relays first-request / scale notifications; idempotent."""
        resources = self.resources
        if resources.stats_update_task is not None:
            return

        assert self.stats_update_address is not None
        assert len(self.engine_ranks_managed) > 0
        # NOTE: running and waiting counts from the Coordinator are global
        # and include all EngineCores. This slice selects just the cores
        # managed by this client.
        count_slice = slice(self.engine_ranks_managed[0],
                            self.engine_ranks_managed[-1] + 1)

        async def run_engine_stats_update_task():
            with (make_zmq_socket(self.ctx,
                                  self.stats_update_address,
                                  zmq.XSUB,
                                  linger=0) as socket,
                  make_zmq_socket(self.ctx,
                                  self.first_req_sock_addr,
                                  zmq.PAIR,
                                  bind=False,
                                  linger=0) as first_req_rcv_socket):
                assert isinstance(socket, zmq.asyncio.Socket)
                assert isinstance(first_req_rcv_socket, zmq.asyncio.Socket)
                self.resources.stats_update_socket = socket
                self.resources.first_req_rcv_socket = first_req_rcv_socket
                # Send subscription message.
                await socket.send(b'\x01')

                poller = zmq.asyncio.Poller()
                poller.register(socket, zmq.POLLIN)
                poller.register(first_req_rcv_socket, zmq.POLLIN)

                while True:
                    events = await poller.poll()
                    if not self.engines_running and len(events) == 2 or (
                            events[0][0] == first_req_rcv_socket):
                        # Check if this is a regular request notification or
                        # scale up notification
                        buf = first_req_rcv_socket.recv(
                            flags=zmq.NOBLOCK).result()

                        decoded = msgspec.msgpack.decode(buf)
                        if isinstance(
                                decoded,
                            (list, tuple)) and len(decoded) == 2 and decoded[
                                    0] == "SCALE_ELASTIC_EP":
                            # Extract new engine count from the decoded message
                            new_engine_count = decoded[1]
                            # Send scale up notification to coordinator
                            scale_msg = msgspec.msgpack.encode(
                                ("SCALE_ELASTIC_EP", new_engine_count))
                            await socket.send(scale_msg)
                            continue

                        # we're sending a request while the engines are
                        # paused, so that it can wake the others up
                        # (to run dummy EP loop).
                        assert decoded[0] == "FIRST_REQ"
                        target_eng_index = decoded[1]
                        self.engines_running = True
                        msg = msgspec.msgpack.encode(
                            (target_eng_index, self.current_wave))
                        await socket.send(msg)

                    buf = None
                    while True:
                        # Drain all stats events (we only care about latest).
                        future: asyncio.Future[bytes] = socket.recv(
                            flags=zmq.NOBLOCK)
                        if isinstance(future.exception(), zmq.Again):
                            break
                        buf = future.result()
                    if buf is None:
                        continue

                    # Update local load-balancing state.
                    counts, wave, running = msgspec.msgpack.decode(buf)
                    self.current_wave = wave
                    self.engines_running = running
                    if counts is not None:
                        sliced_counts = counts[count_slice]
                        self.lb_engines = sliced_counts
                        logger.debug("Received counts: %s (%s)", sliced_counts,
                                     count_slice)

        resources.stats_update_task = asyncio.create_task(
            run_engine_stats_update_task())

    async def add_request_async(self, request: EngineCoreRequest) -> None:
        """Submit a request to the chosen DP engine, notifying the
        coordinator if this is the first request while engines are paused."""
        self._ensure_stats_update_task()

        request.current_wave = self.current_wave
        request.client_index = self.client_index

        chosen_engine = self.get_core_engine_for_request(request)
        to_await = self._send_input(EngineCoreRequestType.ADD, request,
                                    chosen_engine)
        if not self.engines_running:
            # Notify coordinator that we're sending a request
            req_msg = msgspec.msgpack.encode(("FIRST_REQ", chosen_engine))
            await self.first_req_send_socket.send(req_msg)

        await to_await

        self._ensure_output_queue_task()

    def get_core_engine_for_request(self, request: EngineCoreRequest):
        """Default routing: always the first core engine (external LB)."""
        return self.core_engine
|
| 1103 |
+
|
| 1104 |
+
|
| 1105 |
+
class DPLBAsyncMPClient(DPAsyncMPClient):
    """Asyncio-compatible client for multi-proc, multi-engine (data parallel)
    EngineCore. Load-balances between multiple engine processes."""

    def __init__(self,
                 vllm_config: VllmConfig,
                 executor_class: type[Executor],
                 log_stats: bool,
                 client_addresses: Optional[dict[str, str]] = None,
                 client_count: int = 1,
                 client_index: int = 0):

        self.client_count = client_count

        # To route aborts to the correct engine.
        self.reqs_in_flight: dict[str, EngineIdentity] = {}

        super().__init__(vllm_config, executor_class, log_stats,
                         client_addresses, client_count, client_index)

        assert len(self.core_engines) > 1

        # Offset into the engine list so that multiple clients start
        # their round-robin scan at different engines.
        self.eng_start_index = (len(self.core_engines) *
                                self.client_index) // client_count

    def get_core_engine_for_request(
            self, request: EngineCoreRequest) -> EngineIdentity:
        """Pick the least-loaded engine (or the explicitly requested DP
        rank) and record the choice for abort routing."""
        # Engines are in rank order.
        if (eng_index := request.data_parallel_rank) is None:
            current_counts = self.lb_engines
            # TODO use P2C alg for larger DP sizes
            num_engines = len(current_counts)
            min_score = sys.maxsize
            eng_index = 0
            for i in range(num_engines):
                # Start from client_index to help with balancing when engines
                # are empty.
                idx = (self.eng_start_index + i) % num_engines
                waiting, running = current_counts[idx]
                # Waiting requests are weighted heavier than running ones.
                score = waiting * 4 + running
                if score < min_score:
                    min_score = score
                    eng_index = idx
            # Increment local waiting count for better balancing between stats
            # updates from the coordinator (which happen every 100ms).
            current_counts[eng_index][0] += self.client_count

        chosen_engine = self.core_engines[eng_index]
        # Record which engine is chosen for this request, to handle aborts.
        self.reqs_in_flight[request.request_id] = chosen_engine
        return chosen_engine

    async def call_utility_async(self, method: str, *args) -> Any:
        """Broadcast a utility RPC to every engine.

        Only the result from the first engine is returned.
        """
        return (await asyncio.gather(*[
            self._call_utility_async(method, *args, engine=engine)
            for engine in self.core_engines
        ]))[0]

    @staticmethod
    async def process_engine_outputs(self: "DPLBAsyncMPClient",
                                     outputs: EngineCoreOutputs):
        # Hook invoked by the output-queue task (static + explicit self to
        # avoid a strong reference cycle); drops finished requests from the
        # abort-routing table.
        if outputs.finished_requests and self.reqs_in_flight:
            for req_id in outputs.finished_requests:
                self.reqs_in_flight.pop(req_id, None)

    async def abort_requests_async(self, request_ids: list[str]) -> None:
        """Route abort messages to the engines that own each request."""
        if not request_ids or self.resources.engine_dead:
            return

        if len(request_ids) == 1:
            # Fast-path common case.
            if engine := self.reqs_in_flight.get(request_ids[0]):
                await self._abort_requests(request_ids, engine)
            return

        by_engine = defaultdict[EngineIdentity, list[str]](list)
        for req_id in request_ids:
            if engine := self.reqs_in_flight.get(req_id):
                by_engine[engine].append(req_id)
        for engine, req_ids in by_engine.items():
            await self._abort_requests(req_ids, engine)

    async def _abort_requests(self, request_ids: list[str],
                              engine: EngineIdentity) -> None:
        """Send an ABORT message for `request_ids` to a specific engine."""
        await self._send_input(EngineCoreRequestType.ABORT, request_ids,
                               engine)

    async def _send_reconfig_message(
            self, reconfig_request: ReconfigureDistributedRequest,
            engine: EngineIdentity) -> asyncio.Future:
        """Send reconfiguration message and return the result future without
        waiting for completion."""
        call_id = uuid.uuid1().int >> 64
        future = asyncio.get_running_loop().create_future()
        self.utility_results[call_id] = future
        message = (EngineCoreRequestType.UTILITY.value, *self.encoder.encode(
            (self.client_index, call_id, "reinitialize_distributed",
             (reconfig_request, ))))
        await self._send_input_message(message, engine, reconfig_request)
        self._ensure_output_queue_task()
        return future

    async def scale_elastic_ep(self, new_data_parallel_size: int) -> None:
        """Scale elastic EP data parallel size"""
        cur_data_parallel_size = len(self.core_engines)

        assert new_data_parallel_size != cur_data_parallel_size, (
            f"new_data_parallel_size {new_data_parallel_size} must be "
            f"different from cur_data_parallel_size {cur_data_parallel_size}")

        assert self.vllm_config.parallel_config.data_parallel_backend == \
            "ray", ("Only ray DP backend supports scaling elastic EP")

        scale_up = new_data_parallel_size > cur_data_parallel_size

        if scale_up:
            await self._scale_up_elastic_ep(cur_data_parallel_size,
                                            new_data_parallel_size)
        else:
            await self._scale_down_elastic_ep(cur_data_parallel_size,
                                              new_data_parallel_size)

    async def _scale_up_elastic_ep(self, cur_data_parallel_size: int,
                                   new_data_parallel_size: int) -> None:
        """Scale up the data parallel size by creating new engine cores
        and reconfiguring existing ones."""
        cur_data_parallel_size = len(self.core_engines)

        # Phase 1: Send reconfigure messages to all existing engines and wait
        # for them to be sent
        reconfig_futures = []
        self.vllm_config.parallel_config.data_parallel_master_port = \
            get_open_port()
        for engine in self.core_engines:
            reconfig_request = ReconfigureDistributedRequest(
                new_data_parallel_size=new_data_parallel_size,
                new_data_parallel_rank=ReconfigureRankType.KEEP_CURRENT_RANK,
                new_data_parallel_rank_local=\
                    ReconfigureRankType.KEEP_CURRENT_RANK,
                new_data_parallel_master_ip=self.vllm_config.parallel_config.
                data_parallel_master_ip,
                new_data_parallel_master_port=self.vllm_config.parallel_config.
                data_parallel_master_port)
            future = await self._send_reconfig_message(reconfig_request,
                                                       engine)
            reconfig_futures.append(future)

        logger.info("All reconfigure messages sent, starting engine creation")

        # Phase 2: Create new engines now that reconfig messages have been sent
        # self.resources.engine_manager is guaranteed to be
        # CoreEngineActorManager for RayDPClient
        assert isinstance(self.resources.engine_manager,
                          CoreEngineActorManager)
        self.resources.engine_manager.scale_up_elastic_ep(
            self.vllm_config, new_data_parallel_size)

        # Create new CoreEngine objects for the new engines
        new_engine_identities = set()
        for i in range(cur_data_parallel_size, new_data_parallel_size):
            # Engine identity is the 2-byte little-endian rank.
            new_engine = i.to_bytes(2, "little")
            self.core_engines.append(new_engine)
            new_engine_identities.add(new_engine)

        # Wait for ready messages from new engines on the input socket
        sync_input_socket = zmq.Socket.shadow(self.input_socket)
        while new_engine_identities:
            if not sync_input_socket.poll(timeout=600_000):
                raise TimeoutError(
                    "Timed out waiting for new engines to send initial "
                    "message on input socket.")
            identity, _ = sync_input_socket.recv_multipart()
            new_engine_identities.discard(identity)

        # Phase 3: Wait for all existing engines to complete reconfiguration
        logger.info("Waiting for existing engines to complete reconfiguration")
        await asyncio.gather(*reconfig_futures)

        # Notify coordinator about scale up through existing
        # stats_update_task connection
        self._ensure_stats_update_task()
        scale_up_marker = msgspec.msgpack.encode(
            ("SCALE_ELASTIC_EP", new_data_parallel_size))
        await self.first_req_send_socket.send(scale_up_marker)

        # Update the parallel config
        self.vllm_config.parallel_config.data_parallel_size = \
            new_data_parallel_size
        logger.info(
            "[Elastic EP] Scale up completed, new data parallel size: %s",
            new_data_parallel_size)

    async def _scale_down_elastic_ep(self, cur_data_parallel_size: int,
                                     new_data_parallel_size: int) -> None:
        """Scale down the data parallel size by shutting down and
        reconfiguring existing engine cores."""
        cur_data_parallel_size = len(self.core_engines)

        self.vllm_config.parallel_config.data_parallel_master_port = \
            get_open_port()

        reconfig_futures = []
        for cur_dp_rank, engine in enumerate(self.core_engines):
            reconfig_request = ReconfigureDistributedRequest(
                new_data_parallel_size=new_data_parallel_size,
                new_data_parallel_rank=ReconfigureRankType.KEEP_CURRENT_RANK,
                new_data_parallel_rank_local=\
                    ReconfigureRankType.KEEP_CURRENT_RANK,
                new_data_parallel_master_ip=self.vllm_config.parallel_config.
                data_parallel_master_ip,
                new_data_parallel_master_port=self.vllm_config.parallel_config.
                data_parallel_master_port)
            if cur_dp_rank >= new_data_parallel_size:
                # Ranks beyond the new size are told to shut down.
                reconfig_request.new_data_parallel_rank = \
                    ReconfigureRankType.SHUTDOWN_CURRENT_RANK
            future = await self._send_reconfig_message(reconfig_request,
                                                       engine)
            reconfig_futures.append(future)

        for _ in range(new_data_parallel_size, cur_data_parallel_size):
            self.core_engines.pop()

        await asyncio.gather(*reconfig_futures)

        assert isinstance(self.resources.engine_manager,
                          CoreEngineActorManager)
        self.resources.engine_manager.scale_down_elastic_ep(
            cur_data_parallel_size, new_data_parallel_size)

        self._ensure_stats_update_task()
        scale_down_marker = msgspec.msgpack.encode(
            ("SCALE_ELASTIC_EP", new_data_parallel_size))
        await self.first_req_send_socket.send(scale_down_marker)

        self.vllm_config.parallel_config.data_parallel_size = \
            new_data_parallel_size
        logger.info(
            "[Elastic EP] Scale down completed, new data parallel size: %s",
            new_data_parallel_size)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/detokenizer.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from abc import ABC, abstractmethod
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
import tokenizers
|
| 7 |
+
from packaging import version
|
| 8 |
+
from tokenizers import Tokenizer
|
| 9 |
+
from tokenizers.decoders import DecodeStream
|
| 10 |
+
from transformers import PreTrainedTokenizerFast
|
| 11 |
+
|
| 12 |
+
from vllm.engine.output_processor.stop_checker import StopChecker
|
| 13 |
+
from vllm.logger import init_logger
|
| 14 |
+
from vllm.transformers_utils.detokenizer_utils import (
|
| 15 |
+
AnyTokenizer, convert_prompt_ids_to_tokens, detokenize_incrementally)
|
| 16 |
+
from vllm.v1.engine import EngineCoreRequest
|
| 17 |
+
|
| 18 |
+
logger = init_logger(__name__)
|
| 19 |
+
|
| 20 |
+
# Only tokenizers >= 0.21.1 supports DecodeStream used for
|
| 21 |
+
# FastIncrementalDetokenizer.
|
| 22 |
+
USE_FAST_DETOKENIZER = version.parse(
|
| 23 |
+
tokenizers.__version__) >= version.parse("0.21.1")
|
| 24 |
+
|
| 25 |
+
# Error string from https://github.com/huggingface/tokenizers/blob/909fdde2a4ffedd9295206f705eb612be2a91b12/tokenizers/src/tokenizer/mod.rs#L1042
|
| 26 |
+
INVALID_PREFIX_ERR_MSG = "Invalid prefix encountered"
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class IncrementalDetokenizer:
    """No-op base detokenizer: accumulates token ids but produces no text.

    Used directly when no tokenizer is available; subclasses implement
    actual incremental detokenization.
    """

    def __init__(self):
        # All output token ids seen so far.
        self.token_ids: list[int] = []

    @property
    def output_token_ids(self) -> list[int]:
        """All output token ids accumulated so far."""
        return self.token_ids

    def update(self, new_token_ids: list[int],
               stop_terminated: bool) -> Optional[str]:
        """Record new token ids. Base class never matches a stop string,
        so this always returns None."""
        self.token_ids += new_token_ids
        return None

    def get_next_output_text(self, finished: bool, delta: bool) -> str:
        """Base class produces no text."""
        return ""

    @classmethod
    def from_new_request(
        cls,
        tokenizer: Optional[AnyTokenizer],
        request: EngineCoreRequest,
    ) -> "IncrementalDetokenizer":
        """Build the appropriate detokenizer implementation for `request`."""

        assert request.sampling_params is not None

        if tokenizer is None:
            # No tokenizer => skipping detokenization.
            return IncrementalDetokenizer()

        fast_ok = (USE_FAST_DETOKENIZER
                   and isinstance(tokenizer, PreTrainedTokenizerFast))
        if fast_ok:
            # Fast tokenizer => use tokenizers library DecodeStream.
            return FastIncrementalDetokenizer(tokenizer, request)

        # Fall back to slow python-based incremental detokenization.
        return SlowIncrementalDetokenizer(tokenizer, request)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class BaseIncrementalDetokenizer(IncrementalDetokenizer, ABC):
    """Shared streaming / stop-string logic for incremental detokenizers.

    Subclasses supply `decode_next()` to turn one token id into its decoded
    text fragment; this base class accumulates `output_text`, honors
    `min_tokens`, evaluates stop strings, and buffers the tail of the text
    so excluded stop strings are never streamed to the client.
    """

    def __init__(self, request: EngineCoreRequest):
        super().__init__()

        # Stop strings
        params = request.sampling_params
        assert params is not None
        self.stop = stop = params.stop
        self.min_tokens = params.min_tokens
        self.include_stop_str_in_output = params.include_stop_str_in_output

        # Number of chars to hold back when stop strings are to be excluded
        # from streamed output.
        # (longest stop string minus one: any longer tail could still be
        # the prefix of a not-yet-complete stop string.)
        if stop and not self.include_stop_str_in_output:
            self.stop_buffer_length = max(len(s) for s in stop) - 1
        else:
            self.stop_buffer_length = 0
        # Offset into output_text of what has already been streamed
        # (only used by get_next_output_text in delta mode).
        self._last_output_text_offset: int = 0

        # Generation data
        self.output_text = ""

    def update(self, new_token_ids: list[int],
               stop_terminated: bool) -> Optional[str]:
        """
        Update RequestState for the request_id by:
            1) Detokenize the new token ids incrementally.
            2) Evaluate stop criteria.

        Return matched stop string or None.
        """
        if not new_token_ids:
            # Skip detokenization if no new token ids.
            return None

        if stop_terminated and not self.include_stop_str_in_output:
            # If stop-terminated, exclude last token from detokenization
            # based on include_stop_str_in_output parameter.
            skipped_stop_token_id = new_token_ids[-1]
            new_token_ids = new_token_ids[:-1]
        else:
            skipped_stop_token_id = None

        # 1) Detokenize the new token ids incrementally.
        # TODO(woosuk): This method becomes very inefficient when the number of
        # new_token_ids is more than 1. We need to optimize this.
        # Only text appended after stop_check_offset is eligible for
        # stop-string matching below.
        stop_check_offset = len(self.output_text)
        for new_token_id in new_token_ids:
            self.token_ids.append(new_token_id)
            self.output_text += self.decode_next(new_token_id)
            # Support min_tokens, see https://github.com/vllm-project/vllm/pull/22014
            # While still under min_tokens, keep advancing the offset so
            # text generated before the minimum is never stop-matched.
            if self.min_tokens and len(
                    self.output_token_ids) <= self.min_tokens:
                stop_check_offset = len(self.output_text)

        if stop_terminated:
            if skipped_stop_token_id is not None:
                # Cleanup after skipping detokenization.
                self.token_ids.append(skipped_stop_token_id)
            # Stop token triggered; skip stop string check.
            return None

        # 2) Evaluate stop strings.
        stop_string = None
        if self.stop and len(self.output_token_ids) > self.min_tokens:
            stop = StopChecker.check_stop_strings(
                output_text=self.output_text,
                new_char_count=len(self.output_text) - stop_check_offset,
                stop=self.stop,
                include_in_output=self.include_stop_str_in_output,
            )
            if stop is not None:
                # truncate_to == -1 means no truncation is required.
                stop_string, truncate_to = stop
                if truncate_to != -1:
                    self.output_text = self.output_text[:truncate_to]

        return stop_string

    @abstractmethod
    def decode_next(self, next_token_id: int) -> str:
        """Decode a single token id and return its text fragment."""
        raise NotImplementedError

    def get_next_output_text(self, finished: bool, delta: bool) -> str:
        """If delta is True, only new text since the last call to
        this method is returned"""

        # We return the full output text if the sequence is finished.
        # Otherwise hold back stop_buffer_length chars in case the tail is
        # a prefix of a stop string that should be excluded.
        buffer_length = 0 if finished else self.stop_buffer_length
        if not delta:
            return self.output_text[:-buffer_length] if buffer_length else (
                self.output_text)
        length = len(self.output_text) - buffer_length
        last_offset = self._last_output_text_offset
        if last_offset < length:
            self._last_output_text_offset = length
            return self.output_text[last_offset:length]
        return ""
|
| 167 |
+
|
| 168 |
+
class FastIncrementalDetokenizer(BaseIncrementalDetokenizer):
    """Incremental detokenizer backed by the tokenizers library's
    DecodeStream, used with `PreTrainedTokenizerFast` tokenizers."""

    def __init__(self, tokenizer: PreTrainedTokenizerFast,
                 request: EngineCoreRequest):
        super().__init__(request)

        sampling_params = request.sampling_params
        assert sampling_params is not None

        # Request id is only used in log messages on stream resets.
        self.request_id = request.request_id
        self.skip_special_tokens = sampling_params.skip_special_tokens
        self.stream = DecodeStream(
            skip_special_tokens=self.skip_special_tokens)

        # Use the underlying tokenizers.Tokenizer directly.
        # NOTE(review): reaches into the HF wrapper's private `_tokenizer`.
        self.tokenizer: Tokenizer = tokenizer._tokenizer

        # Find a safe place to start.
        # Search for the shortest prompt suffix (4..23 tokens) that decodes
        # without a U+FFFD replacement char, i.e. does not begin mid-way
        # through a multi-byte character.
        prompt_suffix = request.prompt_token_ids
        prompt_len = len(prompt_suffix)
        if prompt_len > 4:
            for i in range(4, min(prompt_len + 1, 24)):
                suffix = request.prompt_token_ids[-i:]
                if '�' not in self.tokenizer.decode(suffix):
                    prompt_suffix = suffix
                    break

        # Prime the stream.
        for tid in prompt_suffix:
            self._protected_step(tid)

        self.spaces_between_special_tokens = (
            sampling_params.skip_special_tokens
            or sampling_params.spaces_between_special_tokens)

        if not self.spaces_between_special_tokens:
            # Store dict of added token ids so that we can suppress
            # the spaces between them.
            # Cached on the tokenizer object so the dict is built once per
            # tokenizer rather than once per request.
            if (added_token_ids := getattr(self.tokenizer, "added_token_ids",
                                           None)) is None:
                self.tokenizer.added_token_ids = added_token_ids = {
                    tid: tok.content
                    for tid, tok in
                    self.tokenizer.get_added_tokens_decoder().items()
                }

            if added_token_ids:
                self.last_special = False
                self.added_token_ids = added_token_ids
            else:
                # No added tokens.
                self.spaces_between_special_tokens = True

    def decode_next(self, next_token_id: int) -> str:
        """Decode one token id; returns "" when no text is produced yet."""
        token = self._protected_step(next_token_id)

        if not self.spaces_between_special_tokens:
            special_token = self.added_token_ids.get(next_token_id)
            is_special = special_token is not None
            if is_special and self.last_special:
                # Return raw token string without any prefixed spaces.
                token = special_token
            self.last_special = is_special

        return token or ""

    def _protected_step(self, next_token_id: int) -> Optional[str]:
        """Step the DecodeStream, resetting it once and retrying on the
        known "invalid prefix" failure mode."""
        try:
            token = self.stream.step(self.tokenizer, next_token_id)
        except Exception as e:
            # Only the specific invalid-prefix error is recoverable.
            if str(e) != INVALID_PREFIX_ERR_MSG:
                raise e
            # Recover from edge case where tokenizer can produce non-monotonic,
            # invalid UTF-8 output, which breaks the internal state of
            # tokenizers' DecodeStream.
            # See https://github.com/vllm-project/vllm/issues/17448.
            logger.warning(
                "Encountered invalid prefix detokenization error"
                " for request %s, resetting decode stream.", self.request_id)
            self.stream = DecodeStream(self.skip_special_tokens)
            token = self.stream.step(self.tokenizer, next_token_id)
        return token
|
| 250 |
+
|
| 251 |
+
class SlowIncrementalDetokenizer(BaseIncrementalDetokenizer):
    """Pure-Python incremental detokenizer.

    Fallback used when the tokenizers-library DecodeStream path is not
    available; decodes one token at a time via
    ``detokenize_incrementally``.
    """

    def __init__(self, tokenizer: AnyTokenizer, request: EngineCoreRequest):
        super().__init__(request)

        params = request.sampling_params
        assert params is not None
        self.tokenizer = tokenizer

        prompt_ids = request.prompt_token_ids

        # Metadata for incremental detokenization: the prompt's token
        # strings plus the offsets from which decoding resumes.
        (self.tokens, self.prefix_offset,
         self.read_offset) = convert_prompt_ids_to_tokens(
             tokenizer=tokenizer,
             prompt_ids=prompt_ids,
             skip_special_tokens=params.skip_special_tokens,
         )

        self.token_ids.extend(prompt_ids)
        self.prompt_len = len(prompt_ids)

        self.skip_special_tokens = params.skip_special_tokens
        self.spaces_between_special_tokens = (
            params.spaces_between_special_tokens)

    @property
    def output_token_ids(self) -> list[int]:
        """Generated token ids only, i.e. excluding the prompt prefix."""
        if self.prompt_len:
            return self.token_ids[self.prompt_len:]
        return self.token_ids

    def decode_next(self, next_token_id: int) -> str:
        """Detokenize one token and return the newly decoded text."""
        (added_tokens, fragment, new_prefix_offset,
         new_read_offset) = detokenize_incrementally(
             tokenizer=self.tokenizer,
             all_input_ids=self.token_ids,
             prev_tokens=self.tokens,
             prefix_offset=self.prefix_offset,
             read_offset=self.read_offset,
             skip_special_tokens=self.skip_special_tokens,
             spaces_between_special_tokens=self.spaces_between_special_tokens,
         )

        # Advance the incremental-decoding state for the next call.
        self.tokens.extend(added_tokens)
        self.prefix_offset = new_prefix_offset
        self.read_offset = new_read_offset

        return fragment
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/engine/exceptions.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
class EngineGenerateError(Exception):
    """Raised when an AsyncLLM.generate() fails. Recoverable."""
|
| 7 |
+
|
| 8 |
+
class EngineDeadError(Exception):
    """Raised when the EngineCore dies. Unrecoverable."""

    # Prepended to every instance's message; hoisted to a class attribute
    # so the constant is not rebuilt on each construction.
    ENGINE_DEAD_MESSAGE = "EngineCore encountered an issue. See stack trace (above) for the root cause."  # noqa: E501

    def __init__(self, *args, suppress_context: bool = False, **kwargs):
        """Create the error.

        Args:
            suppress_context: when True, hide the implicit exception
                context (``raise ... from None`` semantics) in tracebacks.
        """
        super().__init__(self.ENGINE_DEAD_MESSAGE, *args, **kwargs)
        # Make stack trace clearer when using with LLMEngine by
        # silencing irrelevant ZMQError.
        self.__suppress_context__ = suppress_context