ADAPT-Chase committed on
Commit
16da1d3
·
verified ·
1 Parent(s): e925c2b

Add files using upload-large-folder tool

Browse files
Files changed (20) hide show
  1. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/__init__.cpython-312.pyc +0 -0
  2. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/cudagraph_dispatcher.cpython-312.pyc +0 -0
  3. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/kv_cache_interface.cpython-312.pyc +0 -0
  4. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/outputs.cpython-312.pyc +0 -0
  5. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/request.cpython-312.pyc +0 -0
  6. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/serial_utils.cpython-312.pyc +0 -0
  7. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/utils.cpython-312.pyc +0 -0
  8. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/cpu_worker.py +173 -0
  9. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/gpu_input_batch.py +796 -0
  10. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/gpu_model_runner.py +0 -0
  11. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/gpu_worker.py +632 -0
  12. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/kv_connector_model_runner_mixin.py +115 -0
  13. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/lora_model_runner_mixin.py +177 -0
  14. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/tpu_input_batch.py +585 -0
  15. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/tpu_model_runner.py +2033 -0
  16. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/tpu_worker.py +333 -0
  17. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/utils.py +300 -0
  18. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/worker_base.py +65 -0
  19. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/xpu_model_runner.py +33 -0
  20. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/xpu_worker.py +178 -0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (177 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/cudagraph_dispatcher.cpython-312.pyc ADDED
Binary file (6.01 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/kv_cache_interface.cpython-312.pyc ADDED
Binary file (11.3 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/outputs.cpython-312.pyc ADDED
Binary file (4.07 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/request.cpython-312.pyc ADDED
Binary file (11.2 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/serial_utils.cpython-312.pyc ADDED
Binary file (19 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/__pycache__/utils.cpython-312.pyc ADDED
Binary file (14.5 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/cpu_worker.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ import os
4
+ import platform
5
+ from typing import Callable, Optional
6
+
7
+ import torch
8
+
9
+ from vllm import envs
10
+ from vllm.config import VllmConfig
11
+ from vllm.distributed.parallel_state import get_pp_group, get_tp_group
12
+ from vllm.logger import init_logger
13
+ from vllm.model_executor.utils import set_random_seed
14
+ from vllm.platforms import CpuArchEnum, current_platform
15
+ from vllm.platforms.cpu import CpuPlatform, LogicalCPUInfo
16
+ from vllm.sequence import IntermediateTensors
17
+ from vllm.v1.core.sched.output import SchedulerOutput
18
+ from vllm.v1.outputs import ModelRunnerOutput
19
+ from vllm.v1.worker.cpu_model_runner import CPUModelRunner
20
+ from vllm.v1.worker.gpu_worker import (Worker,
21
+ init_worker_distributed_environment)
22
+
23
+ logger = init_logger(__name__)
24
+
25
+
26
class CPUWorker(Worker):
    """vLLM v1 worker that runs model execution on CPU.

    Reuses the GPU ``Worker`` control flow but:
      * binds OpenMP threads to NUMA-local CPU cores,
      * runs a ``CPUModelRunner`` on ``torch.device("cpu")``,
      * reports the configured CPU KV-cache space instead of profiling
        device memory,
      * ignores sleep/wake-up requests (not supported on CPU).
    """

    def __init__(self,
                 vllm_config: VllmConfig,
                 local_rank: int,
                 rank: int,
                 distributed_init_method: str,
                 is_driver_worker: bool = False):
        super().__init__(vllm_config,
                         local_rank,
                         rank,
                         distributed_init_method,
                         is_driver_worker=is_driver_worker)

        # Custom all-reduce is a CUDA-specific optimization; force the
        # generic path on CPU.
        self.parallel_config.disable_custom_all_reduce = True

    def init_device(self):
        """Bind OpenMP threads, initialize the distributed environment,
        and construct the model runner.

        NOTE: ordering matters here — thread affinity is applied before
        the distributed environment (and its shared-memory allreduce,
        keyed by ``VLLM_DIST_IDENT``) is initialized.
        """
        # Setup OpenMP threads affinity.
        omp_cpuids = envs.VLLM_CPU_OMP_THREADS_BIND
        if omp_cpuids == "auto" and platform.system() == "Linux":
            if current_platform.get_cpu_architecture() == CpuArchEnum.POWERPC:
                # For POWERPC SMT-8/4/2: keep the first 4 threads of each
                # 8-thread group.
                self.local_omp_cpuid = self._get_autobind_cpu_ids(
                    lambda cpus: [cpu for cpu in cpus if cpu.id % 8 < 4])
            elif current_platform.get_cpu_architecture() == CpuArchEnum.X86:
                # For x86 SMT-2, use 1 CPU per physical core.
                self.local_omp_cpuid = self._get_autobind_cpu_ids(
                    lambda cpus: cpus[-1:])
            else:
                self.local_omp_cpuid = "all"
        else:
            # Explicit per-rank binding list, split on "|" by rank.
            self.local_omp_cpuid = omp_cpuids.split("|")[self.rank]

        if self.local_omp_cpuid != "all":
            ret = torch.ops._C_utils.init_cpu_threads_env(self.local_omp_cpuid)
            if ret:
                logger.info(ret)

        # Note: unique identifier for creating allreduce shared memory
        os.environ["VLLM_DIST_IDENT"] = self.distributed_init_method.split(
            ":")[-1]
        # Initialize the distributed environment.
        init_worker_distributed_environment(self.vllm_config, self.rank,
                                            self.distributed_init_method,
                                            self.local_rank,
                                            current_platform.dist_backend)
        # Set random seed.
        set_random_seed(self.model_config.seed)

        # Construct the model runner
        self.model_runner: CPUModelRunner = CPUModelRunner(
            self.vllm_config, torch.device("cpu"))

    def sleep(self, level: int = 1) -> None:
        """No-op: sleep mode is not supported on CPU."""
        logger.warning("sleep mode is not supported on CPU, ignore it.")

    def wake_up(self, tags: Optional[list[str]] = None) -> None:
        """No-op: sleep mode is not supported on CPU."""
        logger.warning("sleep mode is not supported on CPU, ignore it.")

    def determine_available_memory(self) -> int:
        """Return the configured CPU KV-cache space in bytes.

        Unlike the GPU worker, no memory profiling is performed.
        """
        return self.cache_config.cpu_kvcache_space_bytes  # type: ignore

    def compile_or_warm_up_model(self) -> None:
        # Reset the seed to ensure that the random state is not affected by
        # the model initialization and profiling.
        set_random_seed(self.model_config.seed)
        self.model_runner.warming_up_model()

    @torch.inference_mode()
    def execute_model(
        self,
        scheduler_output: "SchedulerOutput",
    ) -> Optional[ModelRunnerOutput]:
        """Run one model step, handling pipeline-parallel send/recv.

        Non-first PP ranks first receive intermediate tensors from the
        previous stage; non-last ranks forward their intermediate output
        downstream and return ``None``. On the last rank, only the driver
        worker returns the final ``ModelRunnerOutput``.
        """
        intermediate_tensors = None
        if not get_pp_group().is_first_rank:
            intermediate_tensors = IntermediateTensors(
                get_pp_group().recv_tensor_dict(
                    all_gather_group=get_tp_group()))

        output = self.model_runner.execute_model(scheduler_output,
                                                 intermediate_tensors)

        if not get_pp_group().is_last_rank:
            assert isinstance(output, IntermediateTensors)
            get_pp_group().send_tensor_dict(output.tensors,
                                            all_gather_group=get_tp_group())
            return None

        assert isinstance(output, ModelRunnerOutput)
        return output if self.is_driver_worker else None

    def _get_autobind_cpu_ids(
        self, cpu_selector: Callable[[list[LogicalCPUInfo]],
                                     list[LogicalCPUInfo]]
    ) -> str:
        """
        Return CPU ids to bind based on NUMA nodes.
        Currently for rank N, only CPU ids on the N-th node in available NUMA
        node list will be selected.
        Args:
            cpu_selector: a callable object to select CPUs from a CPU list
            of a physical core. The input is a LogicalCPUInfo list, sorted by
            the LogicalCPUInfo.id. A selected LogicalCPUInfo list should be
            returned.
        """

        allowed_numa_nodes, logical_cpu_list = \
            CpuPlatform.get_allowed_cpu_memory_node_list()
        assert len(allowed_numa_nodes) >= self.parallel_config.world_size, (
            f"Not enough allowed NUMA nodes to bind threads of "
            f"{self.parallel_config.world_size} CPUWorkers. "
            f"Allowed NUMA nodes are {allowed_numa_nodes}. "
            "Please try to bind threads manually.")

        # Get CPUs on NUMA node `allowed_numa_nodes[local_rank]`
        selected_numa_node = allowed_numa_nodes[
            self.local_rank]  # type: ignore
        logical_cpu_list = [
            x for x in logical_cpu_list if x.numa_node == selected_numa_node
        ]

        # Select CPUs from each physical core via cpu_selector
        core_to_cpus: dict[int, list[LogicalCPUInfo]] = {}
        for cpu_info in logical_cpu_list:
            if cpu_info.physical_core not in core_to_cpus:
                core_to_cpus[cpu_info.physical_core] = []
            core_to_cpus[cpu_info.physical_core].append(cpu_info)
        logical_cpu_list = []
        for cpu_list in core_to_cpus.values():
            cpu_list = sorted(cpu_list, key=lambda x: x.id)
            logical_cpu_list.extend(cpu_selector(cpu_list))
        logical_cpu_list = sorted(logical_cpu_list, key=lambda x: x.id)

        # Reserve CPUs for other processes
        reserve_cpu_num = envs.VLLM_CPU_NUM_OF_RESERVED_CPU
        if reserve_cpu_num is None:
            reserve_cpu_num = 1 if self.parallel_config.world_size > 1 else 0
        assert len(logical_cpu_list) > reserve_cpu_num, (
            f"VLLM_CPU_NUM_OF_RESERVED_CPU ({reserve_cpu_num}) "
            f"should be less than {len(logical_cpu_list)}.")
        if reserve_cpu_num != 0:
            logical_cpu_list = logical_cpu_list[:-reserve_cpu_num]

        logger.info("auto thread-binding list (id, physical core): %s",
                    [(x.id, x.physical_core) for x in logical_cpu_list])
        return ",".join([str(x.id) for x in logical_cpu_list])
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/gpu_input_batch.py ADDED
@@ -0,0 +1,796 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ # Datastructures defining a GPU input batch
4
+
5
+ from dataclasses import dataclass
6
+ from typing import Optional, cast
7
+
8
+ import numpy as np
9
+ import torch
10
+ from typing_extensions import deprecated
11
+
12
+ from vllm.lora.request import LoRARequest
13
+ from vllm.multimodal.inputs import (MultiModalKwargs, MultiModalKwargsItem,
14
+ PlaceholderRange)
15
+ from vllm.pooling_params import PoolingParams
16
+ from vllm.sampling_params import SamplingParams, SamplingType
17
+ from vllm.utils import swap_dict_values
18
+ from vllm.v1.outputs import LogprobsTensors
19
+ from vllm.v1.pool.metadata import PoolingMetadata
20
+ from vllm.v1.sample.logits_processor import (BatchUpdateBuilder,
21
+ LogitsProcessors,
22
+ MoveDirectionality)
23
+ from vllm.v1.sample.metadata import SamplingMetadata
24
+ from vllm.v1.spec_decode.utils import is_spec_decode_unsupported
25
+ from vllm.v1.utils import copy_slice
26
+ from vllm.v1.worker.block_table import MultiGroupBlockTable
27
+
28
+
29
@dataclass
class CachedRequestState:
    """Per-request state cached by the model runner between scheduler steps.

    Holds the immutable prompt plus the mutable generation state (output
    tokens, computed-token count, KV-cache block ids) for one request.
    """

    req_id: str
    prompt_token_ids: list[int]
    mm_kwargs: list[MultiModalKwargsItem]
    mm_positions: list[PlaceholderRange]
    # Exactly one of sampling_params / pooling_params is expected to be set
    # (see InputBatch.add_request, which raises otherwise).
    sampling_params: Optional[SamplingParams]
    pooling_params: Optional[PoolingParams]
    # Per-request RNG; None when the request uses the shared generator.
    generator: Optional[torch.Generator]

    # One block-id list per KV-cache group.
    block_ids: tuple[list[int], ...]
    num_computed_tokens: int
    output_token_ids: list[int]

    # M-RoPE state (only used by models with multimodal rotary embeddings).
    mrope_positions: Optional[torch.Tensor] = None
    mrope_position_delta: Optional[int] = None

    lora_request: Optional[LoRARequest] = None

    def __post_init__(self):
        # Cache the prompt length; prompt_token_ids is not expected to
        # change after construction.
        self.num_prompt_tokens = len(self.prompt_token_ids)

    @property
    def num_tokens(self) -> int:
        """Total tokens for this request: prompt + generated output."""
        return self.num_prompt_tokens + len(self.output_token_ids)

    # Temporary back-compatibility for plugins that define model runner
    @property
    @deprecated("`mm_inputs` is superseded by `mm_kwargs` and will be "
                "removed in v0.13. Please use `mm_kwargs` instead.")
    def mm_inputs(self) -> list[MultiModalKwargs]:
        return [MultiModalKwargs([item]) for item in self.mm_kwargs]

    def get_token_id(self, idx: int) -> int:
        """Return the token id at absolute position `idx`.

        Positions below num_prompt_tokens index into the prompt; positions
        at or above it index into the generated output tokens.
        """
        if idx < self.num_prompt_tokens:
            return self.prompt_token_ids[idx]
        else:
            return self.output_token_ids[idx - self.num_prompt_tokens]
68
+
69
+
70
+ class InputBatch:
71
+
72
def __init__(
    self,
    max_num_reqs: int,
    max_model_len: int,
    max_num_batched_tokens: int,
    device: torch.device,
    pin_memory: bool,
    vocab_size: int,
    block_sizes: list[int],  # The block_size of each kv cache group
    logitsprocs: Optional[LogitsProcessors] = None,
    is_spec_decode: bool = False,
    is_pooling_model: bool = False,
):
    """Allocate the persistent batch state.

    Most per-request quantities are kept as a device tensor plus a pinned
    (when `pin_memory`) CPU mirror whose `.numpy()` view shares memory, so
    numpy writes on the CPU side are visible to the tensor copied to the
    device each step.

    Args:
        max_num_reqs: max concurrent requests in the batch (row count).
        max_model_len: max tokens per request (column count of token
            buffer).
        max_num_batched_tokens: max tokens scheduled in one step.
        device: target device for the sampling-parameter tensors.
        pin_memory: whether CPU-side tensors are allocated pinned.
        vocab_size: vocabulary size (used for masks and top-k clamping).
        block_sizes: block size of each kv cache group.
        logitsprocs: optional logits processors; empty container if None.
        is_spec_decode: whether speculative decoding is enabled.
        is_pooling_model: whether this batch serves a pooling model.
    """
    self.is_pooling_model = is_pooling_model
    self.is_spec_decode = is_spec_decode
    self.max_num_reqs = max_num_reqs
    self.max_model_len = max_model_len
    self.max_num_batched_tokens = max_num_batched_tokens
    self.device = device
    self.pin_memory = pin_memory
    self.vocab_size = vocab_size

    # Row i of the batch corresponds to _req_ids[i]; None marks a
    # transiently-empty slot (see req_ids property).
    self._req_ids: list[Optional[str]] = []
    self.req_id_to_index: dict[str, int] = {}

    # TODO(woosuk): This buffer could be too large if max_model_len is big.
    # Find a way to reduce the CPU memory usage.
    # This buffer is not directly transferred to the GPU, so it does not
    # need to be pinned.
    self.token_ids_cpu_tensor = torch.zeros(
        (max_num_reqs, max_model_len),
        device="cpu",
        dtype=torch.int32,
        pin_memory=False,
    )
    # numpy view sharing memory with token_ids_cpu_tensor.
    self.token_ids_cpu = self.token_ids_cpu_tensor.numpy()
    self.num_tokens = np.zeros(max_num_reqs, dtype=np.int32)
    self.num_tokens_no_spec = np.zeros(max_num_reqs, dtype=np.int32)
    self.num_prompt_tokens = np.zeros(max_num_reqs, dtype=np.int32)
    self.num_computed_tokens_cpu_tensor = torch.zeros(
        (max_num_reqs, ),
        device="cpu",
        dtype=torch.int32,
        pin_memory=pin_memory,
    )
    self.num_computed_tokens_cpu = \
        self.num_computed_tokens_cpu_tensor.numpy()

    # Block table.
    self.block_table = MultiGroupBlockTable(
        max_num_reqs=max_num_reqs,
        max_model_len=max_model_len,
        max_num_batched_tokens=max_num_batched_tokens,
        pin_memory=pin_memory,
        device=device,
        block_sizes=block_sizes,
    )

    # Sampling-related. Each parameter is a device tensor + CPU mirror +
    # a set of req_ids for which the parameter is "active" (non-default).
    self.temperature = torch.empty((max_num_reqs, ),
                                   dtype=torch.float32,
                                   device=device)
    self.temperature_cpu_tensor = torch.empty((max_num_reqs, ),
                                              dtype=torch.float32,
                                              device="cpu",
                                              pin_memory=pin_memory)
    self.temperature_cpu = self.temperature_cpu_tensor.numpy()
    self.greedy_reqs: set[str] = set()
    self.random_reqs: set[str] = set()

    self.top_p = torch.empty((max_num_reqs, ),
                             dtype=torch.float32,
                             device=device)
    self.top_p_cpu_tensor = torch.empty((max_num_reqs, ),
                                        dtype=torch.float32,
                                        device="cpu",
                                        pin_memory=pin_memory)
    self.top_p_cpu = self.top_p_cpu_tensor.numpy()
    self.top_p_reqs: set[str] = set()

    self.top_k = torch.empty((max_num_reqs, ),
                             dtype=torch.int32,
                             device=device)
    self.top_k_cpu_tensor = torch.empty((max_num_reqs, ),
                                        dtype=torch.int32,
                                        device="cpu",
                                        pin_memory=pin_memory)
    self.top_k_cpu = self.top_k_cpu_tensor.numpy()
    self.top_k_reqs: set[str] = set()

    # IDs of requests which do not support spec decoding
    self.spec_decode_unsupported_reqs: set[str] = set()

    # Frequency penalty related data structures
    self.frequency_penalties = torch.empty((max_num_reqs, ),
                                           dtype=torch.float,
                                           device=device)
    self.frequency_penalties_cpu_tensor = torch.empty(
        (max_num_reqs, ),
        dtype=torch.float,
        device="cpu",
        pin_memory=pin_memory)
    self.frequency_penalties_cpu = \
        self.frequency_penalties_cpu_tensor.numpy()
    self.frequency_penalties_reqs: set[str] = set()

    # Presence penalty related data structures
    self.presence_penalties = torch.empty((max_num_reqs, ),
                                          dtype=torch.float,
                                          device=device)
    self.presence_penalties_cpu_tensor = torch.empty((max_num_reqs, ),
                                                     dtype=torch.float,
                                                     device="cpu",
                                                     pin_memory=pin_memory)
    self.presence_penalties_cpu = self.presence_penalties_cpu_tensor.numpy(
    )
    self.presence_penalties_reqs: set[str] = set()

    # Repetition penalty related data structures
    self.repetition_penalties = torch.empty((max_num_reqs, ),
                                            dtype=torch.float,
                                            device=device)
    self.repetition_penalties_cpu_tensor = torch.empty(
        (max_num_reqs, ),
        dtype=torch.float,
        device="cpu",
        pin_memory=pin_memory)
    self.repetition_penalties_cpu = \
        self.repetition_penalties_cpu_tensor.numpy()
    self.repetition_penalties_reqs: set[str] = set()

    # lora related; mapping value 0 means "no LoRA".
    self.request_lora_mapping = np.zeros((self.max_num_reqs, ),
                                         dtype=np.int32)
    self.lora_id_to_request_ids: dict[int, set[str]] = {}
    self.lora_id_to_lora_request: dict[int, LoRARequest] = {}

    # req_index -> generator
    # NOTE(woosuk): The indices of the requests that do not have their own
    # generator should not be included in the dictionary.
    self.generators: dict[int, torch.Generator] = {}

    self.num_logprobs: dict[str, int] = {}
    # NOTE(rob): num_prompt_logprobs only includes reqs
    # that are currently in the prefill phase.
    self.num_prompt_logprobs: dict[str, int] = {}

    # To accumulate prompt logprobs tensor chunks across prefill steps.
    self.in_progress_prompt_logprobs_cpu: dict[str, LogprobsTensors] = {}

    # Internal representation of per-step batch state changes, used for
    # reordering persistent batch and generating logitsprocs batch state
    # updates. Should reset each step.
    self.batch_update_builder = BatchUpdateBuilder()

    # TODO convert this to LogitsProcessor
    self.has_allowed_token_ids: set[str] = set()
    # NOTE(lufang): In the mask tensor, if the corresponding token allowed,
    # the value is False. Since we use masked_fill_ to set -inf.
    self.allowed_token_ids_mask: Optional[torch.Tensor] = None
    self.allowed_token_ids_mask_cpu_tensor: Optional[torch.Tensor] = None

    # req_index -> bad_words_token_ids
    self.bad_words_token_ids: dict[int, list[list[int]]] = {}

    self.logits_processing_needs_token_ids = np.zeros(max_num_reqs,
                                                      dtype=bool)

    self.req_output_token_ids: list[Optional[list[int]]] = []

    # Store provided logitsprocs. If none are provided, initialize empty
    # data structure
    self.logitsprocs = logitsprocs or LogitsProcessors()

    # This is updated each time the batch constituents change.
    self.sampling_metadata = self._make_sampling_metadata()

    self.pooling_params: dict[str, PoolingParams] = {}
250
+
251
@property
def req_ids(self) -> list[str]:
    """Request ids by batch index, assumed fully populated.

    The underlying list may hold None entries mid-update; this cast is
    only valid once the batch state update has completed.
    """
    # None elements should only be present transiently
    # while performing state updates to the batch.
    return cast(list[str], self._req_ids)
256
+
257
def _register_add_request(self, request: "CachedRequestState") -> int:
    """Record an add-request operation for the logits processors.

    Not applicable to pooling models, which do not use logitsprocs.

    Returns:
        The batch index assigned to the new request.
    """
    # Only sampling requests carry the metadata logitsprocs need.
    assert request.sampling_params

    # Reuse a freed slot when one is available; otherwise grow the batch
    # by appending at the end.
    reused_index = self.batch_update_builder.pop_removed()
    target_index = self.num_reqs if reused_index is None else reused_index

    assert target_index < self.max_num_reqs
    self.batch_update_builder.added.append(
        (target_index, request.sampling_params, request.prompt_token_ids,
         request.output_token_ids))
    return target_index
276
+
277
def add_request(
    self,
    request: "CachedRequestState",
) -> int:
    """Insert a request into the persistent batch and return its index.

    Copies the request's tokens and per-request sampling/pooling/LoRA
    state into the batch's row `req_index`. Exactly one of
    `request.sampling_params` / `request.pooling_params` must be set;
    otherwise NotImplementedError is raised.
    """
    if not self.is_pooling_model:
        # New request index bookkeeping for autoregressive models.
        req_index = self._register_add_request(request)
    else:
        # Pooling batches stay contiguous: always append at the end.
        req_index = self.num_reqs

    req_id = request.req_id
    if req_index == len(self._req_ids):
        # Appending a new row.
        self._req_ids.append(req_id)
        self.req_output_token_ids.append(request.output_token_ids)
    else:
        # Filling a previously-vacated row.
        self._req_ids[req_index] = req_id
        self.req_output_token_ids[req_index] = request.output_token_ids

    self.req_id_to_index[req_id] = req_index

    # Copy the prompt token ids and output token ids.
    num_prompt_tokens = len(request.prompt_token_ids)
    self.num_prompt_tokens[req_index] = num_prompt_tokens
    self.token_ids_cpu[
        req_index, :num_prompt_tokens] = request.prompt_token_ids
    start_idx = num_prompt_tokens
    end_idx = start_idx + len(request.output_token_ids)
    self.token_ids_cpu[req_index,
                       start_idx:end_idx] = request.output_token_ids
    # Number of token ids in token_ids_cpu.
    # NOTE(woosuk): This may include spec decode tokens.
    self.num_tokens[req_index] = request.num_tokens
    # Number of tokens without spec decode tokens.
    self.num_tokens_no_spec[req_index] = request.num_tokens

    self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens
    self.block_table.add_row(request.block_ids, req_index)

    if sampling_params := request.sampling_params:
        if (self.is_spec_decode
                and is_spec_decode_unsupported(sampling_params)):
            self.spec_decode_unsupported_reqs.add(req_id)
        if sampling_params.sampling_type == SamplingType.GREEDY:
            # Avoid later division by zero.
            self.temperature_cpu[req_index] = -1.0
            self.greedy_reqs.add(req_id)
        else:
            self.temperature_cpu[req_index] = sampling_params.temperature
            self.random_reqs.add(req_id)

        self.top_p_cpu[req_index] = sampling_params.top_p
        if sampling_params.top_p < 1:
            self.top_p_reqs.add(req_id)
        top_k = sampling_params.top_k
        if 0 < top_k < self.vocab_size:
            self.top_k_reqs.add(req_id)
        else:
            # Out-of-range top_k is treated as disabled (full vocab).
            top_k = self.vocab_size
        self.top_k_cpu[req_index] = top_k
        self.frequency_penalties_cpu[
            req_index] = sampling_params.frequency_penalty
        if sampling_params.frequency_penalty != 0.0:
            self.frequency_penalties_reqs.add(req_id)
        self.presence_penalties_cpu[
            req_index] = sampling_params.presence_penalty
        if sampling_params.presence_penalty != 0.0:
            self.presence_penalties_reqs.add(req_id)
        self.repetition_penalties_cpu[
            req_index] = sampling_params.repetition_penalty
        if sampling_params.repetition_penalty != 1.0:
            self.repetition_penalties_reqs.add(req_id)

        # NOTE(woosuk): self.generators should not include the requests that
        # do not have their own generator.
        if request.generator is not None:
            self.generators[req_index] = request.generator

        if sampling_params.logprobs is not None:
            # logprobs == -1 means "all vocab logprobs".
            self.num_logprobs[req_id] = (self.vocab_size
                                         if sampling_params.logprobs == -1
                                         else sampling_params.logprobs)
        if sampling_params.prompt_logprobs is not None:
            self.num_prompt_logprobs[
                req_id] = sampling_params.prompt_logprobs

        if sampling_params.allowed_token_ids:
            self.has_allowed_token_ids.add(req_id)
            if self.allowed_token_ids_mask_cpu_tensor is None:
                # Lazy allocation for this tensor, which can be large.
                # False means we don't fill with -inf.
                self.allowed_token_ids_mask = torch.zeros(
                    self.max_num_reqs,
                    self.vocab_size,
                    dtype=torch.bool,
                    device=self.device)
                self.allowed_token_ids_mask_cpu_tensor = torch.zeros(
                    self.max_num_reqs,
                    self.vocab_size,
                    dtype=torch.bool,
                    device="cpu")
            self.allowed_token_ids_mask_cpu_tensor[req_index] = True
            # False means we don't fill with -inf.
            self.allowed_token_ids_mask_cpu_tensor[req_index][
                sampling_params.allowed_token_ids] = False

        if sampling_params.bad_words_token_ids:
            self.bad_words_token_ids[
                req_index] = sampling_params.bad_words_token_ids
    elif pooling_params := request.pooling_params:
        self.pooling_params[req_id] = pooling_params
        self.logits_processing_needs_token_ids[req_index] = (
            pooling_params.requires_token_ids)
    else:
        raise NotImplementedError(request)

    # Add request lora ID
    if request.lora_request:
        lora_id = request.lora_request.lora_int_id
        if lora_id not in self.lora_id_to_request_ids:
            self.lora_id_to_request_ids[lora_id] = set()

        self.request_lora_mapping[req_index] = lora_id
        self.lora_id_to_request_ids[lora_id].add(request.req_id)
        self.lora_id_to_lora_request[lora_id] = request.lora_request
    else:
        # No LoRA
        self.request_lora_mapping[req_index] = 0

    return req_index
406
+
407
def remove_request(self, req_id: str) -> Optional[int]:
    """This method must always be followed by a call to condense().

    Clears every per-request structure (sampling sets, generators,
    logprobs, LoRA mappings, token-id masks) for the removed row; the
    row itself is only compacted later by condense().

    Args:
        req_id: request to remove

    Returns:
        Removed request index, or `None` if `req_id` not recognized
    """

    req_index = self.req_id_to_index.pop(req_id, None)
    if req_index is None:
        # Unknown request id: nothing to do.
        return None
    if not self.is_pooling_model:
        # Autoregressive models require bookkeeping of removed requests to
        # support logitsprocs.
        self.batch_update_builder.removed_append(req_index)
    # Mark the row as vacant (None entries are transient; see req_ids).
    self._req_ids[req_index] = None
    self.req_output_token_ids[req_index] = None

    # discard() rather than remove(): the request is only present in the
    # sets matching its sampling configuration.
    self.greedy_reqs.discard(req_id)
    self.random_reqs.discard(req_id)
    self.top_p_reqs.discard(req_id)
    self.top_k_reqs.discard(req_id)
    self.spec_decode_unsupported_reqs.discard(req_id)
    self.frequency_penalties_reqs.discard(req_id)
    self.presence_penalties_reqs.discard(req_id)
    self.repetition_penalties_reqs.discard(req_id)
    self.generators.pop(req_index, None)
    self.num_logprobs.pop(req_id, None)
    self.num_prompt_logprobs.pop(req_id, None)
    self.in_progress_prompt_logprobs_cpu.pop(req_id, None)

    # LoRA: drop the reverse mappings, and the LoRA itself once no
    # request references it.
    lora_id = self.request_lora_mapping[req_index]
    if lora_id != 0:
        self.lora_id_to_request_ids[lora_id].discard(req_id)
        if len(self.lora_id_to_request_ids[lora_id]) == 0:
            self.lora_id_to_request_ids.pop(lora_id)
            self.lora_id_to_lora_request.pop(lora_id)
        self.request_lora_mapping[req_index] = 0

    self.has_allowed_token_ids.discard(req_id)
    if self.allowed_token_ids_mask_cpu_tensor is not None:
        # False means we don't fill with -inf.
        self.allowed_token_ids_mask_cpu_tensor[req_index].fill_(False)
    self.bad_words_token_ids.pop(req_index, None)
    self.pooling_params.pop(req_id, None)
    return req_index
456
+
457
def swap_states(self, i1: int, i2: int) -> None:
    """Swap every per-request structure between batch rows i1 and i2.

    Both rows must currently hold active requests (asserted below).
    Every parallel array/tensor/dict keyed by batch index must be
    swapped in lockstep, so keep this list in sync with __init__.
    """
    # For autoregressive models, track detailed request reordering info
    # to support logitsprocs
    self.batch_update_builder.moved.append(
        (i1, i2, MoveDirectionality.SWAP))
    old_id_i1 = self._req_ids[i1]
    old_id_i2 = self._req_ids[i2]
    self._req_ids[i1], self._req_ids[i2] =\
        self._req_ids[i2], self._req_ids[i1] # noqa
    self.req_output_token_ids[i1], self.req_output_token_ids[i2] =\
        self.req_output_token_ids[i2], self.req_output_token_ids[i1]
    assert old_id_i1 is not None and old_id_i2 is not None
    self.req_id_to_index[old_id_i1], self.req_id_to_index[old_id_i2] =\
        self.req_id_to_index[old_id_i2], self.req_id_to_index[old_id_i1]
    self.num_tokens[i1], self.num_tokens[i2] =\
        self.num_tokens[i2], self.num_tokens[i1]
    self.num_tokens_no_spec[i1], self.num_tokens_no_spec[i2] =\
        self.num_tokens_no_spec[i2], self.num_tokens_no_spec[i1]
    self.num_prompt_tokens[i1], self.num_prompt_tokens[i2] =\
        self.num_prompt_tokens[i2], self.num_prompt_tokens[i1]
    self.num_computed_tokens_cpu[i1], self.num_computed_tokens_cpu[i2] =\
        self.num_computed_tokens_cpu[i2], self.num_computed_tokens_cpu[i1]
    self.temperature_cpu[i1], self.temperature_cpu[i2] =\
        self.temperature_cpu[i2], self.temperature_cpu[i1]
    self.top_p_cpu[i1], self.top_p_cpu[i2] =\
        self.top_p_cpu[i2], self.top_p_cpu[i1]
    self.top_k_cpu[i1], self.top_k_cpu[i2] =\
        self.top_k_cpu[i2], self.top_k_cpu[i1]
    self.frequency_penalties_cpu[i1], self.frequency_penalties_cpu[i2] =\
        self.frequency_penalties_cpu[i2], self.frequency_penalties_cpu[i1]
    self.presence_penalties_cpu[i1], self.presence_penalties_cpu[i2] =\
        self.presence_penalties_cpu[i2], self.presence_penalties_cpu[i1]
    self.repetition_penalties_cpu[i1], self.repetition_penalties_cpu[i2] =\
        self.repetition_penalties_cpu[i2], self.repetition_penalties_cpu[i1]

    # NOTE: the following is unsafe
    # self.token_ids_cpu[i1, ...], self.token_ids_cpu[i2, ...], =\
    #     self.token_ids_cpu[i2, ...], self.token_ids_cpu[i1, ...]
    # instead, we need to temporiarily copy the data for one of the indices
    # TODO(lucas): optimize this by only copying valid indices
    tmp = self.token_ids_cpu[i1, ...].copy()
    self.token_ids_cpu[i1, ...] = self.token_ids_cpu[i2, ...]
    self.token_ids_cpu[i2, ...] = tmp

    swap_dict_values(self.generators, i1, i2)
    swap_dict_values(self.bad_words_token_ids, i1, i2)

    self.request_lora_mapping[i1], self.request_lora_mapping[i2] =\
        self.request_lora_mapping[i2], self.request_lora_mapping[i1]

    if self.allowed_token_ids_mask_cpu_tensor is not None:
        self.allowed_token_ids_mask_cpu_tensor[i1], \
            self.allowed_token_ids_mask_cpu_tensor[i2] =\
            self.allowed_token_ids_mask_cpu_tensor[i2], \
            self.allowed_token_ids_mask_cpu_tensor[i1]
    self.block_table.swap_row(i1, i2)
513
+
514
    def condense(self) -> None:
        """Slide non-empty requests down into lower, empty indices.

        Fills the removed (empty) slots recorded in
        ``self.batch_update_builder.removed`` by moving the highest-indexed
        active requests down, so the batch occupies indices ``[0, num_reqs)``
        contiguously.  Any consecutive empty indices at the very end of the
        list are not filled; they are trimmed instead.
        """
        num_reqs = self.num_reqs

        if self.is_pooling_model:
            # Will be contiguous in pooling case, just trim the lists.
            del self._req_ids[num_reqs:]
            del self.req_output_token_ids[num_reqs:]
            return

        if not (empty_req_indices := self.batch_update_builder.removed):
            # All removed requests were replaced by added requests, or else no
            # requests were removed at all. No condense() needed
            return
        if num_reqs == 0:
            # The batched states are empty.
            self._req_ids.clear()
            self.req_output_token_ids.clear()
            return

        # NOTE(woosuk): This function assumes that the empty_req_indices
        # is sorted in descending order.
        last_req_index = num_reqs + len(empty_req_indices) - 1
        while empty_req_indices:
            # Find the largest non-empty index.
            while last_req_index in empty_req_indices:
                last_req_index -= 1

            # Find the smallest empty index.
            empty_index = self.batch_update_builder.peek_removed()
            assert empty_index is not None
            # Once the smallest empty slot is at/above the largest occupied
            # slot, everything below it is already contiguous.
            if empty_index >= last_req_index:
                break

            # Move active request down into empty request
            # index.
            self.batch_update_builder.pop_removed()
            # Autoregressive models require detailed tracking of condense
            # operations to support logitsprocs
            self.batch_update_builder.moved.append(
                (last_req_index, empty_index,
                 MoveDirectionality.UNIDIRECTIONAL))
            req_id = self._req_ids[last_req_index]
            output_token_ids = self.req_output_token_ids[last_req_index]
            assert req_id is not None
            self._req_ids[empty_index] = req_id
            self._req_ids[last_req_index] = None
            self.req_output_token_ids[empty_index] = output_token_ids
            self.req_output_token_ids[last_req_index] = None
            self.req_id_to_index[req_id] = empty_index

            # Only the valid prefix of the token buffer needs to be copied.
            num_tokens = self.num_tokens[last_req_index]
            self.token_ids_cpu[empty_index, :num_tokens] = self.token_ids_cpu[
                last_req_index, :num_tokens]
            self.num_tokens[empty_index] = num_tokens
            self.num_tokens_no_spec[empty_index] = self.num_tokens_no_spec[
                last_req_index]
            self.num_prompt_tokens[empty_index] = self.num_prompt_tokens[
                last_req_index]
            self.num_computed_tokens_cpu[
                empty_index] = self.num_computed_tokens_cpu[last_req_index]
            self.block_table.move_row(last_req_index, empty_index)
            self.temperature_cpu[empty_index] = self.temperature_cpu[
                last_req_index]
            self.top_p_cpu[empty_index] = self.top_p_cpu[last_req_index]
            self.top_k_cpu[empty_index] = self.top_k_cpu[last_req_index]
            self.frequency_penalties_cpu[
                empty_index] = self.frequency_penalties_cpu[last_req_index]
            self.presence_penalties_cpu[
                empty_index] = self.presence_penalties_cpu[last_req_index]
            self.repetition_penalties_cpu[
                empty_index] = self.repetition_penalties_cpu[last_req_index]
            generator = self.generators.pop(last_req_index, None)
            if generator is not None:
                self.generators[empty_index] = generator

            self.request_lora_mapping[empty_index] = self.request_lora_mapping[
                last_req_index]

            # TODO convert these to LogitsProcessors
            if self.allowed_token_ids_mask_cpu_tensor is not None:
                self.allowed_token_ids_mask_cpu_tensor[
                    empty_index] = self.allowed_token_ids_mask_cpu_tensor[
                        last_req_index]

            bad_words_token_ids = self.bad_words_token_ids.pop(
                last_req_index, None)
            if bad_words_token_ids is not None:
                self.bad_words_token_ids[empty_index] = bad_words_token_ids

            # Decrement last_req_index since it is now empty.
            last_req_index -= 1

        # Trim lists to the batch size.
        del self._req_ids[num_reqs:]
        del self.req_output_token_ids[num_reqs:]
621
+
622
+ def refresh_metadata(self):
623
+ """Apply any batch updates to sampling metadata."""
624
+
625
+ if self.is_pooling_model:
626
+ # Batch changes every step for pooling models.
627
+ self.sampling_metadata = self._make_sampling_metadata()
628
+ return
629
+
630
+ # For non-pooling models - generate and apply logitsprocs update;
631
+ # reset batch update tracking.
632
+ # Update sampling metadata if batch state is changed.
633
+ batch_update = self.batch_update_builder.get_and_reset(self.num_reqs)
634
+ for logit_proc in self.logitsprocs.all:
635
+ logit_proc.update_state(batch_update)
636
+ if batch_update:
637
+ self.sampling_metadata = self._make_sampling_metadata()
638
+
639
    def _make_sampling_metadata(self) -> SamplingMetadata:
        """Assemble a SamplingMetadata view over the first num_reqs rows.

        CPU-side parameter tensors are copied to the device tensors only when
        at least one request actually needs them (temperature, top-p/top-k,
        penalties, allowed-token masks), avoiding unnecessary host-to-device
        copies.
        """
        num_reqs = self.num_reqs
        if not self.all_greedy:
            temperature = copy_slice(self.temperature_cpu_tensor,
                                     self.temperature, num_reqs)
        else:
            # All-greedy batches never read the temperature tensor.
            temperature = None
        if not self.no_top_p:
            copy_slice(self.top_p_cpu_tensor, self.top_p, num_reqs)
        if not self.no_top_k:
            copy_slice(self.top_k_cpu_tensor, self.top_k, num_reqs)

        if not self.no_penalties:
            # Since syncing these tensors is expensive only copy them
            # if necessary i.e. if there are requests which require
            # penalties to be applied during sampling.
            copy_slice(self.frequency_penalties_cpu_tensor,
                       self.frequency_penalties, num_reqs)
            copy_slice(self.presence_penalties_cpu_tensor,
                       self.presence_penalties, num_reqs)
            copy_slice(self.repetition_penalties_cpu_tensor,
                       self.repetition_penalties, num_reqs)

        needs_prompt_token_ids = (
            not self.no_penalties
            or self.logits_processing_needs_token_ids[:num_reqs].any())
        if needs_prompt_token_ids:
            # The prompt tokens are used only for applying penalties or
            # step pooling during the sampling/pooling process.
            # Hence copy these tensors only when there are requests which
            # need penalties/step_pooler to be applied.
            prompt_token_ids = self._make_prompt_token_ids_tensor()
        else:
            prompt_token_ids = None

        allowed_token_ids_mask: Optional[torch.Tensor] = None
        if not self.no_allowed_token_ids:
            assert self.allowed_token_ids_mask is not None
            copy_slice(self.allowed_token_ids_mask_cpu_tensor,
                       self.allowed_token_ids_mask, num_reqs)
            allowed_token_ids_mask = self.allowed_token_ids_mask[:num_reqs]

        return SamplingMetadata(
            temperature=temperature,
            all_greedy=self.all_greedy,
            all_random=self.all_random,
            top_p=None if self.no_top_p else self.top_p[:num_reqs],
            top_k=None if self.no_top_k else self.top_k[:num_reqs],
            generators=self.generators,
            max_num_logprobs=self.max_num_logprobs,
            prompt_token_ids=prompt_token_ids,
            frequency_penalties=self.frequency_penalties[:num_reqs],
            presence_penalties=self.presence_penalties[:num_reqs],
            repetition_penalties=self.repetition_penalties[:num_reqs],
            output_token_ids=cast(list[list[int]], self.req_output_token_ids),
            no_penalties=self.no_penalties,
            allowed_token_ids_mask=allowed_token_ids_mask,
            bad_words_token_ids=self.bad_words_token_ids,
            logitsprocs=self.logitsprocs,
        )
699
+
700
+ @property
701
+ def pooling_metadata(self) -> PoolingMetadata:
702
+ if len(self.pooling_params) == 0:
703
+ pooling_params = []
704
+ else:
705
+ # Note, for now this assumes that all request in the batch
706
+ # are either sampling or pooling requests
707
+ assert len(self.req_ids) == len(self.pooling_params)
708
+ pooling_params = [
709
+ self.pooling_params[req_id] for req_id in self.req_ids
710
+ ]
711
+
712
+ return PoolingMetadata(
713
+ prompt_lens=torch.from_numpy(
714
+ self.num_prompt_tokens[:self.num_reqs]).to(self.device),
715
+ prompt_token_ids=self.sampling_metadata.prompt_token_ids,
716
+ pooling_params=pooling_params,
717
+ )
718
+
719
+ def _make_prompt_token_ids_tensor(self) -> torch.Tensor:
720
+ max_prompt_len = self.num_prompt_tokens[:self.num_reqs].max()
721
+ prompt_token_ids_cpu_tensor = torch.empty(
722
+ (self.num_reqs, max_prompt_len),
723
+ device="cpu",
724
+ dtype=torch.int64,
725
+ pin_memory=self.pin_memory,
726
+ )
727
+ prompt_token_ids = prompt_token_ids_cpu_tensor.numpy()
728
+ prompt_token_ids[:] = self.token_ids_cpu[:self.
729
+ num_reqs, :max_prompt_len]
730
+ # Use the value of vocab_size as a pad since we don't have a
731
+ # token_id of this value.
732
+ for i in range(self.num_reqs):
733
+ prompt_token_ids[i, self.num_prompt_tokens[i]:] = self.vocab_size
734
+ return prompt_token_ids_cpu_tensor.to(device=self.device,
735
+ non_blocking=True)
736
+
737
+ def make_lora_inputs(
738
+ self, num_scheduled_tokens: np.ndarray
739
+ ) -> tuple[tuple[int, ...], tuple[int, ...], set[LoRARequest]]:
740
+ """
741
+ Given the num_scheduled_tokens for each request in the batch, return
742
+ datastructures used to activate the current LoRAs.
743
+ Returns:
744
+ 1. prompt_lora_mapping: A tuple of size self.num_reqs where,
745
+ prompt_lora_mapping[i] is the LoRA id to use for the ith prompt.
746
+ 2. token_lora_mapping: A tuple of size np.sum(num_scheduled_tokens)
747
+ where, token_lora_mapping[i] is the LoRA id to use for ith token.
748
+ 3. lora_requests: Set of relevant LoRA requests.
749
+ """
750
+
751
+ req_lora_mapping = self.request_lora_mapping[:self.num_reqs]
752
+ prompt_lora_mapping = tuple(req_lora_mapping)
753
+ token_lora_mapping = tuple(
754
+ req_lora_mapping.repeat(num_scheduled_tokens))
755
+ active_lora_requests: set[LoRARequest] = set(
756
+ self.lora_id_to_lora_request.values())
757
+
758
+ return prompt_lora_mapping, token_lora_mapping, active_lora_requests
759
+
760
    @property
    def num_reqs(self) -> int:
        """Number of requests currently tracked in the batch."""
        return len(self.req_id_to_index)
763
+
764
+ @property
765
+ def all_greedy(self) -> bool:
766
+ return len(self.random_reqs) == 0
767
+
768
+ @property
769
+ def all_random(self) -> bool:
770
+ return len(self.greedy_reqs) == 0
771
+
772
+ @property
773
+ def no_top_p(self) -> bool:
774
+ return len(self.top_p_reqs) == 0
775
+
776
+ @property
777
+ def no_top_k(self) -> bool:
778
+ return len(self.top_k_reqs) == 0
779
+
780
+ @property
781
+ def no_penalties(self) -> bool:
782
+ return (len(self.presence_penalties_reqs) == 0
783
+ and len(self.frequency_penalties_reqs) == 0
784
+ and len(self.repetition_penalties_reqs) == 0)
785
+
786
+ @property
787
+ def max_num_logprobs(self) -> Optional[int]:
788
+ return max(self.num_logprobs.values()) if self.num_logprobs else None
789
+
790
+ @property
791
+ def no_prompt_logprob(self) -> bool:
792
+ return not self.num_prompt_logprobs
793
+
794
+ @property
795
+ def no_allowed_token_ids(self) -> bool:
796
+ return len(self.has_allowed_token_ids) == 0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/gpu_model_runner.py ADDED
The diff for this file is too large to render. See raw diff
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/gpu_worker.py ADDED
@@ -0,0 +1,632 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """A GPU worker class."""
4
+ import copy
5
+ import gc
6
+ import os
7
+ from contextlib import AbstractContextManager, nullcontext
8
+ from typing import TYPE_CHECKING, Any, Optional
9
+
10
+ import torch
11
+ import torch.distributed
12
+ import torch.nn as nn
13
+
14
+ import vllm.envs as envs
15
+ from vllm.config import VllmConfig
16
+ from vllm.distributed import (ensure_model_parallel_initialized,
17
+ init_distributed_environment,
18
+ set_custom_all_reduce)
19
+ from vllm.distributed.kv_transfer import ensure_kv_transfer_initialized
20
+ from vllm.distributed.parallel_state import get_pp_group, get_tp_group
21
+ from vllm.logger import init_logger
22
+ from vllm.lora.request import LoRARequest
23
+ from vllm.model_executor import set_random_seed
24
+ from vllm.model_executor.warmup.kernel_warmup import kernel_warmup
25
+ from vllm.platforms import current_platform
26
+ from vllm.sequence import IntermediateTensors
27
+ from vllm.tasks import SupportedTask
28
+ from vllm.utils import GiB_bytes, MemorySnapshot, memory_profiling
29
+ from vllm.v1.engine import ReconfigureDistributedRequest, ReconfigureRankType
30
+ from vllm.v1.kv_cache_interface import KVCacheConfig, KVCacheSpec
31
+ from vllm.v1.outputs import EMPTY_MODEL_RUNNER_OUTPUT, ModelRunnerOutput
32
+ from vllm.v1.utils import report_usage_stats
33
+ from vllm.v1.worker.gpu_model_runner import GPUModelRunner
34
+ from vllm.v1.worker.worker_base import WorkerBase
35
+
36
+ logger = init_logger(__name__)
37
+
38
+ if TYPE_CHECKING:
39
+ from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
40
+ from vllm.v1.core.sched.output import SchedulerOutput
41
+
42
+
43
+ class Worker(WorkerBase):
44
+
45
    def __init__(
        self,
        vllm_config: VllmConfig,
        local_rank: int,
        rank: int,
        distributed_init_method: str,
        is_driver_worker: bool = False,
    ):
        """Initialize the GPU worker.

        Args:
            vllm_config: full engine configuration.
            local_rank: device index on this node.
            rank: global rank in the distributed setup.
            distributed_init_method: init-method URL for torch.distributed.
            is_driver_worker: whether this worker drives the engine loop.
        """

        super().__init__(vllm_config=vllm_config,
                         local_rank=local_rank,
                         rank=rank,
                         distributed_init_method=distributed_init_method,
                         is_driver_worker=is_driver_worker)

        if self.model_config.trust_remote_code:
            # note: lazy import to avoid importing torch before initializing
            from vllm.utils import init_cached_hf_modules
            init_cached_hf_modules()

        # Buffers saved before sleep
        self._sleep_saved_buffers: dict[str, torch.Tensor] = {}

        # Torch profiler. Enabled and configured through env vars:
        # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace
        if envs.VLLM_TORCH_PROFILER_DIR:
            torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR
            logger.info("Profiling enabled. Traces will be saved to: %s",
                        torch_profiler_trace_dir)
            logger.debug(
                "Profiler config: record_shapes=%s,"
                "profile_memory=%s,with_stack=%s,with_flops=%s",
                envs.VLLM_TORCH_PROFILER_RECORD_SHAPES,
                envs.VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY,
                envs.VLLM_TORCH_PROFILER_WITH_STACK,
                envs.VLLM_TORCH_PROFILER_WITH_FLOPS,
            )
            self.profiler = torch.profiler.profile(
                activities=[
                    torch.profiler.ProfilerActivity.CPU,
                    torch.profiler.ProfilerActivity.CUDA,
                ],
                record_shapes=envs.VLLM_TORCH_PROFILER_RECORD_SHAPES,
                profile_memory=envs.VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY,
                with_stack=envs.VLLM_TORCH_PROFILER_WITH_STACK,
                with_flops=envs.VLLM_TORCH_PROFILER_WITH_FLOPS,
                on_trace_ready=torch.profiler.tensorboard_trace_handler(
                    torch_profiler_trace_dir, use_gzip=True))
        else:
            # Profiling disabled; profile() will raise if called.
            self.profiler = None
95
+
96
    def sleep(self, level: int = 1) -> None:
        """Release GPU memory via the CuMem allocator ("sleep mode").

        Level 1 offloads the "weights"-tagged allocations to CPU; level 2
        offloads nothing (all tags are discarded), after first saving the
        model's non-weight buffers on CPU so wake_up() can restore them.
        """
        from vllm.device_allocator.cumem import CuMemAllocator

        free_bytes_before_sleep = torch.cuda.mem_get_info()[0]

        # Save the buffers before level 2 sleep
        if level == 2:
            model = self.model_runner.model
            self._sleep_saved_buffers = {
                name: buffer.cpu().clone()
                for name, buffer in model.named_buffers()
            }

        allocator = CuMemAllocator.get_instance()
        allocator.sleep(offload_tags=("weights", ) if level == 1 else tuple())
        free_bytes_after_sleep, total = torch.cuda.mem_get_info()
        freed_bytes = free_bytes_after_sleep - free_bytes_before_sleep
        used_bytes = total - free_bytes_after_sleep
        assert freed_bytes >= 0, "Memory usage increased after sleeping."
        logger.info(
            "Sleep mode freed %.2f GiB memory, "
            "%.2f GiB memory is still in use.", freed_bytes / GiB_bytes,
            used_bytes / GiB_bytes)
119
+
120
+ def wake_up(self, tags: Optional[list[str]] = None) -> None:
121
+ from vllm.device_allocator.cumem import CuMemAllocator
122
+
123
+ allocator = CuMemAllocator.get_instance()
124
+ allocator.wake_up(tags)
125
+
126
+ # Restore the buffers after level 2 sleep
127
+ if len(self._sleep_saved_buffers):
128
+ model = self.model_runner.model
129
+ for name, buffer in model.named_buffers():
130
+ if name in self._sleep_saved_buffers:
131
+ buffer.data.copy_(self._sleep_saved_buffers[name].data)
132
+ self._sleep_saved_buffers = {}
133
+
134
+ def _maybe_get_memory_pool_context(self,
135
+ tag: str) -> AbstractContextManager:
136
+ if self.vllm_config.model_config.enable_sleep_mode:
137
+ from vllm.device_allocator.cumem import CuMemAllocator
138
+
139
+ allocator = CuMemAllocator.get_instance()
140
+ if tag == "weights":
141
+ assert allocator.get_current_usage() == 0, (
142
+ "Sleep mode can only be "
143
+ "used for one instance per process.")
144
+ context = allocator.use_memory_pool(tag=tag)
145
+ else:
146
+ context = nullcontext()
147
+ return context
148
+
149
+ def initialize_cache(self, num_gpu_blocks: int,
150
+ num_cpu_blocks: int) -> None:
151
+ self.cache_config.num_gpu_blocks = num_gpu_blocks
152
+ self.cache_config.num_cpu_blocks = num_cpu_blocks
153
+
154
    def init_device(self):
        """Bind this worker to its CUDA device and set up distributed state.

        Validates dtype support and free memory against the requested
        `gpu_memory_utilization`, initializes the distributed environment,
        seeds RNGs, and constructs the GPUModelRunner.
        """
        if self.device_config.device.type == "cuda":
            # torch.distributed.all_reduce does not free the input tensor until
            # the synchronization point. This causes the memory usage to grow
            # as the number of all_reduce calls increases. This env var disables
            # this behavior.
            # Related issue:
            # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
            os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"

            # This env var set by Ray causes exceptions with graph building.
            os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
            self.device = torch.device(f"cuda:{self.local_rank}")
            current_platform.set_device(self.device)

            _check_if_gpu_supports_dtype(self.model_config.dtype)
            gc.collect()
            torch.cuda.empty_cache()

            # take current memory snapshot
            self.init_snapshot = MemorySnapshot()
            self.requested_memory = (self.init_snapshot.total_memory *
                                     self.cache_config.gpu_memory_utilization)
            # Fail fast if the device cannot satisfy the requested budget.
            if self.init_snapshot.free_memory < self.requested_memory:
                GiB = lambda b: round(b / GiB_bytes, 2)
                raise ValueError(
                    f"Free memory on device "
                    f"({GiB(self.init_snapshot.free_memory)}/"
                    f"{GiB(self.init_snapshot.total_memory)} GiB) on startup "
                    f"is less than desired GPU memory utilization "
                    f"({self.cache_config.gpu_memory_utilization}, "
                    f"{GiB(self.requested_memory)} GiB). Decrease GPU memory "
                    f"utilization or reduce GPU memory used by other processes."
                )
        else:
            raise RuntimeError(
                f"Not support device type: {self.device_config.device}")
        # Initialize the distributed environment.
        init_worker_distributed_environment(self.vllm_config, self.rank,
                                            self.distributed_init_method,
                                            self.local_rank,
                                            current_platform.dist_backend)
        # Set random seed.
        set_random_seed(self.model_config.seed)

        # Construct the model runner
        self.model_runner: GPUModelRunner = GPUModelRunner(
            self.vllm_config, self.device)

        if self.rank == 0:
            # If usage stat is enabled, collect relevant info.
            report_usage_stats(self.vllm_config)
206
+
207
+ # FIXME(youkaichao & ywang96): Use TorchDispatchMode instead of memory pool
208
+ # to hijack tensor allocation.
209
+ def load_model(self) -> None:
210
+ eep_scale_up = os.environ.get("VLLM_ELASTIC_EP_SCALE_UP_LAUNCH") == "1"
211
+ with self._maybe_get_memory_pool_context(tag="weights"):
212
+ self.model_runner.load_model(eep_scale_up=eep_scale_up)
213
+
214
+ def update_config(self, overrides: dict[str, Any]) -> None:
215
+ self.model_runner.update_config(overrides)
216
+
217
+ def reload_weights(self) -> None:
218
+ with self._maybe_get_memory_pool_context(tag="weights"):
219
+ self.model_runner.reload_weights()
220
+
221
    @torch.inference_mode()
    def determine_available_memory(self) -> int:
        """Profiles the peak memory usage of the model to determine how much
        memory can be used for KV cache without OOMs.

        The engine will first conduct a profiling of the existing memory usage.
        Then, it calculate the free memory that can be used for KV cache in
        bytes.

        Returns:
            The number of bytes available for the KV cache.

        Tip:
            You may limit the usage of GPU memory
            by adjusting the `gpu_memory_utilization` parameter.
        """
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()
        GiB = lambda b: b / GiB_bytes

        # Execute a forward pass with dummy inputs to profile the memory usage
        # of the model.
        with memory_profiling(
                self.init_snapshot,
                weights_memory=int(
                    self.model_runner.model_memory_usage)) as profile_result:
            self.model_runner.profile_run()

        free_gpu_memory = profile_result.after_profile.free_memory
        # NOTE(woosuk): Here we assume that the other processes using the same
        # GPU did not change their memory usage during the profiling.
        assert self.init_snapshot.free_memory > free_gpu_memory, (
            "Error in memory profiling. "
            f"Initial free memory {GiB(self.init_snapshot.free_memory)} GiB, "
            f"current free memory {GiB(free_gpu_memory)} GiB. "
            "This happens when other processes sharing the same container "
            "release GPU memory while vLLM is profiling during initialization. "
            "To fix this, ensure consistent GPU memory allocation or "
            "isolate vLLM in its own container.")
        # KV cache gets whatever the requested budget leaves after the
        # non-KV allocations measured during profiling.
        available_kv_cache_memory = self.requested_memory \
            - profile_result.non_kv_cache_memory

        unrequested_memory = self.init_snapshot.free_memory \
            - self.requested_memory
        logger.debug(
            "Initial free memory: %.2f GiB; "
            "Requested memory: %.2f (util), %.2f GiB",
            GiB(self.init_snapshot.free_memory),
            self.cache_config.gpu_memory_utilization,
            GiB(self.requested_memory),
        )
        logger.debug(
            "Free memory after profiling: %.2f GiB (total), "
            "%.2f GiB (within requested)",
            GiB(free_gpu_memory),
            GiB(free_gpu_memory - unrequested_memory),
        )
        logger.debug(profile_result)
        logger.info("Available KV cache memory: %.2f GiB",
                    GiB(available_kv_cache_memory))
        gc.collect()

        return int(available_kv_cache_memory)
281
+
282
+ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
283
+ return self.model_runner.get_kv_cache_spec()
284
+
285
+ def initialize_from_config(self, kv_cache_config: KVCacheConfig) -> None:
286
+ """Allocate GPU KV cache with the specified kv_cache_config."""
287
+
288
+ if self.vllm_config.model_config.enable_sleep_mode:
289
+ from vllm.device_allocator.cumem import CuMemAllocator
290
+
291
+ allocator = CuMemAllocator.get_instance()
292
+ context = allocator.use_memory_pool(tag="kv_cache")
293
+ else:
294
+ from contextlib import nullcontext
295
+ context = nullcontext()
296
+ with context:
297
+ self.model_runner.initialize_kv_cache(kv_cache_config)
298
+
299
    def compile_or_warm_up_model(self) -> None:
        """Compile/capture the model and warm up kernels before serving.

        Order is deliberate: compile-size warmup runs first, then cudagraph
        capture, then sampler/pooler warmup (kept after capture so its
        buffers are not cleared by `torch.cuda.empty_cache`), then kernel
        warmup, and finally the RNG seed is reset.
        """
        # warm up sizes that are not in cudagraph capture sizes,
        # but users still want to compile for better performance,
        # e.g. for the max-num-batched token size in chunked prefill.
        warmup_sizes = self.vllm_config.compilation_config.compile_sizes.copy()
        if not self.model_config.enforce_eager:
            warmup_sizes = [
                x for x in warmup_sizes if x not in
                self.vllm_config.compilation_config.cudagraph_capture_sizes
            ]
        # We skip EPLB here since we don't want to record dummy metrics
        for size in sorted(warmup_sizes, reverse=True):
            logger.info("Compile and warming up model for size %d", size)
            self.model_runner._dummy_run(size, skip_eplb=True)

        if not self.model_config.enforce_eager:
            self.model_runner.capture_model()

        # Warm up sampler and preallocate memory buffer for logits and other
        # sampling related tensors of max possible shape to avoid memory
        # fragmentation issue.
        # NOTE: This is called after `capture_model` on purpose to prevent
        # memory buffers from being cleared by `torch.cuda.empty_cache`.
        if get_pp_group().is_last_rank:
            max_num_reqs = min(self.scheduler_config.max_num_seqs,
                               self.scheduler_config.max_num_batched_tokens)

            # We skip EPLB here since we don't want to record dummy metrics
            hidden_states, last_hidden_states = \
                self.model_runner._dummy_run(
                    num_tokens=max_num_reqs,
                    skip_eplb=True,
                )
            if self.model_runner.is_pooling_model:
                self.model_runner._dummy_pooler_run(hidden_states)
            else:
                self.model_runner._dummy_sampler_run(
                    hidden_states=last_hidden_states)

        # Warmup kernels used during model execution
        kernel_warmup(self)

        # Reset the seed to ensure that the random state is not affected by
        # the model initialization and profiling.
        set_random_seed(self.model_config.seed)
344
+
345
+ def get_model(self) -> nn.Module:
346
+ return self.model_runner.get_model()
347
+
348
+ def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
349
+ return self.model_runner.get_supported_tasks()
350
+
351
    @torch.inference_mode()
    def execute_model(
        self,
        scheduler_output: "SchedulerOutput",
    ) -> Optional[ModelRunnerOutput]:
        """Run one model step for the given scheduler output.

        Non-first PP ranks first receive intermediate tensors from the
        previous rank.  Non-last PP ranks forward their intermediate output
        to the next rank and return None (or a KV-connector-only output)
        instead of sampled results.
        """
        intermediate_tensors = None
        if not get_pp_group().is_first_rank:
            intermediate_tensors = IntermediateTensors(
                get_pp_group().recv_tensor_dict(
                    all_gather_group=get_tp_group()))

        output = self.model_runner.execute_model(scheduler_output,
                                                 intermediate_tensors)

        parallel_config = self.vllm_config.parallel_config
        if parallel_config.distributed_executor_backend != "external_launcher" \
            and not get_pp_group().is_last_rank:
            assert isinstance(output, IntermediateTensors)
            get_pp_group().send_tensor_dict(output.tensors,
                                            all_gather_group=get_tp_group())

            kv_connector_output = output.kv_connector_output
            if not kv_connector_output:
                return None

            # In case of PP with kv transfer, we need to pass through the
            # kv_connector_output
            if (not kv_connector_output.finished_sending
                    and not kv_connector_output.finished_recving):
                return EMPTY_MODEL_RUNNER_OUTPUT

            output = copy.copy(EMPTY_MODEL_RUNNER_OUTPUT)
            output.kv_connector_output = kv_connector_output
            return output

        assert isinstance(output, ModelRunnerOutput)
        return output
388
+
389
+ def profile(self, is_start: bool = True):
390
+ if self.profiler is None:
391
+ raise RuntimeError("Profiler is not enabled.")
392
+ if is_start:
393
+ self.profiler.start()
394
+ else:
395
+ self.profiler.stop()
396
+ print(self.profiler.key_averages().table(
397
+ sort_by="self_cuda_time_total"))
398
+
399
+ def execute_dummy_batch(self) -> None:
400
+ self.model_runner._dummy_run(1)
401
+
402
+ def add_lora(self, lora_request: LoRARequest) -> bool:
403
+ return self.model_runner.add_lora(lora_request)
404
+
405
+ def remove_lora(self, lora_id: int) -> bool:
406
+ return self.model_runner.remove_lora(lora_id)
407
+
408
+ def list_loras(self) -> set[int]:
409
+ return self.model_runner.list_loras()
410
+
411
+ def pin_lora(self, lora_id: int) -> bool:
412
+ return self.model_runner.pin_lora(lora_id)
413
+
414
+ def check_health(self) -> None:
415
+ # worker will always be healthy as long as it's running.
416
+ return
417
+
418
    def _eplb_before_scale_down(self, old_ep_size: int,
                                new_ep_size: int) -> None:
        """Reshuffle experts onto the surviving ranks before EP scale-down.

        Ranks >= new_ep_size are mapped to -1 (dropped) in the rank mapping
        handed to the EPLB rearrange step.
        """
        from vllm.distributed.parallel_state import get_ep_group
        if get_ep_group().rank == 0:
            logger.info("[Elastic EP] Starting expert resharding "
                        "before scaling down...")
        rank_mapping = {
            old_ep_rank: old_ep_rank if old_ep_rank < new_ep_size else -1
            for old_ep_rank in range(old_ep_size)
        }
        assert self.model_runner.eplb_state is not None
        self.model_runner.eplb_state.rearrange(self.model_runner.model,
                                               execute_shuffle=True,
                                               global_expert_load=None,
                                               rank_mapping=rank_mapping)
        # Wait for the shuffle to finish before the scale-down proceeds.
        torch.cuda.synchronize()
        if get_ep_group().rank == 0:
            logger.info("[Elastic EP] Expert resharding completed!")
436
+
437
    def _eplb_after_scale_up(
            self, old_ep_size: int, new_ep_size: int,
            global_expert_load: Optional[torch.Tensor]) -> None:
        """Redistribute experts across the enlarged EP group after scale-up.

        Args:
            old_ep_size: EP world size before scaling.
            new_ep_size: EP world size after scaling (not read here; the
                new ranks participate via the collective rearrange).
            global_expert_load: pre-collected expert load statistics passed
                through to the rearrange step, if available.
        """
        from vllm.distributed.parallel_state import get_ep_group
        if get_ep_group().rank == 0:
            logger.info("[Elastic EP] Starting expert resharding "
                        "after scaling up...")
        # Existing ranks keep their ids unchanged.
        rank_mapping = {
            old_ep_rank: old_ep_rank
            for old_ep_rank in range(old_ep_size)
        }
        assert self.model_runner.eplb_state is not None
        self.model_runner.eplb_state.rearrange(
            self.model_runner.model,
            execute_shuffle=True,
            global_expert_load=global_expert_load,
            rank_mapping=rank_mapping)
        if get_ep_group().rank == 0:
            logger.info("[Elastic EP] Expert resharding completed!")
456
+
457
+ def _reconfigure_parallel_config(
458
+ self, reconfig_request: ReconfigureDistributedRequest) -> None:
459
+ """
460
+ Update parallel config with provided reconfig_request
461
+ """
462
+ parallel_config = self.vllm_config.parallel_config
463
+ parallel_config.data_parallel_size = \
464
+ reconfig_request.new_data_parallel_size
465
+ if reconfig_request.new_data_parallel_rank != \
466
+ ReconfigureRankType.KEEP_CURRENT_RANK:
467
+ parallel_config.data_parallel_rank = \
468
+ reconfig_request.new_data_parallel_rank
469
+ if reconfig_request.new_data_parallel_rank_local != \
470
+ ReconfigureRankType.KEEP_CURRENT_RANK:
471
+ parallel_config.data_parallel_rank_local = \
472
+ reconfig_request.new_data_parallel_rank_local
473
+ parallel_config.data_parallel_master_ip = \
474
+ reconfig_request.new_data_parallel_master_ip
475
+ parallel_config.data_parallel_master_port = \
476
+ reconfig_request.new_data_parallel_master_port
477
+
478
    def _reconfigure_moe(self, old_ep_size: int,
                         new_ep_size: int) -> Optional[torch.Tensor]:
        """
        Reconfigure MoE modules with provided reconfig_request

        Return the global expert load if new_ep_size > old_ep_size,
        otherwise None
        """
        from vllm.distributed.parallel_state import (
            get_dp_group, get_ep_group, prepare_communication_buffer_for_model)
        from vllm.model_executor.layers.fused_moe.layer import (
            FusedMoEParallelConfig)

        parallel_config = self.vllm_config.parallel_config
        # Collect every FusedMoE layer in the model (matched by class name
        # to avoid importing the layer type at module scope).
        moe_modules = [
            module for module in self.model_runner.model.modules()
            if module.__class__.__name__ == "FusedMoE"
        ]
        num_local_experts = moe_modules[0].moe_config.num_local_experts
        assert all(module.moe_config.num_local_experts == num_local_experts
                   for module in moe_modules), (
                       "All MoE modules must have the same number of experts")
        # Rebuild each layer's parallel config against the new world sizes.
        for module in moe_modules:
            module.moe_config.num_experts = num_local_experts * new_ep_size
            module.global_num_experts = module.moe_config.num_experts
            module.moe_parallel_config = FusedMoEParallelConfig.make(
                tp_size_=get_tp_group().world_size,
                dp_size_=get_dp_group().world_size,
                vllm_parallel_config=parallel_config,
            )
            module.moe_config.moe_parallel_config = module.moe_parallel_config
        if new_ep_size < old_ep_size:
            # Scale-down: the physical layout is already known locally; the
            # redundant-expert count is derived from the EPLB state tensors.
            num_local_physical_experts = num_local_experts
            assert self.model_runner.eplb_state is not None
            new_physical_experts = \
                self.model_runner.eplb_state.physical_to_logical_map.shape[1]
            parallel_config.num_redundant_experts = (
                new_physical_experts -
                self.model_runner.eplb_state.logical_replica_count.shape[1])
            global_expert_load = None
        else:
            # Scale-up: EP rank 0's local expert count defines the new
            # layout; broadcast it over the CPU group so every rank agrees.
            num_local_physical_experts = torch.tensor([num_local_experts],
                                                      dtype=torch.int32,
                                                      device="cpu")
            torch.distributed.broadcast(num_local_physical_experts,
                                        group=get_ep_group().cpu_group,
                                        group_src=0)
            num_local_physical_experts = num_local_physical_experts.item()
            new_physical_experts = num_local_physical_experts * new_ep_size
            assert self.model_runner.eplb_state is not None
            # execute_shuffle=False: only compute the load, the actual
            # weight shuffle happens later in _eplb_after_scale_up().
            global_expert_load = self.model_runner.eplb_state.rearrange(
                self.model_runner.model, execute_shuffle=False)
            parallel_config.num_redundant_experts = (
                new_physical_experts - global_expert_load.shape[1])
        prepare_communication_buffer_for_model(self.model_runner.model)
        self.model_runner.model.update_physical_experts_metadata(
            num_physical_experts=new_physical_experts,
            num_local_physical_experts=num_local_physical_experts)
        return global_expert_load
537
+
538
    def reinitialize_distributed(
            self, reconfig_request: ReconfigureDistributedRequest) -> None:
        """Tear down and rebuild the distributed state for elastic EP.

        Sequence: (optionally) reshard experts before shrinking, destroy the
        old process groups, early-return for ranks being shut down, update
        the parallel config, re-init the distributed environment, then
        reconfigure MoE layers and (optionally) reshard after growing.
        """
        from vllm.config import set_current_vllm_config
        from vllm.distributed.parallel_state import (
            cleanup_dist_env_and_memory, get_ep_group)

        old_ep_size = get_ep_group().world_size
        old_ep_rank = get_ep_group().rank
        # EP size = DP size * TP size * PP size.
        new_ep_size = reconfig_request.new_data_parallel_size * get_tp_group(
        ).world_size * get_pp_group().world_size
        if new_ep_size < old_ep_size:
            # Must move experts off the departing ranks while the old
            # process groups still exist.
            self._eplb_before_scale_down(old_ep_size, new_ep_size)

        cleanup_dist_env_and_memory()

        if reconfig_request.new_data_parallel_rank == \
            ReconfigureRankType.SHUTDOWN_CURRENT_RANK:
            # Only ranks beyond the new EP size may be shut down.
            assert old_ep_rank >= new_ep_size
            # shutdown
            return

        self._reconfigure_parallel_config(reconfig_request)

        with set_current_vllm_config(self.vllm_config):
            init_worker_distributed_environment(self.vllm_config, self.rank,
                                                self.distributed_init_method,
                                                self.local_rank)

        global_expert_load = self._reconfigure_moe(old_ep_size, new_ep_size)

        if new_ep_size > old_ep_size:
            # _reconfigure_moe returns the load only on scale-up.
            assert global_expert_load is not None
            self._eplb_after_scale_up(old_ep_size, new_ep_size,
                                      global_expert_load)
572
+
573
+ def save_sharded_state(
574
+ self,
575
+ path: str,
576
+ pattern: Optional[str] = None,
577
+ max_size: Optional[int] = None,
578
+ ) -> None:
579
+ from vllm.model_executor.model_loader import ShardedStateLoader
580
+ ShardedStateLoader.save_model(
581
+ self.model_runner.model,
582
+ path,
583
+ pattern=pattern,
584
+ max_size=max_size,
585
+ )
586
+
587
+ def save_tensorized_model(
588
+ self,
589
+ tensorizer_config: "TensorizerConfig",
590
+ ) -> None:
591
+ self.model_runner.save_tensorized_model(
592
+ tensorizer_config=tensorizer_config, )
593
+
594
+
595
def init_worker_distributed_environment(
    vllm_config: VllmConfig,
    rank: int,
    distributed_init_method: Optional[str] = None,
    local_rank: int = -1,
    backend: str = "nccl",
) -> None:
    """Initialize the distributed environment.

    Sets the custom all-reduce preference, initializes the global process
    group, then the model-parallel (TP/PP) groups, and finally the KV
    transfer machinery. Order matters: each step depends on the previous.
    """
    parallel_config = vllm_config.parallel_config
    set_custom_all_reduce(not parallel_config.disable_custom_all_reduce)

    init_distributed_environment(parallel_config.world_size, rank,
                                 distributed_init_method, local_rank, backend)

    ensure_model_parallel_initialized(parallel_config.tensor_parallel_size,
                                      parallel_config.pipeline_parallel_size)

    ensure_kv_transfer_initialized(vllm_config)
613
+
614
+
615
+ def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
616
+ # Check if the GPU supports the dtype.
617
+ if torch_dtype == torch.bfloat16: # noqa: SIM102
618
+ if not current_platform.has_device_capability(80):
619
+ capability = current_platform.get_device_capability()
620
+ gpu_name = current_platform.get_device_name()
621
+
622
+ if capability is None:
623
+ compute_str = "does not have a compute capability"
624
+ else:
625
+ version_str = capability.as_version_str()
626
+ compute_str = f"has compute capability {version_str}"
627
+
628
+ raise ValueError(
629
+ "Bfloat16 is only supported on GPUs with compute capability "
630
+ f"of at least 8.0. Your {gpu_name} GPU {compute_str}. "
631
+ "You can use float16 instead by explicitly setting the "
632
+ "`dtype` flag in CLI, for example: --dtype=half.")
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/kv_connector_model_runner_mixin.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """
4
+ Define KV connector functionality mixin for model runners.
5
+ """
6
+ import copy
7
+ from contextlib import AbstractContextManager, contextmanager, nullcontext
8
+ from typing import Generator # noqa: UP035
9
+ from typing import TYPE_CHECKING, Optional
10
+
11
+ from vllm.config import VllmConfig
12
+ from vllm.distributed.kv_transfer import (get_kv_transfer_group,
13
+ has_kv_transfer_group)
14
+ from vllm.distributed.kv_transfer.kv_connector.base import KVConnectorBase
15
+ from vllm.forward_context import get_forward_context, set_forward_context
16
+ from vllm.logger import init_logger
17
+ from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, KVConnectorOutput,
18
+ ModelRunnerOutput)
19
+
20
+ if TYPE_CHECKING:
21
+ from vllm.v1.core.sched.output import SchedulerOutput
22
+
23
+ logger = init_logger(__name__)
24
+
25
+
26
# Defined as a kv connector functionality mixin for ModelRunner (GPU, TPU)
class KVConnectorModelRunnerMixin:
    """Static helpers wrapping the per-step KV connector lifecycle."""

    @staticmethod
    def maybe_setup_kv_connector(scheduler_output: "SchedulerOutput"):
        """Bind this step's connector metadata and kick off KV loads."""
        # Update KVConnector with the KVConnector metadata forward().
        if has_kv_transfer_group():
            kv_connector = get_kv_transfer_group()
            assert isinstance(kv_connector, KVConnectorBase)
            assert scheduler_output.kv_connector_metadata is not None
            kv_connector.bind_connector_metadata(
                scheduler_output.kv_connector_metadata)

            # Background KV cache transfers happen here.
            # These transfers are designed to be async and the requests
            # involved may be disjoint from the running requests.
            # Do this here to save a collective_rpc.
            kv_connector.start_load_kv(get_forward_context())

    @staticmethod
    def maybe_wait_for_kv_save() -> None:
        """Block until pending KV saves finish, if a transfer group exists."""
        if has_kv_transfer_group():
            get_kv_transfer_group().wait_for_save()

    @staticmethod
    def get_finished_kv_transfers(
        scheduler_output: "SchedulerOutput",
    ) -> tuple[Optional[set[str]], Optional[set[str]]]:
        """Return (finished_sending, finished_recving) request-id sets,
        or (None, None) when no KV transfer group is configured."""
        if has_kv_transfer_group():
            return get_kv_transfer_group().get_finished(
                scheduler_output.finished_req_ids)
        return None, None

    @staticmethod
    def kv_connector_no_forward(scheduler_output: "SchedulerOutput",
                                vllm_config: VllmConfig) -> ModelRunnerOutput:
        """Drive the connector for a step with no model forward pass."""
        # KV send/recv even if no work to do.
        with set_forward_context(
                None, vllm_config
        ), KVConnectorModelRunnerMixin._get_kv_connector_output(
                scheduler_output, wait_for_save=False) as kv_connector_output:
            pass

        # Nothing finished this step: reuse the shared empty output.
        if (not kv_connector_output.finished_sending
                and not kv_connector_output.finished_recving):
            return EMPTY_MODEL_RUNNER_OUTPUT

        # Shallow-copy so the shared EMPTY output is never mutated.
        output = copy.copy(EMPTY_MODEL_RUNNER_OUTPUT)
        output.kv_connector_output = kv_connector_output
        return output

    @staticmethod
    def maybe_get_kv_connector_output(
        scheduler_output: "SchedulerOutput"
    ) -> AbstractContextManager[Optional[KVConnectorOutput]]:
        """Return the connector-output context manager, or a no-op
        nullcontext when no KV transfer group is configured."""
        return KVConnectorModelRunnerMixin._get_kv_connector_output(
            scheduler_output) if has_kv_transfer_group() else nullcontext()

    # This context manager must be used within an active forward context.
    # It encapsulates the entire KV connector lifecycle within execute_model
    @staticmethod
    @contextmanager
    def _get_kv_connector_output(
        scheduler_output: "SchedulerOutput",
        wait_for_save: bool = True
    ) -> Generator[KVConnectorOutput, None, None]:
        """Yield a KVConnectorOutput that is populated on exit.

        Lifecycle: bind metadata -> start async loads -> yield to the
        caller's forward pass -> (optionally) wait for saves -> record
        finished transfers -> clear metadata. The finally block runs even
        if the forward pass raises, so the connector state is always reset.
        """
        output = KVConnectorOutput()

        # Update KVConnector with the KVConnector metadata forward().
        kv_connector = get_kv_transfer_group()
        assert isinstance(kv_connector, KVConnectorBase)
        assert scheduler_output.kv_connector_metadata is not None
        kv_connector.bind_connector_metadata(
            scheduler_output.kv_connector_metadata)

        # Background KV cache transfers happen here.
        # These transfers are designed to be async and the requests
        # involved may be disjoint from the running requests.
        # Do this here to save a collective_rpc.
        kv_connector.start_load_kv(get_forward_context())
        try:
            yield output
        finally:
            if wait_for_save:
                kv_connector.wait_for_save()

            output.finished_sending, output.finished_recving = (
                kv_connector.get_finished(scheduler_output.finished_req_ids))

            kv_connector.clear_connector_metadata()
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/lora_model_runner_mixin.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """
4
+ Define LoRA functionality mixin for model runners.
5
+ """
6
+
7
+ from contextlib import contextmanager
8
+ from typing import Union
9
+
10
+ import numpy as np
11
+ import torch.nn as nn
12
+
13
+ from vllm.config import LoRAConfig, ModelConfig, SchedulerConfig
14
+ from vllm.logger import init_logger
15
+ from vllm.lora.layers import LoRAMapping
16
+ from vllm.lora.request import LoRARequest
17
+ from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
18
+ from vllm.model_executor.models import supports_lora, supports_multimodal
19
+ from vllm.v1.worker.gpu_input_batch import InputBatch as GPUInputBatch
20
+ from vllm.v1.worker.tpu_input_batch import InputBatch as TPUInputBatch
21
+
22
+ InputBatch = Union[TPUInputBatch, GPUInputBatch]
23
+
24
+ logger = init_logger(__name__)
25
+
26
+
27
# Defined as a mixin for GPUModelRunner
class LoRAModelRunnerMixin:
    """LoRA adapter management mixin for model runners."""

    # Rank used for the dummy warmup adapters created below.
    LORA_WARMUP_RANK = 8

    def load_lora_model(self, model: nn.Module, model_config: ModelConfig,
                        scheduler_config: SchedulerConfig,
                        lora_config: LoRAConfig, device: str) -> nn.Module:
        """Wrap ``model`` with a LoRA manager and return the wrapped model.

        Raises:
            ValueError: if the model architecture does not support LoRA.
        """
        if not supports_lora(model):
            raise ValueError(
                f"{model.__class__.__name__} does not support LoRA yet.")

        if supports_multimodal(model):
            logger.warning("Regarding multimodal models, vLLM currently "
                           "only supports adding LoRA to language model.")

        # Use get_text_config() in case of multimodal models
        text_config = model_config.hf_config.get_text_config()

        # Add LoRA Manager to the Model Runner
        self.lora_manager = LRUCacheWorkerLoRAManager(
            scheduler_config.max_num_seqs,
            scheduler_config.max_num_batched_tokens,
            model_config.get_vocab_size(),
            lora_config,
            device,
            model.embedding_modules,
            model.embedding_padding_modules,
            max_position_embeddings=text_config.max_position_embeddings,
        )
        return self.lora_manager.create_lora_manager(model)

    def _set_active_loras(self, prompt_lora_mapping: tuple[int, ...],
                          token_lora_mapping: tuple[int, ...],
                          lora_requests: set[LoRARequest]) -> None:
        """Activate the given adapters with the given per-request/per-token
        LoRA id mappings. Raises RuntimeError if LoRA is not enabled."""
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")

        # Set is_prefill to True, so we always use the SGMV kernels on
        # non-cuda platforms.
        # On cuda platforms we use the same kernels for prefill and
        # decode and this flag is generally ignored.
        lora_mapping = LoRAMapping(token_lora_mapping,
                                   prompt_lora_mapping,
                                   is_prefill=True)
        self.lora_manager.set_active_adapters(lora_requests, lora_mapping)

    def set_active_loras(self, input_batch: InputBatch,
                         num_scheduled_tokens: np.ndarray) -> None:
        """Derive the LoRA mappings from ``input_batch`` and activate them."""
        prompt_lora_mapping: tuple[int, ...]  # of size input_batch.num_reqs
        token_lora_mapping: tuple[int,
                                  ...]  # of size np.sum(num_scheduled_tokens)
        lora_requests: set[LoRARequest]
        prompt_lora_mapping, token_lora_mapping, lora_requests = \
            input_batch.make_lora_inputs(num_scheduled_tokens)
        return self._set_active_loras(prompt_lora_mapping, token_lora_mapping,
                                      lora_requests)

    @contextmanager
    def maybe_setup_dummy_loras(self, lora_config):
        """Context manager that registers ``max_loras`` dummy adapters for
        warmup, and removes all adapters on exit. No-op when ``lora_config``
        is None."""
        if lora_config is None:
            yield
        else:
            # __enter__ code
            assert self.lora_manager is not None, "LoRA is not enabled"

            num_loras = lora_config.max_loras

            # Make dummy lora requests
            lora_requests: set[LoRARequest] = {
                LoRARequest(lora_name=f"warmup_{lora_id}",
                            lora_int_id=lora_id,
                            lora_path="/not/a/real/path")
                for lora_id in range(1, num_loras + 1)
            }

            with self.lora_manager.dummy_lora_cache():
                # Add the dummy LoRAs here so _set_active_loras doesn't try to
                # load from disk.
                for lr in lora_requests:
                    self.lora_manager.add_dummy_lora(
                        lr, rank=self.LORA_WARMUP_RANK)

                yield

                # __exit__ code
                self.lora_manager.remove_all_adapters()

    @contextmanager
    def maybe_select_dummy_loras(self, lora_config: LoRAConfig,
                                 num_scheduled_tokens: np.ndarray):
        """Context manager that activates dummy adapters cyclically across
        the batch (worst-case mapping) for warmup runs. No-op when
        ``lora_config`` is None."""
        if lora_config is None:
            yield
        else:
            # __enter__ code
            assert self.lora_manager is not None, "LoRA is not enabled"

            num_reqs = len(num_scheduled_tokens)
            num_loras = lora_config.max_loras

            # Make prompt lora mapping
            # Assign LoRA IDs cyclically to simulate a worst-case scenario.
            prompt_lora_mapping = (np.arange(num_reqs, dtype=np.int32) %
                                   num_loras) + 1

            # Make token lora mapping
            token_lora_mapping = np.repeat(prompt_lora_mapping,
                                           num_scheduled_tokens)

            # Make dummy lora requests
            lora_requests: set[LoRARequest] = {
                LoRARequest(lora_name=f"warmup_{lora_id}",
                            lora_int_id=lora_id,
                            lora_path="/not/a/real/path")
                for lora_id in range(1, num_loras + 1)
            }

            self._set_active_loras(tuple(prompt_lora_mapping),
                                   tuple(token_lora_mapping), lora_requests)

            yield

    @contextmanager
    def maybe_dummy_run_with_lora(self, lora_config: LoRAConfig,
                                  num_scheduled_tokens: np.ndarray):
        """Compose setup + select of dummy LoRAs for a warmup run."""
        with self.maybe_setup_dummy_loras(
                lora_config), self.maybe_select_dummy_loras(
                    lora_config, num_scheduled_tokens):
            yield

    def add_lora(self, lora_request: LoRARequest) -> bool:
        """Register an adapter; raises RuntimeError if LoRA is disabled."""
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        return self.lora_manager.add_adapter(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        """Unregister an adapter; raises RuntimeError if LoRA is disabled."""
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        return self.lora_manager.remove_adapter(lora_id)

    def pin_lora(self, lora_id: int) -> bool:
        """Pin an adapter in the LRU cache; raises if LoRA is disabled."""
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        return self.lora_manager.pin_adapter(lora_id)

    def list_loras(self) -> set[int]:
        """Return the ids of currently registered adapters."""
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        return self.lora_manager.list_adapters()
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/tpu_input_batch.py ADDED
@@ -0,0 +1,585 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ # Datastructures defining a TPU input batch
4
+
5
+ from typing import Optional, cast
6
+
7
+ import numpy as np
8
+ import torch
9
+
10
+ from vllm.lora.request import LoRARequest
11
+ from vllm.sampling_params import SamplingType
12
+ from vllm.utils import swap_dict_values
13
+ from vllm.v1.outputs import LogprobsTensors
14
+ from vllm.v1.worker.block_table import MultiGroupBlockTable
15
+ from vllm.v1.worker.gpu_input_batch import CachedRequestState
16
+
17
+ _SAMPLING_EPS = 1e-5
18
+
19
+
20
+ class InputBatch:
21
+
22
    def __init__(
        self,
        max_num_reqs: int,
        max_model_len: int,
        max_num_batched_tokens: int,
        device: torch.device,
        pin_memory: bool,
        vocab_size: int,
        block_sizes: list[int],  # The block_size of each kv cache group
    ):
        """Pre-allocate all per-request CPU/device buffers for the batch.

        Buffers are sized for ``max_num_reqs`` rows; request state is stored
        row-wise and indexed via ``req_id_to_index``. Device tensors and
        their pinned-CPU mirrors are allocated in pairs so updates can be
        staged on CPU and copied over.
        """
        self.max_num_reqs = max_num_reqs
        self.max_model_len = max_model_len
        self.max_num_batched_tokens = max_num_batched_tokens
        self.device = device
        self.pin_memory = pin_memory
        self.vocab_size = vocab_size

        self._req_ids: list[Optional[str]] = []
        self.req_id_to_index: dict[str, int] = {}

        # TODO(woosuk): This buffer could be too large if max_model_len is big.
        # Find a way to reduce the CPU memory usage.
        # This buffer is not directly transferred to the GPU, so it does not
        # need to be pinned.
        self.token_ids_cpu_tensor = torch.zeros(
            (max_num_reqs, max_model_len),
            device="cpu",
            dtype=torch.int32,
            pin_memory=False,
        )
        self.token_ids_cpu = self.token_ids_cpu_tensor.numpy()
        self.num_tokens = np.zeros(max_num_reqs, dtype=np.int32)
        self.num_tokens_no_spec = np.zeros(max_num_reqs, dtype=np.int32)
        self.num_prompt_tokens = np.zeros(max_num_reqs, dtype=np.int32)
        self.num_computed_tokens_cpu_tensor = torch.zeros(
            (max_num_reqs, ),
            device="cpu",
            dtype=torch.int32,
            pin_memory=pin_memory,
        )
        self.num_computed_tokens_cpu = \
            self.num_computed_tokens_cpu_tensor.numpy()

        # Block table.
        self.block_table = MultiGroupBlockTable(
            max_num_reqs=max_num_reqs,
            max_model_len=max_model_len,
            max_num_batched_tokens=max_num_batched_tokens,
            pin_memory=pin_memory,
            device=device,
            block_sizes=block_sizes,
        )

        # Sampling-related.
        self.temperature = torch.empty((max_num_reqs, ),
                                       dtype=torch.float32,
                                       device=device)
        self.temperature_cpu_tensor = torch.empty((max_num_reqs, ),
                                                  dtype=torch.float32,
                                                  device="cpu",
                                                  pin_memory=pin_memory)
        self.temperature_cpu = self.temperature_cpu_tensor.numpy()
        self.greedy_reqs: set[str] = set()
        self.random_reqs: set[str] = set()

        self.top_p = torch.empty((max_num_reqs, ),
                                 dtype=torch.float32,
                                 device=device)
        self.top_p_cpu_tensor = torch.empty((max_num_reqs, ),
                                            dtype=torch.float32,
                                            device="cpu",
                                            pin_memory=pin_memory)
        self.top_p_cpu = self.top_p_cpu_tensor.numpy()
        self.top_p_reqs: set[str] = set()

        self.top_k = torch.empty((max_num_reqs, ),
                                 dtype=torch.int32,
                                 device=device)
        self.top_k_cpu_tensor = torch.empty((max_num_reqs, ),
                                            dtype=torch.int32,
                                            device="cpu",
                                            pin_memory=pin_memory)
        self.top_k_cpu = self.top_k_cpu_tensor.numpy()
        self.top_k_reqs: set[str] = set()

        self.min_p = torch.empty((max_num_reqs, ),
                                 dtype=torch.float32,
                                 device=device)
        self.min_p_cpu_tensor = torch.empty((max_num_reqs, ),
                                            dtype=torch.float32,
                                            device="cpu",
                                            pin_memory=pin_memory)
        self.min_p_cpu = self.min_p_cpu_tensor.numpy()
        self.min_p_reqs: set[str] = set()

        # Frequency penalty related data structures
        self.frequency_penalties = torch.empty((max_num_reqs, ),
                                               dtype=torch.float,
                                               device=device)
        self.frequency_penalties_cpu_tensor = torch.empty(
            (max_num_reqs, ),
            dtype=torch.float,
            device="cpu",
            pin_memory=pin_memory)
        self.frequency_penalties_cpu = \
            self.frequency_penalties_cpu_tensor.numpy()
        self.frequency_penalties_reqs: set[str] = set()

        # Presence penalty related data structures
        self.presence_penalties = torch.empty((max_num_reqs, ),
                                              dtype=torch.float,
                                              device=device)
        self.presence_penalties_cpu_tensor = torch.empty((max_num_reqs, ),
                                                         dtype=torch.float,
                                                         device="cpu",
                                                         pin_memory=pin_memory)
        self.presence_penalties_cpu = self.presence_penalties_cpu_tensor.numpy(
        )
        self.presence_penalties_reqs: set[str] = set()

        # Repetition penalty related data structures
        self.repetition_penalties = torch.empty((max_num_reqs, ),
                                                dtype=torch.float,
                                                device=device)
        self.repetition_penalties_cpu_tensor = torch.empty(
            (max_num_reqs, ),
            dtype=torch.float,
            device="cpu",
            pin_memory=pin_memory)
        self.repetition_penalties_cpu = \
            self.repetition_penalties_cpu_tensor.numpy()
        self.repetition_penalties_reqs: set[str] = set()

        # req_index -> (min_tokens, stop_token_ids)
        self.min_tokens: dict[int, tuple[int, set[int]]] = {}

        # lora related
        self.request_lora_mapping = np.zeros((self.max_num_reqs, ),
                                             dtype=np.int32)
        self.lora_id_to_request_ids: dict[int, set[str]] = {}
        self.lora_id_to_lora_request: dict[int, LoRARequest] = {}

        # req_index -> generator
        # NOTE(woosuk): The indices of the requests that do not have their own
        # generator should not be included in the dictionary.
        self.generators: dict[int, torch.Generator] = {}

        self.num_logprobs: dict[str, int] = {}
        # NOTE(rob): num_prompt_logprobs only includes reqs
        # that are currently in the prefill phase.
        self.num_prompt_logprobs: dict[str, int] = {}

        # To accumulate prompt logprobs tensor chunks across prefill steps.
        self.in_progress_prompt_logprobs_cpu: dict[str, LogprobsTensors] = {}

        self.logit_bias: list[Optional[dict[int,
                                            float]]] = [None] * max_num_reqs
        self.has_allowed_token_ids: set[str] = set()
        # NOTE(lufang): In the mask tensor, if the corresponding token allowed,
        # the value is False. Since we use masked_fill_ to set -inf.
        self.allowed_token_ids_mask: Optional[torch.Tensor] = None
        self.allowed_token_ids_mask_cpu_tensor: Optional[torch.Tensor] = None

        # req_index -> bad_words_token_ids
        self.bad_words_token_ids: dict[int, list[list[int]]] = {}

        self.req_output_token_ids: list[Optional[list[int]]] = []
189
+
190
    @property
    def req_ids(self) -> list[str]:
        """Request ids currently in the batch, in row order."""
        # None elements should only be present transiently
        # while performing state updates to the batch.
        return cast(list[str], self._req_ids)
195
+
196
    def add_request(
        self,
        request: "CachedRequestState",
        req_index: Optional[int] = None,
    ) -> None:
        """Insert ``request`` into the batch at ``req_index``.

        When ``req_index`` is None, the request is appended at the end.
        Copies token ids, block table rows, and all sampling parameters
        into the pre-allocated row, and updates the per-parameter request
        sets used to skip work when a parameter is unused.
        """
        if req_index is None:
            req_index = self.num_reqs
        assert req_index < self.max_num_reqs

        req_id = request.req_id
        if req_index == len(self._req_ids):
            # Appending a new row.
            self._req_ids.append(req_id)
            self.req_output_token_ids.append(request.output_token_ids)
        else:
            # Reusing a previously vacated row.
            self._req_ids[req_index] = req_id
            self.req_output_token_ids[req_index] = request.output_token_ids

        self.req_id_to_index[req_id] = req_index

        # Copy the prompt token ids and output token ids.
        num_prompt_tokens = len(request.prompt_token_ids)
        self.num_prompt_tokens[req_index] = num_prompt_tokens
        self.token_ids_cpu[
            req_index, :num_prompt_tokens] = request.prompt_token_ids
        start_idx = num_prompt_tokens
        end_idx = start_idx + len(request.output_token_ids)
        self.token_ids_cpu[req_index,
                           start_idx:end_idx] = request.output_token_ids
        # Number of token ids in token_ids_cpu.
        # NOTE(woosuk): This may include spec decode tokens.
        self.num_tokens[req_index] = request.num_tokens
        # Number of tokens without spec decode tokens.
        self.num_tokens_no_spec[req_index] = request.num_tokens

        self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens
        self.block_table.add_row(request.block_ids, req_index)

        sampling_params = request.sampling_params
        assert sampling_params is not None, "pooling requests not supported yet"
        if sampling_params.sampling_type == SamplingType.GREEDY:
            # Avoid later division by zero.
            self.temperature_cpu[req_index] = -1.0
            self.greedy_reqs.add(req_id)
        else:
            self.temperature_cpu[req_index] = sampling_params.temperature
            self.random_reqs.add(req_id)

        self.top_p_cpu[req_index] = sampling_params.top_p
        if sampling_params.top_p < 1:
            self.top_p_reqs.add(req_id)
        top_k = sampling_params.top_k
        if 0 < top_k < self.vocab_size:
            self.top_k_reqs.add(req_id)
        else:
            # Out-of-range top_k is treated as "no top-k filtering".
            top_k = self.vocab_size
        self.top_k_cpu[req_index] = top_k
        self.min_p_cpu[req_index] = sampling_params.min_p
        self.frequency_penalties_cpu[
            req_index] = sampling_params.frequency_penalty
        if sampling_params.min_p > _SAMPLING_EPS:
            self.min_p_reqs.add(req_id)
        if sampling_params.frequency_penalty != 0.0:
            self.frequency_penalties_reqs.add(req_id)
        self.presence_penalties_cpu[
            req_index] = sampling_params.presence_penalty
        if sampling_params.presence_penalty != 0.0:
            self.presence_penalties_reqs.add(req_id)
        self.repetition_penalties_cpu[
            req_index] = sampling_params.repetition_penalty
        if sampling_params.repetition_penalty != 1.0:
            self.repetition_penalties_reqs.add(req_id)
        if sampling_params.min_tokens:
            self.min_tokens[req_index] = (sampling_params.min_tokens,
                                          sampling_params.all_stop_token_ids)

        # NOTE(woosuk): self.generators should not include the requests that
        # do not have their own generator.
        if request.generator is not None:
            self.generators[req_index] = request.generator

        if sampling_params.logprobs is not None:
            self.num_logprobs[req_id] = sampling_params.logprobs
        if sampling_params.prompt_logprobs is not None:
            self.num_prompt_logprobs[req_id] = sampling_params.prompt_logprobs
        if sampling_params.logit_bias is not None:
            self.logit_bias[req_index] = sampling_params.logit_bias

        if sampling_params.allowed_token_ids:
            self.has_allowed_token_ids.add(req_id)
            if self.allowed_token_ids_mask_cpu_tensor is None:
                # Lazy allocation for this tensor, which can be large.
                # False means we don't fill with -inf.
                self.allowed_token_ids_mask = torch.zeros(self.max_num_reqs,
                                                          self.vocab_size,
                                                          dtype=torch.bool,
                                                          device=self.device)
                self.allowed_token_ids_mask_cpu_tensor = torch.zeros(
                    self.max_num_reqs,
                    self.vocab_size,
                    dtype=torch.bool,
                    device="cpu")
            self.allowed_token_ids_mask_cpu_tensor[req_index] = True
            # False means we don't fill with -inf.
            self.allowed_token_ids_mask_cpu_tensor[req_index][
                sampling_params.allowed_token_ids] = False

        if sampling_params.bad_words_token_ids:
            self.bad_words_token_ids[
                req_index] = sampling_params.bad_words_token_ids

        # Add request lora ID
        if request.lora_request:
            lora_id = request.lora_request.lora_int_id
            if lora_id not in self.lora_id_to_request_ids:
                self.lora_id_to_request_ids[lora_id] = set()

            self.request_lora_mapping[req_index] = lora_id
            self.lora_id_to_request_ids[lora_id].add(request.req_id)
            self.lora_id_to_lora_request[lora_id] = request.lora_request
        else:
            # No LoRA
            self.request_lora_mapping[req_index] = 0
318
+
319
    def remove_request(self, req_id: str) -> Optional[int]:
        """This method must always be followed by a call to condense().

        Clears the request's row and removes it from every per-parameter
        bookkeeping set. Returns the vacated row index, or None if the
        request was not in the batch.
        """
        req_index = self.req_id_to_index.pop(req_id, None)
        if req_index is None:
            return None
        self._req_ids[req_index] = None
        self.req_output_token_ids[req_index] = None

        self.greedy_reqs.discard(req_id)
        self.random_reqs.discard(req_id)
        self.top_p_reqs.discard(req_id)
        self.top_k_reqs.discard(req_id)
        self.min_p_reqs.discard(req_id)
        self.min_tokens.pop(req_index, None)
        self.frequency_penalties_reqs.discard(req_id)
        self.presence_penalties_reqs.discard(req_id)
        self.repetition_penalties_reqs.discard(req_id)
        self.generators.pop(req_index, None)
        self.num_logprobs.pop(req_id, None)
        self.num_prompt_logprobs.pop(req_id, None)
        self.in_progress_prompt_logprobs_cpu.pop(req_id, None)

        # LoRA: drop the request from its adapter's set; release the
        # adapter entirely once no request references it.
        lora_id = self.request_lora_mapping[req_index]
        if lora_id != 0:
            self.lora_id_to_request_ids[lora_id].discard(req_id)
            if len(self.lora_id_to_request_ids[lora_id]) == 0:
                self.lora_id_to_request_ids.pop(lora_id)
                self.lora_id_to_lora_request.pop(lora_id)
            self.request_lora_mapping[req_index] = 0

        self.logit_bias[req_index] = None
        self.has_allowed_token_ids.discard(req_id)
        if self.allowed_token_ids_mask_cpu_tensor is not None:
            # False means we don't fill with -inf.
            self.allowed_token_ids_mask_cpu_tensor[req_index].fill_(False)
        self.bad_words_token_ids.pop(req_index, None)
        return req_index
358
+
359
+ def swap_states(self, i1: int, i2: int) -> None:
360
+ old_id_i1 = self._req_ids[i1]
361
+ old_id_i2 = self._req_ids[i2]
362
+ self._req_ids[i1], self._req_ids[i2] =\
363
+ self._req_ids[i2], self._req_ids[i1] # noqa
364
+ self.req_output_token_ids[i1], self.req_output_token_ids[i2] =\
365
+ self.req_output_token_ids[i2], self.req_output_token_ids[i1]
366
+ assert old_id_i1 is not None and old_id_i2 is not None
367
+ self.req_id_to_index[old_id_i1], self.req_id_to_index[old_id_i2] =\
368
+ self.req_id_to_index[old_id_i2], self.req_id_to_index[old_id_i1]
369
+ self.num_tokens[i1], self.num_tokens[i2] =\
370
+ self.num_tokens[i2], self.num_tokens[i1]
371
+ self.num_tokens_no_spec[i1], self.num_tokens_no_spec[i2] =\
372
+ self.num_tokens_no_spec[i2], self.num_tokens_no_spec[i1]
373
+ self.num_prompt_tokens[i1], self.num_prompt_tokens[i2] =\
374
+ self.num_prompt_tokens[i2], self.num_prompt_tokens[i1]
375
+ self.num_computed_tokens_cpu[i1], self.num_computed_tokens_cpu[i2] =\
376
+ self.num_computed_tokens_cpu[i2], self.num_computed_tokens_cpu[i1]
377
+ self.temperature_cpu[i1], self.temperature_cpu[i2] =\
378
+ self.temperature_cpu[i2], self.temperature_cpu[i1]
379
+ self.top_p_cpu[i1], self.top_p_cpu[i2] =\
380
+ self.top_p_cpu[i2], self.top_p_cpu[i1]
381
+ self.top_k_cpu[i1], self.top_k_cpu[i2] =\
382
+ self.top_k_cpu[i2], self.top_k_cpu[i1]
383
+ self.frequency_penalties_cpu[i1], self.frequency_penalties_cpu[i2] =\
384
+ self.frequency_penalties_cpu[i2], self.frequency_penalties_cpu[i1]
385
+ self.presence_penalties_cpu[i1], self.presence_penalties_cpu[i2] =\
386
+ self.presence_penalties_cpu[i2], self.presence_penalties_cpu[i1]
387
+ self.repetition_penalties_cpu[i1], self.repetition_penalties_cpu[i2] =\
388
+ self.repetition_penalties_cpu[i2], self.repetition_penalties_cpu[i1]
389
+ self.min_p_cpu[i1], self.min_p_cpu[i2] =\
390
+ self.min_p_cpu[i2], self.min_p_cpu[i1]
391
+
392
+ # NOTE: the following is unsafe
393
+ # self.token_ids_cpu[i1, ...], self.token_ids_cpu[i2, ...], =\
394
+ # self.token_ids_cpu[i2, ...], self.token_ids_cpu[i1, ...]
395
+ # instead, we need to temporiarily copy the data for one of the indices
396
+ # TODO(lucas): optimize this by only copying valid indices
397
+ tmp = self.token_ids_cpu[i1, ...].copy()
398
+ self.token_ids_cpu[i1, ...] = self.token_ids_cpu[i2, ...]
399
+ self.token_ids_cpu[i2, ...] = tmp
400
+
401
+ swap_dict_values(self.generators, i1, i2)
402
+ swap_dict_values(self.min_tokens, i1, i2)
403
+ swap_dict_values(self.bad_words_token_ids, i1, i2)
404
+
405
+ self.request_lora_mapping[i1], self.request_lora_mapping[i2] =\
406
+ self.request_lora_mapping[i2], self.request_lora_mapping[i1]
407
+ self.logit_bias[i1], self.logit_bias[i2] =\
408
+ self.logit_bias[i2], self.logit_bias[i1]
409
+
410
+ if self.allowed_token_ids_mask_cpu_tensor is not None:
411
+ self.allowed_token_ids_mask_cpu_tensor[i1], \
412
+ self.allowed_token_ids_mask_cpu_tensor[i2] =\
413
+ self.allowed_token_ids_mask_cpu_tensor[i2], \
414
+ self.allowed_token_ids_mask_cpu_tensor[i1]
415
+ self.block_table.swap_row(i1, i2)
416
+
417
+ def condense(self, empty_req_indices: list[int]) -> None:
418
+ """Move non-empty requests down into lower, empty indices.
419
+
420
+ Args:
421
+ empty_req_indices: empty batch indices, sorted descending.
422
+ """
423
+ num_reqs = self.num_reqs
424
+ if num_reqs == 0:
425
+ # The batched states are empty.
426
+ self._req_ids.clear()
427
+ self.req_output_token_ids.clear()
428
+ return
429
+
430
+ # NOTE(woosuk): This function assumes that the empty_req_indices
431
+ # is sorted in descending order.
432
+ last_req_index = num_reqs + len(empty_req_indices) - 1
433
+ while empty_req_indices:
434
+ # Find the largest non-empty index.
435
+ while last_req_index in empty_req_indices:
436
+ last_req_index -= 1
437
+
438
+ # Find the smallest empty index.
439
+ empty_index = empty_req_indices.pop()
440
+ if empty_index >= last_req_index:
441
+ break
442
+
443
+ # Swap the states.
444
+ req_id = self._req_ids[last_req_index]
445
+ output_token_ids = self.req_output_token_ids[last_req_index]
446
+ assert req_id is not None
447
+ self._req_ids[empty_index] = req_id
448
+ self._req_ids[last_req_index] = None
449
+ self.req_output_token_ids[empty_index] = output_token_ids
450
+ self.req_output_token_ids[last_req_index] = None
451
+ self.req_id_to_index[req_id] = empty_index
452
+
453
+ num_tokens = self.num_tokens[last_req_index]
454
+ self.token_ids_cpu[empty_index, :num_tokens] = self.token_ids_cpu[
455
+ last_req_index, :num_tokens]
456
+ self.num_tokens[empty_index] = num_tokens
457
+ self.num_tokens_no_spec[empty_index] = self.num_tokens_no_spec[
458
+ last_req_index]
459
+ self.num_prompt_tokens[empty_index] = self.num_prompt_tokens[
460
+ last_req_index]
461
+ self.num_computed_tokens_cpu[
462
+ empty_index] = self.num_computed_tokens_cpu[last_req_index]
463
+ self.block_table.move_row(last_req_index, empty_index)
464
+ self.temperature_cpu[empty_index] = self.temperature_cpu[
465
+ last_req_index]
466
+ self.top_p_cpu[empty_index] = self.top_p_cpu[last_req_index]
467
+ self.top_k_cpu[empty_index] = self.top_k_cpu[last_req_index]
468
+ self.frequency_penalties_cpu[
469
+ empty_index] = self.frequency_penalties_cpu[last_req_index]
470
+ self.presence_penalties_cpu[
471
+ empty_index] = self.presence_penalties_cpu[last_req_index]
472
+ self.repetition_penalties_cpu[
473
+ empty_index] = self.repetition_penalties_cpu[last_req_index]
474
+ self.min_p_cpu[empty_index] = self.min_p_cpu[last_req_index]
475
+ generator = self.generators.pop(last_req_index, None)
476
+ if generator is not None:
477
+ self.generators[empty_index] = generator
478
+
479
+ min_token = self.min_tokens.pop(last_req_index, None)
480
+ if min_token is not None:
481
+ self.min_tokens[empty_index] = min_token
482
+
483
+ self.request_lora_mapping[empty_index] = self.request_lora_mapping[
484
+ last_req_index]
485
+
486
+ self.logit_bias[empty_index] = self.logit_bias[last_req_index]
487
+
488
+ if self.allowed_token_ids_mask_cpu_tensor is not None:
489
+ self.allowed_token_ids_mask_cpu_tensor[
490
+ empty_index] = self.allowed_token_ids_mask_cpu_tensor[
491
+ last_req_index]
492
+
493
+ bad_words_token_ids = self.bad_words_token_ids.pop(
494
+ last_req_index, None)
495
+ if bad_words_token_ids is not None:
496
+ self.bad_words_token_ids[empty_index] = bad_words_token_ids
497
+ # Decrement last_req_index since it is now empty.
498
+ last_req_index -= 1
499
+
500
+ # Trim lists to the batch size.
501
+ del self._req_ids[self.num_reqs:]
502
+ del self.req_output_token_ids[self.num_reqs:]
503
+
504
+ def _make_prompt_token_ids_tensor(self) -> torch.Tensor:
505
+ max_prompt_len = self.num_prompt_tokens[:self.num_reqs].max()
506
+ prompt_token_ids_cpu_tensor = torch.empty(
507
+ (self.num_reqs, max_prompt_len),
508
+ device="cpu",
509
+ dtype=torch.int64,
510
+ pin_memory=self.pin_memory,
511
+ )
512
+ prompt_token_ids = prompt_token_ids_cpu_tensor.numpy()
513
+ prompt_token_ids[:] = self.token_ids_cpu[:self.
514
+ num_reqs, :max_prompt_len]
515
+ # Use the value of vocab_size as a pad since we don't have a
516
+ # token_id of this value.
517
+ for i in range(self.num_reqs):
518
+ prompt_token_ids[i, self.num_prompt_tokens[i]:] = self.vocab_size
519
+ return prompt_token_ids_cpu_tensor.to(device=self.device,
520
+ non_blocking=True)
521
+
522
+ def make_lora_inputs(
523
+ self, num_scheduled_tokens: np.ndarray
524
+ ) -> tuple[tuple[int, ...], tuple[int, ...], set[LoRARequest]]:
525
+ """
526
+ Given the num_scheduled_tokens for each request in the batch, return
527
+ datastructures used to activate the current LoRAs.
528
+ Returns:
529
+ 1. prompt_lora_mapping: A tuple of size self.num_reqs where,
530
+ prompt_lora_mapping[i] is the LoRA id to use for the ith prompt.
531
+ 2. token_lora_mapping: A tuple of size np.sum(num_scheduled_tokens)
532
+ where, token_lora_mapping[i] is the LoRA id to use for ith token.
533
+ 3. lora_requests: Set of relevant LoRA requests.
534
+ """
535
+
536
+ req_lora_mapping = self.request_lora_mapping[:self.num_reqs]
537
+ prompt_lora_mapping = tuple(req_lora_mapping)
538
+ token_lora_mapping = tuple(
539
+ req_lora_mapping.repeat(num_scheduled_tokens))
540
+ active_lora_requests: set[LoRARequest] = set(
541
+ self.lora_id_to_lora_request.values())
542
+
543
+ return prompt_lora_mapping, token_lora_mapping, active_lora_requests
544
+
545
+ @property
546
+ def num_reqs(self) -> int:
547
+ return len(self.req_id_to_index)
548
+
549
+ @property
550
+ def all_greedy(self) -> bool:
551
+ return len(self.random_reqs) == 0
552
+
553
+ @property
554
+ def all_random(self) -> bool:
555
+ return len(self.greedy_reqs) == 0
556
+
557
+ @property
558
+ def no_top_p(self) -> bool:
559
+ return len(self.top_p_reqs) == 0
560
+
561
+ @property
562
+ def no_top_k(self) -> bool:
563
+ return len(self.top_k_reqs) == 0
564
+
565
+ @property
566
+ def no_min_p(self) -> bool:
567
+ return len(self.min_p_reqs) == 0
568
+
569
+ @property
570
+ def no_penalties(self) -> bool:
571
+ return (len(self.presence_penalties_reqs) == 0
572
+ and len(self.frequency_penalties_reqs) == 0
573
+ and len(self.repetition_penalties_reqs) == 0)
574
+
575
+ @property
576
+ def max_num_logprobs(self) -> Optional[int]:
577
+ return max(self.num_logprobs.values()) if self.num_logprobs else None
578
+
579
+ @property
580
+ def no_prompt_logprob(self) -> bool:
581
+ return not self.num_prompt_logprobs
582
+
583
+ @property
584
+ def no_allowed_token_ids(self) -> bool:
585
+ return len(self.has_allowed_token_ids) == 0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/tpu_model_runner.py ADDED
@@ -0,0 +1,2033 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ import bisect
4
+ import gc
5
+ import time
6
+ from typing import TYPE_CHECKING, Any, Literal, Optional, Union, cast
7
+ from unittest.mock import patch
8
+
9
+ import numpy as np
10
+ import torch
11
+ import torch.nn as nn
12
+ # TPU XLA related
13
+ import torch_xla.core.xla_model as xm
14
+ import torch_xla.distributed.spmd as xs
15
+ import torch_xla.runtime as xr
16
+
17
+ import vllm.envs as envs
18
+ from vllm.attention import Attention
19
+ from vllm.attention.backends.abstract import AttentionType
20
+ from vllm.attention.layers.chunked_local_attention import ChunkedLocalAttention
21
+ from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher
22
+ from vllm.config import (ParallelConfig, VllmConfig,
23
+ get_layers_from_vllm_config, update_config)
24
+ from vllm.distributed.kv_transfer import (get_kv_transfer_group,
25
+ has_kv_transfer_group)
26
+ from vllm.forward_context import set_forward_context
27
+ from vllm.logger import init_logger
28
+ from vllm.lora.layers import BaseLayerWithLoRA
29
+ from vllm.model_executor.model_loader import get_model_loader
30
+ from vllm.model_executor.model_loader.tpu import TPUModelLoader
31
+ from vllm.model_executor.models.interfaces import supports_transcription
32
+ from vllm.model_executor.models.interfaces_base import (
33
+ is_pooling_model, is_text_generation_model)
34
+ from vllm.multimodal import MULTIMODAL_REGISTRY
35
+ from vllm.multimodal.inputs import (BatchedTensorInputs, MultiModalKwargsItem,
36
+ PlaceholderRange)
37
+ from vllm.multimodal.utils import group_mm_kwargs_by_modality
38
+ from vllm.sequence import IntermediateTensors
39
+ from vllm.tasks import GenerationTask, PoolingTask, SupportedTask
40
+ from vllm.utils import (LayerBlockType, cdiv, is_pin_memory_available,
41
+ prev_power_of_2)
42
+ from vllm.v1.attention.backends.pallas import (TPU_STR_DTYPE_TO_TORCH_DTYPE,
43
+ PallasAttentionBackend,
44
+ PallasMetadata,
45
+ get_page_size_bytes)
46
+ from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec,
47
+ KVCacheConfig, KVCacheSpec,
48
+ SlidingWindowSpec)
49
+ from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsLists,
50
+ LogprobsTensors, ModelRunnerOutput)
51
+ from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata
52
+ from vllm.v1.sample.tpu.sampler import Sampler as TPUSampler
53
+ from vllm.v1.worker.kv_connector_model_runner_mixin import (
54
+ KVConnectorModelRunnerMixin, KVConnectorOutput)
55
+ from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
56
+ from vllm.v1.worker.tpu_input_batch import CachedRequestState, InputBatch
57
+
58
+ from .utils import (MultiModalBudget, bind_kv_cache,
59
+ initialize_kv_cache_for_kv_sharing,
60
+ sanity_check_mm_encoder_outputs)
61
+
62
+ if TYPE_CHECKING:
63
+ from vllm.v1.core.sched.output import SchedulerOutput
64
+
65
+ logger = init_logger(__name__)
66
+
67
# NOTE(review): sentinel token id; appears to mark unfilled/padded token
# slots — usage not visible in this chunk, confirm against callers.
INVALID_TOKEN_ID = -1
# Smallest output size
# Lower bound on padded batch sizes: __init__ uses it both as the floor for
# max_num_reqs and as min_req_size for _get_req_paddings.
MIN_NUM_SEQS = 8
70
+
71
+
72
+ #########################################################
73
+ # Ways to avoid recompilation
74
+ #########################################################
75
+ #
76
+ # The model executor has two primary components:
77
+ # 1. preparing the model and sampler inputs
78
+ # 2. executing the model and sampler.
79
+ # The core idea is to avoid any TPU computation during input preparation. For
80
+ # better compilation tracking and increased flexibility, the model execution and
81
+ # sampler are divided into several distinct components.
82
+ #
83
+ # Below are the detailed steps:
84
+ #
85
+ # Step 1
86
+ # It is recommended to avoid TPU operations when preparing the model and sampler
87
+ # inputs. CPU tensors can be prepared and transferred to the XLA device using
88
+ # cpu_tensor.to(xla_device), which only triggers CPU to TPU transfers and avoids
89
+ # compilation.
90
+ #
91
+ # Step 2
92
+ # The TPU execution should be decomposed into subgraphs (4 at the moment):
93
+ # 1. the main model
94
+ # 2. selecting hidden states for each request
95
+ # 3. sampler
96
+ # 4. encoder.
97
+ # Each subgraph should be decorated in a torch.compile. This is used to make
98
+ # sure that we have the same subgraph topology in both dummy_run and
99
+ # execute_model. The results from these subgraphs should either be passed to
100
+ # other subgraphs, or transferred from TPU to CPU using xla_tensor.cpu() for
101
+ # subsequent processing on the CPU.
102
+ #
103
+ # Step 3
104
+ # The dummy_run should be comprehensive, ensuring all potential input shapes and
105
+ # branch predictions are included as subgraph inputs to facilitate
106
+ # pre-compilation.
107
+ class TPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin):
108
+
109
    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
        original_parallel_config: Optional[ParallelConfig] = None,
    ):
        """Set up config handles, padding schedules, and CPU-side buffers.

        No TPU computation is performed here; tensors are allocated on the
        CPU and transferred/compiled lazily (see the recompilation notes
        above the class). `original_parallel_config` is stored as-is.
        """
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.cache_config = vllm_config.cache_config
        self.lora_config = vllm_config.lora_config
        self.load_config = vllm_config.load_config
        self.parallel_config = vllm_config.parallel_config
        self.original_parallel_config = original_parallel_config
        self.scheduler_config = vllm_config.scheduler_config
        self.speculative_config = vllm_config.speculative_config
        self.observability_config = vllm_config.observability_config
        self.device_config = vllm_config.device_config

        # Short local aliases used below.
        model_config = self.model_config
        cache_config = self.cache_config
        scheduler_config = self.scheduler_config
        parallel_config = self.parallel_config
        self.device = device
        self.check_recompilation = envs.VLLM_XLA_CHECK_RECOMPILATION

        # SPMD Related
        self.use_spmd = envs.VLLM_XLA_USE_SPMD
        if self.use_spmd:
            num_devices = xr.global_runtime_device_count()
            mesh_shape = (num_devices, 1)
            device_ids = np.array(range(num_devices))
            self.mesh = xs.Mesh(device_ids, mesh_shape, ('x', 'y'))

        self.enforce_eager = model_config.enforce_eager

        # Baseline the XLA graph counter before any compilation happens.
        self.num_xla_graphs = 0
        self._update_num_xla_graphs("init")

        self.pin_memory = is_pin_memory_available()
        self.dtype = self.model_config.dtype
        # "auto" means: mirror the model dtype for the KV cache; otherwise
        # map the configured string through the TPU dtype table.
        if cache_config.cache_dtype == "auto":
            model_dtype = self.dtype
            if isinstance(model_dtype, str):
                self.kv_cache_dtype = TPU_STR_DTYPE_TO_TORCH_DTYPE[model_dtype]
            else:
                self.kv_cache_dtype = model_dtype
        else:
            self.kv_cache_dtype = TPU_STR_DTYPE_TO_TORCH_DTYPE[
                cache_config.cache_dtype]
        self._hidden_states_dtype = self.dtype

        self.sliding_window = model_config.get_sliding_window()
        self.block_size = cache_config.block_size
        self.max_model_len = model_config.max_model_len
        self.most_model_len = envs.VLLM_TPU_MOST_MODEL_LEN
        self.max_num_blocks_per_req = cdiv(self.max_model_len, self.block_size)
        self.num_blocks_per_most_len_req = cdiv(
            self.most_model_len,
            self.block_size) if self.most_model_len is not None else None
        # InputBatch needs to work with sampling tensors greater than padding
        # to avoid dynamic shapes. Also, avoid suboptimal alignment.
        self.max_num_reqs = max(scheduler_config.max_num_seqs, MIN_NUM_SEQS)
        self.num_tokens_paddings = _get_token_paddings(
            min_token_size=16,
            max_token_size=scheduler_config.max_num_batched_tokens,
            padding_gap=envs.VLLM_TPU_BUCKET_PADDING_GAP)
        # In case `max_num_tokens < max(num_tokens_paddings)` use the actual
        # padded max value to pre-allocate data structures and pre-compile.
        self.max_num_tokens = self.num_tokens_paddings[-1]

        # Model-related.
        self.num_attn_layers = model_config.get_num_layers_by_block_type(
            parallel_config, LayerBlockType.attention)
        self.num_query_heads = model_config.get_num_attention_heads(
            parallel_config)
        self.num_kv_heads = model_config.get_num_kv_heads(parallel_config)
        self.head_size = model_config.get_head_size()
        self.hidden_size = model_config.get_hidden_size()
        self.vocab_size = model_config.get_vocab_size()

        # LoRA may extend the vocabulary with adapter-specific tokens.
        if self.lora_config is not None:
            self.vocab_size += self.lora_config.lora_extra_vocab_size

        # Multi-modal data support
        self.mm_registry = MULTIMODAL_REGISTRY
        self.uses_mrope = model_config.uses_mrope
        self.supports_mm_inputs = self.mm_registry.supports_multimodal_inputs(
            model_config)
        # TODO: Support M-RoPE (e.g., Qwen2-VL)
        assert not self.uses_mrope, "TPU does not support M-RoPE yet."

        self._num_slices_per_kv_cache_update_block = \
            _get_num_slices_per_kv_cache_update_block(get_page_size_bytes(
                block_size=self.block_size,
                num_kv_heads=self.num_kv_heads,
                head_size=self.head_size,
                kv_cache_dtype=self.kv_cache_dtype,
            ))

        # Lazy initialization
        self.model: nn.Module  # Set after load_model
        self.kv_caches: list[torch.Tensor] = []
        # req_id -> (input_id -> encoder_output)
        self.encoder_cache: dict[str, dict[int, torch.Tensor]] = {}

        # Request states.
        self.requests: dict[str, CachedRequestState] = {}

        # Initialize input batch early to avoid AttributeError in _update_states
        # NOTE: InputBatch gets the *base* vocab size (without the LoRA extra
        # tokens added above).
        self.input_batch = InputBatch(
            max_num_reqs=self.max_num_reqs,
            max_model_len=self.max_model_len,
            max_num_batched_tokens=self.max_num_tokens,
            device=self.device,
            pin_memory=self.pin_memory,
            vocab_size=self.model_config.get_vocab_size(),
            block_sizes=[self.block_size],
        )

        # Cached torch/numpy tensor
        # The pytorch tensor and numpy array share the same buffer.
        # Sometimes the numpy op is faster so we create both.
        self.input_ids_cpu = torch.zeros(self.max_num_tokens,
                                         dtype=torch.int32,
                                         device="cpu")

        self.positions_cpu = torch.zeros(self.max_num_tokens,
                                         dtype=torch.int32,
                                         device="cpu")
        self.positions_np = self.positions_cpu.numpy()
        self.block_table_cpu = torch.zeros(
            (self.max_num_reqs, self.max_num_blocks_per_req),
            dtype=torch.int32,
            device="cpu")
        # adjust num_reqs to avoid SMEM OOM.
        self.num_reqs_most_model_len = min(
            PallasAttentionBackend.get_max_num_seqs(self.most_model_len,
                                                    self.block_size),
            self.max_num_reqs) if self.most_model_len is not None else None
        self.num_reqs_max_model_len = min(
            PallasAttentionBackend.get_max_num_seqs(self.max_model_len,
                                                    self.block_size),
            self.max_num_reqs)
        self.query_start_loc_cpu = torch.zeros(self.max_num_tokens + 1,
                                               dtype=torch.int32,
                                               device="cpu",
                                               pin_memory=self.pin_memory)
        self.query_start_loc_np = self.query_start_loc_cpu.numpy()

        self.seq_lens_cpu = torch.zeros(self.max_num_tokens,
                                        dtype=torch.int32,
                                        device="cpu",
                                        pin_memory=self.pin_memory)
        self.seq_lens_np = self.seq_lens_cpu.numpy()

        # Range tensor with values [0 .. self.max_num_tokens - 1].
        # Used to initialize positions / context_lens / seq_lens
        # Keep in int64 to avoid overflow with long context
        self.arange_np = np.arange(self.max_num_tokens, dtype=np.int64)
        self.num_reqs_paddings = _get_req_paddings(
            min_req_size=MIN_NUM_SEQS, max_req_size=self.max_num_reqs)

        # Layer pairings for cross-layer KV sharing.
        # If an Attention layer `layer_name` is in the keys of this dict, it
        # means this layer will perform attention using the keys and values
        # from the KV cache of `shared_kv_cache_layers[layer_name]`.
        self.shared_kv_cache_layers: dict[str, str] = {}

        # tensors for structured decoding
        # One 32-bit word per 32 vocab entries (bitmask over the vocabulary).
        self.grammar_bitmask_cpu = torch.zeros(
            (self.max_num_reqs, cdiv(self.vocab_size, 32)),
            dtype=torch.int32,
            device="cpu",
            pin_memory=self.pin_memory)
        self.require_structured_out_cpu = torch.zeros(
            (self.max_num_reqs, 1),
            dtype=torch.bool,
            device="cpu",
            pin_memory=self.pin_memory)
        self.structured_decode_arange = torch.arange(
            0, 32, device="cpu", pin_memory=self.pin_memory)

        self.mm_budget = (MultiModalBudget(
            self.model_config,
            self.scheduler_config,
            self.mm_registry,
            max_model_len=self.max_model_len,
            max_num_reqs=self.max_num_reqs,
        ) if self.supports_mm_inputs else None)

        # Under SPMD the sampler runs uncompiled; otherwise pre-wrap it with
        # the openxla backend to keep sampling in a single traced graph.
        if not self.use_spmd:
            self.sample_from_logits_func = torch.compile(
                self.sample_from_logits,
                backend="openxla",
                fullgraph=True,
                dynamic=False)
        else:
            self.sample_from_logits_func = self.sample_from_logits
307
+
308
+ def _update_num_xla_graphs(self, case_str):
309
+ check_comp = self.check_recompilation and not self.enforce_eager
310
+ if not check_comp:
311
+ return
312
+
313
+ total_cached_graphs = xr.get_num_cached_compilation_graph()
314
+ new_compiled_graphs = total_cached_graphs - self.num_xla_graphs
315
+ if new_compiled_graphs == 0:
316
+ return
317
+
318
+ logger.info("Add new %d compiled XLA graphs due to %s",
319
+ new_compiled_graphs, case_str)
320
+ self.num_xla_graphs += new_compiled_graphs
321
+
322
+ def _verify_num_xla_graphs(self, case_str):
323
+ check_comp = self.check_recompilation and not self.enforce_eager
324
+ if not check_comp:
325
+ return
326
+
327
+ curr_cached_graph = xr.get_num_cached_compilation_graph()
328
+ assert self.num_xla_graphs == curr_cached_graph, (
329
+ "Recompilation after warm up is detected during {}."
330
+ " num_xla_graphs = {} curr_cached_graph = {}".format(
331
+ case_str, self.num_xla_graphs, curr_cached_graph))
332
+
333
    def _update_states(self, scheduler_output: "SchedulerOutput") -> bool:
        """Update the cached states and the persistent batch with the scheduler
        output.

        The updated states are used by the `_prepare_inputs` function to create
        the input GPU tensors for the model.

        Returns:
            True if there is a new/resumed/paused/finished request.
            If False, we can skip copying SamplingMetadata to the GPU.
        """
        # Remove finished requests from the cached states.
        for req_id in scheduler_output.finished_req_ids:
            self.requests.pop(req_id, None)
            self.encoder_cache.pop(req_id, None)

        # Remove the finished requests from the persistent batch.
        # NOTE(woosuk): There could be an edge case where finished_req_ids and
        # scheduled_req_ids overlap. This happens when a request is aborted and
        # then resubmitted with the same ID. In this case, we treat them as two
        # distinct requests - clearing the cached states for the first request
        # and handling the second as a new request.
        removed_req_indices: list[int] = []
        for req_id in scheduler_output.finished_req_ids:
            req_index = self.input_batch.remove_request(req_id)
            if req_index is not None:
                removed_req_indices.append(req_index)

        # Free the cached encoder outputs.
        for req_id, input_id in scheduler_output.free_encoder_input_ids:
            encoder_outputs = self.encoder_cache.get(req_id)
            if encoder_outputs is not None:
                encoder_outputs.pop(input_id, None)
                # Drop the per-request dict once its last entry is freed.
                if not encoder_outputs:
                    self.encoder_cache.pop(req_id, None)

        # Remove the unscheduled requests from the persistent batch.
        # NOTE(woosuk): The unscheduled requests are either preempted requests
        # or running requests that are not scheduled in this step. We remove
        # them from the persistent batch but keep their cached states since
        # they will be scheduled again sometime in the future.
        scheduled_req_ids = scheduler_output.num_scheduled_tokens.keys()
        cached_req_ids = self.input_batch.req_id_to_index.keys()
        unscheduled_req_ids = cached_req_ids - scheduled_req_ids
        # NOTE(woosuk): The persistent batch optimization assumes that
        # consecutive batches contain mostly the same requests. If batches
        # have low request overlap (e.g., alternating between two distinct
        # sets of requests), this optimization becomes very inefficient.
        for req_id in unscheduled_req_ids:
            req_index = self.input_batch.remove_request(req_id)
            assert req_index is not None
            removed_req_indices.append(req_index)

        req_ids_to_add: list[str] = []
        # Add new requests to the cached states.
        for new_req_data in scheduler_output.scheduled_new_reqs:
            assert new_req_data.sampling_params is not None,\
                "Pooling is not supported in TPU yet"
            req_id = new_req_data.req_id
            sampling_params = new_req_data.sampling_params

            self.requests[req_id] = CachedRequestState(
                req_id=req_id,
                prompt_token_ids=new_req_data.prompt_token_ids,
                mm_kwargs=new_req_data.mm_kwargs,
                mm_positions=new_req_data.mm_positions,
                sampling_params=sampling_params,
                pooling_params=None,
                generator=None,
                block_ids=new_req_data.block_ids,
                num_computed_tokens=new_req_data.num_computed_tokens,
                output_token_ids=[],
                lora_request=new_req_data.lora_request,
            )

            req_ids_to_add.append(req_id)

        # Update the states of the running/resumed requests.
        req_data = scheduler_output.scheduled_cached_reqs
        for i, req_id in enumerate(req_data.req_ids):
            req_state = self.requests[req_id]
            num_computed_tokens = req_data.num_computed_tokens[i]
            new_block_ids = req_data.new_block_ids[i]
            resumed_from_preemption = req_data.resumed_from_preemption[i]

            # Update the cached states.
            req_state.num_computed_tokens = num_computed_tokens
            if not resumed_from_preemption:
                # Append the new blocks to the existing block IDs.
                for block_ids, new_ids in zip(req_state.block_ids,
                                              new_block_ids):
                    block_ids.extend(new_ids)
            else:
                # The request is resumed from preemption.
                # Replace the existing block IDs with the new ones.
                req_state.block_ids = new_block_ids

            req_index = self.input_batch.req_id_to_index.get(req_id)
            if req_index is None:
                # The request is not in the persistent batch.
                # The request was either preempted and resumed later, or was not
                # scheduled in the previous step and needs to be added again.
                req_ids_to_add.append(req_id)
                continue

            # Update the persistent batch.
            self.input_batch.num_computed_tokens_cpu[req_index] = (
                num_computed_tokens)
            self.input_batch.block_table.append_row(new_block_ids, req_index)

        # Add the new or resumed requests to the persistent batch.
        # The smaller empty indices are filled first.
        removed_req_indices = sorted(removed_req_indices, reverse=True)
        for req_id in req_ids_to_add:
            req_state = self.requests[req_id]
            if removed_req_indices:
                # Fill the empty index.
                req_index = removed_req_indices.pop()
            else:
                # Append to the end.
                req_index = None
            self.input_batch.add_request(req_state, req_index)

        # Condense the batched states if there are empty indices.
        if removed_req_indices:
            self.input_batch.condense(removed_req_indices)

        return len(unscheduled_req_ids) > 0 or len(req_ids_to_add) > 0
461
+
462
+ def get_model(self) -> nn.Module:
463
+ return self.model
464
+
465
+ def get_supported_generation_tasks(self) -> list[GenerationTask]:
466
+ model = self.get_model()
467
+ supported_tasks = list[GenerationTask]()
468
+
469
+ if is_text_generation_model(model):
470
+ supported_tasks.append("generate")
471
+
472
+ if supports_transcription(model):
473
+ if model.supports_transcription_only:
474
+ return ["transcription"]
475
+
476
+ supported_tasks.append("transcription")
477
+
478
+ return supported_tasks
479
+
480
+ def get_supported_pooling_tasks(self) -> list[PoolingTask]:
481
+ model = self.get_model()
482
+ if not is_pooling_model(model):
483
+ return []
484
+
485
+ return list(model.pooler.get_supported_tasks())
486
+
487
+ def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
488
+ tasks = list[SupportedTask]()
489
+
490
+ if self.model_config.runner_type == "generate":
491
+ tasks.extend(self.get_supported_generation_tasks())
492
+ if self.model_config.runner_type == "pooling":
493
+ tasks.extend(self.get_supported_pooling_tasks())
494
+
495
+ return tuple(tasks)
496
+
497
    def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
        """
        Generates the KVCacheSpec by parsing the kv cache format from each
        Attention module in the static forward context.
        Returns:
            KVCacheSpec: A dictionary mapping layer names to their KV cache
            format. Layers that do not need KV cache are not included.
        """

        layers = get_layers_from_vllm_config(self.vllm_config, Attention)
        block_size = self.vllm_config.cache_config.block_size
        kv_cache_spec: dict[str, KVCacheSpec] = {}
        for layer_name, attn_module in layers.items():
            if (kv_tgt_layer :=
                    attn_module.kv_sharing_target_layer_name) is not None:
                # The layer doesn't need its own KV cache and will use that of
                # the target layer. We skip creating a KVCacheSpec for it, so
                # that KV cache management logic will act as this layer does
                # not exist, and doesn't allocate KV cache for the layer. This
                # enables the memory saving of cross-layer kv sharing, allowing
                # a given amount of memory to accommodate longer context lengths
                # or enable more requests to be processed simultaneously.
                self.shared_kv_cache_layers[layer_name] = kv_tgt_layer
                continue

            if attn_module.attn_type == AttentionType.DECODER:
                if isinstance(attn_module, ChunkedLocalAttention):
                    # irope/chunked-local layers are treated as global
                    # attention below (full-attention spec is emitted).
                    logger.warning_once(
                        "Using irope in Pallas is not supported yet, it "
                        "will fall back to global attention for long context.")
                if attn_module.sliding_window is not None:
                    # Sliding-window layers get a window-aware spec so the
                    # KV cache manager can cap their block usage.
                    kv_cache_spec[layer_name] = SlidingWindowSpec(
                        block_size=block_size,
                        num_kv_heads=attn_module.num_kv_heads,
                        head_size=attn_module.head_size,
                        dtype=self.kv_cache_dtype,
                        sliding_window=attn_module.sliding_window,
                        use_mla=False,
                    )
                else:
                    kv_cache_spec[layer_name] = FullAttentionSpec(
                        block_size=block_size,
                        num_kv_heads=attn_module.num_kv_heads,
                        head_size=attn_module.head_size,
                        dtype=self.kv_cache_dtype,
                        use_mla=False,
                    )
            elif attn_module.attn_type in (AttentionType.ENCODER,
                                           AttentionType.ENCODER_ONLY):
                # encoder-only attention does not need KV cache.
                continue
            elif attn_module.attn_type == AttentionType.ENCODER_DECODER:
                raise NotImplementedError
            else:
                raise ValueError(
                    f"Unknown attention type: {attn_module.attn_type}")

        return kv_cache_spec
556
    def _get_slot_mapping_metadata(self, num_reqs,
                                   num_scheduled_tokens_per_req):
        """
        Computes metadata for mapping slots to blocks in the key-value (KV)
        cache for a batch of requests.

        This function determines, for each request in the batch, how the
        scheduled tokens are distributed across memory blocks, and generates
        metadata needed to map slices of tokens to their corresponding positions
        in the KV cache.

        Args:
            num_reqs (int): Number of requests in the current batch.
            num_scheduled_tokens_per_req (int or np.ndarray): Number of tokens
                to be scheduled for each request.

        Returns:
            np.ndarray: A 2D array of shape (total_block_len, 3), where each row
            contains:
                - kv_cache_start_index (int): The starting index in the KV cache
                    for the corresponding slice.
                - new_kv_start_index (int): The starting index in the new KV
                    cache for the corresponding slice.
                - slice_len (int): The length of the slice.
        """
        # Per-request token span scheduled this step:
        # [num_computed_tokens, num_computed_tokens + num_scheduled).
        slices_start = self.input_batch.num_computed_tokens_cpu[:num_reqs]
        slices_end = self.input_batch.num_computed_tokens_cpu[:num_reqs] + \
            num_scheduled_tokens_per_req
        # First/last block each span touches within its block-table row.
        local_block_start_idx = slices_start // self.block_size
        local_block_end_idx = (slices_end - 1) // self.block_size
        no_repeat_req_indices = self.arange_np[:num_reqs]
        # Flatten (req, local block) into an index over the flattened block
        # table, where each row is max_num_blocks_per_req entries wide.
        global_block_start_idx = (
            no_repeat_req_indices * self.max_num_blocks_per_req +
            local_block_start_idx)
        block_lens = local_block_end_idx - local_block_start_idx + 1
        global_block_start_idx = np.repeat(global_block_start_idx, block_lens)
        slice_arange = np.concatenate([self.arange_np[:n] for n in block_lens])
        global_block_indices = global_block_start_idx + slice_arange
        block_table_cpu = self.input_batch.block_table[0].get_cpu_tensor()
        # Physical block number for every touched (req, block) pair.
        block_numbers = block_table_cpu.flatten()[global_block_indices].numpy()
        total_block_len = np.sum(block_lens)
        # Start every slice as a full block [0, block_size); the first and
        # last slice of each request are then trimmed to the span boundaries.
        slot_mapping_slices = np.repeat(np.array([[0, self.block_size]],
                                                 dtype=np.int32),
                                        total_block_len,
                                        axis=0)
        cu_block_lens = np.zeros(len(block_lens) + 1, dtype=np.int32)
        np.cumsum(block_lens, out=cu_block_lens[1:])
        for req_idx in range(num_reqs):
            # Trim the first slice to the span's start offset in its block.
            slot_mapping_slices[cu_block_lens[req_idx]][
                0] = slices_start[req_idx] % self.block_size
            # Trim the last slice to the span's end offset (exclusive).
            slot_mapping_slices[
                cu_block_lens[req_idx + 1] -
                1][1] = (slices_end[req_idx] - 1) % self.block_size + 1
        slice_lens = slot_mapping_slices[:, 1] - slot_mapping_slices[:, 0]
        cu_slices_lens = np.zeros(len(slice_lens) + 1, dtype=np.int32)
        np.cumsum(slice_lens, out=cu_slices_lens[1:])
        # Absolute slot index in the paged KV cache for each slice start.
        kv_cache_start_indices = slot_mapping_slices[:, 0] + \
            (block_numbers * self.block_size)
        # Offsets into the freshly-computed KV tensor (cumulative lengths).
        new_kv_start_indices = cu_slices_lens[:-1]
        slot_mapping_metadata = np.stack(
            [kv_cache_start_indices, new_kv_start_indices, slice_lens], axis=1)
        return slot_mapping_metadata
619
+ def _prepare_inputs(self, scheduler_output: "SchedulerOutput",
620
+ start_index: int):
621
+ assert scheduler_output.total_num_scheduled_tokens > 0
622
+ num_reqs = self.input_batch.num_reqs
623
+ assert num_reqs > 0
624
+ assert start_index < num_reqs
625
+
626
+ # Get the number of scheduled tokens for each request.
627
+ use_max_model_len = self.most_model_len is None
628
+ num_scheduled_tokens_per_req = []
629
+ max_num_scheduled_tokens_all_reqs = 0
630
+ end_index = start_index
631
+
632
+ # Use either most_model_len or max_model_len depending on request size.
633
+ for i in range(start_index, num_reqs):
634
+ req_id = self.input_batch.req_ids[i]
635
+ assert req_id is not None
636
+ num_tokens = scheduler_output.num_scheduled_tokens[req_id]
637
+ if not use_max_model_len and num_tokens > self.most_model_len:
638
+ use_max_model_len = True
639
+ num_scheduled_tokens_per_req.append(num_tokens)
640
+ if use_max_model_len:
641
+ if len(num_scheduled_tokens_per_req) > self.num_reqs_max_model_len:
642
+ num_scheduled_tokens_per_req = \
643
+ num_scheduled_tokens_per_req[:self.num_reqs_max_model_len]
644
+ end_index = start_index + self.num_reqs_max_model_len
645
+ else:
646
+ end_index = num_reqs
647
+ else:
648
+ if len(num_scheduled_tokens_per_req
649
+ ) > self.num_reqs_most_model_len:
650
+ num_scheduled_tokens_per_req = \
651
+ num_scheduled_tokens_per_req[:self.num_reqs_most_model_len]
652
+ end_index = start_index + self.num_reqs_most_model_len
653
+ else:
654
+ end_index = num_reqs
655
+ max_num_scheduled_tokens_all_reqs = max(num_scheduled_tokens_per_req)
656
+ num_scheduled_tokens_per_req = np.array(num_scheduled_tokens_per_req,
657
+ dtype=np.int32)
658
+ total_num_scheduled_tokens = sum(num_scheduled_tokens_per_req)
659
+ assert max_num_scheduled_tokens_all_reqs > 0
660
+
661
+ num_reqs = len(num_scheduled_tokens_per_req)
662
+
663
+ # Get request indices.
664
+ # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
665
+ # For each scheduled token, what are the corresponding req index.
666
+ req_indices = np.repeat(self.arange_np[:num_reqs],
667
+ num_scheduled_tokens_per_req)
668
+
669
+ # Get batched arange.
670
+ # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
671
+ # For each scheduled token, what is its position in corresponding req.
672
+ arange = np.concatenate(
673
+ [self.arange_np[:n] for n in num_scheduled_tokens_per_req])
674
+
675
+ # Get positions.
676
+ positions_np = self.positions_np[:total_num_scheduled_tokens]
677
+ np.add(self.input_batch.num_computed_tokens_cpu[req_indices],
678
+ arange,
679
+ out=positions_np)
680
+
681
+ # Get token indices.
682
+ # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
683
+ # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2]
684
+ # where M is the max_model_len.
685
+ token_indices = (positions_np +
686
+ req_indices * self.input_batch.token_ids_cpu.shape[1])
687
+
688
+ # NOTE(woosuk): We use torch.index_select instead of np.take here
689
+ # because torch.index_select is much faster than np.take for large
690
+ # tensors.
691
+ torch.index_select(self.input_batch.token_ids_cpu_tensor.flatten(),
692
+ 0,
693
+ torch.from_numpy(token_indices),
694
+ out=self.input_ids_cpu[:total_num_scheduled_tokens])
695
+
696
+ # Prepare the attention metadata.
697
+ self.query_start_loc_np[0] = 0
698
+ np.cumsum(num_scheduled_tokens_per_req,
699
+ out=self.query_start_loc_np[1:num_reqs + 1])
700
+ self.query_start_loc_np[num_reqs + 1:] = 1
701
+
702
+ self.seq_lens_np[:num_reqs] = (
703
+ self.input_batch.num_computed_tokens_cpu[:num_reqs] +
704
+ num_scheduled_tokens_per_req)
705
+
706
+ # Do the padding and copy the tensors to the TPU.
707
+ padded_total_num_scheduled_tokens = _get_padded_token_len(
708
+ self.num_tokens_paddings, total_num_scheduled_tokens)
709
+ # Zero out to avoid spurious values from prev iteration (last cp chunk)
710
+ self.input_ids_cpu[
711
+ total_num_scheduled_tokens:padded_total_num_scheduled_tokens] = 0
712
+ self.input_ids = self.input_ids_cpu[:
713
+ padded_total_num_scheduled_tokens].to(
714
+ self.device)
715
+ self.position_ids = self.positions_cpu[:
716
+ padded_total_num_scheduled_tokens].to(
717
+ self.device)
718
+ if use_max_model_len:
719
+ block_tables = self.block_table_cpu[:self.num_reqs_max_model_len, :
720
+ self.max_num_blocks_per_req]
721
+ block_tables[:num_reqs, :self.max_num_blocks_per_req] = (
722
+ self.input_batch.block_table[0].get_cpu_tensor()[:num_reqs])
723
+ query_start_loc = self.query_start_loc_cpu[:self.
724
+ num_reqs_max_model_len +
725
+ 1].to(self.device)
726
+ seq_lens = self.seq_lens_cpu[:self.num_reqs_max_model_len].to(
727
+ self.device)
728
+ else:
729
+ block_tables = self.block_table_cpu[:self.
730
+ num_reqs_most_model_len, :self.
731
+ num_blocks_per_most_len_req]
732
+ block_tables[:num_reqs, :self.num_blocks_per_most_len_req] = (
733
+ self.input_batch.block_table[0].get_cpu_tensor()
734
+ [:num_reqs, :self.num_blocks_per_most_len_req])
735
+ query_start_loc = self.query_start_loc_cpu[:self.
736
+ num_reqs_most_model_len +
737
+ 1].to(self.device)
738
+ seq_lens = self.seq_lens_cpu[:self.num_reqs_most_model_len].to(
739
+ self.device)
740
+ block_tables = block_tables.to(self.device)
741
+
742
+ # Calculate the slot mapping
743
+ slot_mapping_metadata = self._get_slot_mapping_metadata(
744
+ num_reqs, num_scheduled_tokens_per_req)
745
+ num_kv_update_slices = slot_mapping_metadata.shape[0]
746
+ padded_num_slices = _get_padded_num_kv_cache_update_slices(
747
+ padded_total_num_scheduled_tokens, self.max_num_reqs,
748
+ self.block_size)
749
+ slot_mapping_metadata = np.pad(
750
+ slot_mapping_metadata,
751
+ [[0, padded_num_slices - len(slot_mapping_metadata)], [0, 0]],
752
+ constant_values=0)
753
+ slot_mapping_metadata = np.transpose(slot_mapping_metadata)
754
+ slot_mapping_metadata = torch.tensor(slot_mapping_metadata,
755
+ device=self.device)
756
+
757
+ if self.lora_config is not None:
758
+ # We need to respect padding when activating LoRA adapters
759
+ padded_num_scheduled_tokens_per_req = np.copy(
760
+ num_scheduled_tokens_per_req
761
+ ) # Copying to avoid accidental state corruption bugs
762
+ padded_num_scheduled_tokens_per_req[-1] += \
763
+ padded_total_num_scheduled_tokens - total_num_scheduled_tokens
764
+
765
+ self.set_active_loras(self.input_batch,
766
+ padded_num_scheduled_tokens_per_req)
767
+
768
+ attn_metadata = PallasMetadata(
769
+ slot_mapping=slot_mapping_metadata,
770
+ block_tables=block_tables,
771
+ context_lens=seq_lens,
772
+ query_start_loc=query_start_loc,
773
+ num_seqs=torch.tensor([num_reqs],
774
+ dtype=torch.int32,
775
+ device=self.device),
776
+ num_kv_update_slices=torch.tensor([num_kv_update_slices],
777
+ dtype=torch.int32,
778
+ device=self.device),
779
+ num_slices_per_kv_cache_update_block=self.
780
+ _num_slices_per_kv_cache_update_block,
781
+ )
782
+ # NOTE(woosuk): Due to chunked prefills, there can be at most 1 partial
783
+ # request in the batch. While we should not sample any token from this
784
+ # partial request, we do so for simplicity. We will ignore the sampled
785
+ # token from the partial request.
786
+ # TODO: Support prompt logprobs.
787
+ padded_num_reqs = _get_padded_num_reqs_with_upper_limit(
788
+ num_reqs, self.max_num_reqs)
789
+ # Indices at which we sample (positions of last token in the sequence).
790
+ # Padded to avoid recompiling when `num_reqs` varies.
791
+ logits_indices = self.query_start_loc_cpu[1:padded_num_reqs + 1] - 1
792
+ logits_indices = logits_indices.to(self.device)
793
+
794
+ if self.lora_config is not None:
795
+ # We need to respect padding when activating LoRA adapters
796
+ padded_num_scheduled_tokens_per_req = np.copy(
797
+ num_scheduled_tokens_per_req
798
+ ) # Copying to avoid accidental state corruption bugs
799
+ padded_num_scheduled_tokens_per_req[-1] += \
800
+ padded_total_num_scheduled_tokens - total_num_scheduled_tokens
801
+
802
+ self.set_active_loras(self.input_batch,
803
+ padded_num_scheduled_tokens_per_req)
804
+
805
+ layer_names = get_layers_from_vllm_config(self.vllm_config,
806
+ Attention).keys()
807
+ per_layer_attn_metadata = {
808
+ layer_name: attn_metadata
809
+ for layer_name in layer_names
810
+ }
811
+ return per_layer_attn_metadata, logits_indices, padded_num_reqs,\
812
+ num_reqs, end_index
813
+
814
+ def _scatter_placeholders(
815
+ self,
816
+ embeds: torch.Tensor,
817
+ is_embed: Optional[torch.Tensor],
818
+ ) -> torch.Tensor:
819
+ if is_embed is None:
820
+ return embeds
821
+
822
+ placeholders = embeds.new_full(
823
+ (is_embed.shape[0], embeds.shape[-1]),
824
+ fill_value=torch.nan,
825
+ )
826
+ placeholders[is_embed] = embeds
827
+ return placeholders
828
+
829
+ def _gather_placeholders(
830
+ self,
831
+ placeholders: torch.Tensor,
832
+ is_embed: Optional[torch.Tensor],
833
+ ) -> torch.Tensor:
834
+ if is_embed is None:
835
+ return placeholders
836
+
837
+ return placeholders[is_embed]
838
+
839
    def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"):
        """Run the multimodal encoder on all scheduled encoder inputs and
        store the resulting embeddings in `self.encoder_cache`, keyed by
        (request id, input id)."""
        scheduled_encoder_inputs = scheduler_output.scheduled_encoder_inputs
        if not scheduled_encoder_inputs:
            return

        # Batch the multi-modal inputs.
        mm_kwargs = list[MultiModalKwargsItem]()
        req_ids_pos = list[tuple[str, int, PlaceholderRange]]()
        for req_id, encoder_input_ids in scheduled_encoder_inputs.items():
            req_state = self.requests[req_id]

            for mm_input_id in encoder_input_ids:
                mm_kwargs.append(req_state.mm_kwargs[mm_input_id])
                req_ids_pos.append(
                    (req_id, mm_input_id, req_state.mm_positions[mm_input_id]))

        # Batch mm inputs as much as we can: if a request in the batch has
        # multiple modalities or a different modality than the previous one,
        # we process it separately to preserve item order.
        # FIXME(ywang96): This is a hacky way to deal with multiple modalities
        # in the same batch while still being able to benefit from batching
        # multimodal inputs. The proper solution should be reordering the
        # encoder outputs.
        encoder_outputs = []
        for _, num_items, mm_kwargs_group in group_mm_kwargs_by_modality(
                mm_kwargs,
                device=self.device,
                pin_memory=self.pin_memory,
        ):
            # Run the encoder.
            # `curr_group_outputs` is either of the following:
            # 1. A tensor of shape (num_items, feature_size, hidden_size)
            # in case feature_size is fixed across all multimodal items.
            # 2. A list or tuple (length: num_items) of tensors, each of shape
            # (feature_size, hidden_size) in case the feature size is dynamic
            # depending on the input multimodal items.
            # The surrounding mark_step calls fence the encoder into its own
            # XLA graph, separate from the decoder graph.
            xm.mark_step()
            curr_group_outputs = self.model.get_multimodal_embeddings(
                **mm_kwargs_group)
            xm.mark_step()

            sanity_check_mm_encoder_outputs(
                curr_group_outputs,
                expected_num_items=num_items,
            )

            if isinstance(curr_group_outputs, torch.Tensor):
                encoder_outputs.append(curr_group_outputs)
            else:
                assert isinstance(curr_group_outputs, (list, tuple))
                for output in curr_group_outputs:
                    encoder_outputs.append(output)

        # Cache the encoder outputs.
        # NOTE (NickLucche) here we diverge from logic in other runners, as we
        # assume to only have whole mm items to process. Hence we avoid the
        # intrinsic dynamism that `scatter_mm_placeholders` introduces.
        for (req_id, input_id, pos_info), output in zip(
                req_ids_pos,
                encoder_outputs,
        ):
            if req_id not in self.encoder_cache:
                self.encoder_cache[req_id] = {}
            assert pos_info.is_embed is None, "Expected all positions to be"\
                " contiguous and embeddings."
            self.encoder_cache[req_id][input_id] = output
906
    def _gather_mm_embeddings(
        self,
        scheduler_output: "SchedulerOutput",
    ) -> list[torch.Tensor]:
        """Collect cached encoder outputs needed by this step's scheduled
        token windows, in request/placeholder order."""
        mm_embeds: list[torch.Tensor] = []
        for req_id in self.input_batch.req_ids:
            num_scheduled_tokens = scheduler_output.num_scheduled_tokens[
                req_id]
            req_state = self.requests[req_id]
            num_computed_tokens = req_state.num_computed_tokens
            mm_positions = req_state.mm_positions
            # TODO unroll loop and assume/enforce --disable_chunked_mm_input
            # NOTE (NickLucche) here we diverge from logic in other runners, as
            # we assume to only have whole mm items to process. Hence we avoid
            # the intrinsic dynamism that `gather_mm_placeholders` introduces.
            for i, pos_info in enumerate(mm_positions):
                start_pos = pos_info.offset
                num_encoder_tokens = pos_info.length

                # The encoder output is needed if the two ranges overlap:
                # [num_computed_tokens,
                #  num_computed_tokens + num_scheduled_tokens) and
                # [start_pos, start_pos + num_encoder_tokens)
                if start_pos >= num_computed_tokens + num_scheduled_tokens:
                    # The encoder output is not needed in this step.
                    # mm_positions is ordered, so later ones aren't needed
                    # either.
                    break
                if start_pos + num_encoder_tokens <= num_computed_tokens:
                    # The encoder output is already processed and stored
                    # in the decoder's KV cache.
                    continue

                assert req_id in self.encoder_cache
                assert i in self.encoder_cache[req_id]
                assert pos_info.is_embed is None, "Expected all positions to"\
                    " be contiguous and embeddings."
                encoder_output = self.encoder_cache[req_id][i]
                mm_embeds.append(encoder_output)
        return mm_embeds
945
+ def _get_model_inputs(self, input_ids: torch.Tensor,
946
+ mm_embeds: list[torch.Tensor]):
947
+ if self.supports_mm_inputs:
948
+ # NOTE(woosuk): To unify token ids and soft tokens (vision
949
+ # embeddings), we always use embeddings (rather than token ids)
950
+ # as input to the multimodal model, even when the input is text.
951
+ inputs_embeds = self.model.get_input_embeddings(
952
+ input_ids=input_ids,
953
+ multimodal_embeddings=mm_embeds,
954
+ )
955
+ return None, inputs_embeds
956
+ else:
957
+ # For text-only models, we use token ids as input.
958
+ # While it is possible to use embeddings as input just like the
959
+ # multimodal models, it is not desirable for performance since
960
+ # then the embedding layer is not included in the CUDA graph.
961
+ return input_ids, None
962
+
963
    @torch.no_grad()
    def execute_model(
        self,
        scheduler_output: "SchedulerOutput",
        intermediate_tensors: Optional[IntermediateTensors] = None,
    ) -> ModelRunnerOutput:
        """Run one scheduler step on TPU: update batch state, run the
        (optionally chunked) forward+sampling loop, and assemble the
        ModelRunnerOutput. `intermediate_tensors` is currently unused here.
        """
        # Update cached state
        self._update_states(scheduler_output)
        if not scheduler_output.total_num_scheduled_tokens:
            if not has_kv_transfer_group():
                # Return empty ModelRunnerOutput if there's no work to do.
                return EMPTY_MODEL_RUNNER_OUTPUT

            return self.kv_connector_no_forward(scheduler_output,
                                                self.vllm_config)

        if self.supports_mm_inputs:
            # Run the multimodal encoder if any.
            self._execute_mm_encoder(scheduler_output)
            mm_embeds = self._gather_mm_embeddings(scheduler_output)
        else:
            mm_embeds = []
        # Fence pending XLA work before input preparation.
        xm.mark_step()
        # Prepare inputs, the requests might be split into multiple
        # executions, combine the result of each execution.
        start_index = 0
        combined_selected_tokens: list[torch.Tensor] = []
        combined_logprobs: list[LogprobsLists] = []

        # NOTE: setup current batch's metadata for kv connector.
        # Currently, only verified with NixlConnector
        with set_forward_context(None, self.vllm_config):
            self.maybe_setup_kv_connector(scheduler_output)

        while start_index < self.input_batch.num_reqs:
            attn_metadata, logits_indices, padded_num_reqs, num_reqs,\
                end_index = self._prepare_inputs(scheduler_output, start_index)
            input_ids, inputs_embeds = self._get_model_inputs(
                self.input_ids, mm_embeds)
            xm.mark_step()
            # Run the decoder
            with set_forward_context(
                    attn_metadata,
                    self.vllm_config,
                    num_tokens=scheduler_output.total_num_scheduled_tokens):
                hidden_states = self.model(
                    input_ids=input_ids,
                    positions=self.position_ids,
                    inputs_embeds=inputs_embeds,
                )
            hidden_states = self.select_hidden_states(hidden_states,
                                                      logits_indices)
            logits = self.compute_logits(hidden_states)
            tpu_sampling_metadata = TPUSupportedSamplingMetadata.\
                from_input_batch(self.input_batch, padded_num_reqs, self.device)
            if scheduler_output.grammar_bitmask is not None:
                # Structured decoding: mask logits with the grammar bitmask.
                require_struct_decoding, grammar_bitmask_padded, arange = \
                    self.prepare_structured_decoding_input(logits,
                                                           scheduler_output)
                logits = self.structured_decode(require_struct_decoding,
                                                grammar_bitmask_padded, logits,
                                                arange)
            selected_token_ids = self.sample_from_logits_func(
                logits, tpu_sampling_metadata)
            # NOTE (NickLucche) Use the original logits (before any penalties or
            # temperature scaling) for the top-k logprobs. We can't enforce it
            # due to recompilations outside torch.compiled code, so just make
            # sure `sample_from_logits` does not modify the logits in-place.
            logprobs = self.gather_logprobs(logits, selected_token_ids) \
                if tpu_sampling_metadata.logprobs else None

            # Remove padding on cpu and keep dynamic op outside of xla graph.
            selected_token_ids = selected_token_ids.cpu()[:num_reqs]

            combined_selected_tokens.append(selected_token_ids)
            if tpu_sampling_metadata.logprobs:
                combined_logprobs.append(logprobs.tolists())

            start_index = end_index

        # NOTE: current kv load and save get h2d/d2h copies involved.
        # Those copies are blocking. Once they become async., kv_save
        # should be called right after each single forward pass,
        # instead of the forwards of the entire input batch.
        self.maybe_wait_for_kv_save()
        finished_sending, finished_recving = (
            self.get_finished_kv_transfers(scheduler_output))

        # Merge the per-chunk results back into one batch-sized view.
        selected_token_ids = torch.cat(combined_selected_tokens, dim=0)
        if tpu_sampling_metadata.logprobs:

            def concat_lists(input_lists):
                result = []
                for input_list in input_lists:
                    result.extend(input_list)
                return result

            logprobs_lists = LogprobsLists(logprob_token_ids=concat_lists(
                [lp.logprob_token_ids for lp in combined_logprobs]),
                                           logprobs=concat_lists([
                                               lp.logprobs
                                               for lp in combined_logprobs
                                           ]),
                                           sampled_token_ranks=concat_lists([
                                               lp.sampled_token_ranks
                                               for lp in combined_logprobs
                                           ]))
        else:
            logprobs_lists = None

        # Update the cache state concurrently. Code above will not block until
        # we use `selected_token_ids`. Add mark_step if post-processing changes
        request_seq_lens: list[tuple[int, CachedRequestState, int]] = []
        discard_sampled_tokens_req_indices = []
        num_reqs = self.input_batch.num_reqs
        for i, req_id in zip(range(num_reqs), self.input_batch.req_ids):
            assert req_id is not None
            req_state = self.requests[req_id]
            seq_len = (req_state.num_computed_tokens +
                       scheduler_output.num_scheduled_tokens[req_id])
            if seq_len >= req_state.num_tokens:
                request_seq_lens.append((i, req_state, seq_len))
            else:
                # Ignore the sampled token from the partial request.
                # Rewind the generator state as if the token was not sampled.
                generator = self.input_batch.generators.get(i)
                if generator is not None:
                    # This relies on cuda-specific torch-internal impl details
                    generator.set_offset(generator.get_offset() - 4)

                # Record the index of the request that should not be sampled,
                # so that we could clear the sampled tokens before returning.
                discard_sampled_tokens_req_indices.append(i)

        assert all(
            req_id is not None for req_id in
            self.input_batch.req_ids[:num_reqs]), "req_ids contains None"
        req_ids = cast(list[str], self.input_batch.req_ids[:num_reqs])

        # Prompt logprobs are not supported on TPU yet (see TODO above).
        prompt_logprobs_dict: dict[str, Optional[LogprobsTensors]] = {}
        for req_id in self.input_batch.req_ids[:num_reqs]:
            prompt_logprobs_dict[req_id] = None

        max_gen_len = selected_token_ids.shape[-1]
        if max_gen_len == 1:
            valid_sampled_token_ids = selected_token_ids.tolist()

            # Mask out the sampled tokens that should not be sampled.
            # TODO: Keep in sync with gpu_model_runner.py, in particular
            # the "else" case here
            for i in discard_sampled_tokens_req_indices:
                valid_sampled_token_ids[i].clear()

            # Append sampled tokens
            for i, req_state, seq_len in request_seq_lens:
                token_id = valid_sampled_token_ids[i][0]
                self.input_batch.token_ids_cpu[i, seq_len] = token_id
                req_state.output_token_ids.append(token_id)
                self.input_batch.num_tokens[i] += 1

        else:
            # Multi-token case (e.g. spec decoding): drop INVALID_TOKEN_ID
            # padding per request before appending.
            valid_mask = selected_token_ids != INVALID_TOKEN_ID
            gen_lens = valid_mask.sum(dim=1).tolist()
            valid_sampled_token_ids = [
                seq.tolist()
                for seq in selected_token_ids[valid_mask].split(gen_lens)
            ]
            self.input_batch.num_tokens[:num_reqs] += gen_lens
            for i, req_state, seq_len in request_seq_lens:
                target_slice = slice(seq_len - gen_lens[i] + 1, seq_len + 1)
                self.input_batch.token_ids_cpu[
                    i, target_slice] = valid_sampled_token_ids[i]
                req_state.output_token_ids.extend(valid_sampled_token_ids[i])

        kv_connector_output = None if (
            finished_sending is None
            and finished_recving is None) else KVConnectorOutput(
                finished_sending=finished_sending,
                finished_recving=finished_recving,
            )

        model_runner_output = ModelRunnerOutput(
            req_ids=req_ids,
            req_id_to_index=self.input_batch.req_id_to_index,
            sampled_token_ids=valid_sampled_token_ids,
            spec_token_ids=None,
            logprobs=logprobs_lists,
            prompt_logprobs_dict=prompt_logprobs_dict,
            pooler_output=[],
            kv_connector_output=kv_connector_output,
        )

        # Check there are no new graphs compiled - all the graphs should be
        # captured and compiled during warm up.
        self._verify_num_xla_graphs("execute_model")

        return model_runner_output
1161
+ def update_config(self, overrides: dict[str, Any]) -> None:
1162
+ # TODO: TPU config may need extra validation
1163
+ # https://github.com/vllm-project/vllm/pull/20095#discussion_r2201497754
1164
+ allowed_config_names = {"load_config", "model_config"}
1165
+ for config_name, config_overrides in overrides.items():
1166
+ assert config_name in allowed_config_names, \
1167
+ f"Config `{config_name}` not supported. " \
1168
+ f"Allowed configs: {allowed_config_names}"
1169
+ config = getattr(self, config_name)
1170
+ new_config = update_config(config, config_overrides)
1171
+ setattr(self, config_name, new_config)
1172
+
1173
    def load_model(self) -> None:
        """Load model weights onto the TPU (SPMD-aware), attach LoRA if
        configured, and create the sampler."""
        self.device = self.device_config.device

        # NOTE(woosuk): While the executor assigns the TP ranks to the worker
        # process, the ranks can be different from the ranks internally assigned
        # by the xm runtime. Therefore, there is a mismatch in the rank
        # assignment between the gloo (cpu) runtime and the xm (tpu) runtime.
        # This is not a problem in linear layers because all-reduce is
        # rank-agnostic. However, it matters for all-gather as the ranks
        # determine the order of concatenating the output tensors.
        # As a workaround, we use the xm's rank assignment only when loading
        # the embedding weights.
        xm_tp_rank = xr.global_ordinal()
        with patch(
                "vllm.model_executor.layers.vocab_parallel_embedding."
                "get_tensor_model_parallel_rank",
                return_value=xm_tp_rank):
            try:
                if self.use_spmd:
                    # SPMD path: shard weights across the mesh at load time.
                    tpu_loader = TPUModelLoader(
                        load_config=self.vllm_config.load_config)
                    model = tpu_loader.load_model(
                        vllm_config=self.vllm_config,
                        model_config=self.vllm_config.model_config,
                        mesh=self.mesh)
                else:
                    model_loader = get_model_loader(self.load_config)
                    logger.info("Loading model from scratch...")
                    model = model_loader.load_model(
                        vllm_config=self.vllm_config,
                        model_config=self.model_config)
            except RuntimeError as e:
                # Re-raise with a TPU-specific hint; OOM during load is the
                # most common failure mode here.
                raise RuntimeError(
                    f"Unable to load model, a likely reason is the model is "
                    "too large for the current device's HBM memory. "
                    "Consider switching to a smaller model "
                    "or sharding the weights on more chips. "
                    f"See the detailed error: {e}") from e
        if self.lora_config is not None:
            model = self.load_lora_model(model, self.model_config,
                                         self.scheduler_config,
                                         self.lora_config, self.device)
            replace_set_lora(model)

        # Sync all pending XLA execution during model initialization and weight
        # loading.
        xm.mark_step()
        xm.wait_device_ops()
        # NOTE(review): the model attribute is only assigned on first load;
        # presumably subsequent weight refreshes go through reload_weights()
        # instead — confirm.
        if not hasattr(self, "model"):
            self.model = model
        self.sampler = TPUSampler()
1225
+ def reload_weights(self) -> None:
1226
+ assert getattr(self, "model", None) is not None, \
1227
+ "Cannot reload weights before model is loaded."
1228
+ model_loader = get_model_loader(self.load_config)
1229
+ logger.info("Reloading weights inplace...")
1230
+ model_loader.load_weights(self.model, model_config=self.model_config)
1231
+
1232
    @torch.no_grad()
    def _dummy_run(self, num_tokens: int, num_reqs: int,
                   num_blocks: int) -> None:
        """Run one forward pass with zero-filled inputs to trace/compile
        the XLA graph for this (num_tokens, num_reqs, num_blocks) shape.

        Also records the hidden-state dtype produced by the model, which
        later precompile steps reuse for their dummy tensors.
        """
        if self.supports_mm_inputs:
            # Multimodal path feeds embeddings directly, not token ids.
            input_ids = None
            inputs_embeds = torch.zeros((num_tokens, self.hidden_size),
                                        dtype=self.dtype,
                                        device=self.device)
        else:
            input_ids = torch.zeros((num_tokens),
                                    dtype=torch.int32).to(self.device)
            inputs_embeds = None
        actual_num_reqs = min(num_tokens, num_reqs)
        position_ids = torch.zeros(num_tokens,
                                   dtype=torch.int32).to(self.device)
        # Slot-mapping buffers are padded so the kernel shapes stay stable
        # across batches (avoids recompilation at runtime).
        padded_num_slices = _get_padded_num_kv_cache_update_slices(
            num_tokens, self.max_num_reqs, self.block_size)
        num_kv_update_slices = torch.tensor([padded_num_slices],
                                            dtype=torch.int32).to(self.device)
        slot_mapping = torch.zeros((3, padded_num_slices),
                                   dtype=torch.int32).to(self.device)
        block_tables = torch.zeros((num_reqs, num_blocks),
                                   dtype=torch.int32).to(self.device)
        query_lens = [1] * num_reqs
        query_start_loc = torch.cumsum(torch.tensor([0] + query_lens,
                                                    dtype=torch.int32),
                                       dim=0,
                                       dtype=torch.int32).to(self.device)
        context_lens = torch.ones((num_reqs, ),
                                  dtype=torch.int32).to(self.device)
        num_seqs = torch.tensor([actual_num_reqs],
                                dtype=torch.int32).to(self.device)
        attn_metadata = PallasMetadata(
            slot_mapping=slot_mapping,
            block_tables=block_tables,
            context_lens=context_lens,
            query_start_loc=query_start_loc,
            num_seqs=num_seqs,
            num_kv_update_slices=num_kv_update_slices,
            num_slices_per_kv_cache_update_block=self.
            _num_slices_per_kv_cache_update_block,
        )

        # Mark dynamic dims so dynamo compiles one graph per bucketed shape
        # rather than one per exact size.
        if self.supports_mm_inputs:
            torch._dynamo.mark_dynamic(inputs_embeds, 0)
        else:
            torch._dynamo.mark_dynamic(input_ids, 0)
        torch._dynamo.mark_dynamic(position_ids, 0)
        torch._dynamo.mark_dynamic(attn_metadata.slot_mapping, 0)
        torch._dynamo.mark_dynamic(attn_metadata.block_tables, (0, 1))
        torch._dynamo.mark_dynamic(attn_metadata.context_lens, 0)
        torch._dynamo.mark_dynamic(attn_metadata.query_start_loc, 0)

        # Every attention layer shares the same dummy metadata.
        layer_names = get_layers_from_vllm_config(self.vllm_config,
                                                  Attention).keys()
        per_layer_attn_metadata = {
            layer_name: attn_metadata
            for layer_name in layer_names
        }

        with self.maybe_select_dummy_loras(
                self.lora_config,
                np.array([num_tokens], dtype=np.int32)), set_forward_context(
                    per_layer_attn_metadata, self.vllm_config, 0):
            out = self.model(input_ids=input_ids,
                             positions=position_ids,
                             inputs_embeds=inputs_embeds)
        # Downstream precompile steps build dummies with this dtype.
        self._hidden_states_dtype = out.dtype
1300
+
1301
    def _set_active_loras(self, prompt_lora_mapping, token_lora_mapping,
                          lora_requests) -> None:
        """Activate LoRA adapters, fencing the update between XLA steps so
        input and metadata updates are captured in separate graphs."""
        xm.mark_step()  # Captures input updates
        super()._set_active_loras(prompt_lora_mapping, token_lora_mapping,
                                  lora_requests)
        xm.mark_step()  # Captures metadata updates
1307
+
1308
    def _precompile_mm_encoder(self) -> None:
        """Precompile the multimodal encoder and `get_input_embeddings` for
        every supported modality and item count, so no compilation happens
        at serving time. No-op for text-only models."""
        if not self.supports_mm_inputs:
            return

        # Pre-compile MM encoder for all supported data modalities.
        hf_config = self.vllm_config.model_config.hf_config

        mm_budget = self.mm_budget
        assert mm_budget is not None

        max_items_per_seq_by_modality = mm_budget.max_items_per_batch_by_modality  # noqa: E501

        for mode, max_items_per_seq in max_items_per_seq_by_modality.items():
            logger.info(
                "Compiling Multimodal %s Encoder with different input"
                " shapes.", mode)
            start = time.perf_counter()
            # No padding for MM encoder just yet.
            for num_items in range(1, max_items_per_seq + 1):
                logger.info("  -- mode: %s items: %d", mode, num_items)
                batched_dummy_mm_inputs = self._get_mm_dummy_batch(
                    mode,
                    num_items,
                )
                # Run multimodal encoder.
                # mark_step before/after isolates the encoder graph from
                # surrounding host work.
                xm.mark_step()
                mm_embeds = self.model.get_multimodal_embeddings(
                    **batched_dummy_mm_inputs)
                xm.mark_step()
                num_patches = mm_embeds[0].shape[0]
                items_size = num_patches * num_items

                # NOTE (NickLucche) pre-compile `get_input_embeddings` when mm
                # embeddings are present. We assume `--disable-mm-chunked`,
                # hence only whole items can be scheduled. This implies we just
                # need to compile when `num_items` fit the (padded) `input_ids`
                for num_tokens in self.num_tokens_paddings:
                    if num_tokens >= items_size:
                        # XLA Workaround: if torch.zeros(..device) is used, XLA
                        # compiles a scalar+expansion op, which won't match
                        # the graph generated at runtime. CPU->TPU must be used
                        placeholders_ids = torch.zeros(num_tokens,
                                                       dtype=torch.int32,
                                                       device="cpu")
                        # Align placeholders and actual num mm_embeddings.
                        placeholders_ids[:items_size] = \
                            hf_config.image_token_index

                        placeholders_ids = placeholders_ids.to(self.device)
                        # Assign outputs or the graph will be cut short.
                        a, b = self._get_model_inputs(placeholders_ids,
                                                      [mm_embeds])
                        assert a is None
                        xm.mark_step()

            # Pre-compile `get_input_embeddings` when mm_embeddings are not
            # present. Chunk is only made of text, no mm_placeholders.
            for num_tokens in self.num_tokens_paddings:
                placeholders_ids = torch.zeros(num_tokens,
                                               dtype=torch.int32,
                                               device="cpu")
                placeholders_ids = placeholders_ids.to(self.device)
                a, b = self._get_model_inputs(placeholders_ids, [])
                assert a is None
                xm.mark_step()

            xm.wait_device_ops()
            end = time.perf_counter()
            logger.info(
                "Multimodal %s Encoder compilation finished in in %.2f "
                "[secs].", mode, end - start)
1379
+
1380
    def _precompile_backbone(self) -> None:
        """Trace/compile the model forward pass for every padded token
        count (and both request-count buckets when configured)."""
        logger.info("Compiling the model with different input shapes.")
        start = time.perf_counter()
        for num_tokens in self.num_tokens_paddings:
            logger.info("  -- num_tokens: %d", num_tokens)
            self._dummy_run(num_tokens, self.num_reqs_max_model_len,
                            self.max_num_blocks_per_req)
            # Also compile the smaller "most common model len" bucket when
            # it is enabled.
            if self.most_model_len is not None:
                self._dummy_run(num_tokens, self.num_reqs_most_model_len,
                                self.num_blocks_per_most_len_req)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("model backbone")
1394
+
1395
    def _precompile_select_hidden_states(self) -> None:
        # Compile hidden state selection function for bucketed
        # n_tokens x max_num_reqs. Graph is really small so this is fine.
        logger.info(
            "Compiling select_hidden_states with different input shapes.")
        start = time.perf_counter()
        hsize = self.model_config.get_hidden_size()
        for num_tokens in self.num_tokens_paddings:
            dummy_hidden = torch.zeros((num_tokens, hsize),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            torch._dynamo.mark_dynamic(dummy_hidden, 0)
            for num_reqs in self.num_reqs_paddings:
                indices = torch.zeros(num_reqs,
                                      dtype=torch.int32,
                                      device=self.device)
                torch._dynamo.mark_dynamic(indices, 0)
                self.select_hidden_states(dummy_hidden, indices)
                logger.info("  -- num_tokens: %d, num_seqs: %d", num_tokens,
                            num_reqs)
                # Requests can't be more than tokens. But do compile for the
                # next bigger value in case num_tokens uses bucketed padding.
                if num_reqs >= min(num_tokens, self.max_num_reqs):
                    break
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("select_hidden_states")
1423
+
1424
    def _precompile_compute_logits(self) -> None:
        """Compile the logits head for every padded request count."""
        logger.info("Compiling compute_logits with different input shapes.")
        start = time.perf_counter()
        hsize = self.model_config.get_hidden_size()
        for num_reqs in self.num_reqs_paddings:
            dummy_hidden = torch.zeros((num_reqs, hsize),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            torch._dynamo.mark_dynamic(dummy_hidden, 0)
            self.compute_logits(dummy_hidden)
            logger.info("  -- num_seqs: %d", num_reqs)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("compute_logits")
1439
+
1440
    def _precompile_structured_decoding(self) -> None:
        """Compile the grammar-bitmask masking graph for every padded
        request count."""
        logger.info(
            "Compiling structured_decoding with different input shapes.")
        start = time.perf_counter()
        for num_reqs in self.num_reqs_paddings:
            dummy_logits = torch.zeros((num_reqs, self.vocab_size),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            dummy_require_struct_decoding = \
                self.require_structured_out_cpu[:num_reqs].to(self.device)
            dummy_grammar_bitmask = \
                self.grammar_bitmask_cpu[:num_reqs].to(self.device)
            # The first dimension of the above 3 dummy tensors cannot be
            # mark_dynamic because some operations in structured_decode require
            # them to be static.
            arange = self.structured_decode_arange.to(self.device)
            self.structured_decode(dummy_require_struct_decoding,
                                   dummy_grammar_bitmask, dummy_logits, arange)
            logger.info("  -- num_seqs: %d", num_reqs)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("structured_decoding")
1463
+
1464
    def _precompile_sample_from_logits(self) -> None:
        """Compile sampling for every padded request count, covering both
        the greedy and non-greedy code paths."""
        logger.info(
            "Compiling sample_from_logits with different input shapes.")
        start = time.perf_counter()
        for num_reqs in self.num_reqs_paddings:
            dummy_logits = torch.zeros((num_reqs, self.vocab_size),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            # The first dimension of dummy_logits cannot be mark_dynamic
            # because some operations in the sampler require it to be static.
            for all_greedy in [False, True]:
                # Non-greedy sampling needs real sampling params to trace
                # the full sampler graph.
                generate_params_if_all_greedy = not all_greedy
                sampling_metadata = (
                    TPUSupportedSamplingMetadata.from_input_batch(
                        self.input_batch,
                        num_reqs,
                        self.device,
                        generate_params_if_all_greedy,
                    ))
                sampling_metadata.all_greedy = all_greedy
                with self.maybe_select_dummy_loras(
                        self.lora_config, np.array([num_reqs],
                                                   dtype=np.int32)):
                    self.sample_from_logits_func(dummy_logits,
                                                 sampling_metadata)
            logger.info("  -- num_seqs: %d", num_reqs)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("sample_from_logits")
1494
+
1495
    def _precompile_gather_logprobs(self) -> None:
        """Compile logprob gathering for every padded request count."""
        logger.info("Compiling gather_logprobs with different input shapes.")
        start = time.perf_counter()
        for num_reqs in self.num_reqs_paddings:
            dummy_logits = torch.zeros((num_reqs, self.vocab_size),
                                       device=self.device,
                                       dtype=self._hidden_states_dtype)
            dummy_tokens = torch.zeros((num_reqs, 1),
                                       dtype=torch.int64).to(self.device)
            with self.maybe_select_dummy_loras(
                    self.lora_config, np.array([num_reqs], dtype=np.int32)):
                self.gather_logprobs(dummy_logits, dummy_tokens)
            logger.info("  -- num_seqs: %d", num_reqs)
        xm.wait_device_ops()
        end = time.perf_counter()
        logger.info("Compilation finished in %.2f [secs].", end - start)
        self._update_num_xla_graphs("gather_logprobs")
1512
+
1513
+ def capture_model(self) -> None:
1514
+ """
1515
+ Precompile all the subgraphs with possible input shapes.
1516
+ """
1517
+ with self.maybe_setup_dummy_loras(self.lora_config):
1518
+ self._precompile_mm_encoder()
1519
+ self._precompile_backbone()
1520
+ self._precompile_select_hidden_states()
1521
+ self._precompile_compute_logits()
1522
+ self._precompile_structured_decoding()
1523
+ self._precompile_sample_from_logits()
1524
+ self._precompile_gather_logprobs()
1525
+
1526
    def profile_run(
        self,
        num_tokens: int,
    ) -> None:
        """Run a worst-case forward pass to measure peak memory usage.

        For multimodal models (unless profiling is skipped), first runs the
        encoder with the largest possible multimodal batch and caches its
        outputs, then runs the backbone with `num_tokens` dummy tokens.
        """
        # Profile with multimodal encoder & encoder cache.
        if self.supports_mm_inputs:
            if self.model_config.multimodal_config.skip_mm_profiling:
                logger.info(
                    "Skipping memory profiling for multimodal encoder and "
                    "encoder cache.")
            else:
                mm_budget = self.mm_budget
                assert mm_budget is not None

                # TODO: handle encoder-decoder models once we support them.
                if (encoder_budget := mm_budget.get_encoder_budget()) > 0:
                    # NOTE: Currently model is profiled with a single non-text
                    # modality with the max possible input tokens even when
                    # it supports multiple.
                    (
                        dummy_modality,
                        max_tokens,
                    ) = mm_budget.get_modality_with_max_tokens()
                    (
                        max_mm_items_per_prompt,
                        max_mm_items_per_batch,
                    ) = mm_budget.get_max_items(dummy_modality, max_tokens)

                    logger.info(
                        "Encoder cache will be initialized with a budget of "
                        "%s tokens, and profiled with %s %s items of the "
                        "maximum feature size.",
                        encoder_budget,
                        max_mm_items_per_batch,
                        dummy_modality,
                    )

                    # Create dummy batch of multimodal inputs.
                    batched_dummy_mm_inputs = self._get_mm_dummy_batch(
                        dummy_modality,
                        max_mm_items_per_batch,
                    )

                    # Run multimodal encoder.
                    # Isolate encoder graph from post-processing to minimize
                    # impact of recompilation until it's fixed.
                    start = time.perf_counter()
                    xm.mark_step()
                    dummy_encoder_outputs = \
                        self.model.get_multimodal_embeddings(
                        **batched_dummy_mm_inputs)
                    xm.mark_step()
                    xm.wait_device_ops()
                    end = time.perf_counter()
                    logger.info(
                        "Multimodal Encoder profiling finished in %.2f [secs].",
                        end - start)

                    sanity_check_mm_encoder_outputs(
                        dummy_encoder_outputs,
                        expected_num_items=max_mm_items_per_batch,
                    )

                    # Cache the dummy encoder outputs.
                    self.encoder_cache["tmp"] = dict(
                        enumerate(dummy_encoder_outputs))

        # Trigger compilation for general shape.
        self._dummy_run(num_tokens, self.num_reqs_max_model_len,
                        self.max_num_blocks_per_req)
        if self.most_model_len is not None:
            self._dummy_run(num_tokens, self.num_reqs_most_model_len,
                            self.num_blocks_per_most_len_req)

        xm.mark_step()
        xm.wait_device_ops()
        # Drop profiling-only state so it doesn't count against serving
        # memory.
        self.encoder_cache.clear()
        gc.collect()
1604
+
1605
    def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
        """
        Initialize KV cache based on `kv_cache_config`.
        Args:
            kv_cache_config: Configuration for the KV cache, including the KV
            cache size of each layer
        """
        if len(kv_cache_config.kv_cache_groups) > 1:
            raise NotImplementedError(
                "Hybrid models with more than one KV cache type are not "
                "supported yet.")

        # If the scheduler settled on a different block size than the one
        # this runner was built with, rebuild the input batch to match.
        if kv_cache_config.kv_cache_groups[
                0].kv_cache_spec.block_size != self.block_size:
            self.input_batch = InputBatch(
                max_num_reqs=self.max_num_reqs,
                max_model_len=self.max_model_len,
                max_num_batched_tokens=self.max_num_tokens,
                device=self.device,
                pin_memory=self.pin_memory,
                vocab_size=self.model_config.get_vocab_size(),
                block_sizes=[
                    kv_cache_config.kv_cache_groups[0].kv_cache_spec.block_size
                ],
            )
        # Verify dtype compatibility between block_table_cpu and input_batch
        assert self.block_table_cpu.dtype == self.input_batch.block_table[
            0].get_cpu_tensor().dtype

        # Map each layer name to the byte size of its cache tensor.
        kv_cache_sizes = {}
        for kv_cache_tensor in kv_cache_config.kv_cache_tensors:
            assert len(kv_cache_tensor.shared_by) == 1, (
                "KV cache tensor shared by multiple layers is not supported in "
                "TPU.")
            kv_cache_sizes[kv_cache_tensor.shared_by[0]] = kv_cache_tensor.size

        kv_caches: dict[str, torch.Tensor] = {}
        for kv_cache_group in kv_cache_config.kv_cache_groups:
            kv_cache_spec = kv_cache_group.kv_cache_spec
            for layer_name in kv_cache_group.layer_names:
                tensor_size = kv_cache_sizes[layer_name]
                assert tensor_size % kv_cache_spec.page_size_bytes == 0
                num_blocks = tensor_size // kv_cache_spec.page_size_bytes  # noqa
                if isinstance(kv_cache_spec, AttentionSpec):
                    if self.use_spmd:
                        num_kv_heads = kv_cache_spec.num_kv_heads
                        assert self.original_parallel_config is not None
                        tp_size = \
                            self.original_parallel_config.tensor_parallel_size
                        # TODO: Handle kv cache duplication under SPMD mode.
                        assert num_kv_heads % tp_size == 0, (
                            f"num_kv_heads {num_kv_heads} must be divisible by "
                            f"tp_size {tp_size} under SPMD mode")
                    kv_cache_shape = PallasAttentionBackend.get_kv_cache_shape(
                        num_blocks, kv_cache_spec.block_size,
                        kv_cache_spec.num_kv_heads, kv_cache_spec.head_size)
                    dtype = kv_cache_spec.dtype

                    # Allocate on CPU then move, matching the XLA-friendly
                    # pattern used elsewhere in this runner.
                    tpu_kv_cache = torch.zeros(kv_cache_shape,
                                               dtype=dtype).to(self.device)

                    kv_caches[layer_name] = tpu_kv_cache
                else:
                    raise NotImplementedError

        # Setup `kv_cache_config` and `kv_caches` for models
        # with cross-layer KV sharing
        if self.shared_kv_cache_layers:
            initialize_kv_cache_for_kv_sharing(
                self.shared_kv_cache_layers,
                kv_cache_config.kv_cache_groups,
                kv_caches,
            )

        bind_kv_cache(
            kv_caches,
            self.vllm_config.compilation_config.static_forward_context,
            self.kv_caches)

        if self.use_spmd:
            # Shard KV Cache along the 'x' mesh axis (second dimension).
            for cache in self.kv_caches:
                xs.mark_sharding(cache, self.mesh, (None, 'x', None, None))

        if has_kv_transfer_group():
            get_kv_transfer_group().register_kv_caches(kv_caches)
            get_kv_transfer_group().set_host_xfer_buffer_ops(copy_kv_blocks)
1692
+
1693
    def reset_dynamo_cache(self):
        """Drop cached dynamo bytecode/graphs so the language backbone can
        be re-traced (e.g. after a weight/config change)."""
        # NOTE: We check `is_multimodal_model` instead of `supports_mm_inputs`
        # since the compiled model object of the language backbone of a
        # multimodal model needs to be extracted via `get_language_model`.
        if self.model_config.is_multimodal_model:
            compiled_model = self.model.get_language_model().model
        else:
            compiled_model = self.model.model
        if isinstance(compiled_model, TorchCompileWrapperWithCustomDispatcher):
            logger.info("Clear dynamo cache and cached dynamo bytecode.")
            torch._dynamo.eval_frame.remove_from_cache(
                compiled_model.original_code_object)
            compiled_model.compiled_codes.clear()
1707
+
1708
    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
    def select_hidden_states(self, hidden_states, indices_do_sample):
        """Gather the hidden-state rows that will be sampled from
        (one row per request), compiled as its own small XLA graph."""
        return hidden_states[indices_do_sample]
1711
+
1712
    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
    def compute_logits(self,
                       sample_hidden_states: torch.Tensor) -> torch.Tensor:
        """Project sampled hidden states to vocab logits (own XLA graph)."""
        return self.model.compute_logits(sample_hidden_states, None)
1716
+
1717
+ # TODO: Under SPMD mode, sample_from_logits has correctness issue.
1718
+ # Re-enable the torch.compile once the issue is fixed in torchxla.
1719
+ # @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1720
+ def sample_from_logits(
1721
+ self, logits: torch.Tensor,
1722
+ sampling_metadata: TPUSupportedSamplingMetadata) -> torch.Tensor:
1723
+ """
1724
+ Sample with xla-friendly function. This function is to be traced
1725
+ separately from `forward` for lighter compilation overhead.
1726
+ """
1727
+ if sampling_metadata.all_greedy:
1728
+ out_tokens = torch.argmax(logits, dim=-1, keepdim=True)
1729
+ else:
1730
+ out_tokens = self.sampler(logits,
1731
+ sampling_metadata).sampled_token_ids
1732
+ return out_tokens
1733
+
1734
    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
    def gather_logprobs(self, logits: torch.Tensor,
                        sampled_tokens: torch.Tensor) -> LogprobsTensors:
        """
        Gather the top_logprobs with corresponding tokens. Use a fixed number
        of logprobs as an alternative to having multiple pre-compiled graphs.
        Select the number of logprobs actually demanded by each request on CPU.
        """
        logprobs = self.sampler.compute_logprobs(logits)
        # Always gather `max_logprobs` so the compiled graph shape is fixed;
        # trimming to each request's actual count happens later on CPU.
        return self.sampler.gather_logprobs(
            logprobs,
            self.model_config.max_logprobs,
            token_ids=sampled_tokens.squeeze(-1))
1747
+
1748
    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
    def structured_decode(self, require_struct_decoding: torch.Tensor,
                          grammar_bitmask: torch.Tensor, logits: torch.Tensor,
                          arange: torch.Tensor) -> torch.Tensor:
        """Apply the grammar bitmask only to the rows flagged in
        `require_struct_decoding`; other rows keep their original logits."""
        return torch.where(
            require_struct_decoding,
            self.apply_grammar_bitmask(logits, grammar_bitmask, arange),
            logits)
1756
+
1757
+ def apply_grammar_bitmask(self, logits: torch.Tensor,
1758
+ grammar_bitmask: torch.Tensor,
1759
+ arange: torch.Tensor):
1760
+ assert (logits.shape[0] == grammar_bitmask.shape[0])
1761
+ logits_cloned = logits.clone()
1762
+ for i in range(logits.shape[0]):
1763
+ unpacked_bitmask = (torch.bitwise_right_shift(
1764
+ grammar_bitmask[i][:, None], arange[None, :]) & 1) == 0
1765
+ unpacked_bitmask = unpacked_bitmask.reshape(-1)[:self.vocab_size]
1766
+ logits_cloned[i] = logits_cloned[i].masked_fill(
1767
+ unpacked_bitmask, -float("inf"))
1768
+ return logits_cloned
1769
+
1770
    def get_multimodal_embeddings(self, *args, **kwargs):
        """Delegate to the underlying model's multimodal embedding lookup."""
        return self.model.get_multimodal_embeddings(*args, **kwargs)
1772
+
1773
    def get_input_embeddings(self, *args, **kwargs):
        """Delegate to the underlying model's input embedding lookup."""
        return self.model.get_input_embeddings(*args, **kwargs)
1775
+
1776
    def prepare_structured_decoding_input(
        self, logits: torch.Tensor, scheduler_output: "SchedulerOutput"
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Build the device tensors consumed by `structured_decode`.

        Returns (require_struct_decoding, grammar_bitmask, arange), each
        moved to the logits' device and sliced/sized to the current number
        of requests.
        """
        grammar_bitmask = scheduler_output.grammar_bitmask
        assert grammar_bitmask is not None
        num_reqs, _ = logits.shape

        # Reset pre-allocated tensors
        self.grammar_bitmask_cpu.zero_()
        self.require_structured_out_cpu.zero_()

        # We receive the structured output bitmask from the scheduler, but the
        # indices of the requests in the batch may not match the indices of
        # the bitmask since the scheduler doesn't know how the tpu runner is
        # ordering the requests in the batch. We need to match the order of
        # bitmask with the order of requests
        struct_out_indices: list[int] = []
        mask_indices: list[int] = []
        for req_id in self.input_batch.req_ids:
            mask_index = scheduler_output.structured_output_request_ids.get(
                req_id)
            if mask_index is None:
                # This request has no structured-output constraint.
                continue
            batch_index = self.input_batch.req_id_to_index[req_id]
            struct_out_indices.append(batch_index)
            mask_indices.append(mask_index)
        self.grammar_bitmask_cpu[struct_out_indices] = torch.from_numpy(
            grammar_bitmask[mask_indices])
        # It's not guaranteed that all requests in this batch require
        # structured output, so create a bool tensor to represent
        # the requests that need structured output.
        # NOTE: the list is rebound to a tensor here; the list itself is no
        # longer needed past this point.
        struct_out_indices = torch.tensor(struct_out_indices, dtype=torch.long)
        self.require_structured_out_cpu[struct_out_indices] = True
        return self.require_structured_out_cpu[:num_reqs].to(logits.device), \
            self.grammar_bitmask_cpu[:num_reqs].to(logits.device), \
            self.structured_decode_arange.to(logits.device)
1812
+
1813
    def _get_mm_dummy_batch(
        self,
        modality: str,
        max_items_per_batch: int,
    ) -> BatchedTensorInputs:
        """Dummy data for profiling and precompiling multimodal models."""
        dummy_decoder_data = self.mm_registry.get_decoder_dummy_data(
            model_config=self.model_config,
            seq_len=self.max_num_tokens,
            mm_counts={modality: 1},
        )
        dummy_mm_data = dummy_decoder_data.multi_modal_data

        # Result in the maximum GPU consumption of the model
        dummy_mm_item = dummy_mm_data.get_item(modality=modality, item_index=0)

        # Replicate the single max-size item `max_items_per_batch` times and
        # take the first (only) modality group produced.
        return next(grouped_mm_kwargs
                    for _, _, grouped_mm_kwargs in group_mm_kwargs_by_modality(
                        [dummy_mm_item] * max_items_per_batch,
                        device=self.device,
                        pin_memory=self.pin_memory,
                    ))
1835
+
1836
+
1837
def _get_req_paddings(min_req_size: int, max_req_size: int) -> list[int]:
    """Build the ascending list of padded request-count bucket sizes."""
    logger.info("Preparing request paddings:")
    # assert min_req_size is power of 2
    assert (min_req_size & (min_req_size - 1) == 0) and min_req_size > 0
    paddings: list = []
    candidate = max(MIN_NUM_SEQS, min_req_size)
    while candidate <= max_req_size:
        # The helper clamps at max_req_size, so the value eventually
        # repeats; stop as soon as it stops growing.
        if paddings and paddings[-1] == candidate:
            break
        paddings.append(candidate)
        logger.info("    %d", candidate)
        candidate = _get_padded_num_reqs_with_upper_limit(
            candidate + 1, max_req_size)
    return paddings
1848
+
1849
+
1850
def _get_padded_num_reqs_with_upper_limit(x: int, upper_limit: int) -> int:
    """Round `x` up to a power-of-two bucket (at least MIN_NUM_SEQS),
    clamped to `upper_limit`."""
    if x <= MIN_NUM_SEQS:
        padded = MIN_NUM_SEQS
    else:
        # Next power of two >= x.
        padded = 1 << (x - 1).bit_length()
    return padded if padded < upper_limit else upper_limit
1853
+
1854
+
1855
def _get_token_paddings(min_token_size: int, max_token_size: int,
                        padding_gap: int) -> list[int]:
    """Generate a list of padding size, starting from min_token_size,
    ending with a number that can cover max_token_size

    If padding_gap == 0 then:
        increase 2X each time (exponential)
    else:
        first increase the size to twice,
        then increase the padding size by padding_gap.
    """
    # assert min_token_size is power of 2
    assert (min_token_size & (min_token_size - 1) == 0) and min_token_size > 0
    paddings = []
    num = min_token_size

    if padding_gap == 0:
        logger.info("Using exponential token paddings:")
        # Keep doubling until max_token_size is covered; the final entry
        # may overshoot it.
        while True:
            logger.info("    %d", num)
            paddings.append(num)
            if num >= max_token_size:
                break
            num *= 2
        return paddings

    logger.info("Using incremental token paddings:")
    # Phase 1: double up to (and including) padding_gap.
    while num <= padding_gap:
        logger.info("    %d", num)
        paddings.append(num)
        num *= 2
    # Phase 2: step linearly by padding_gap from the last doubled value
    # until max_token_size is covered.
    num //= 2
    while num < max_token_size:
        num += padding_gap
        logger.info("    %d", num)
        paddings.append(num)

    return paddings
1892
+
1893
+
1894
+ def _get_padded_token_len(paddings: list[int], x: int) -> int:
1895
+ """Return the first element in paddings list greater or equal to x.
1896
+ """
1897
+ index = bisect.bisect_left(paddings, x)
1898
+ assert index < len(paddings)
1899
+ return paddings[index]
1900
+
1901
+
1902
+ def _make_src_and_dst_indices(
1903
+ src_block_ids: list[int],
1904
+ dst_block_ids: list[int],
1905
+ src_device: Union[torch.device, str],
1906
+ dst_device: Union[torch.device, str],
1907
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1908
+ src_indices = torch.tensor(src_block_ids,
1909
+ device=src_device,
1910
+ dtype=torch.int64)
1911
+ dst_indices = torch.tensor(dst_block_ids,
1912
+ device=dst_device,
1913
+ dtype=torch.int64)
1914
+ return src_indices, dst_indices
1915
+
1916
+
1917
@torch.compile(backend="openxla")
def _insert_blocks_to_tpu(
    cpu_cache: torch.Tensor,
    tpu_cache: torch.Tensor,
    cpu_block_indices: torch.Tensor,
    tpu_block_indices: torch.Tensor,
) -> None:
    """Copy the selected CPU cache blocks into the TPU cache in place
    (host-to-device swap-in)."""
    # Donating the TPU buffer lets XLA update it in place instead of
    # allocating a new tensor for the scatter.
    torch.ops.xla.dynamo_set_buffer_donor_(tpu_cache, True)
    tpu_cache[tpu_block_indices] = cpu_cache[cpu_block_indices].to(
        tpu_cache.device)
1927
+
1928
+
1929
@torch.compile(backend="openxla")
def _swap_out_tpu_blocks(
    tpu_cache: torch.Tensor,
    cpu_cache: torch.Tensor,
    tpu_block_indices: torch.Tensor,
    cpu_block_indices: torch.Tensor,
) -> None:
    """ tpu blocks to cpu blocks"""
    # Donor mark lets XLA reuse the TPU buffer rather than copy it.
    torch.ops.xla.dynamo_set_buffer_donor_(tpu_cache, True)
    cpu_cache[cpu_block_indices] = tpu_cache[tpu_block_indices].cpu()
1939
+
1940
+
1941
def copy_kv_blocks(
    src_kv_caches: dict[str, torch.Tensor],
    dst_kv_caches: dict[str, torch.Tensor],
    src_block_ids: list[int],
    dst_block_ids: list[int],
    direction: Literal["h2d", "d2h"],
) -> None:
    """Copy kv blocks between different buffers."""
    # Nothing to do for empty caches/id lists or mismatched id lengths.
    if (not src_kv_caches or not dst_kv_caches or not src_block_ids
            or not dst_block_ids
            or len(src_block_ids) != len(dst_block_ids)):
        return

    src_device = next(iter(src_kv_caches.values())).device
    dst_device = next(iter(dst_kv_caches.values())).device

    src_indices, dst_indices = _make_src_and_dst_indices(
        src_block_ids=src_block_ids,
        dst_block_ids=dst_block_ids,
        src_device=src_device,
        dst_device=dst_device)

    if direction == "h2d":
        copy_fn = _insert_blocks_to_tpu
    else:
        copy_fn = _swap_out_tpu_blocks
    # Assumes src and dst caches share the same layer-name keys.
    for layer_name, src_tensor in src_kv_caches.items():
        copy_fn(src_tensor, dst_kv_caches[layer_name], src_indices,
                dst_indices)
1969
+
1970
+
1971
+ def _get_padded_num_kv_cache_update_slices(num_tokens: int, max_num_reqs: int,
1972
+ page_size: int) -> int:
1973
+ """Calculates the padded number of KV cache update slices to avoid
1974
+ recompilation."""
1975
+ # NOTE(chengjiyao): let's say R_i is the token num for i-th request,
1976
+ # so it occupies most 2 + R_i // page_size pages. The total maximum
1977
+ # possible number of pages needed is sum(2 + R_i // page_size), which
1978
+ # is <= 2 * max_num_reqs + sum(R_i) // page_size
1979
+ # = 2 * max_num_reqs + num_tokens // page_size
1980
+ padded_num_slices = 2 * max_num_reqs + num_tokens // page_size
1981
+ padded_num_slices = min(padded_num_slices, num_tokens)
1982
+ return padded_num_slices
1983
+
1984
+
1985
def _get_num_slices_per_kv_cache_update_block(page_size_bytes: int) -> int:
    """Find the optimum number of slices to copy per Pallas program instance.

    Increasing the number of slices copied in one instance of the kernel program
    will increase HBM bandwidth utilization via more in-flight DMAs.

    However, it will also use more VMEM, and experimentally, we observed
    performance regression at 128 slices on v6e, likely due to running
    out of scalar registers. Thus this function will limit the number of
    slices to 64.
    """
    # The default vmem_limit_bytes of a pallas kernel is 32MB. Here we
    # calculate num_slices_per_block based on 16MB in case any register spills.
    vmem_budget = 16 * 1024 * 1024
    raw_slices = vmem_budget // page_size_bytes
    assert raw_slices > 0, "Number of slices should be positive"
    # Round down to a power of two, then cap at 64 (empirical sweet spot).
    return min(prev_power_of_2(raw_slices), 64)
2005
+
2006
+
2007
def replace_set_lora(model):
    """Monkey-patch every LoRA layer in `model` so that `set_lora` and
    `reset_lora` are each followed by an `xm.mark_step()`, fencing the
    weight update into its own XLA graph."""

    def _tpu_set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
        bias: Optional[torch.Tensor] = None,
    ):
        # TODO: The integer index leads to a recompilation, but converting it
        # to a tensor doesn't seem to work anymore. This might be fixed with a
        # later release of torch_xla.
        self._original_set_lora(index, lora_a, lora_b, embeddings_tensor, bias)
        xm.mark_step()

    def _tpu_reset_lora(self, index: int):
        self._original_reset_lora(index)
        xm.mark_step()

    for _, module in model.named_modules():
        if isinstance(module, BaseLayerWithLoRA):
            # Keep the originals so the wrappers can delegate to them.
            module._original_set_lora = module.set_lora
            module._original_reset_lora = module.reset_lora
            # Bind the wrappers as methods on this module instance.
            module.set_lora = _tpu_set_lora.__get__(module, module.__class__)
            module.reset_lora = _tpu_reset_lora.__get__(
                module, module.__class__)
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/tpu_worker.py ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """A TPU worker class."""
4
+ import os
5
+ from typing import Any, Optional
6
+
7
+ import torch
8
+ import torch.distributed
9
+ import torch.nn as nn
10
+ import torch_xla.core.xla_model as xm
11
+ import torch_xla.debug.profiler as xp
12
+ import torch_xla.runtime as xr
13
+
14
+ import vllm.envs as envs
15
+ from vllm.config import VllmConfig
16
+ from vllm.distributed import (ensure_model_parallel_initialized,
17
+ init_distributed_environment)
18
+ from vllm.distributed.kv_transfer import (ensure_kv_transfer_initialized,
19
+ has_kv_transfer_group)
20
+ from vllm.logger import init_logger
21
+ from vllm.lora.request import LoRARequest
22
+ from vllm.model_executor import set_random_seed
23
+ from vllm.platforms import current_platform
24
+ from vllm.tasks import SupportedTask
25
+ from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, cdiv
26
+ from vllm.v1.attention.backends.pallas import TPU_HEAD_SIZE_ALIGNMENT
27
+ from vllm.v1.core.sched.output import SchedulerOutput
28
+ from vllm.v1.kv_cache_interface import (AttentionSpec, KVCacheConfig,
29
+ KVCacheSpec)
30
+ from vllm.v1.outputs import ModelRunnerOutput
31
+ from vllm.v1.utils import report_usage_stats
32
+ from vllm.v1.worker.tpu_model_runner import TPUModelRunner
33
+ from vllm.v1.worker.utils import bind_kv_cache
34
+
35
+ logger = init_logger(__name__)
36
+
37
+
38
class TPUWorker:
    """A worker that runs the model on a single TPU rank.

    Thin device-management layer around a `TPUModelRunner`: it initializes
    the distributed environment and the XLA device, sizes the KV cache from
    a profiling run, and forwards execution/LoRA/profiling requests to the
    runner.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_rank: int,
        rank: int,
        distributed_init_method: str,
        is_driver_worker: bool = False,
    ):
        """Store configs and profiling settings; no device work happens here
        (see `init_device`)."""
        self.is_driver_worker = is_driver_worker
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.cache_config = vllm_config.cache_config
        self.lora_config = vllm_config.lora_config
        self.load_config = vllm_config.load_config
        self.parallel_config = vllm_config.parallel_config
        self.use_spmd = envs.VLLM_XLA_USE_SPMD
        # Keeps the pre-SPMD parallel config so the model runner can still
        # see the real TP/PP sizes; stays None outside SPMD mode.
        self.original_parallel_config = None
        if self.use_spmd:
            # Under SPMD mode, distributed env is initialized as if there is
            # only one worker/device.
            self.original_parallel_config = self.parallel_config
            self.parallel_config.tensor_parallel_size = 1
            self.parallel_config.pipeline_parallel_size = 1
            self.parallel_config.world_size = 1
        self.scheduler_config = vllm_config.scheduler_config
        self.device_config = vllm_config.device_config
        self.speculative_config = vllm_config.speculative_config
        self.observability_config = vllm_config.observability_config

        self.parallel_config.rank = rank
        self.local_rank = local_rank
        self.rank = rank
        self.distributed_init_method = distributed_init_method

        if self.cache_config.cache_dtype == "auto":
            # "auto" means: use the model's own dtype for the KV cache.
            self.cache_dtype = self.model_config.dtype
        else:
            self.cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[
                self.cache_config.cache_dtype]

        if self.model_config.trust_remote_code:
            # note: lazy import to avoid importing torch before initializing
            from vllm.utils import init_cached_hf_modules
            init_cached_hf_modules()

        # Delay profiler initialization to the start of the profiling.
        # This is because in vLLM V1, MP runtime is initialized before the
        # TPU Worker is initialized. The profiler server needs to start after
        # MP runtime is initialized.
        self.profiler = None
        self.profile_dir = None
        if envs.VLLM_TORCH_PROFILER_DIR and self.rank < 1:
            # For TPU, we can only have 1 active profiler session for 1 profiler
            # server. So we only profile on rank0.
            self.profile_dir = envs.VLLM_TORCH_PROFILER_DIR
            logger.info("Profiling enabled. Traces will be saved to: %s",
                        self.profile_dir)

        if self.model_config.seed is None:
            self.model_config.seed = 0

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Record the number of device/CPU KV cache blocks to allocate."""
        self.cache_config.num_gpu_blocks = num_gpu_blocks
        self.cache_config.num_cpu_blocks = num_cpu_blocks

    def init_device(self):
        """Initialize the XLA device, distributed env, RNG state, and the
        `TPUModelRunner`. Must run before any model loading/execution."""
        os.environ["PJRT_DEVICE"] = "TPU"
        # Note: Currently the XLA compiler wrongly uses 2D ring strategy on 1D
        # ring, the xla tpu compiler flag
        # `xla_tpu_force_1d_allreduce_at_chunk_count` is a temporary solution to
        # fix this. It will be removed after the bug in XLA compiler is fixed.
        os.environ["LIBTPU_INIT_ARGS"] = (
            os.environ.get("LIBTPU_INIT_ARGS", "") +
            " --xla_tpu_force_1d_allreduce_at_chunk_count=1"
            " --xla_jf_conv_input_fusion=False")
        # --xla_jf_conv_input_fusion=False is used to improve the perf of
        # quantized matmul.
        torch.set_grad_enabled(False)
        torch.set_default_dtype(self.model_config.dtype)

        # Initialize the distributed environment.
        self._init_tpu_worker_distributed_environment(
            self.vllm_config, self.rank, self.distributed_init_method,
            self.local_rank)

        # Device initialization should happen after initializing
        # the distributed runtime.
        self.device = xm.xla_device()
        self.device_config.device = self.device

        # Set random seed.
        set_random_seed(self.model_config.seed)
        if self.model_config.seed is not None:
            xm.set_rng_state(self.model_config.seed, self.device)

        # Increase the cache size limit, which is the maximum number of
        # dynamo graphs that can be compiled.
        # TODO (NickLucche) On gsm we compile 80+ graphs.
        # Re-evaluate limit, with MM we may get close to this limit.
        torch._dynamo.config.cache_size_limit = 128
        # Use persistent cache to avoid XLA recompilation.
        # NOTE(woosuk): Set per-rank cache path since different ranks
        # can have slightly different XLA graphs.
        world_size = self.parallel_config.world_size
        rank = xr.global_ordinal()
        # The PyTorch/XLA compilation cache uses the Torch IR to generate keys.
        # Consequently, changes in optimization flags, which affect compilation
        # results, don't change the cache key. This can result in the wrong
        # compilation being used. To prevent this, disabling the XLA compilation
        # cache during development is recommended. We can disable it by
        # `export VLLM_XLA_CACHE_PATH=`
        if envs.VLLM_XLA_CACHE_PATH:
            per_rank_path = os.path.join(envs.VLLM_XLA_CACHE_PATH,
                                         f"tp{world_size}_rank{rank}")
            xr.initialize_cache(per_rank_path, readonly=False)

        # Init ModelRunner here, so that we have access to self.device.
        self.model_runner = \
            TPUModelRunner(self.vllm_config, self.device,
                           self.original_parallel_config)

        if rank == 0:
            # If usage stat is enabled, collect relevant info.
            report_usage_stats(self.vllm_config)

    def determine_available_memory(self) -> int:
        """Run a dummy profiling pass and return the number of bytes that
        can be dedicated to the TPU KV cache on this device."""
        kv_caches: dict[str, torch.Tensor] = {}
        kv_cache_spec = self.model_runner.get_kv_cache_spec()
        for layer_name, layer_spec in kv_cache_spec.items():
            if isinstance(layer_spec, AttentionSpec):
                dtype = layer_spec.dtype

                # Use an empty tensor instead of `None` to force Dynamo to pass
                # it by reference, rather by specializing on the value `None`.
                tpu_kv_cache = torch.tensor([], dtype=dtype).to(self.device)
                kv_caches[layer_name] = tpu_kv_cache
            else:
                raise NotImplementedError(
                    f"Unsupported KV cache spec '{type(layer_spec)}'")

        runner_kv_caches: list[torch.Tensor] = []
        bind_kv_cache(
            kv_caches,
            self.vllm_config.compilation_config.static_forward_context,
            runner_kv_caches)

        # `max_num_tokens >= max_num_batched_tokens` due to padding.
        with self.model_runner.maybe_setup_dummy_loras(self.lora_config):
            self.model_runner.profile_run(self.model_runner.max_num_tokens)

        # Synchronize before measuring the memory usage.
        xm.wait_device_ops()

        # During the profiling run, the model runs without KV cache. After
        # the profiling run, the model always runs with KV cache. Here we clear
        # the dynamo cache and cached bytecode to ensure the model always has
        # one compiled bytecode. Having one FX graph/cached bytecode per
        # compiled model is required for `support_torch_compile` decorator to
        # skip dynamo guard.
        self.model_runner.reset_dynamo_cache()

        # Get the maximum amount of memory used by the model weights and
        # intermediate activations.
        if self.use_spmd:
            # This is a workaround for the TPU SPMD mode. The get_memory_info
            # API doesn't work with SPMD mode in PyTorch/XLA.
            # TODO: use xm.get_memory_info for SPMD once it's supported in
            # PyTorch/XLA.
            import tpu_info
            chip_type, _ = tpu_info.device.get_local_chips()
            device_usage = tpu_info.metrics.get_chip_usage(chip_type)
            total_memory_size = device_usage[0].total_memory
            current_mem = device_usage[0].memory_usage
        else:
            m = xm.get_memory_info(self.device)
            total_memory_size = m["bytes_limit"]
            current_mem = m["bytes_used"]
        # Ideally we would use profiled = m["peak_bytes_used"] to
        # get weights + activations. But there is memory used during
        # compilation / weight loading that impacts the peak and
        # there is no way to reset peak memory in XLA, So we
        # use the heuristic of 2% of weights.
        profiled = current_mem * 1.02

        # Calculate the TPU KV cache size based on profiling.
        usable_memory_size = int(total_memory_size *
                                 self.cache_config.gpu_memory_utilization)
        tpu_kv_cache_bytes = max(usable_memory_size - profiled, 0)
        head_size = self.model_config.get_head_size()
        if head_size > 0:
            padded_head_size = cdiv(
                head_size, TPU_HEAD_SIZE_ALIGNMENT) * TPU_HEAD_SIZE_ALIGNMENT
            if padded_head_size != head_size:
                logger.warning_once("head size is padded to %d",
                                    padded_head_size)
            # We adjust the usable memory size for the KV cache to prevent OOM
            # errors, even after padding the head_size.
            tpu_kv_cache_bytes = (tpu_kv_cache_bytes * head_size //
                                  padded_head_size)
        return int(tpu_kv_cache_bytes)

    def execute_model(
        self,
        scheduler_output: "SchedulerOutput",
    ) -> Optional[ModelRunnerOutput]:
        """Run one scheduler step; non-driver workers return None unless a
        KV transfer group needs every worker's output."""
        output = self.model_runner.execute_model(scheduler_output)
        # every worker's output is needed when kv_transfer_group is setup
        return output if self.is_driver_worker or has_kv_transfer_group(
        ) else None

    def profile(self, is_start: bool = True):
        """Start/stop an XLA profiler trace. Only rank 0 profiles; the
        profiler server is lazily started on first use (see __init__)."""
        if self.rank < 1:
            if self.profile_dir is None:
                raise RuntimeError("Profiler is not enabled.")
            if is_start:
                if self.profiler is None:
                    self.profiler = xp.start_server(9012)
                xp.start_trace(self.profile_dir)
            else:
                xp.stop_trace()

    def add_lora(self, lora_request: LoRARequest) -> bool:
        """Register a LoRA adapter with the model runner."""
        return self.model_runner.add_lora(lora_request)

    def load_model(self) -> None:
        """Load model weights onto the TPU device via the model runner."""
        self.model_runner.load_model()

    def update_config(self, overrides: dict[str, Any]) -> None:
        """Apply config overrides to the model runner."""
        self.model_runner.update_config(overrides)

    def reload_weights(self) -> None:
        """Reload model weights in place via the model runner."""
        self.model_runner.reload_weights()

    def compile_or_warm_up_model(self) -> None:
        """Pre-compile model graphs (unless eager mode is forced), then
        restore the configured RNG seed."""
        if not self.model_config.enforce_eager:
            self.model_runner.capture_model()

        # Reset the seed to ensure that the random state is not affected by
        # the model initialization and profiling.
        set_random_seed(self.model_config.seed)

    def get_model(self) -> nn.Module:
        """Return the underlying nn.Module held by the model runner."""
        return self.model_runner.get_model()

    def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        """Return the tasks the loaded model supports."""
        return self.model_runner.get_supported_tasks()

    def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
        """Return the per-layer KV cache specification."""
        return self.model_runner.get_kv_cache_spec()

    def initialize_from_config(self, kv_cache_config: KVCacheConfig) -> None:
        """Allocate GPU KV cache with the specified kv_cache_config."""
        self.model_runner.initialize_kv_cache(kv_cache_config)

    def check_health(self) -> None:
        # worker will always be healthy as long as it's running.
        return

    def _init_tpu_worker_distributed_environment(
        self,
        vllm_config: VllmConfig,
        rank: int,
        distributed_init_method: Optional[str] = None,
        local_rank: int = -1,
    ) -> None:
        """Initialize the distributed environment."""
        if self.use_spmd:
            xr.use_spmd()
        # NOTE(woosuk): This is just to initialize the TP group and broadcast
        # the input objects on CPU. The all-reduce and all-gather ops on TPU
        # are invoked by `xm.all_reduce` and `xm.all_gather` which use their
        # own context.
        parallel_config = vllm_config.parallel_config
        init_distributed_environment(
            world_size=parallel_config.world_size,
            rank=rank,
            local_rank=local_rank,
            distributed_init_method=distributed_init_method,
            backend=current_platform.dist_backend,
        )
        ensure_model_parallel_initialized(
            parallel_config.tensor_parallel_size,
            parallel_config.pipeline_parallel_size)

        ensure_kv_transfer_initialized(vllm_config)
326
+
327
+
328
# Prefer the out-of-tree `tpu_commons` TPUWorker when it is installed;
# otherwise keep the in-tree TPUWorker defined above. (Fix: dropped the
# redundant `pass` that followed the logging call in the except branch.)
try:
    from tpu_commons.worker import TPUWorker as TPUCommonsWorker
    TPUWorker = TPUCommonsWorker  # type: ignore
except ImportError:
    logger.info("tpu_commons not found, using vLLM's TPUWorker.")
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/utils.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ from collections import defaultdict
4
+ from dataclasses import dataclass
5
+ from typing import TYPE_CHECKING, Optional
6
+
7
+ import torch
8
+
9
+ from vllm.attention.backends.abstract import AttentionBackend
10
+ from vllm.config import ModelConfig, SchedulerConfig
11
+ from vllm.model_executor.models.interfaces import MultiModalEmbeddings
12
+ from vllm.model_executor.models.utils import extract_layer_index
13
+ from vllm.multimodal.registry import MultiModalRegistry
14
+ from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
15
+ from vllm.v1.core.encoder_cache_manager import compute_encoder_budget
16
+ from vllm.v1.kv_cache_interface import KVCacheGroupSpec
17
+
18
+ if TYPE_CHECKING:
19
+ from vllm.attention.layer import Attention
20
+
21
+
22
+ class MultiModalBudget:
23
+ """Helper class to calculate budget information for multi-modal models."""
24
+
25
+ def __init__(
26
+ self,
27
+ model_config: ModelConfig,
28
+ scheduler_config: SchedulerConfig,
29
+ mm_registry: MultiModalRegistry,
30
+ *,
31
+ max_model_len: int,
32
+ max_num_reqs: int,
33
+ ) -> None:
34
+ super().__init__()
35
+
36
+ self.model_config = model_config
37
+ self.scheduler_config = scheduler_config
38
+ self.mm_registry = mm_registry
39
+
40
+ encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
41
+ model_config=model_config,
42
+ scheduler_config=scheduler_config,
43
+ mm_registry=mm_registry,
44
+ )
45
+
46
+ self.max_num_encoder_input_tokens = encoder_compute_budget
47
+ self.encoder_cache_size = encoder_cache_size
48
+ self.max_model_len = max_model_len
49
+ self.max_num_reqs = max_num_reqs
50
+
51
+ self.mm_limits = mm_registry.get_mm_limits_per_prompt(model_config)
52
+
53
+ max_items_per_prompt_by_modality = dict[str, int]()
54
+ max_items_per_batch_by_modality = dict[str, int]()
55
+
56
+ max_tokens_by_modality = mm_registry \
57
+ .get_max_tokens_per_item_by_nonzero_modality(model_config)
58
+
59
+ for modality, max_tokens in max_tokens_by_modality.items():
60
+ (
61
+ max_items_per_prompt,
62
+ max_items_per_batch,
63
+ ) = self.get_max_items(modality, max_tokens)
64
+
65
+ max_items_per_prompt_by_modality[modality] = max_items_per_prompt
66
+ max_items_per_batch_by_modality[modality] = max_items_per_batch
67
+
68
+ self.max_tokens_by_modality = max_tokens_by_modality
69
+ self.max_items_per_prompt_by_modality = max_items_per_prompt_by_modality
70
+ self.max_items_per_batch_by_modality = max_items_per_batch_by_modality
71
+
72
+ def get_modality_with_max_tokens(self) -> tuple[str, int]:
73
+ max_tokens_by_modality = self.max_tokens_by_modality
74
+ modality, max_tokens = max(max_tokens_by_modality.items(),
75
+ key=lambda item: item[1])
76
+
77
+ return modality, max_tokens
78
+
79
+ def get_encoder_budget(self) -> int:
80
+ return min(self.max_num_encoder_input_tokens, self.encoder_cache_size)
81
+
82
+ def get_max_items(
83
+ self,
84
+ modality: str,
85
+ max_tokens_per_item: int,
86
+ ) -> tuple[int, int]:
87
+ if max_tokens_per_item == 0:
88
+ return 0, 0
89
+
90
+ # Check how many items of this modality can be supported by
91
+ # the encoder budget.
92
+ encoder_budget = self.get_encoder_budget()
93
+
94
+ # TODO: handle encoder-decoder models once we support them.
95
+ if encoder_budget == 0:
96
+ return 0, 0
97
+
98
+ max_encoder_items_per_batch = encoder_budget // max_tokens_per_item
99
+
100
+ # Check how many items of this modality can be supported by
101
+ # the decoder budget.
102
+ mm_limit = self.mm_limits[modality]
103
+
104
+ max_items_per_prompt = max(
105
+ 1,
106
+ min(mm_limit, self.max_model_len // max_tokens_per_item),
107
+ )
108
+
109
+ scheduler_config = self.scheduler_config
110
+ max_num_reqs = self.max_num_reqs
111
+
112
+ if not scheduler_config.enable_chunked_prefill:
113
+ max_num_reqs = min(
114
+ max_num_reqs,
115
+ scheduler_config.max_num_batched_tokens // max_tokens_per_item,
116
+ )
117
+
118
+ max_decoder_items_per_batch = max_num_reqs * max_items_per_prompt
119
+
120
+ max_items_per_batch = max(
121
+ 1,
122
+ min(max_encoder_items_per_batch, max_decoder_items_per_batch),
123
+ )
124
+
125
+ return max_items_per_prompt, max_items_per_batch
126
+
127
+
128
+ @dataclass
129
+ class AttentionGroup:
130
+ backend: type[AttentionBackend]
131
+ metadata_builder: AttentionMetadataBuilder
132
+ layer_names: list[str]
133
+
134
+
135
+ def sanity_check_mm_encoder_outputs(
136
+ mm_embeddings: MultiModalEmbeddings,
137
+ expected_num_items: int,
138
+ ) -> None:
139
+ """
140
+ Perform sanity checks for the result of
141
+ [`vllm.model_executor.models.SupportsMultiModal.get_multimodal_embeddings`][].
142
+ """
143
+ assert isinstance(mm_embeddings, (list, tuple, torch.Tensor)), (
144
+ "Expected multimodal embeddings to be a list/tuple of 2D tensors, "
145
+ f"or a single 3D tensor, but got {type(mm_embeddings)} "
146
+ "instead. This is most likely due to incorrect implementation "
147
+ "of the model's `get_multimodal_embeddings` method.")
148
+
149
+ assert len(mm_embeddings) == expected_num_items, (
150
+ "Expected number of multimodal embeddings to match number of "
151
+ f"input items: {expected_num_items}, but got {len(mm_embeddings)=} "
152
+ "instead. This is most likely due to incorrect implementation "
153
+ "of the model's `get_multimodal_embeddings` method.")
154
+
155
+ assert all(e.ndim == 2 for e in mm_embeddings), (
156
+ "Expected multimodal embeddings to be a sequence of 2D tensors, "
157
+ f"but got tensors with shapes {[e.shape for e in mm_embeddings]} "
158
+ "instead. This is most likely due to incorrect implementation "
159
+ "of the model's `get_multimodal_embeddings` method.")
160
+
161
+
162
+ def scatter_mm_placeholders(
163
+ embeds: torch.Tensor,
164
+ is_embed: Optional[torch.Tensor],
165
+ ) -> torch.Tensor:
166
+ """
167
+ Scatter the multimodal embeddings into a contiguous tensor that represents
168
+ the placeholder tokens.
169
+
170
+ [`vllm.multimodal.processing.PromptUpdateDetails.is_embed`][].
171
+
172
+ Args:
173
+ embeds: The multimodal embeddings.
174
+ Shape: `(num_embeds, embed_dim)`
175
+ is_embed: A boolean mask indicating which positions in the placeholder
176
+ tokens need to be filled with multimodal embeddings.
177
+ Shape: `(num_placeholders, num_embeds)`
178
+ """
179
+ if is_embed is None:
180
+ return embeds
181
+
182
+ placeholders = embeds.new_full(
183
+ (is_embed.shape[0], embeds.shape[-1]),
184
+ fill_value=torch.nan,
185
+ )
186
+ placeholders[is_embed] = embeds
187
+ return placeholders
188
+
189
+
190
+ def gather_mm_placeholders(
191
+ placeholders: torch.Tensor,
192
+ is_embed: Optional[torch.Tensor],
193
+ ) -> torch.Tensor:
194
+ """
195
+ Reconstructs the embeddings from the placeholder tokens.
196
+
197
+ This is the operation of [scatter_mm_placeholders][].
198
+ """
199
+ if is_embed is None:
200
+ return placeholders
201
+
202
+ return placeholders[is_embed]
203
+
204
+
205
+ def initialize_kv_cache_for_kv_sharing(
206
+ shared_kv_cache_layers: dict[str, str],
207
+ kv_cache_groups: list[KVCacheGroupSpec],
208
+ kv_caches: dict[str, torch.Tensor],
209
+ # Optional for now to avoid breaking TPU
210
+ attn_groups: Optional[list[list[AttentionGroup]]] = None,
211
+ ) -> None:
212
+ """
213
+ Sets up KV cache sharing by reusing the allocated KV caches in `kv_caches`
214
+ for layers that do not allocate its own KV cache, based on the mapping in
215
+ `shared_kv_cache_layers`. Adds these layers to the corresponding KV cache
216
+ group, which is needed to ensure that attention metadata is assigned later.
217
+
218
+ Args:
219
+ shared_kv_cache_layers: Layer pairings for cross-layer KV sharing.
220
+ If an Attention layer `layer_name` is in the keys of this dict, it
221
+ means this layer will perform attention using the keys and values
222
+ from the KV cache of `shared_kv_cache_layers[layer_name]`.
223
+ kv_cache_groups: The KV cache groups of the model.
224
+ kv_caches: The allocated kv_caches with layer names as keys.
225
+ Note that layers in shared_kv_cache_layers.keys() are not
226
+ originally included as it only contains layers which have its own
227
+ KV cache allocation.
228
+ attn_groups: Optional list of attention groups. Layers in the same KV
229
+ cache group may be placed in different attention groups if they
230
+ have different attention backends. Currently only provided by
231
+ GPU model runner.
232
+ """
233
+ # mapping from layer name to tuple of (kv_cache_group_idx, attn_group_idx)
234
+ layer_to_attn_group_idx: dict[str, tuple[int, int]] = {}
235
+ if attn_groups:
236
+ for kv_cache_group_idx, kv_attn_groups in enumerate(attn_groups):
237
+ for attn_group_idx, attn_group in enumerate(kv_attn_groups):
238
+ for layer_name in attn_group.layer_names:
239
+ layer_to_attn_group_idx[layer_name] = (kv_cache_group_idx,
240
+ attn_group_idx)
241
+ else:
242
+ for kv_cache_group_idx, kv_cache_group in enumerate(kv_cache_groups):
243
+ for layer_name in kv_cache_group.layer_names:
244
+ # attn group idx default to 0 if not provided
245
+ layer_to_attn_group_idx[layer_name] = (kv_cache_group_idx, 0)
246
+
247
+ for layer_name, target_layer_name in shared_kv_cache_layers.items():
248
+ kv_caches[layer_name] = kv_caches[target_layer_name]
249
+ kv_cache_group_idx = layer_to_attn_group_idx[target_layer_name][0]
250
+ kv_cache_groups[kv_cache_group_idx].layer_names.append(layer_name)
251
+
252
+ if attn_groups:
253
+ attn_group_idx = layer_to_attn_group_idx[target_layer_name][1]
254
+ attn_groups[kv_cache_group_idx][attn_group_idx].layer_names.append(
255
+ layer_name)
256
+
257
+
258
+ def bind_kv_cache(
259
+ kv_caches: dict[str, torch.Tensor],
260
+ forward_context: dict[str, "Attention"],
261
+ runner_kv_caches: list[torch.Tensor],
262
+ ) -> None:
263
+ """
264
+ Bind the allocated KV cache to both ModelRunner and forward context so
265
+ that the KV cache can be used in the forward pass.
266
+
267
+ This function:
268
+ 1) Fills the ModelRunner's kv cache list (`runner_kv_caches`) with
269
+ kv_caches.
270
+ 2) Associates each attention layer in the `forward_context` with its
271
+ corresponding KV cache in kv_caches.
272
+
273
+ Args:
274
+ kv_caches: The allocated kv_caches with layer names as keys.
275
+ forward_context: The global forward context containing all Attention
276
+ layers with layer names as keys.
277
+ runner_kv_caches: The kv_cache declared by ModelRunner.
278
+ """
279
+ # Bind kv_caches to ModelRunner
280
+ assert len(runner_kv_caches) == 0
281
+
282
+ # Convert kv_caches dict to a list of tensors in the order of layer_index.
283
+ index2name = defaultdict(list)
284
+ for layer_name in kv_caches:
285
+ index2name[extract_layer_index(layer_name)].append(layer_name)
286
+
287
+ for layer_index in sorted(index2name.keys()):
288
+ layer_names = index2name[layer_index]
289
+ if len(layer_names) > 1:
290
+ # One typical case is encoder-decoder model, e.g., bart.
291
+ # The cross attention and self attention in the same decoder layer
292
+ # has different layer_name but the same layer_index.
293
+ raise NotImplementedError
294
+ layer_name = layer_names[0]
295
+ runner_kv_caches.append(kv_caches[layer_name])
296
+
297
+ # Bind kv_caches to forward context
298
+ for layer_name, kv_cache in kv_caches.items():
299
+ # NOTE: Use list because of v0 PP virtual engine.
300
+ forward_context[layer_name].kv_cache = [kv_cache]
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/worker_base.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ from typing import Optional
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+
9
+ from vllm.config import VllmConfig
10
+ from vllm.logger import init_logger
11
+ from vllm.v1.kv_cache_interface import KVCacheSpec
12
+ from vllm.worker.worker_base import WorkerBase as WorkerBaseV0
13
+
14
+ logger = init_logger(__name__)
15
+
16
+
17
+ class WorkerBase(WorkerBaseV0):
18
+ """
19
+ Abstract class for v1 worker, mainly define some methods for v1.
20
+ For methods shared by v0 and v1, define them in v0 WorkerBase
21
+ """
22
+
23
+ def __init__(
24
+ self,
25
+ vllm_config: VllmConfig,
26
+ local_rank: int,
27
+ rank: int,
28
+ distributed_init_method: str,
29
+ is_driver_worker: bool = False,
30
+ ):
31
+ """
32
+ Initialize common worker components.
33
+
34
+ Args:
35
+ vllm_config: Complete vLLM configuration
36
+ local_rank: Local device index
37
+ rank: Global rank in distributed setup
38
+ distributed_init_method: Distributed initialization method
39
+ is_driver_worker: Whether this worker handles driver
40
+ responsibilities
41
+ """
42
+ # Configuration storage
43
+ super().__init__(vllm_config=vllm_config)
44
+
45
+ self.parallel_config.rank = rank
46
+ self.local_rank = local_rank
47
+ self.rank = rank
48
+ self.distributed_init_method = distributed_init_method
49
+ self.is_driver_worker = is_driver_worker
50
+
51
+ # Device and model state
52
+ self.device: Optional[torch.device] = None
53
+ self.model_runner: Optional[nn.Module] = None
54
+
55
+ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
56
+ """Get specifications for KV cache implementation."""
57
+ raise NotImplementedError
58
+
59
+ def compile_or_warm_up_model(self) -> None:
60
+ """Prepare model for execution through compilation/warmup."""
61
+ raise NotImplementedError
62
+
63
+ def check_health(self) -> None:
64
+ """Basic health check (override for device-specific checks)."""
65
+ return
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/xpu_model_runner.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ from typing import TYPE_CHECKING
4
+
5
+ import torch
6
+
7
+ from vllm.config import VllmConfig
8
+ from vllm.logger import init_logger
9
+ from vllm.v1.worker.gpu_model_runner import GPUModelRunner
10
+
11
+ if TYPE_CHECKING:
12
+ pass
13
+
14
+ logger = init_logger(__name__)
15
+
16
+
17
+ class XPUModelRunner(GPUModelRunner):
18
+ """A model runner for XPU devices."""
19
+
20
+ def __init__(
21
+ self,
22
+ vllm_config: VllmConfig,
23
+ device: torch.device,
24
+ ):
25
+ super().__init__(vllm_config, device)
26
+ # FIXME: To be verified.
27
+ self.cascade_attn_enabled = False
28
+
29
+ def _init_device_properties(self) -> None:
30
+ self.num_sms = None
31
+
32
+ def _sync_device(self) -> None:
33
+ torch.xpu.synchronize()
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/xpu_worker.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ import os
4
+
5
+ import torch
6
+ import torch.distributed
7
+
8
+ import vllm.envs as envs
9
+ from vllm.config import VllmConfig
10
+ from vllm.distributed import get_world_group
11
+ from vllm.logger import init_logger
12
+ from vllm.model_executor import set_random_seed
13
+ from vllm.platforms import current_platform
14
+ from vllm.v1.worker.gpu_worker import (Worker,
15
+ init_worker_distributed_environment)
16
+ from vllm.v1.worker.xpu_model_runner import XPUModelRunner
17
+
18
+ logger = init_logger(__name__)
19
+
20
+
21
class XPUWorker(Worker):
    """An XPU worker class.

    Specializes the generic GPU ``Worker`` for Intel XPU devices: device
    and distributed initialization go through ``torch.xpu`` / oneCCL, and
    memory profiling works around ``torch.xpu.mem_get_info()`` reporting
    incorrect free memory on Intel client GPUs.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_rank: int,
        rank: int,
        distributed_init_method: str,
        is_driver_worker: bool = False,
    ):
        """Initialize the XPU worker.

        Args:
            vllm_config: The global vLLM configuration.
            local_rank: Device index on this node.
            rank: Global rank in the distributed group.
            distributed_init_method: Init-method URL for torch.distributed.
            is_driver_worker: Whether this worker handles driver
                responsibilities.
        """
        super().__init__(vllm_config, local_rank, rank,
                         distributed_init_method, is_driver_worker)
        device_config = self.device_config
        assert device_config.device_type == "xpu"
        assert current_platform.is_xpu()

        # Torch profiler. Enabled and configured through env vars:
        # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace
        if envs.VLLM_TORCH_PROFILER_DIR:
            torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR
            logger.info("Profiling enabled. Traces will be saved to: %s",
                        torch_profiler_trace_dir)
            logger.debug(
                "Profiler config: record_shapes=%s,"
                "profile_memory=%s,with_stack=%s,with_flops=%s",
                envs.VLLM_TORCH_PROFILER_RECORD_SHAPES,
                envs.VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY,
                envs.VLLM_TORCH_PROFILER_WITH_STACK,
                envs.VLLM_TORCH_PROFILER_WITH_FLOPS,
            )
            self.profiler = torch.profiler.profile(
                activities=[
                    torch.profiler.ProfilerActivity.CPU,
                    torch.profiler.ProfilerActivity.XPU,
                ],
                record_shapes=envs.VLLM_TORCH_PROFILER_RECORD_SHAPES,
                profile_memory=envs.VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY,
                with_stack=envs.VLLM_TORCH_PROFILER_WITH_STACK,
                with_flops=envs.VLLM_TORCH_PROFILER_WITH_FLOPS,
                on_trace_ready=torch.profiler.tensorboard_trace_handler(
                    torch_profiler_trace_dir, use_gzip=True))
        else:
            self.profiler = None

    # we provide this function due to `torch.xpu.mem_get_info()` doesn't
    # return correct free_gpu_memory on intel client GPU. We need to
    # calculate/estimate it.
    def xpu_get_mem_info(self):
        """Return ``(free_bytes, total_bytes)`` for the current XPU.

        On data-center GPUs the runtime value is trusted; on client GPUs
        free memory is estimated from ``torch.xpu.memory_allocated()``
        plus a fixed allowance for non-torch allocations.
        """
        if current_platform.is_data_center_gpu():
            return torch.xpu.mem_get_info()
        else:
            _, total_gpu_memory = torch.xpu.mem_get_info()
            # FIXME: memory_allocated() doesn't count non-torch allocations,
            # and we don't have any API to get it. so we mark it as 128MB.
            used_memory = torch.xpu.memory_allocated()
            non_torch_allocations = 128 * 1024 * 1024
            free_gpu_memory = total_gpu_memory - (used_memory +
                                                  non_torch_allocations)
            return free_gpu_memory, total_gpu_memory

    @torch.inference_mode()
    def determine_available_memory(self) -> int:
        """Profiles the peak memory usage of the model to determine how many
        KV blocks may be allocated without OOMs.
        The engine will first conduct a profiling of the existing memory usage.
        Then, it calculates the maximum possible number of GPU and CPU blocks
        that can be allocated with the remaining free memory.
        .. tip::
            You may limit the usage of GPU memory
            by adjusting the `gpu_memory_utilization` parameter.
        """
        # Profile the memory usage of the model and get the maximum number of
        # cache blocks that can be allocated with the remaining free memory.
        torch.xpu.empty_cache()
        torch.xpu.reset_peak_memory_stats()

        free_gpu_memory, total_gpu_memory = torch.xpu.mem_get_info()
        current_allocated_bytes = torch.xpu.memory_allocated()
        msg = ("Before memory profiling run, "
               f"total GPU memory: {total_gpu_memory / 1024**2:.2f} MB, "
               f"model load takes {current_allocated_bytes / 1024**2:.2f} MB, "
               f"free gpu memory is {free_gpu_memory / 1024**2:.2f} MB.")
        logger.info(msg)
        # Execute a forward pass with dummy inputs to profile the memory usage
        # of the model.
        self.model_runner.profile_run()

        free_gpu_memory, _ = self.xpu_get_mem_info()
        # NOTE(woosuk): Here we assume that the other processes using the same
        # GPU did not change their memory usage during the profiling.
        # NOTE(review): init_gpu_memory is set from total device memory in
        # init_device(), so this assert compares against total rather than
        # the free memory at startup — confirm against gpu_worker semantics.
        assert self.init_gpu_memory > free_gpu_memory, (
            "Error in memory profiling. "
            f"Initial free memory {self.init_gpu_memory}, current free memory"
            f" {free_gpu_memory}. This happens when the GPU memory was "
            "not properly cleaned up before initializing the vLLM instance.")

        # Get the peak memory allocation recorded by torch
        peak_memory = torch.xpu.memory_stats()["allocated_bytes.all.peak"]

        torch.xpu.empty_cache()
        torch_allocated_bytes = torch.xpu.memory_stats(
        )["allocated_bytes.all.current"]
        # Query free/total exactly once so both values come from the same
        # snapshot (two separate calls could observe different states and
        # yield an inconsistent total_allocated_bytes).
        free_after_cleanup, total_after_cleanup = self.xpu_get_mem_info()
        total_allocated_bytes = total_after_cleanup - free_after_cleanup

        non_torch_allocations = total_allocated_bytes - torch_allocated_bytes
        if non_torch_allocations > 0:
            peak_memory += non_torch_allocations
        available_kv_cache_memory = (
            total_gpu_memory * self.cache_config.gpu_memory_utilization -
            peak_memory)

        msg = ("After memory profiling run, "
               f"peak memory usage is {peak_memory / 1024**2:.2f} MB,"
               f"torch mem is {torch_allocated_bytes / 1024**2:.2f} MB, "
               f"non-torch mem is {non_torch_allocations / 1024**2:.2f} MB, "
               f"free gpu memory is {free_gpu_memory / 1024**2:.2f} MB.")
        logger.info(msg)

        return int(available_kv_cache_memory)

    def init_device(self):
        """Bind this worker to its XPU device, set up the distributed
        environment (oneCCL), seed RNGs, and construct the model runner.

        Raises:
            RuntimeError: If the configured device type is not "xpu" or the
                current platform is not an XPU platform.
        """
        if self.device_config.device.type == "xpu" and current_platform.is_xpu(
        ):
            self.device = torch.device(f"xpu:{self.local_rank}")
            current_platform.set_device(self.device)
            torch.xpu.empty_cache()
            self.init_gpu_memory = torch.xpu.get_device_properties(
                self.local_rank).total_memory
        else:
            raise RuntimeError(
                f"Not support device type: {self.device_config.device}")

        # oneCCL environment defaults; respect values already set by the user.
        ENV_CCL_ZE_IPC_EXCHANGE = os.getenv("CCL_ZE_IPC_EXCHANGE", "pidfd")
        ENV_CCL_ATL_TRANSPORT = os.getenv("CCL_ATL_TRANSPORT", "ofi")
        ENV_LOCAL_WORLD_SIZE = os.getenv("LOCAL_WORLD_SIZE",
                                         str(self.parallel_config.world_size))
        os.environ["CCL_ZE_IPC_EXCHANGE"] = ENV_CCL_ZE_IPC_EXCHANGE
        os.environ["CCL_ATL_TRANSPORT"] = ENV_CCL_ATL_TRANSPORT
        os.environ["LOCAL_WORLD_SIZE"] = ENV_LOCAL_WORLD_SIZE
        os.environ["LOCAL_RANK"] = str(self.local_rank)

        init_worker_distributed_environment(self.vllm_config, self.rank,
                                            self.distributed_init_method,
                                            self.local_rank,
                                            current_platform.dist_backend)

        # global all_reduce needed for overall oneccl warm up
        torch.distributed.all_reduce(torch.zeros(1).xpu(),
                                     group=get_world_group().device_group)

        # Set random seed.
        set_random_seed(self.model_config.seed)

        # Construct the model runner
        self.model_runner = XPUModelRunner(  # type: ignore
            self.vllm_config, self.device)