Add files using upload-large-folder tool
Browse files
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/metadata.py +43 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/rejection_sampler.py +631 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/sampler.py +240 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/eagle.py +722 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/medusa.py +64 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/metadata.py +62 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/metrics.py +178 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/ngram_proposer.py +157 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/utils.py +14 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/__init__.py +289 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/backend_guidance.py +245 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/backend_outlines.py +320 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/backend_types.py +134 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/backend_xgrammar.py +323 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/request.py +86 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/utils.py +373 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/block_table.py +174 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/cpu_model_runner.py +124 -0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/metadata.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from vllm.v1.sample.logits_processor import LogitsProcessors
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@dataclass
class SamplingMetadata:
    """Per-batch sampling parameters consumed by the v1 sampler.

    Tensor-valued fields hold one entry per request in the batch; an
    `Optional` field set to None means the feature is unused batch-wide.
    """

    # Per-request temperature tensor; may be None (e.g. all-greedy batches).
    temperature: Optional[torch.Tensor]
    # Batch-wide flags: every request is greedy / every request is random.
    all_greedy: bool
    all_random: bool

    # Per-request top-p / top-k cutoffs; None when unused for the batch.
    top_p: Optional[torch.Tensor]
    top_k: Optional[torch.Tensor]

    # req_index -> seeded torch.Generator for requests that set a seed.
    generators: dict[int, torch.Generator]

    # None means no logprobs, 0 means sampled token logprobs only
    max_num_logprobs: Optional[int]

    # True when no request uses penalties — presumably lets the sampler
    # skip penalty application entirely (confirm against the sampler).
    no_penalties: bool
    # Prompt tokens needed for penalty computation; None when unused.
    prompt_token_ids: Optional[torch.Tensor]
    frequency_penalties: torch.Tensor
    presence_penalties: torch.Tensor
    repetition_penalties: torch.Tensor

    # Tokens generated so far, one list per request.
    output_token_ids: list[list[int]]

    # `allowed_token_ids_mask` is a 2D bool tensor of shape (max batch size,
    # vocab size).
    allowed_token_ids_mask: Optional[torch.Tensor]

    # req_index -> bad_words_token_ids
    bad_words_token_ids: dict[int, list[list[int]]]

    # Loaded logits processors
    logitsprocs: LogitsProcessors
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/rejection_sampler.py
ADDED
|
@@ -0,0 +1,631 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
|
| 8 |
+
from vllm.logger import init_logger
|
| 9 |
+
from vllm.triton_utils import tl, triton
|
| 10 |
+
from vllm.v1.sample.metadata import SamplingMetadata
|
| 11 |
+
from vllm.v1.sample.ops.topk_topp_sampler import apply_top_k_top_p
|
| 12 |
+
from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
|
| 13 |
+
|
| 14 |
+
logger = init_logger(__name__)

# Sentinel written into rejected/unused output slots; filtered out later by
# `RejectionSampler.parse_output`.
PLACEHOLDER_TOKEN_ID: tl.constexpr = -1
# Sentinel temperature value marking a greedy-sampling request.
GREEDY_TEMPERATURE: tl.constexpr = -1
# Maximum number of speculative draft tokens allowed per request in a single
# step. This value is chosen to be large enough to handle typical use cases.
MAX_SPEC_LEN = 32
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class RejectionSampler(nn.Module):
    """Rejection sampler for speculative decoding.

    Strictly follows the algorithm described in
    https://arxiv.org/abs/2211.17192. Terminology used here:

    * accepted tokens: draft tokens kept based on the relationship between
      the "raw" draft and target probabilities.
    * recovered tokens: tokens sampled from the adjusted distribution
      derived from both the draft and target probabilities.
    * bonus tokens: appended to the sequence when every proposed token is
      accepted. They are sampled from the target probabilities only, and
      are passed in (rather than sampled here) for flexibility — e.g. the
      caller can use top_p/top_k sampling for bonus tokens even though
      spec decode does not support those strategies.
    * output tokens: the final result of the rejection sampler,
      output tokens = accepted tokens + recovered tokens + bonus tokens.
    """

    def forward(
        self,
        metadata: SpecDecodeMetadata,
        # [num_tokens, vocab_size]
        draft_probs: Optional[torch.Tensor],
        # [num_tokens, vocab_size]
        target_logits: torch.Tensor,
        # [batch_size, 1]
        bonus_token_ids: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> torch.Tensor:
        '''
        Args:
            metadata:
                Metadata for spec decoding.
            draft_probs (Optional[torch.Tensor]):
                Probability distribution for the draft tokens, shape
                [num_tokens, vocab_size]. May be None when the proposer
                provides no probabilities (e.g. ngram spec decode).
            target_logits (torch.Tensor):
                Target model's logits, shape [num_tokens, vocab_size];
                logits from different requests are flattened together.
                NOTE: `target_logits` can be updated in place to save
                memory.
            bonus_token_ids (torch.Tensor):
                Bonus token per request, shape [batch_size, 1]. Appended
                when all proposed tokens are accepted; generated outside
                this module with the default sampling strategy so that
                top_p/top_k and similar strategies still apply to it.
            sampling_metadata (vllm.v1.sample.metadata.SamplingMetadata):
                Sampling parameters (temperature, top-k/top-p, seeded
                generators, ...).
        Returns:
            output_token_ids (torch.Tensor):
                Tensor containing the final output token IDs.
        '''
        assert metadata.max_spec_len <= MAX_SPEC_LEN
        # [num_tokens, vocab_size]
        # NOTE(woosuk): `compute_probs` may overwrite `target_logits` in
        # place instead of allocating a fresh tensor.
        probs = compute_probs(
            target_logits,
            metadata.cu_num_draft_tokens,
            sampling_metadata,
        )
        return rejection_sample(
            metadata.draft_token_ids,
            metadata.num_draft_tokens,
            metadata.max_spec_len,
            metadata.cu_num_draft_tokens,
            draft_probs,
            probs,
            bonus_token_ids,
            sampling_metadata,
        )

    @staticmethod
    def parse_output(
        output_token_ids: torch.Tensor,
        vocab_size: int,
    ) -> list[list[int]]:
        """Strip rejected and out-of-range entries from the sampler output.

        Args:
            output_token_ids: Sampled token IDs of shape
                [batch_size, max_spec_len + 1]; rejected positions were
                filled with `PLACEHOLDER_TOKEN_ID` by the rejection
                sampler and are dropped here.
            vocab_size: The size of the vocabulary; IDs >= vocab_size are
                also dropped.

        Returns:
            A list of lists of valid token IDs, one list per request.
        """
        token_matrix = output_token_ids.cpu().numpy()
        # Keep only real, in-vocabulary tokens.
        keep = ((token_matrix != PLACEHOLDER_TOKEN_ID) &
                (token_matrix < vocab_size))
        return [
            row[row_keep].tolist()
            for row, row_keep in zip(token_matrix, keep)
        ]
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def rejection_sample(
    # [num_tokens]
    draft_token_ids: torch.Tensor,
    # [batch_size]
    num_draft_tokens: list[int],
    max_spec_len: int,
    # [batch_size]
    cu_num_draft_tokens: torch.Tensor,
    # [num_tokens, vocab_size]
    draft_probs: Optional[torch.Tensor],
    # [num_tokens, vocab_size]
    target_probs: torch.Tensor,
    # [batch_size, 1]
    bonus_token_ids: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> torch.Tensor:
    """Rejection-sample output tokens for the whole batch.

    Greedy requests compare each draft token against the target argmax;
    random requests run the accept/reject test against uniform samples and
    fall back to recovered tokens on rejection. Rejected slots in the
    returned [batch_size, max_spec_len + 1] tensor keep
    PLACEHOLDER_TOKEN_ID; a bonus token is appended when every draft
    token of a request is accepted.
    """
    assert draft_token_ids.ndim == 1
    assert draft_probs is None or draft_probs.ndim == 2
    assert cu_num_draft_tokens.ndim == 1
    assert target_probs.ndim == 2

    batch_size = len(num_draft_tokens)
    num_tokens = draft_token_ids.shape[0]
    vocab_size = target_probs.shape[-1]
    device = target_probs.device
    # The Triton kernels below index with raw pointer arithmetic, so all
    # inputs must be contiguous.
    assert draft_token_ids.is_contiguous()
    assert draft_probs is None or draft_probs.is_contiguous()
    assert target_probs.is_contiguous()
    assert bonus_token_ids.is_contiguous()
    assert target_probs.shape == (num_tokens, vocab_size)

    # Create output buffer.
    output_token_ids = torch.empty(
        (batch_size, max_spec_len + 1),
        dtype=torch.int32,  # Consistent with SamplerOutput.sampled_token_ids.
        device=device,
    )
    output_token_ids.fill_(PLACEHOLDER_TOKEN_ID)

    if sampling_metadata.all_greedy:
        # All requests greedy: the kernel treats a None mask as all-greedy.
        is_greedy = None
    else:
        # Per-request greedy mask via the GREEDY_TEMPERATURE sentinel.
        is_greedy = sampling_metadata.temperature == GREEDY_TEMPERATURE
    if not sampling_metadata.all_random:
        # Rejection sampling for greedy sampling requests.
        target_argmax = target_probs.argmax(dim=-1)
        rejection_greedy_sample_kernel[(batch_size, )](
            output_token_ids,
            cu_num_draft_tokens,
            draft_token_ids,
            target_argmax,
            bonus_token_ids,
            is_greedy,
            max_spec_len,
            num_warps=1,
        )
        if sampling_metadata.all_greedy:
            # No random requests left to process.
            return output_token_ids

    # Generate uniform probabilities for rejection sampling.
    # [num_tokens]
    uniform_probs = generate_uniform_probs(
        num_tokens,
        num_draft_tokens,
        sampling_metadata.generators,
        device,
    )

    # Sample recovered tokens for each position.
    # [num_tokens]
    recovered_token_ids = sample_recovered_tokens(
        max_spec_len,
        num_draft_tokens,
        cu_num_draft_tokens,
        draft_token_ids,
        draft_probs,
        target_probs,
        sampling_metadata,
        device,
    )

    # Rejection sampling for random sampling requests.
    rejection_random_sample_kernel[(batch_size, )](
        output_token_ids,
        cu_num_draft_tokens,
        draft_token_ids,
        draft_probs,
        target_probs,
        bonus_token_ids,
        recovered_token_ids,
        uniform_probs,
        is_greedy,
        max_spec_len,
        vocab_size,
        NO_DRAFT_PROBS=draft_probs is None,
        num_warps=1,
    )
    return output_token_ids
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def compute_probs(
    logits: torch.Tensor,  # [num_tokens, vocab_size]
    cu_num_draft_tokens: torch.Tensor,  # [batch_size]
    sampling_metadata: SamplingMetadata,
) -> torch.Tensor:
    """Turn target logits into a sampling probability distribution.

    Applies per-token temperature scaling and top-k/top-p filtering, then
    a softmax. When every request is greedy, the logits are returned
    untouched (no probabilities are needed for argmax comparison).

    Args:
        logits: Logits to convert, shape [num_tokens, vocab_size]; scaled
            in place to avoid an extra allocation.
        cu_num_draft_tokens: Cumulative number of draft tokens per
            request, used to expand per-request parameters to per-token.
        sampling_metadata: Sampling parameters (temperature, top-k/top-p,
            greedy flags).

    Returns:
        torch.Tensor: float32 probabilities (softmax of the scaled and
        filtered logits) for non-greedy batches, otherwise the original
        logits tensor.
    """
    assert logits.ndim == 2
    assert cu_num_draft_tokens.ndim == 1
    if sampling_metadata.all_greedy:
        return logits

    n_tokens = logits.shape[0]
    # Greedy requests carry the GREEDY_TEMPERATURE sentinel; substitute 1
    # so the in-place division below leaves their logits unchanged.
    per_token_temp = expand_batch_to_tokens(
        sampling_metadata.temperature,
        cu_num_draft_tokens,
        n_tokens,
        replace_from=GREEDY_TEMPERATURE,
        replace_to=1,
    )
    # NOTE(woosuk): scale `logits` in place rather than allocating a new
    # [num_tokens, vocab_size] tensor.
    logits.div_(per_token_temp.unsqueeze(-1))

    # Expand per-request top-k / top-p to per-token tensors when present.
    if sampling_metadata.top_k is None:
        per_token_top_k = None
    else:
        per_token_top_k = expand_batch_to_tokens(
            sampling_metadata.top_k,
            cu_num_draft_tokens,
            n_tokens,
        )
    if sampling_metadata.top_p is None:
        per_token_top_p = None
    else:
        per_token_top_p = expand_batch_to_tokens(
            sampling_metadata.top_p,
            cu_num_draft_tokens,
            n_tokens,
        )

    # NOTE(woosuk): `apply_top_k_top_p` sorts along the vocab dimension,
    # which is slow for large vocab sizes and may become a bottleneck.
    filtered = apply_top_k_top_p(logits, per_token_top_k, per_token_top_p)
    return filtered.softmax(dim=-1, dtype=torch.float32)
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def expand_batch_to_tokens(
    x: torch.Tensor,  # [batch_size]
    cu_num_tokens: torch.Tensor,  # [batch_size]
    num_tokens: int,
    replace_from: int = 0,
    replace_to: int = 0,
) -> torch.Tensor:
    """Expand [batch_size] tensor to [num_tokens] tensor based on the number of
    tokens per batch in cu_num_tokens.

    For example, if x = [a, b, c] and cu_num_tokens = [2, 5, 6], then
    num_tokens = 6, and expanded_x = [a, a, b, b, b, c].

    Args:
        x: [batch_size] tensor to expand.
        cu_num_tokens: [batch_size] tensor containing the cumulative number of
            tokens per batch. Each element represents the total number of
            tokens up to and including that batch.
        num_tokens: Total number of tokens.
        replace_from: int = 0
            Value to be replaced if it is found in x.
        replace_to: int = 0
            Value to replace with when replace_from is found.
    Returns:
        expanded_x: [num_tokens] tensor.
    """
    batch_size = x.shape[0]
    assert cu_num_tokens.shape[0] == batch_size
    expanded_x = x.new_empty(num_tokens)
    # One Triton program per request copies that request's value into its
    # token slice; the replace_from -> replace_to substitution happens
    # inside the kernel.
    expand_kernel[(batch_size, )](
        expanded_x,
        x,
        cu_num_tokens,
        replace_from,
        replace_to,
        MAX_NUM_TOKENS=MAX_SPEC_LEN,  # To avoid recompilation.
        num_warps=1,
    )
    return expanded_x
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def generate_uniform_probs(
    num_tokens: int,
    num_draft_tokens: list[int],
    generators: dict[int, torch.Generator],
    device: torch.device,
) -> torch.Tensor:
    """Draw one uniform [0, 1) sample per draft token, honoring seeds.

    A `(num_tokens,)` tensor is first filled from the global RNG. Then,
    for every request that registered its own `torch.Generator`, that
    request's slice is re-drawn with the seeded generator so its samples
    are reproducible. Requests without a generator keep the global draws.

    Args:
        num_tokens : int
            Total number of draft tokens in the batch.
        num_draft_tokens : list[int]
            Number of draft tokens per request.
        generators : dict[int, torch.Generator]
            Maps request index in the batch to its seeded generator, when
            the request asked for one.
        device : torch.device
            Device on which to allocate the output tensor.
    Returns:
        torch.Tensor
            Float32 tensor of shape `(num_tokens, )` with values
            in [0, 1).
    """
    samples = torch.rand(num_tokens, dtype=torch.float32, device=device)
    offset = 0
    for req_idx, count in enumerate(num_draft_tokens):
        # Skip requests with no draft tokens: drawing nothing for them
        # keeps seeded RNG streams stable, which matters for
        # reproducibility.
        if count == 0:
            continue
        seeded = generators.get(req_idx)
        if seeded is not None:
            samples[offset:offset + count].uniform_(generator=seeded)
        offset += count
    return samples
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
def sample_recovered_tokens(
    max_spec_len: int,
    num_draft_tokens: list[int],
    # [batch_size]
    cu_num_draft_tokens: torch.Tensor,
    # [num_tokens]
    draft_token_ids: torch.Tensor,
    # [num_tokens, vocab_size]
    draft_probs: Optional[torch.Tensor],
    # [num_tokens, vocab_size]
    target_probs: torch.Tensor,
    sampling_metadata: SamplingMetadata,
    device: torch.device,
) -> torch.Tensor:
    """Sample one recovered token per draft-token position.

    Allocates one exponential noise row per request (re-drawn with the
    request's seeded generator when present) and hands it to a Triton
    kernel together with the draft/target probabilities. The noise is
    presumably used for exponential-race (Gumbel-max-style) sampling from
    the adjusted distribution — the kernel body is defined below; confirm
    there.
    """
    # NOTE(woosuk): Create only one distribution for each request.
    batch_size = len(num_draft_tokens)
    vocab_size = target_probs.shape[-1]
    q = torch.empty(
        (batch_size, vocab_size),
        dtype=torch.float32,
        device=device,
    )
    q.exponential_()
    for i, generator in sampling_metadata.generators.items():
        # Do not generate random numbers for requests with no draft tokens.
        # This can be important for reproducibility.
        if num_draft_tokens[i] > 0:
            q[i].exponential_(generator=generator)

    recovered_token_ids = torch.empty_like(draft_token_ids)
    sample_recovered_tokens_kernel[(batch_size, max_spec_len)](
        recovered_token_ids,
        cu_num_draft_tokens,
        draft_token_ids,
        draft_probs,
        target_probs,
        q,
        vocab_size,
        triton.next_power_of_2(vocab_size),
        NO_DRAFT_PROBS=draft_probs is None,
    )
    return recovered_token_ids
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
# Greedy-path rejection: accept draft tokens while they match the target
# argmax; the first mismatch overwrites the slot with the argmax token and
# rejects the rest. One program per request.
# NOTE(woosuk): Avoid specialization to prevent unnecessary recompilation.
@triton.jit(do_not_specialize=["max_spec_len"])
def rejection_greedy_sample_kernel(
    output_token_ids_ptr,  # [batch_size, max_spec_len + 1]
    cu_num_draft_tokens_ptr,  # [batch_size]
    draft_token_ids_ptr,  # [num_tokens]
    target_argmax_ptr,  # [num_tokens]
    bonus_token_ids_ptr,  # [batch_size]
    is_greedy_ptr,  # [batch_size] or None
    max_spec_len,
):
    req_idx = tl.program_id(0)
    # FIXME(woosuk): Because is_greedy_ptr is not None at profiling run,
    # re-compilation may happen during runtime when is_greedy_ptr is None.
    if is_greedy_ptr is None:
        # None mask means the whole batch is greedy.
        is_greedy = True
    else:
        is_greedy = tl.load(is_greedy_ptr + req_idx)
    if not is_greedy:
        # Early exit for non-greedy sampling requests.
        return

    # This request's token range from the cumulative-count array.
    if req_idx == 0:
        start_idx = 0
    else:
        start_idx = tl.load(cu_num_draft_tokens_ptr + req_idx - 1)
    end_idx = tl.load(cu_num_draft_tokens_ptr + req_idx)
    num_draft_tokens = end_idx - start_idx

    rejected = False
    for pos in range(num_draft_tokens):
        if not rejected:
            draft_token_id = tl.load(draft_token_ids_ptr + start_idx + pos)
            target_argmax_id = tl.load(target_argmax_ptr + start_idx + pos)
            # The output slot always receives the target argmax: it equals
            # the draft token on acceptance and is the correction on
            # rejection.
            tl.store(output_token_ids_ptr + req_idx * (max_spec_len + 1) + pos,
                     target_argmax_id)
            if draft_token_id != target_argmax_id:
                # Reject.
                rejected = True

    if not rejected:
        # If all tokens are accepted, append the bonus token.
        bonus_token_id = tl.load(bonus_token_ids_ptr + req_idx)
        tl.store(
            output_token_ids_ptr + req_idx * (max_spec_len + 1) +
            num_draft_tokens, bonus_token_id)
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
# Random-path rejection: accept a draft token when
# target_prob / draft_prob >= uniform sample, otherwise substitute the
# pre-sampled recovered token and reject the remainder. One program per
# request; greedy requests exit immediately (handled by the greedy kernel).
# NOTE(woosuk): Avoid specialization to prevent unnecessary recompilation.
@triton.jit(do_not_specialize=["max_spec_len"])
def rejection_random_sample_kernel(
    output_token_ids_ptr,  # [batch_size, max_spec_len + 1]
    cu_num_draft_tokens_ptr,  # [batch_size]
    draft_token_ids_ptr,  # [num_tokens]
    draft_probs_ptr,  # [num_tokens, vocab_size] or None
    target_probs_ptr,  # [num_tokens, vocab_size]
    bonus_token_ids_ptr,  # [batch_size]
    recovered_token_ids_ptr,  # [num_tokens]
    uniform_probs_ptr,  # [num_tokens]
    is_greedy_ptr,  # [batch_size]
    max_spec_len,
    vocab_size,
    NO_DRAFT_PROBS: tl.constexpr,
):
    req_idx = tl.program_id(0)
    is_greedy = tl.load(is_greedy_ptr + req_idx)
    if is_greedy:
        # Early exit for greedy sampling requests.
        return

    # This request's token range from the cumulative-count array.
    if req_idx == 0:
        start_idx = 0
    else:
        start_idx = tl.load(cu_num_draft_tokens_ptr + req_idx - 1)
    end_idx = tl.load(cu_num_draft_tokens_ptr + req_idx)
    num_draft_tokens = end_idx - start_idx

    rejected = False
    for pos in range(num_draft_tokens):
        if not rejected:
            draft_token_id = tl.load(draft_token_ids_ptr + start_idx + pos)
            if NO_DRAFT_PROBS:
                # Proposers without probabilities (e.g. ngram) are treated
                # as if the draft prob were 1.
                draft_prob = 1
            else:
                draft_prob = tl.load(draft_probs_ptr +
                                     (start_idx + pos) * vocab_size +
                                     draft_token_id)
            target_prob = tl.load(target_probs_ptr +
                                  (start_idx + pos) * vocab_size +
                                  draft_token_id)
            uniform_prob = tl.load(uniform_probs_ptr + start_idx + pos)
            # NOTE(woosuk): While the draft probability should never be 0,
            # we check it to avoid NaNs. If it happens to be 0, we reject.
            if draft_prob > 0 and target_prob / draft_prob >= uniform_prob:
                # Accept.
                token_id = draft_token_id
            else:
                # Reject. Use recovered token.
                rejected = True
                token_id = tl.load(recovered_token_ids_ptr + start_idx + pos)
            tl.store(output_token_ids_ptr + req_idx * (max_spec_len + 1) + pos,
                     token_id)

    if not rejected:
        # If all tokens are accepted, append the bonus token.
        bonus_token_id = tl.load(bonus_token_ids_ptr + req_idx)
        tl.store(
            output_token_ids_ptr + req_idx * (max_spec_len + 1) +
            num_draft_tokens, bonus_token_id)
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
# Broadcast one per-request value into that request's token slice of the
# output, substituting replace_from -> replace_to on the fly. One program
# per request; the slice write is masked to the actual token count.
# NOTE(woosuk): Avoid specialization to prevent unnecessary recompilation.
@triton.jit(do_not_specialize=["replace_from", "replace_to"])
def expand_kernel(
    output_ptr,  # [num_tokens]
    input_ptr,  # [batch_size]
    cu_num_tokens_ptr,  # [batch_size]
    replace_from,
    replace_to,
    MAX_NUM_TOKENS: tl.constexpr,
):
    req_idx = tl.program_id(0)
    # This request's token range from the cumulative-count array.
    if req_idx == 0:  # noqa: SIM108
        start_idx = 0
    else:
        start_idx = tl.load(cu_num_tokens_ptr + req_idx - 1)
    end_idx = tl.load(cu_num_tokens_ptr + req_idx)
    num_tokens = end_idx - start_idx

    src_val = tl.load(input_ptr + req_idx)
    src_val = tl.where(src_val == replace_from, replace_to, src_val)
    # Write the (possibly substituted) value to every position this
    # request owns; positions beyond num_tokens are masked off.
    offset = tl.arange(0, MAX_NUM_TOKENS)
    tl.store(output_ptr + start_idx + offset,
             src_val,
             mask=offset < num_tokens)
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
@triton.jit
def sample_recovered_tokens_kernel(
    output_token_ids_ptr,  # [num_tokens]
    cu_num_draft_tokens_ptr,  # [batch_size]
    draft_token_ids_ptr,  # [num_tokens]
    draft_probs_ptr,  # [num_tokens, vocab_size] or None
    target_probs_ptr,  # [num_tokens, vocab_size]
    q_ptr,  # [batch_size, vocab_size]
    vocab_size,
    PADDED_VOCAB_SIZE: tl.constexpr,
    NO_DRAFT_PROBS: tl.constexpr,
):
    # Sample a "recovered" token for each draft position via the Gumbel-max
    # style trick: argmax over adjusted_prob / q, where q is a per-request
    # random tensor and adjusted_prob is max(target - draft, 0) (or the
    # target distribution with the draft token zeroed when there are no
    # draft probs, e.g. for the n-gram proposer).
    # Grid: axis 0 = request index, axis 1 = draft position within request.
    req_idx = tl.program_id(0)
    # cu_num_draft_tokens is an inclusive cumulative sum over requests.
    if req_idx == 0:
        start_idx = 0
    else:
        start_idx = tl.load(cu_num_draft_tokens_ptr + req_idx - 1)
    end_idx = tl.load(cu_num_draft_tokens_ptr + req_idx)
    num_draft_tokens = end_idx - start_idx

    # Early exit for out-of-range positions.
    pos = tl.program_id(1)
    if pos >= num_draft_tokens:
        return

    # PADDED_VOCAB_SIZE is the power-of-two (compile-time) padding of the
    # vocab; the mask below keeps loads within the real vocab_size.
    vocab_offset = tl.arange(0, PADDED_VOCAB_SIZE)
    if NO_DRAFT_PROBS:
        draft_token_id = tl.load(draft_token_ids_ptr + start_idx + pos)
        # Remember the draft token's probability so it can be restored below.
        orig_prob = tl.load(target_probs_ptr + (start_idx + pos) * vocab_size +
                            draft_token_id)
        # Temporarily zero out the probability of the draft token.
        # This is essentially the same as target_prob - draft_prob, except that
        # n-gram does not have draft_prob. We regard it as 1.
        tl.store(
            target_probs_ptr + (start_idx + pos) * vocab_size + draft_token_id,
            0)
        prob = tl.load(target_probs_ptr + (start_idx + pos) * vocab_size +
                       vocab_offset,
                       mask=vocab_offset < vocab_size,
                       other=0)
    else:
        draft_prob = tl.load(draft_probs_ptr + (start_idx + pos) * vocab_size +
                             vocab_offset,
                             mask=vocab_offset < vocab_size,
                             other=0)
        target_prob = tl.load(target_probs_ptr +
                              (start_idx + pos) * vocab_size + vocab_offset,
                              mask=vocab_offset < vocab_size,
                              other=0)
        # Standard rejection-sampling recovery distribution:
        # proportional to max(target - draft, 0).
        prob = tl.maximum(target_prob - draft_prob, 0)
        # NOTE(woosuk): We don't need `prob = prob / tl.sum(prob)` here because
        # `tl.argmax` will select the maximum value.

    # Out-of-vocab (padded) lanes get q = -inf so prob / q there can never
    # win the argmax.
    q = tl.load(q_ptr + req_idx * vocab_size + vocab_offset,
                mask=vocab_offset < vocab_size,
                other=float("-inf"))
    recovered_id = tl.argmax(prob / q, axis=-1)
    tl.store(output_token_ids_ptr + start_idx + pos, recovered_id)

    if NO_DRAFT_PROBS:
        # Restore the original probability.
        tl.store(
            target_probs_ptr + (start_idx + pos) * vocab_size + draft_token_id,
            orig_prob)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/sampler.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
"""A layer that samples the next tokens from the model's outputs."""
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
|
| 8 |
+
from vllm.config import LogprobsMode
|
| 9 |
+
from vllm.utils import is_pin_memory_available
|
| 10 |
+
from vllm.v1.outputs import LogprobsTensors, SamplerOutput
|
| 11 |
+
from vllm.v1.sample.metadata import SamplingMetadata
|
| 12 |
+
from vllm.v1.sample.ops.bad_words import apply_bad_words
|
| 13 |
+
from vllm.v1.sample.ops.logprobs import batched_count_greater_than
|
| 14 |
+
from vllm.v1.sample.ops.penalties import apply_all_penalties
|
| 15 |
+
from vllm.v1.sample.ops.topk_topp_sampler import TopKTopPSampler
|
| 16 |
+
|
| 17 |
+
_SAMPLING_EPS = 1e-5
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class Sampler(nn.Module):
    """Samples the next token for each request from the model's logits.

    Pipeline (see :meth:`forward`): optionally snapshot raw logits /
    logprobs, apply allowed-token masks, bad-words exclusion, logits
    processors and penalties, then greedy and/or random (top-k/top-p)
    sampling, and finally gather top-k logprobs if requested.
    """

    def __init__(self, logprobs_mode: LogprobsMode = "raw_logprobs"):
        super().__init__()
        self.topk_topp_sampler = TopKTopPSampler()
        # Whether page-locked host memory is available on this platform.
        self.pin_memory = is_pin_memory_available()
        # Which stage's values are reported as "logprobs":
        # "raw_logprobs" / "raw_logits" use logits before any processing;
        # "processed_logprobs" / "processed_logits" use them after masks,
        # processors, and penalties are applied.
        self.logprobs_mode = logprobs_mode

    def forward(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> SamplerOutput:
        """Sample one token per request.

        Args:
            logits: [num_requests, vocab_size] model output logits.
            sampling_metadata: batched sampling parameters.

        Returns:
            SamplerOutput with ``sampled_token_ids`` of shape
            [num_requests, 1] (int32) and optional logprobs tensors.
        """
        # NOTE(woosuk): Use the original logits (before any penalties or
        # temperature scaling) for the top-k logprobs.
        # This is different from the V0 sampler, which uses the logits that
        # is used for sampling (after penalties and temperature scaling).
        # TODO(rob): provide option for logprobs post sampling.
        # See https://vllm-dev.slack.com/archives/C07UUL8E61Z/p1735907856007919 # noqa: E501
        num_logprobs = sampling_metadata.max_num_logprobs
        if num_logprobs is not None:
            # For "processed_*" modes, raw_logprobs is assigned later,
            # after the logits have been processed.
            if self.logprobs_mode == "raw_logprobs":
                raw_logprobs = self.compute_logprobs(logits)
            elif self.logprobs_mode == "raw_logits":
                raw_logprobs = logits.clone()

        # Use float32 for the logits.
        logits = logits.to(torch.float32)
        # Apply allowed token ids.
        logits = self.apply_allowed_token_ids(logits, sampling_metadata)
        # Apply bad words exclusion.
        logits = self.apply_bad_words(logits, sampling_metadata)

        # Apply logits processors which can impact greedy sampling
        for processor in (sampling_metadata.logitsprocs.non_argmax_invariant):
            logits = processor.apply(logits)

        # Apply penalties (e.g., min_tokens, freq_penalties).
        logits = self.apply_penalties(logits, sampling_metadata)

        # Get the process logprobs or logits.
        if num_logprobs is not None:
            if self.logprobs_mode == "processed_logprobs":
                raw_logprobs = self.compute_logprobs(logits)
            elif self.logprobs_mode == "processed_logits":
                raw_logprobs = logits.clone()

        # Sample the next token.
        sampled = self.sample(logits, sampling_metadata)
        # Convert sampled token ids to int64 (long) type to ensure compatibility
        # with subsequent operations that may use these values as indices.
        # This conversion is necessary because FlashInfer sampling operations
        # return int32 (while PyTorch argmax and topk return int64).
        sampled = sampled.long()

        # Gather the logprobs of the topk and sampled token (if requested).
        # Get logprobs and rank tensors (if requested)
        logprobs_tensors = None if num_logprobs is None else \
            self.gather_logprobs(raw_logprobs, num_logprobs, token_ids=sampled)

        # Use int32 to reduce the tensor size.
        sampled = sampled.to(torch.int32)

        # These are GPU tensors.
        sampler_output = SamplerOutput(
            # The sampled tokens are expanded to 2D tensor with shape
            # [num_requests, 1], where each row represents one generated
            # token per request.
            sampled_token_ids=sampled.unsqueeze(-1),
            logprobs_tensors=logprobs_tensors,
        )
        return sampler_output

    def apply_temperature(
        self,
        logits: torch.Tensor,
        temp: torch.Tensor,
    ) -> torch.Tensor:
        """Scale logits by per-request temperature (mutates `logits`)."""
        # Use in-place division to avoid creating a new tensor.
        return logits.div_(temp.unsqueeze(dim=1))

    def greedy_sample(self, logits: torch.Tensor) -> torch.Tensor:
        """Return the argmax token id per request as a 1D tensor."""
        return logits.argmax(dim=-1).view(-1)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> torch.Tensor:
        """Sample logits based on sampling metadata.

        The various logits processing functions called in this method
        may update the logits tensor in-place.
        """

        assert not (sampling_metadata.all_greedy
                    and sampling_metadata.all_random)
        if sampling_metadata.all_random:
            greedy_sampled = None
        else:
            # Greedy must be computed before temperature scaling mutates
            # the logits in place below.
            greedy_sampled = self.greedy_sample(logits)
            if sampling_metadata.all_greedy:
                return greedy_sampled

        assert sampling_metadata.temperature is not None

        # Apply temperature.
        logits = self.apply_temperature(logits, sampling_metadata.temperature)

        # Apply logits processors that only apply to random sampling
        # (argmax invariant)
        for processor in sampling_metadata.logitsprocs.argmax_invariant:
            logits = processor.apply(logits)

        # Apply top_k and/or top_p.
        random_sampled = self.topk_topp_sampler(
            logits,
            sampling_metadata.generators,
            sampling_metadata.top_k,
            sampling_metadata.top_p,
        )

        if greedy_sampled is None:
            return random_sampled

        # Mixed batch: per-request, near-zero temperature means greedy.
        sampled = torch.where(
            sampling_metadata.temperature < _SAMPLING_EPS,
            greedy_sampled,
            random_sampled,
            out=greedy_sampled,  # Reuse tensor
        )
        return sampled

    def compute_logprobs(self, logits: torch.Tensor) -> torch.Tensor:
        """Log-softmax over the vocab dimension, in float32."""
        return logits.log_softmax(dim=-1, dtype=torch.float32)

    def gather_logprobs(
        self,
        logprobs: torch.Tensor,
        num_logprobs: int,
        token_ids: torch.Tensor,
    ) -> LogprobsTensors:
        """
        Gather logprobs for topk and sampled/prompt token.

        Args:
            logprobs: (num tokens) x (vocab) tensor
            num_logprobs: minimum number of logprobs to
                          retain per token
            token_ids: prompt tokens (if prompt logprobs)
                       or sampled tokens (if sampled
                       logprobs); 1D token ID tensor
                       with (num tokens) elements
                       Must be int64.

        Returns:
            Top-k int indices tensor, (num tokens) x (num_logprobs + 1)
            Top-k float logprobs tensor, (num tokens) x (num_logprobs + 1)
            Sampled token rank tensor, (num tokens)
        """
        assert token_ids.dtype == torch.int64
        # Find the topK values.
        topk_logprobs, topk_indices = torch.topk(logprobs,
                                                 num_logprobs,
                                                 dim=-1)

        # Get with the logprob of the prompt or sampled token.
        token_ids = token_ids.unsqueeze(-1)
        token_logprobs = logprobs.gather(-1, token_ids)

        # Compute the ranks of the actual token.
        token_ranks = batched_count_greater_than(logprobs, token_logprobs)

        # Concatenate together with the topk.
        # The sampled/prompt token goes in column 0.
        indices = torch.cat((token_ids, topk_indices), dim=1)
        logprobs = torch.cat((token_logprobs, topk_logprobs), dim=1)

        # Use int32 to reduce the tensor size.
        indices = indices.to(torch.int32)

        return LogprobsTensors(indices, logprobs, token_ranks)

    def apply_penalties(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> torch.Tensor:
        """Apply presence/frequency/repetition penalties if any are set."""
        if not sampling_metadata.no_penalties:
            assert sampling_metadata.prompt_token_ids is not None
            logits = apply_all_penalties(
                logits,
                sampling_metadata.prompt_token_ids,
                sampling_metadata.presence_penalties,
                sampling_metadata.frequency_penalties,
                sampling_metadata.repetition_penalties,
                sampling_metadata.output_token_ids,
            )
        return logits

    def apply_allowed_token_ids(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> torch.Tensor:
        """Mask out disallowed token ids with -inf (in place)."""
        if sampling_metadata.allowed_token_ids_mask is not None:
            # The mask marks *disallowed* tokens (True => blocked).
            logits.masked_fill_(sampling_metadata.allowed_token_ids_mask,
                                float("-inf"))
        return logits

    def apply_bad_words(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> torch.Tensor:
        """Suppress tokens that would complete a configured bad-words
        sequence (mutates `logits` in place via `apply_bad_words`)."""
        if sampling_metadata.bad_words_token_ids:
            apply_bad_words(
                logits,
                sampling_metadata.bad_words_token_ids,
                sampling_metadata.output_token_ids,
            )
        return logits
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/eagle.py
ADDED
|
@@ -0,0 +1,722 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
import ast
|
| 4 |
+
from dataclasses import replace
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
|
| 11 |
+
from vllm.attention.layer import Attention
|
| 12 |
+
from vllm.config import (CompilationLevel, VllmConfig,
|
| 13 |
+
get_layers_from_vllm_config)
|
| 14 |
+
from vllm.distributed.parallel_state import get_pp_group
|
| 15 |
+
from vllm.forward_context import set_forward_context
|
| 16 |
+
from vllm.logger import init_logger
|
| 17 |
+
from vllm.model_executor.model_loader import get_model
|
| 18 |
+
from vllm.model_executor.models import supports_multimodal
|
| 19 |
+
from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
|
| 20 |
+
from vllm.platforms import current_platform
|
| 21 |
+
from vllm.utils import is_pin_memory_available
|
| 22 |
+
from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata
|
| 23 |
+
from vllm.v1.attention.backends.rocm_aiter_fa import (
|
| 24 |
+
AiterFlashAttentionMetadata)
|
| 25 |
+
from vllm.v1.attention.backends.tree_attn import (TreeAttentionMetadata,
|
| 26 |
+
TreeAttentionMetadataBuilder)
|
| 27 |
+
from vllm.v1.attention.backends.triton_attn import TritonAttentionMetadata
|
| 28 |
+
from vllm.v1.attention.backends.utils import CommonAttentionMetadata
|
| 29 |
+
from vllm.v1.kv_cache_interface import KVCacheConfig
|
| 30 |
+
from vllm.v1.sample.metadata import SamplingMetadata
|
| 31 |
+
|
| 32 |
+
logger = init_logger(__name__)
|
| 33 |
+
|
| 34 |
+
PADDING_SLOT_ID = -1
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class EagleProposer:
|
| 38 |
+
|
| 39 |
+
    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
        runner=None,
    ):
        """Set up the EAGLE draft proposer.

        Caches config values, allocates the persistent device buffers
        reused across CUDA-graph replays, and precomputes per-level
        properties of the speculative token tree.

        Args:
            vllm_config: full engine configuration (speculative, model,
                cache, scheduler, and compilation configs are read).
            device: device on which the persistent buffers are allocated.
            runner: the model runner; used later to reach attention
                metadata builders (may be None in some construction paths).
        """
        self.vllm_config = vllm_config
        self.speculative_config = vllm_config.speculative_config
        self.draft_model_config = self.speculative_config.draft_model_config
        self.method = self.speculative_config.method

        self.runner = runner
        self.dtype = vllm_config.model_config.dtype
        self.max_model_len = vllm_config.model_config.max_model_len
        self.block_size = vllm_config.cache_config.block_size
        self.num_speculative_tokens = (
            self.speculative_config.num_speculative_tokens)
        self.max_num_tokens = (
            vllm_config.scheduler_config.max_num_batched_tokens)
        self.token_arange_np = np.arange(self.max_num_tokens)
        # We need to get the hidden size from the draft model config because
        # the draft model's hidden size can be different from the target model's
        # hidden size (e.g., Llama 3.3 70B).
        self.hidden_size = self.draft_model_config.get_hidden_size()

        self.is_multimodal_model = vllm_config.model_config \
            .is_multimodal_model

        # CUDA graphs are only usable with piecewise compilation and when
        # eager mode is not forced.
        self.use_cuda_graph = (self.vllm_config.compilation_config.level
                               == CompilationLevel.PIECEWISE and
                               not self.vllm_config.model_config.enforce_eager)
        # Reversed so the list is ascending; [-1] is the largest capture size.
        self.cudagraph_batch_sizes = list(
            reversed(
                self.vllm_config.compilation_config.cudagraph_capture_sizes))

        # persistent buffers for cuda graph
        self.input_ids = torch.zeros(self.max_num_tokens,
                                     dtype=torch.int32,
                                     device=device)
        self.positions = torch.zeros(self.max_num_tokens,
                                     dtype=torch.int64,
                                     device=device)
        self.hidden_states = torch.zeros(
            (self.max_num_tokens, self.hidden_size),
            dtype=self.dtype,
            device=device)

        max_batch_size = vllm_config.scheduler_config.max_num_seqs
        self.arange = torch.arange(
            # We need +1 here because the arange is used to set query_start_loc,
            # which has one more element than batch_size.
            max_batch_size + 1,
            device=device,
            dtype=torch.int32,
        )

        # Persistent embedding buffer for multimodal models.
        self.inputs_embeds = torch.zeros(
            (self.max_num_tokens, self.hidden_size),
            dtype=self.dtype,
            device=device)

        # Parse the speculative token tree.
        # The config stores the tree as a string literal of branch choices;
        # each choice is a tuple whose length equals its depth in the tree.
        spec_token_tree = self.speculative_config.speculative_token_tree
        self.tree_choices: list[tuple[int,
                                      ...]] = ast.literal_eval(spec_token_tree)
        tree_depth = len(self.tree_choices[-1])
        # Precompute per-level properties of the tree.
        num_drafts_per_level = [0] * tree_depth
        for node in self.tree_choices:
            num_drafts_per_level[len(node) - 1] += 1
        # cu_drafts_per_level: cumulative draft count up to each level.
        # child_drafts_per_level: children per parent at each level
        # (integer division assumes a uniform branching factor per level).
        self.cu_drafts_per_level = [num_drafts_per_level[0]]
        self.child_drafts_per_level = [num_drafts_per_level[0]]
        for level in range(1, tree_depth):
            self.cu_drafts_per_level.append(self.cu_drafts_per_level[-1] +
                                            num_drafts_per_level[level])
            self.child_drafts_per_level.append(num_drafts_per_level[level] //
                                               num_drafts_per_level[level - 1])
        # Precompute draft position offsets in flattened tree.
        self.tree_draft_pos_offsets = torch.arange(
            1,
            len(self.tree_choices) + 1,
            device=device,
            dtype=torch.int32,
        ).repeat(max_batch_size, 1)
|
| 123 |
+
|
| 124 |
+
    def propose(
        self,
        # [num_tokens]
        target_token_ids: torch.Tensor,
        # [num_tokens]
        target_positions: torch.Tensor,
        # [num_tokens, hidden_size]
        target_hidden_states: torch.Tensor,
        # [batch_size]
        next_token_ids: torch.Tensor,
        common_attn_metadata: CommonAttentionMetadata,
        sampling_metadata: SamplingMetadata,
        mm_embeds: Optional[list[torch.Tensor]] = None,
    ) -> torch.Tensor:
        """Generate draft tokens with the EAGLE draft model.

        Runs the draft model once over the (shifted) target tokens, then
        either drafts a token tree (tree attention) or iterates
        autoregressively for the remaining speculative steps.

        Returns:
            [batch_size, num_speculative_tokens] draft token ids, or
            [batch_size, num_tree_tokens] when tree drafting is used.

        NOTE: drafting greedily uses argmax; `sampling_metadata` is not
        consumed in this method body.
        """
        num_tokens = target_token_ids.shape[0]
        batch_size = next_token_ids.shape[0]
        # Index of the last token of each request in the flattened batch.
        last_token_indices = common_attn_metadata.query_start_loc[1:] - 1

        if self.method == "eagle3":
            assert isinstance(self.model, Eagle3LlamaForCausalLM)
            # Eagle3 fuses multiple target-layer hidden states into one.
            target_hidden_states = self.model.combine_hidden_states(
                target_hidden_states)
            assert target_hidden_states.shape[-1] == self.hidden_size

        # Shift the input ids by one token.
        # E.g., [a1, b1, b2, c1, c2, c3] -> [b1, b2, c1, c2, c3, c3]
        self.input_ids[:num_tokens - 1] = target_token_ids[1:]
        # Replace the last token with the next token.
        # E.g., [b1, b2, c1, c2, c3, c3] -> [a2, b2, b3, c2, c3, c4]
        self.input_ids[last_token_indices] = next_token_ids

        assert self.runner is not None

        # FIXME: need to consider multiple kv_cache_groups
        attn_metadata = self.runner.attn_groups[0][0].metadata_builder\
            .build_for_drafting(common_attn_metadata=common_attn_metadata,
                                draft_index=0)

        # At this moment, we assume all eagle layers belong to the same KV
        # cache group, thus using the same attention metadata.
        per_layer_attn_metadata = {}
        for layer_name in self.attn_layer_names:
            per_layer_attn_metadata[layer_name] = attn_metadata
        # Pad the token count up to a captured CUDA-graph size if possible.
        if self.use_cuda_graph and \
                num_tokens <= self.cudagraph_batch_sizes[-1]:
            num_input_tokens = self.vllm_config.pad_for_cudagraph(num_tokens)
        else:
            num_input_tokens = num_tokens
        # copy inputs to buffer for cudagraph
        self.positions[:num_tokens] = target_positions
        self.hidden_states[:num_tokens] = target_hidden_states
        if self.is_multimodal_model:
            # Multimodal models consume embeddings instead of token ids.
            input_ids = self.input_ids[:num_tokens]
            inputs_embeds = self.model.get_input_embeddings(
                input_ids,
                multimodal_embeddings=mm_embeds or None,
            )
            self.inputs_embeds[:num_tokens] = inputs_embeds
            inputs_embeds = self.inputs_embeds[:num_input_tokens]
            input_ids = None
        else:
            inputs_embeds = None
            input_ids = self.input_ids[:num_input_tokens]

        with set_forward_context(per_layer_attn_metadata,
                                 self.vllm_config,
                                 num_tokens=num_input_tokens):
            ret_hidden_states = self.model(
                input_ids=input_ids,
                positions=self.positions[:num_input_tokens],
                hidden_states=self.hidden_states[:num_input_tokens],
                inputs_embeds=inputs_embeds,
            )
            # deepseek_mtp returns a single tensor; other methods return
            # (last_hidden_states, hidden_states).
            if self.method == "deepseek_mtp":
                last_hidden_states = ret_hidden_states
            else:
                last_hidden_states, hidden_states = ret_hidden_states
        sample_hidden_states = last_hidden_states[last_token_indices]
        logits = self.model.compute_logits(sample_hidden_states, None)
        positions = target_positions[last_token_indices]
        hidden_states = hidden_states[last_token_indices]

        if isinstance(attn_metadata, TreeAttentionMetadata):
            # Draft using tree attention.
            draft_token_ids_list = self.propose_tree(
                batch_size=batch_size,
                logits=logits,
                positions=positions,
                hidden_states=hidden_states,
                common_attn_metadata=common_attn_metadata,
            )
            # [batch_size, num_tree_tokens]
            return torch.cat(draft_token_ids_list, dim=1)

        # Greedy draft: take the argmax token for each request.
        draft_token_ids = logits.argmax(dim=-1)

        # Early exit if there is only one draft token to be generated.
        if self.num_speculative_tokens == 1:
            # [batch_size, 1]
            return draft_token_ids.view(-1, 1)

        # TODO: Currently, MTP module released by deepseek only has
        # one layer. Adapt this code to support multiple layers once
        # there's a multi-layer MTP module.

        # On ROCm, both AiterFlashAttention and TritonAttention
        # support multi-token eagle spec decode.
        if current_platform.is_rocm():
            assert isinstance(
                attn_metadata,
                (TritonAttentionMetadata, AiterFlashAttentionMetadata,
                 FlashAttentionMetadata))
        else:
            # Currently, only FlashAttention supports multi-token eagle spec
            # decode. This is because the code below makes assumptions about
            # attn_metadata attributes available.
            assert isinstance(attn_metadata, FlashAttentionMetadata)

        # Generate the remaining draft tokens.
        draft_token_ids_list = [draft_token_ids]

        # Each subsequent step processes exactly one token per request, so
        # the "token" count equals the batch size; pad for CUDA graphs.
        if self.use_cuda_graph and \
                batch_size <= self.cudagraph_batch_sizes[-1]:
            input_batch_size = self.vllm_config.pad_for_cudagraph(batch_size)
        else:
            input_batch_size = batch_size
        attn_metadata.num_actual_tokens = batch_size
        attn_metadata.max_query_len = 1
        attn_metadata.query_start_loc = self.arange[:batch_size + 1]
        for _ in range(self.num_speculative_tokens - 1):
            # Update the inputs.
            # cast to int32 is crucial when eagle model is compiled.
            # tensor.argmax() returns int64 by default.
            input_ids = draft_token_ids_list[-1].int()
            positions += 1

            # NOTE(woosuk): We should handle the case where the draft model
            # generates tokens beyond the max model length. Since it is complex
            # to remove such requests from the batch, we keep them in the batch
            # but adjust the position ids and slot mappings to avoid the
            # out-of-range access during the model execution. The draft tokens
            # generated with this adjustment should be ignored.
            exceeds_max_model_len = positions >= self.max_model_len
            # Mask out the position ids that exceed the max model length.
            # Otherwise, we may get out-of-range error in RoPE.
            clamped_positions = torch.where(exceeds_max_model_len, 0,
                                            positions)

            # Increment the sequence lengths.
            attn_metadata.max_seq_len += 1
            attn_metadata.seq_lens += 1
            # Consider max model length.
            attn_metadata.max_seq_len = min(attn_metadata.max_seq_len,
                                            self.max_model_len)
            # For the requests that exceed the max model length, we set the
            # sequence length to 1 to minimize their overheads in attention.
            attn_metadata.seq_lens.masked_fill_(exceeds_max_model_len, 1)

            # Compute the slot mapping.
            # slot = block_id * block_size + offset_within_block.
            block_numbers = clamped_positions // self.block_size
            block_ids = attn_metadata.block_table.gather(
                dim=1, index=block_numbers.view(-1, 1))
            block_ids = block_ids.view(-1)
            attn_metadata.slot_mapping = (block_ids * self.block_size +
                                          clamped_positions % self.block_size)
            # Mask out the slot mappings that exceed the max model length.
            # Otherwise, the KV cache will be inadvertently updated with the
            # padding tokens.
            attn_metadata.slot_mapping.masked_fill_(exceeds_max_model_len,
                                                    PADDING_SLOT_ID)

            # copy inputs to buffer for cudagraph
            self.input_ids[:batch_size] = input_ids
            self.positions[:batch_size] = clamped_positions
            self.hidden_states[:batch_size] = hidden_states
            if self.is_multimodal_model:
                inputs_embeds = self.model.get_input_embeddings(input_ids)
                self.inputs_embeds[:batch_size] = inputs_embeds
                inputs_embeds = self.inputs_embeds[:input_batch_size]
                input_ids = None
            else:
                inputs_embeds = None
                input_ids = self.input_ids[:input_batch_size]

            # Run the model.
            with set_forward_context(per_layer_attn_metadata,
                                     self.vllm_config,
                                     num_tokens=input_batch_size):
                last_hidden_states, hidden_states = self.model(
                    input_ids=input_ids,
                    positions=self.positions[:input_batch_size],
                    hidden_states=self.hidden_states[:input_batch_size],
                    inputs_embeds=inputs_embeds,
                )
            # Drop the CUDA-graph padding rows before sampling.
            hidden_states = hidden_states[:batch_size]
            logits = self.model.compute_logits(last_hidden_states[:batch_size],
                                               None)
            draft_token_ids = logits.argmax(dim=-1)
            draft_token_ids_list.append(draft_token_ids)

        # [batch_size, num_speculative_tokens]
        draft_token_ids = torch.stack(draft_token_ids_list, dim=1)
        return draft_token_ids
|
| 327 |
+
|
| 328 |
+
def propose_tree(
    self,
    batch_size: int,
    # [num_tokens, vocab_size]
    logits: torch.Tensor,
    # [num_tokens]
    positions: torch.Tensor,
    # [num_tokens, hidden_size]
    hidden_states: torch.Tensor,
    common_attn_metadata: CommonAttentionMetadata,
) -> list[torch.Tensor]:
    """Propose draft tokens level by level using tree attention.

    Starting from the root-level logits, samples `child_drafts_per_level`
    tokens per node at each level, re-runs the draft model over the
    accumulated tree, and returns one tensor of draft token ids per level.

    Returns:
        List of length `len(self.cu_drafts_per_level)`; element i has shape
        [batch_size, num_drafts_at_level_i].
    """
    tree_attn_metadata_builder = \
        self.runner.attn_groups[0][0].metadata_builder
    # Tree drafting relies on build_for_drafting() below, which only the
    # tree-attention builder provides.
    assert isinstance(tree_attn_metadata_builder,
                      TreeAttentionMetadataBuilder)

    # cu_drafts_per_level is cumulative: total drafts up to each level.
    total_num_drafts = self.cu_drafts_per_level[0]
    level_num_drafts = total_num_drafts
    # Sample a draft token for each child at the tree root level.
    num_children = self.child_drafts_per_level[0]
    if num_children == 1:
        draft_token_ids = logits.argmax(dim=-1).view(batch_size, -1)
    else:
        draft_token_ids = torch.topk(logits, num_children,
                                     dim=-1).indices.view(batch_size, -1)
    draft_token_ids_list = [draft_token_ids]
    draft_hidden_states = hidden_states.view(batch_size, 1, -1)

    # Initialize empty tensors for concatenation with the level outputs.
    tree_input_ids = torch.empty(0,
                                 device=self.input_ids.device,
                                 dtype=self.input_ids.dtype)
    tree_positions = torch.empty(0,
                                 device=self.positions.device,
                                 dtype=self.positions.dtype)
    tree_hidden_states = torch.empty(0,
                                     device=self.hidden_states.device,
                                     dtype=self.hidden_states.dtype)
    # Precompute the draft token positions.
    flattened_draft_positions = (
        positions.view(batch_size, -1) +
        self.tree_draft_pos_offsets[:batch_size, :])
    tree_depth = len(self.cu_drafts_per_level)
    for level in range(tree_depth - 1):
        # Get draft positions for RoPE.
        draft_positions = positions + (level + 1)
        exceeds_max_model_len = (positions +
                                 total_num_drafts) >= self.max_model_len
        # Mask out the position ids that exceed the max model length.
        # Otherwise, we may get out-of-range error in RoPE.
        draft_positions = torch.where(
            exceeds_max_model_len,
            0,
            draft_positions,
        ).view(batch_size, -1)

        if level_num_drafts > 1:
            # Repeat the positions for each draft at this level.
            draft_positions = draft_positions.repeat_interleave(
                level_num_drafts, dim=1)

        if num_children > 1:
            # Repeat draft hidden states for each child.
            draft_hidden_states = draft_hidden_states.repeat_interleave(
                num_children, dim=1)

        # Concatenate the draft tokens, positions, and hidden states.
        tree_input_ids = torch.cat([tree_input_ids, draft_token_ids],
                                   dim=1)
        tree_positions = torch.cat([tree_positions, draft_positions],
                                   dim=1)
        tree_hidden_states = torch.cat(
            [tree_hidden_states, draft_hidden_states], dim=1)

        # Build new attention metadata for the next level of drafts.
        # This is necessary to support tree attention.
        query_len = total_num_drafts
        common_attn_metadata = replace(
            common_attn_metadata,
            query_start_loc=query_len * self.arange[:batch_size + 1],
            seq_lens=common_attn_metadata.seq_lens + level_num_drafts,
            num_actual_tokens=batch_size * query_len,
            max_query_len=query_len,
        )
        attn_metadata = tree_attn_metadata_builder.build_for_drafting(
            common_attn_metadata=common_attn_metadata,
            draft_index=level + 1,
        )

        # Apply new attention metadata to all layers.
        per_layer_attn_metadata = {}
        for layer_name in self.attn_layer_names:
            per_layer_attn_metadata[layer_name] = attn_metadata

        # Consider max model length.
        attn_metadata.max_seq_len = min(attn_metadata.max_seq_len,
                                        self.max_model_len)
        # For the requests that exceed the max model length, we set the
        # sequence length to 1 to minimize their overheads in attention.
        attn_metadata.seq_lens.masked_fill_(exceeds_max_model_len, 1)

        # Compute the slot mapping.
        query_positions = flattened_draft_positions[:, level:level +
                                                    query_len]
        block_numbers = query_positions // self.block_size
        block_ids = attn_metadata.block_table.gather(dim=1,
                                                     index=block_numbers)
        slot_mapping = (block_ids * self.block_size +
                        query_positions % self.block_size)
        # Mask out the slot mappings that exceed the max model length.
        # Otherwise, the KV cache will be inadvertently updated with the
        # padding tokens.
        slot_mapping[exceeds_max_model_len] = PADDING_SLOT_ID
        attn_metadata.slot_mapping = slot_mapping.view(-1)

        # Copy inputs to buffer for cudagraph.
        num_tokens = attn_metadata.num_actual_tokens
        input_ids = tree_input_ids.view(-1)
        self.input_ids[:num_tokens] = input_ids
        self.positions[:num_tokens] = tree_positions.view(-1)
        self.hidden_states[:num_tokens] = tree_hidden_states.view(
            num_tokens, -1)

        # Pad up to a captured cudagraph batch size when possible so the
        # forward pass can replay a captured graph.
        if self.use_cuda_graph and \
                num_tokens <= self.cudagraph_batch_sizes[-1]:
            num_input_tokens = self.vllm_config.pad_for_cudagraph(
                num_tokens)
        else:
            num_input_tokens = num_tokens
        # Run the model.
        with set_forward_context(per_layer_attn_metadata,
                                 self.vllm_config,
                                 num_tokens=num_input_tokens):
            last_hidden_states, hidden_states = self.model(
                input_ids=self.input_ids[:num_input_tokens],
                positions=self.positions[:num_input_tokens],
                hidden_states=self.hidden_states[:num_input_tokens],
                inputs_embeds=None,
            )

        # Get the output hidden states for the draft tokens.
        # Only the last `level_num_drafts` positions of each request's
        # query belong to the newest tree level.
        draft_hidden_states = hidden_states[:num_tokens].view(
            batch_size, query_len, -1)[:, -level_num_drafts:]
        draft_last_hidden_states = last_hidden_states[:num_tokens].view(
            batch_size, query_len, -1)[:, -level_num_drafts:]

        # Get the output logits for the draft tokens.
        logits = self.model.compute_logits(
            draft_last_hidden_states.reshape(batch_size * level_num_drafts,
                                             -1),
            None,
        )

        # Sample a draft token for each child at the next tree level.
        num_children = self.child_drafts_per_level[level + 1]
        if num_children == 1:
            draft_token_ids = logits.argmax(dim=-1).view(batch_size, -1)
        else:
            draft_token_ids = torch.topk(logits, num_children,
                                         dim=-1).indices.view(
                                             batch_size, -1)
        draft_token_ids_list.append(draft_token_ids)

        # Update the # drafts counters for the next tree level.
        level_num_drafts = self.cu_drafts_per_level[level +
                                                    1] - total_num_drafts
        total_num_drafts = self.cu_drafts_per_level[level + 1]
    return draft_token_ids_list
|
| 496 |
+
|
| 497 |
+
def prepare_inputs(
    self,
    common_attn_metadata: CommonAttentionMetadata,
    # [batch_size]
    num_rejected_tokens: torch.Tensor
) -> tuple[CommonAttentionMetadata, torch.Tensor]:
    """
    This function is used to prepare the inputs for the spec decode.
    It updates to the common_attn_metadata to account for the rejected
    tokens (and newly sampled tokens). It also returns the token indices
    of the tokens that should be fed to the speculator.
    """
    # E.g.
    # common_attn_metadata.query_start_loc{_cpu}:
    # [0, q1, q1 + q2, q1 + q2 + q3]
    # common_attn_metadata.seq_lens{_cpu}: [s1, s2, s3]
    # num_rejected_tokens: [n1, n2, n3]
    # This function computes the intermediate values:
    # num_tokens_per_req: [q1 - n1, q2 - n2, q3 - n3]
    # And returns:
    # common_attn_metadata.query_start_loc{_cpu}:
    # [0, q1 - n1, q1 + q2 - n1 - n2, q1 + q2 + q3 - n1 - n2 - n3]
    # common_attn_metadata.seq_lens{_cpu}:
    # [s1 - n1 + 1, s2 - n2 + 1, s3 - n3 + 1]
    # token_indices: [0, 1, ..., q1 - n1 - 1,
    #                 q1, q1 + 1, ..., q1 + q2 - n2 - 1,
    #                 q1 + q2, q1 + q2 + 1, ..., q1 + q2 + q3 - n3 - 1]

    device = common_attn_metadata.query_start_loc.device
    query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu
    # NOTE(review): the example above shows "s_i - n_i + 1" but the code
    # only subtracts num_rejected_tokens — presumably seq_lens_cpu already
    # counts the newly sampled bonus token; confirm against the caller.
    new_seq_lens_cpu = common_attn_metadata.seq_lens_cpu \
        - num_rejected_tokens

    # [0, q1, q1 + q2, q1 + q2 + q3] -> [q1, q2, q3]
    new_query_len_per_req = (query_start_loc_cpu[1:] -
                             query_start_loc_cpu[:-1])
    # [q1, q2, q3] -> [q1 - n1, q2 - n2, q3 - n3]
    new_num_tokens_per_req = new_query_len_per_req - num_rejected_tokens
    new_num_tokens_per_req_np = new_num_tokens_per_req.numpy()

    # [q1 - n1, q2 - n2, q3 - n3] ->
    # [0, q1 - n1, q1 + q2 - n1 - n2, q1 + q2 + q3 - n1 - n2 - n3]
    # Pinned so the .to(device, non_blocking=True) copies below are async.
    new_query_start_loc_cpu = torch.zeros(
        query_start_loc_cpu.shape,
        dtype=torch.int32,
        pin_memory=is_pin_memory_available())
    new_query_start_loc_np = new_query_start_loc_cpu.numpy()
    np.cumsum(new_num_tokens_per_req_np, out=new_query_start_loc_np[1:])

    total_num_tokens = new_query_start_loc_np[-1]
    # Example assuming num_tokens_per_req_np = [2, 4, 3]
    # this implies that `new_query_start_locs` is:
    # [0, 2, 6, 9] ->
    # [0, 0, 2, 2, 2, 2, 6, 6, 6]
    #  _r1_  ____r2____  ___r3__
    new_query_start_locs_expanded = np.repeat(new_query_start_loc_np[:-1],
                                              new_num_tokens_per_req_np)
    # [0, 1, 2, 3, 4, 5, 6, 7, 8] ->
    # [0, 1, 0, 1, 2, 3, 0, 1, 2]
    #  _r1_  ____r2____  ___r3__
    token_offests = self.token_arange_np[:total_num_tokens] \
        - new_query_start_locs_expanded

    # Expand starting positions to match token pattern
    # [0, q1, q1 + q2] ->
    # [0, 0, q1, q1, q1, q1, q1 + q2, q1 + q2, q1 + q2]
    #  _r1_  _____r2_______  ___________r3____________
    old_query_start_locs_expanded = np.repeat(
        query_start_loc_cpu[:-1].numpy(), new_num_tokens_per_req_np)
    # Final token indices are:
    # [0, 1,                                // req 1
    #  q1 + 0, q1 + 1, q1 + 2, q1 + 3,      // req 2
    #  q1 + q2 + 0, q1 + q2 + 1, q1 + q2 + 2]  // req 3
    token_indices_np = token_offests + old_query_start_locs_expanded
    token_indices = torch.from_numpy(token_indices_np).to(
        device, non_blocking=True)

    spec_common_attn_metadata = CommonAttentionMetadata(
        query_start_loc=new_query_start_loc_cpu.to(device,
                                                   non_blocking=True),
        seq_lens=new_seq_lens_cpu.to(device, non_blocking=True),
        query_start_loc_cpu=new_query_start_loc_cpu,
        seq_lens_cpu=new_seq_lens_cpu,
        num_computed_tokens_cpu=common_attn_metadata.
        num_computed_tokens_cpu,
        num_reqs=common_attn_metadata.num_reqs,
        num_actual_tokens=total_num_tokens,
        max_query_len=new_query_len_per_req.max().item(),
        block_table_tensor=common_attn_metadata.block_table_tensor,
        slot_mapping=common_attn_metadata.slot_mapping[token_indices],
        causal=True,
    )

    return spec_common_attn_metadata, token_indices
|
| 591 |
+
|
| 592 |
+
def load_model(self, target_model: nn.Module) -> None:
    """Load the EAGLE draft head and share weights with the target model.

    Loads the draft model under the "eagle_head" compilation tag, records
    which attention layers belong to the draft head (those not present in
    the target model), and shares the vocab embedding / LM head with the
    target model when shapes and config allow it.
    """
    draft_model_config = \
        self.vllm_config.speculative_config.draft_model_config
    # Snapshot the attention layers registered before loading the draft
    # model, so the draft-only layers can be identified by set difference.
    target_attn_layer_names = set(
        get_layers_from_vllm_config(self.vllm_config, Attention).keys())

    from vllm.compilation.backends import set_model_tag
    with set_model_tag("eagle_head"):
        self.model = get_model(vllm_config=self.vllm_config,
                               model_config=draft_model_config)

    # Layers registered during get_model() above but absent before are the
    # draft head's own attention layers.
    draft_attn_layer_names = (
        get_layers_from_vllm_config(self.vllm_config, Attention).keys() -
        target_attn_layer_names)

    self.attn_layer_names = list(draft_attn_layer_names)

    if supports_multimodal(target_model):
        # handle multimodality
        self.model.config.image_token_index = (
            target_model.config.image_token_index)
        target_language_model = target_model.get_language_model()
    else:
        target_language_model = target_model
    # share embed_tokens with the target model if needed
    # (only safe without pipeline parallelism and when shapes match)
    if get_pp_group().world_size == 1 \
        and self.model.model.embed_tokens.weight.shape \
        == target_language_model.model.embed_tokens.weight.shape:
        logger.info(
            "Assuming the EAGLE head shares the same vocab embedding" \
            " with the target model."
        )
        # Drop the draft head's own embedding before aliasing, so the old
        # weights can be freed.
        del self.model.model.embed_tokens
        self.model.model.embed_tokens = (
            target_language_model.model.embed_tokens)
    else:
        logger.info(
            "The EAGLE head's vocab embedding will be loaded separately" \
            " from the target model."
        )

    # share lm_head with the target model if needed
    # some model definition do not define lm_head explicitly
    # and reuse embed_tokens for lm_head, e.g., CohereForCausalLM
    if self.vllm_config.speculative_config.method != "eagle3" and \
            hasattr(target_language_model, "lm_head"):
        logger.info("Loading EAGLE LM head weights from the target model.")
        self.model.lm_head = target_language_model.lm_head
|
| 640 |
+
|
| 641 |
+
@torch.inference_mode()
def dummy_run(
    self,
    num_tokens: int,
) -> None:
    """Run the draft model on pre-allocated dummy buffers.

    Used for warmup/profiling: feeds the first `num_tokens` entries of the
    persistent input buffers through the model without sampling.
    """
    with set_forward_context(None, self.vllm_config,
                             num_tokens=num_tokens):
        # Multimodal drafts consume embeddings directly; text-only drafts
        # consume token ids.
        multimodal = self.is_multimodal_model
        input_ids = None if multimodal else self.input_ids[:num_tokens]
        inputs_embeds = (self.inputs_embeds[:num_tokens]
                         if multimodal else None)

        self.model(
            input_ids=input_ids,
            positions=self.positions[:num_tokens],
            hidden_states=self.hidden_states[:num_tokens],
            inputs_embeds=inputs_embeds,
        )
|
| 661 |
+
|
| 662 |
+
def validate_same_kv_cache_group(self,
                                 kv_cache_config: KVCacheConfig) -> None:
    """
    Validate that all eagle layers belong to the same KVCacheGroup.
    Need this assumption to ensure all eagle layers can use the
    same AttentionMetadata.
    May extend to multiple AttentionMetadata in the future.

    Raises:
        AssertionError: if the draft attention layers span more than one
            KV cache group.
    """
    # Map each layer name to the index of the KV cache group it is in.
    # (renamed loop variable: `id` shadowed the builtin)
    kv_cache_groups: dict[str, int] = {}
    for group_id, kv_cache_group in enumerate(
            kv_cache_config.kv_cache_groups):
        for layer_name in kv_cache_group.layer_names:
            kv_cache_groups[layer_name] = group_id
    # Set comprehension instead of len(set([...])): no throwaway list.
    assert len({
        kv_cache_groups[layer_name]
        for layer_name in self.attn_layer_names
    }) == 1, "All eagle layers should belong to the same kv cache group"
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
# NOTE(woosuk): Currently, the below code is not used and we always use argmax
|
| 683 |
+
# to sample the draft tokens. We will use this after we find a way to manage
|
| 684 |
+
# the draft prob tensor.
|
| 685 |
+
# Refer to https://github.com/vllm-project/vllm/pull/16899 for the details.
|
| 686 |
+
# FIXME(woosuk): The logic here is duplicated with the main sampling code.
|
| 687 |
+
# We should refactor this to reuse the same sampling implementation.
|
| 688 |
+
def compute_probs_and_sample_next_token(
    logits: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Sample next draft tokens and return them with their probabilities.

    Returns (next_token_ids, probs). WARNING: mutates `logits` in place
    (temperature division) on the non-all-greedy path; in the all-greedy
    path `probs` aliases the raw `logits` tensor.
    """
    if sampling_metadata.all_greedy:
        # For greedy requests, draft_probs is not used in rejection sampling.
        # Therefore, we can just return the logits.
        probs = logits
        next_token_ids = logits.argmax(dim=-1)
        return next_token_ids, probs

    # Temperature of -1 marks a greedy request; substitute 1.0 so the
    # in-place division below is a no-op for those rows.
    is_greedy = sampling_metadata.temperature == -1
    temperature = torch.where(is_greedy, 1.0, sampling_metadata.temperature)
    logits.div_(temperature.view(-1, 1))
    probs = logits.softmax(dim=-1, dtype=torch.float32)

    # NOTE(woosuk): Currently, we ignore most of the sampling parameters in
    # generating the draft tokens. We only use the temperature. While this
    # could degrade the acceptance rate, it does not affect the distribution
    # of the generated tokens after rejection sampling.

    # TODO(woosuk): Consider seeds.
    # Gumbel-style sampling: argmax of probs divided by Exponential noise.
    q = torch.empty_like(probs)
    q.exponential_()
    # NOTE(woosuk): We shouldn't use `probs.div_(q)` because the draft_probs
    # will be used later for rejection sampling.
    next_token_ids = probs.div(q).argmax(dim=-1).view(-1)
    if not sampling_metadata.all_random:
        # Overwrite greedy rows with their deterministic argmax choice.
        greedy_token_ids = probs.argmax(dim=-1)
        next_token_ids = torch.where(
            is_greedy,
            greedy_token_ids,
            next_token_ids,
        )
    return next_token_ids, probs
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/medusa.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from vllm.config import VllmConfig
|
| 8 |
+
from vllm.forward_context import set_forward_context
|
| 9 |
+
from vllm.logger import init_logger
|
| 10 |
+
from vllm.model_executor.model_loader import get_model
|
| 11 |
+
from vllm.v1.sample.metadata import SamplingMetadata
|
| 12 |
+
|
| 13 |
+
# Initialize logger
|
| 14 |
+
logger = init_logger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class MedusaProposer:
    """
    Medusa proposer class for generating token sequences
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
    ):
        """Store the config values needed for drafting and dummy runs."""
        # Save config parameters
        self.vllm_config = vllm_config
        self.device = device
        self.max_num_tokens = (
            vllm_config.scheduler_config.max_num_batched_tokens)
        self.hidden_size = vllm_config.speculative_config.\
            draft_model_config.get_hidden_size(
            )
        self.dtype = vllm_config.model_config.dtype

    def propose(
        self,
        target_hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> list[list[int]]:
        """Greedily propose one draft token per Medusa head.

        Fixed the return annotation: the original declared
        ``-> torch.Tensor`` but the method returns nested Python lists.

        Args:
            target_hidden_states: [num_tokens, hidden_size] hidden states
                from the target model.
            sampling_metadata: currently unused; drafts are always greedy.

        Returns:
            One list per request, each containing the argmax token id
            from every Medusa head for that request.
        """
        # Generate blocks and compute logits
        blocks = self.model(target_hidden_states)
        logits = self.model.compute_logits(blocks, None)

        # Per-head greedy tokens: draft_tokens[head][request]...
        draft_tokens = [logit.argmax(dim=-1).tolist() for logit in logits]
        # ...transposed to [request][head].
        return [list(row) for row in zip(*draft_tokens)]

    def load_model(self, target_model: nn.Module) -> None:
        """Load the Medusa head under its own compilation tag."""
        from vllm.compilation.backends import set_model_tag
        with set_model_tag("medusa_head"):
            self.model = get_model(vllm_config=self.vllm_config,
                                   model_config=self.vllm_config.
                                   speculative_config.draft_model_config)

    @torch.inference_mode()
    def dummy_run(self, num_tokens: int) -> None:
        """Warmup/profiling pass over a zero hidden-state buffer.

        NOTE(review): the buffer is sized max_num_tokens and passed whole
        to the model regardless of num_tokens (only the forward context
        uses num_tokens) — presumably intentional for peak-memory
        profiling; confirm.
        """
        hidden_states = torch.zeros((self.max_num_tokens, self.hidden_size),
                                    dtype=self.dtype,
                                    device=self.device)
        with set_forward_context(None, self.vllm_config,
                                 num_tokens=num_tokens):
            self.model(hidden_states)
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/metadata.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
class SpecDecodeMetadata:
    """Per-batch bookkeeping for speculative decoding verification.

    Holds the flattened draft tokens plus the index tensors used to pick
    target/bonus logits out of the model output.
    """

    # [num_tokens]
    draft_token_ids: torch.Tensor
    # [batch_size]
    num_draft_tokens: list[int]
    # [batch_size]
    cu_num_draft_tokens: torch.Tensor
    # [num_tokens]
    target_logits_indices: torch.Tensor
    # [batch_size]
    bonus_logits_indices: torch.Tensor
    # [num_tokens + batch_size]
    logits_indices: torch.Tensor

    def __post_init__(self):
        # default=0 so an empty batch no longer raises ValueError.
        self.max_spec_len = max(self.num_draft_tokens, default=0)

    @classmethod
    def make_dummy(
        cls,
        draft_token_ids: list[list[int]],
        device: torch.device,
    ) -> "SpecDecodeMetadata":
        """Build metadata with zeroed index tensors, for warmup/testing.

        Args:
            draft_token_ids: per-request draft token ids.
            device: device on which to allocate the tensors.
        """
        batch_size = len(draft_token_ids)
        num_draft_tokens = [len(ids) for ids in draft_token_ids]
        # Flat comprehension instead of sum(..., []): linear, not quadratic.
        flattened_draft_token_ids = [
            token_id for ids in draft_token_ids for token_id in ids
        ]
        num_tokens = len(flattened_draft_token_ids)

        draft_token_ids_tensor = torch.tensor(flattened_draft_token_ids,
                                              dtype=torch.int32,
                                              device=device)
        cu_num_draft_tokens = np.cumsum(num_draft_tokens, dtype=np.int32)
        cu_num_draft_tokens_tensor = torch.from_numpy(cu_num_draft_tokens).to(
            device)

        # Index tensors are zero-filled placeholders: a dummy only needs
        # the right shapes/dtypes.
        target_logits_indices = torch.zeros(num_tokens,
                                            dtype=torch.int32,
                                            device=device)
        bonus_logits_indices = torch.zeros(batch_size,
                                           dtype=torch.int32,
                                           device=device)
        logits_indices = torch.zeros(num_tokens + batch_size,
                                     dtype=torch.int32,
                                     device=device)
        return cls(
            draft_token_ids=draft_token_ids_tensor,
            num_draft_tokens=num_draft_tokens,
            cu_num_draft_tokens=cu_num_draft_tokens_tensor,
            target_logits_indices=target_logits_indices,
            bonus_logits_indices=bonus_logits_indices,
            logits_indices=logits_indices,
        )
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/metrics.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import prometheus_client
|
| 9 |
+
|
| 10 |
+
from vllm.config import SpeculativeConfig
|
| 11 |
+
from vllm.logger import init_logger
|
| 12 |
+
|
| 13 |
+
logger = init_logger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@dataclass
class SpecDecodingStats:
    """Scheduler-side per-step speculative decoding statistics.

    The scheduler aggregates these numbers across requests on every step
    and ships them to the frontend via EngineCoreOutputs->SchedulerStats.
    """

    num_spec_tokens: int
    num_drafts: int = 0
    num_draft_tokens: int = 0
    num_accepted_tokens: int = 0
    num_accepted_tokens_per_pos: list[int] = field(default_factory=list)

    @classmethod
    def new(cls, num_spec_tokens: int) -> "SpecDecodingStats":
        # Pre-size the per-position counters: one slot per speculative token.
        per_pos_counters = [0] * num_spec_tokens
        return cls(num_spec_tokens=num_spec_tokens,
                   num_accepted_tokens_per_pos=per_pos_counters)

    def observe_draft(self, num_draft_tokens: int, num_accepted_tokens: int):
        # Record one draft round: tokens proposed and tokens accepted.
        self.num_drafts += 1
        self.num_draft_tokens += num_draft_tokens
        self.num_accepted_tokens += num_accepted_tokens
        assert num_accepted_tokens <= self.num_spec_tokens
        # Acceptance is prefix-shaped: positions 0..num_accepted-1 accepted.
        for pos in range(num_accepted_tokens):
            self.num_accepted_tokens_per_pos[pos] += 1
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class SpecDecodingLogging:
    """Aggregate spec decoding metrics over a logging interval.

    observe() accumulates per-iteration stats; log() emits the aggregated
    numbers and resets the accumulators, mirroring LoggingStatLogger.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # One entry per observed scheduler iteration.
        self.num_drafts: list[int] = []
        self.num_draft_tokens: list[int] = []
        self.num_accepted_tokens: list[int] = []
        self.accepted_tokens_per_pos_lists: list[list[int]] = []

    def observe(self, spec_decoding_stats: SpecDecodingStats):
        stats = spec_decoding_stats
        self.num_drafts.append(stats.num_drafts)
        self.num_draft_tokens.append(stats.num_draft_tokens)
        self.num_accepted_tokens.append(stats.num_accepted_tokens)
        self.accepted_tokens_per_pos_lists.append(
            stats.num_accepted_tokens_per_pos)

    def log(self, log_fn=logger.info):
        if not self.num_drafts:
            return
        drafts_total = np.sum(self.num_drafts)
        draft_tokens_total = np.sum(self.num_draft_tokens)
        accepted_total = np.sum(self.num_accepted_tokens)

        if draft_tokens_total > 0:
            draft_acceptance_rate = (accepted_total / draft_tokens_total *
                                     100)
        else:
            draft_acceptance_rate = float("nan")

        # Conventionally, mean acceptance length includes the bonus token
        mean_acceptance_length = 1 + (accepted_total / drafts_total)

        # Rows = iterations, columns = draft positions.
        per_pos_matrix = np.array(self.accepted_tokens_per_pos_lists)
        acceptance_rates = np.sum(per_pos_matrix, axis=0) / drafts_total
        rates_str = ", ".join(f"{p:.3f}" for p in acceptance_rates)

        log_fn(
            "SpecDecoding metrics: "
            "Draft acceptance rate: %.1f%%, "
            "Mean acceptance length: %.2f, "
            "Accepted: %d tokens, "
            "Drafted: %d tokens, "
            "Per-position acceptance rate: %s",
            draft_acceptance_rate,
            mean_acceptance_length,
            accepted_total,
            draft_tokens_total,
            rates_str,
        )
        self.reset()
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class SpecDecodingProm:
    """Record spec decoding metrics in Prometheus.

    The acceptance rate can be calculated using a PromQL query:

      rate(vllm:spec_decode_num_accepted_tokens_total[$interval]) /
      rate(vllm:spec_decode_num_draft_tokens_total[$interval])

    The mean acceptance length (conventionally including bonus tokens)
    can be calculated using:

      1 + (
      rate(vllm:spec_decode_num_accepted_tokens_total[$interval]) /
      rate(vllm:spec_decode_num_drafts[$interval]))

    A per-position acceptance rate vector can be computed using

      vllm:spec_decode_num_accepted_tokens_per_pos[$interval] /
      vllm:spec_decode_num_drafts[$interval]
    """

    # NOTE(review): presumably a seam so tests can substitute a counter
    # implementation — confirm before relying on it.
    _counter_cls = prometheus_client.Counter

    def __init__(
        self,
        speculative_config: Optional[SpeculativeConfig],
        labelnames: list[str],
        labelvalues: list[str],
    ):
        # Counters are only registered when spec decoding is configured;
        # otherwise observe() becomes a no-op via the flag below.
        self.spec_decoding_enabled = speculative_config is not None
        if not self.spec_decoding_enabled:
            return

        self.counter_spec_decode_num_drafts = \
            self._counter_cls(
                name="vllm:spec_decode_num_drafts",
                documentation="Number of spec decoding drafts.",
                labelnames=labelnames).labels(*labelvalues)
        self.counter_spec_decode_num_draft_tokens = \
            self._counter_cls(
                name="vllm:spec_decode_num_draft_tokens",
                documentation="Number of draft tokens.",
                labelnames=labelnames,).labels(*labelvalues)
        self.counter_spec_decode_num_accepted_tokens = \
            self._counter_cls(
                name="vllm:spec_decode_num_accepted_tokens",
                documentation="Number of accepted tokens.",
                labelnames=labelnames).labels(*labelvalues)

        # Unreachable-false at this point (early return above), kept as a
        # narrowing assertion.
        assert speculative_config is not None
        num_spec_tokens = (speculative_config.num_speculative_tokens
                           if self.spec_decoding_enabled else 0)
        # One child counter per draft position, labeled by "position".
        pos_labelnames = labelnames + ["position"]
        base_counter = self._counter_cls(
            name="vllm:spec_decode_num_accepted_tokens_per_pos",
            documentation="Accepted tokens per draft position.",
            labelnames=pos_labelnames,
        )
        self.counter_spec_decode_num_accepted_tokens_per_pos: list[
            prometheus_client.Counter] = []
        for pos in range(num_spec_tokens):
            pos_labelvalues = labelvalues + [str(pos)]
            self.counter_spec_decode_num_accepted_tokens_per_pos.append(
                base_counter.labels(*pos_labelvalues))

    def observe(self, spec_decoding_stats: SpecDecodingStats):
        # Fold one step's aggregated stats into the Prometheus counters.
        if not self.spec_decoding_enabled:
            return
        self.counter_spec_decode_num_drafts.inc(spec_decoding_stats.num_drafts)
        self.counter_spec_decode_num_draft_tokens.inc(
            spec_decoding_stats.num_draft_tokens)
        self.counter_spec_decode_num_accepted_tokens.inc(
            spec_decoding_stats.num_accepted_tokens)
        for pos, counter in enumerate(
                self.counter_spec_decode_num_accepted_tokens_per_pos):
            counter.inc(spec_decoding_stats.num_accepted_tokens_per_pos[pos])
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/ngram_proposer.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from numba import jit
|
| 7 |
+
|
| 8 |
+
from vllm.config import VllmConfig
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class NgramProposer:
    """Drafts speculative tokens by prompt-lookup (n-gram) matching."""

    def __init__(self, vllm_config: VllmConfig):
        spec_cfg = vllm_config.speculative_config
        assert spec_cfg is not None
        assert spec_cfg.prompt_lookup_min is not None
        assert spec_cfg.prompt_lookup_max is not None

        # Shortest n-gram we are willing to match.
        self.min_n = spec_cfg.prompt_lookup_min
        # Longest n-gram we are willing to match.
        self.max_n = spec_cfg.prompt_lookup_max
        # How many tokens to draft after a match; truncated to however many
        # tokens actually follow the match in the context.
        self.k = spec_cfg.num_speculative_tokens
        # Model context limit; drafting never extends past it.
        self.max_model_len = vllm_config.model_config.max_model_len

        # Warm up the Numba-JIT-compiled kernel once at construction time
        # (compilation usually takes under a second).
        self.propose(np.zeros(1024, dtype=np.int32))

    def propose(
        self,
        context_token_ids: np.ndarray,
    ) -> Optional[np.ndarray]:
        """Propose draft tokens via n-gram matching over the context.

        Finds the longest suffix of ``context_token_ids`` (between
        ``min_n`` and ``max_n`` tokens) that also occurs earlier in the
        context, and returns up to ``k`` tokens that followed that earlier
        occurrence.

        Args:
            context_token_ids: Token IDs of the context sequence.

        Returns:
            The tokens that followed the matched n-gram, or ``None`` when
            no n-gram in the allowed length range matches.

        Example:
            With context [1,2,3,4,2,3], min_n=2, max_n=3, k=4:
            the 3-token suffix [4,2,3] has no earlier match, but the
            2-token suffix [2,3] matches at position 1, so the tokens
            after it — [4,2,3] — are returned (only three are available).
        """
        # TODO(woosuk): Optimize this.
        return _find_longest_matched_ngram_and_propose_tokens(
            origin_tokens=context_token_ids,
            min_ngram=self.min_n,
            max_ngram=self.max_n,
            max_model_len=self.max_model_len,
            k=self.k)

    def load_model(self, *args, **kwargs):
        # Pure lookup proposer: there is no model to load.
        pass
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@jit(nopython=True)
def _find_longest_matched_ngram_and_propose_tokens(
        origin_tokens: np.ndarray, min_ngram: int, max_ngram: int,
        max_model_len: int, k: int) -> Optional[np.ndarray]:
    """
    Find the longest n-gram which matches the suffix of the given tokens
    whose length is within [min_ngram, max_ngram] (inclusive).

    If found, we will extract k right after the matched ngram.

    Implementation: the token array is reversed and a KMP-style
    longest-prefix-suffix (LPS) table is built over the reversed sequence,
    so "suffix of origin_tokens matching an earlier span" becomes "prefix
    of the reversed tokens matching at a later index".
    """
    # Do not generate draft tokens is context is shorter than minimum n-gram
    total_token = origin_tokens.shape[0]
    if total_token < min_ngram:
        return None

    # Do not generate draft tokens beyond the max model length.
    k = min(k, max_model_len - total_token)
    if k <= 0:
        return None

    # Flip tokens, and the goal become to find longest ngram
    # on the rightmost position which matches the prefix with
    # length [min_n, max_n] (inclusive).
    tokens = origin_tokens[::-1]

    # Longest prefix (not including itself) which is a suffix of
    # the current position.
    # lps[i] = max{v, where tokens[0:v] == tokens[i+1-v:i+1]}
    #
    # As ngram is capped by max_ngram to save memory, we only need to
    # store lps for the first max_ngram prefix.
    lps = np.zeros(max_ngram, dtype=np.int32)

    # Best match found so far: its length and its end index in `tokens`.
    longest_ngram = 0
    position = 0

    # lps[0] always equal to 0, we starts with index 1
    prev_lps = 0
    i = 1
    while i < total_token:
        # tokens[:prev_lps] is the longest prefix as a suffix of tokens[:i]
        if tokens[prev_lps] == tokens[i]:
            # Token match: tokens[:prev_lps+1] is the longest prefix as
            # a suffix of tokens[:i+1]
            prev_lps += 1
            # Check if we found a longer valid ngram.
            #
            # Update position when longest_ngram matched prev_lps,
            # as we want to get the target n-gram of the earliest position
            # in the original tokens (i.e.
            # latest position in the reversed tokens)
            if prev_lps >= longest_ngram:
                longest_ngram = prev_lps
                position = i
            if i < max_ngram:
                # Store LPS for the first max_ngram prefix
                lps[i] = prev_lps
            if prev_lps == max_ngram:
                # When prev_lps reached max_ngram, update prev_lps
                # to lps[max_ngram-1] to avoid matching ngram
                # longer than max_ngram
                prev_lps = lps[max_ngram - 1]
            i += 1
        elif prev_lps != 0:
            # Token mismatch: try the second longest prefix
            # among all suffix of tokens[:i],
            # which is the longest prefix of tokens[:prev_lps]
            prev_lps = lps[prev_lps - 1]
        else:
            # Token mismatch, and no more prefix (except empty string)
            # as a suffix of tokens[:i]
            i += 1

    if longest_ngram < min_ngram:
        # No valid ngram is found
        return None

    # Flip the position back, so in origin_tokens,
    # origin_tokens[total_token-1-position:total_token-1-position+longest_ngram]
    # is the matched ngram, so we should start drafting tokens from
    # total_token-1-position+longest_ngram
    start_position = total_token - 1 - position + longest_ngram
    # Cap k at the number of tokens actually available after the match.
    k = min(k, total_token - start_position)
    return origin_tokens[start_position:start_position + k]
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/utils.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from vllm.sampling_params import SamplingParams
|
| 4 |
+
|
| 5 |
+
_SAMPLING_EPS = 1e-5


def is_spec_decode_unsupported(sampling_params: SamplingParams) -> bool:
    """True if request is incompatible with speculative decoding"""
    # Any output-distorting penalty, a non-trivial min_p cutoff, or a
    # request for logprobs disqualifies the request from spec decode.
    params = sampling_params
    if params.frequency_penalty != 0.0:
        return True
    if params.presence_penalty != 0.0:
        return True
    if params.repetition_penalty != 1.0:
        return True
    if params.min_p > _SAMPLING_EPS:
        return True
    return params.logprobs is not None
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/__init__.py
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import multiprocessing
|
| 6 |
+
from concurrent.futures import Future, ThreadPoolExecutor
|
| 7 |
+
from typing import TYPE_CHECKING, Optional
|
| 8 |
+
|
| 9 |
+
from vllm.config import VllmConfig
|
| 10 |
+
from vllm.logger import init_logger
|
| 11 |
+
from vllm.reasoning import ReasoningParserManager
|
| 12 |
+
from vllm.transformers_utils.tokenizer_group import init_tokenizer_from_configs
|
| 13 |
+
from vllm.utils import LazyLoader
|
| 14 |
+
from vllm.v1.structured_output.backend_guidance import GuidanceBackend
|
| 15 |
+
from vllm.v1.structured_output.backend_types import (StructuredOutputBackend,
|
| 16 |
+
StructuredOutputGrammar)
|
| 17 |
+
from vllm.v1.structured_output.backend_xgrammar import XgrammarBackend
|
| 18 |
+
|
| 19 |
+
if TYPE_CHECKING:
|
| 20 |
+
import numpy as np
|
| 21 |
+
import numpy.typing as npt
|
| 22 |
+
import torch
|
| 23 |
+
|
| 24 |
+
from vllm.reasoning import ReasoningParser
|
| 25 |
+
from vllm.v1.request import Request
|
| 26 |
+
else:
|
| 27 |
+
torch = LazyLoader("torch", globals(), "torch")
|
| 28 |
+
|
| 29 |
+
logger = init_logger(__name__)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class StructuredOutputManager:
    """Engine-level manager for structured output requests."""

    def __init__(self, vllm_config: VllmConfig):
        # Backend and reasoner are created lazily, on the first request
        # that needs them (see grammar_init / should_fill_bitmask).
        self.backend: Optional[StructuredOutputBackend] = None
        self.reasoner: Optional[ReasoningParser] = None
        self.vllm_config = vllm_config

        # Reusable token bitmask tensor, allocated on first use in
        # grammar_bitmask(); a row of -1 (all bits set) means "no
        # constraint" for that position.
        self._grammar_bitmask: Optional[torch.Tensor] = None
        self._full_mask = torch.tensor(-1, dtype=torch.int32)

        # Only set up the parallel fill path when the batch can actually
        # exceed the threshold; otherwise the serial path is always used.
        max_batch_size = self.vllm_config.scheduler_config.max_num_seqs
        self.fill_bitmask_parallel_threshold = 128
        if self.fill_bitmask_parallel_threshold < max_batch_size:
            self.fill_bitmask_parallel_batch_size = 16
            # Use:
            # - at least 1 CPU
            # - at most half the number of CPUs or 8, whichever is less
            max_workers = max(1, min(multiprocessing.cpu_count() // 2, 8))
            self.executor_for_fillmask = ThreadPoolExecutor(
                max_workers=max_workers)

        if not self.vllm_config.model_config.skip_tokenizer_init:
            # The default max_workers if not specified is the number of
            # CPUs * 5, which is way too high since these tasks are CPU-bound,
            # not I/O bound. We also know we would never dominate CPU usage
            # with just grammar compilation, so we set it to half the number
            # of CPUs.
            max_workers = max(1, (multiprocessing.cpu_count() + 1) // 2)
            self.executor = ThreadPoolExecutor(max_workers=max_workers)
            self.tokenizer = init_tokenizer_from_configs(
                model_config=self.vllm_config.model_config,
                scheduler_config=self.vllm_config.scheduler_config,
                lora_config=self.vllm_config.lora_config,
            ).get_lora_tokenizer(None)
            reasoning_backend = \
                self.vllm_config.decoding_config.reasoning_backend
            if reasoning_backend:
                reasoner_cls = ReasoningParserManager.get_reasoning_parser(
                    reasoning_backend)
                self.reasoner = reasoner_cls(tokenizer=self.tokenizer)

    def grammar_init(self, request: Request) -> None:
        """Kick off async grammar compilation for a structured request.

        Lazily instantiates the (single, process-wide) backend on first
        use, then submits compilation to the executor; the resulting
        Future is stored on the request in place of the grammar.
        """
        if request.structured_output_request is None:
            return

        if TYPE_CHECKING:
            assert request.sampling_params is not None and \
                request.sampling_params.guided_decoding is not None

        # Initialize the backend the first time it is needed.
        #
        # NOTE: We only support a single backend. We do NOT support different
        # backends on a per-request basis in V1 (for now, anyway...).
        if self.backend is None:
            assert request.sampling_params is not None
            backend = request.sampling_params.guided_decoding.backend
            vocab_size = self.vllm_config.model_config.get_vocab_size()
            if backend == "xgrammar":
                self.backend = XgrammarBackend(
                    self.vllm_config,
                    tokenizer=self.tokenizer,
                    vocab_size=vocab_size,
                )
            elif backend == "guidance":
                self.backend = GuidanceBackend(
                    self.vllm_config,
                    tokenizer=self.tokenizer,
                    vocab_size=vocab_size,
                )
            elif backend == "outlines":
                # Imported lazily to avoid paying for outlines unless it
                # is actually selected.
                from vllm.v1.structured_output.backend_outlines import (
                    OutlinesBackend)

                self.backend = OutlinesBackend(
                    self.vllm_config,
                    tokenizer=self.tokenizer,
                    vocab_size=vocab_size,
                )
            else:
                raise ValueError(
                    f"Unsupported structured output backend: {backend}")

        grammar = self.executor.submit(self._async_create_grammar, request)
        request.structured_output_request.grammar = grammar  # type: ignore[assignment]

    def _async_create_grammar(
        self,
        request: Request,
    ) -> StructuredOutputGrammar:
        """Compile a grammar for one request (runs on the executor)."""
        key = request.structured_output_request.structured_output_key  # type: ignore[union-attr]

        # Note that the request was validated in the engine core client,
        # so at this point we know it is a supported type of request.
        #
        # TODO: we still need to handle xgrammar compilation failures,
        # though it should be unlikely as we test that up front as well.
        request_type, grammar_spec = key

        assert self.backend is not None
        return self.backend.compile_grammar(request_type, grammar_spec)

    def _fill_bitmasks(
        self,
        batch: list[tuple[StructuredOutputGrammar, int, bool]],
    ) -> None:
        """Fill bitmask rows for a batch of (grammar, row index, apply) triples.

        Rows whose grammar is terminated or exempt get the all-ones
        "no constraint" mask.
        """
        assert self._grammar_bitmask is not None
        for grammar, index, apply_bitmask in batch:
            if apply_bitmask and not grammar.is_terminated():
                grammar.fill_bitmask(self._grammar_bitmask, index)
            else:
                # Note that for thinking support, we will need to
                # reset the relevant part of the bitmask for consequent
                # requests here.
                self._grammar_bitmask[index].fill_(self._full_mask)

    def _async_submit_fill_bitmask(
        self,
        batch: list[tuple[StructuredOutputGrammar, int, bool]],
    ) -> Future:
        """Submit one bitmask-fill batch to the fill-mask thread pool."""
        return self.executor_for_fillmask.submit(self._fill_bitmasks, batch)

    def grammar_bitmask(
        self,
        requests: dict[str, Request],
        structured_output_request_ids: dict[str, int],
        scheduled_spec_decode_tokens: dict[str, list[int]],
    ) -> Optional[npt.NDArray[np.int32]]:
        """Build the batched token bitmask for this scheduling step.

        Returns a numpy array with one row per constrained token position
        (one row per request, plus one per speculative token when spec
        decode is on), or None when no request needs structured output.
        """
        # Prepare the structured output bitmask for this batch.
        if not structured_output_request_ids:
            return None

        max_num_spec_tokens = 0
        if self.vllm_config.speculative_config is not None:
            max_num_spec_tokens = \
                self.vllm_config.speculative_config.num_speculative_tokens

        if self._grammar_bitmask is None:
            assert self.backend is not None
            max_batch_size = self.vllm_config.scheduler_config.max_num_seqs

            # Allocate a bitmask for each token needing to be checked:
            # one for each speculative position, and one more for the
            # bonus token / non-speculative token.
            self._grammar_bitmask = \
                self.backend.allocate_token_bitmask(
                    max_batch_size * (1 + max_num_spec_tokens))

        # Generate a batched bitmask for all structured output requests.
        # When speculative decoding is enabled, we need to include multiple
        # masks for each request, one for each possible bonus token position.
        # These are stored inline in the tensor and unpacked by the gpu runner.
        cumulative_index = 0
        ordered_seq = sorted(structured_output_request_ids.items(),
                             key=lambda x: x[1])

        # Optimized parallel filling of bitmasks for
        # non-spec, large-batch-size cases
        if len(ordered_seq) > self.fill_bitmask_parallel_threshold and \
            max_num_spec_tokens == 0:
            promises = []
            batch = []
            for req_id, _ in ordered_seq:
                request = requests[req_id]
                structured_output_request = request.structured_output_request
                if TYPE_CHECKING:
                    assert structured_output_request is not None
                    assert structured_output_request.grammar is not None

                apply_bitmask = self.should_fill_bitmask(request)
                batch.append((structured_output_request.grammar,
                              cumulative_index, apply_bitmask))
                if len(batch) == self.fill_bitmask_parallel_batch_size:
                    promises.append(self._async_submit_fill_bitmask(batch))
                    batch = []

                cumulative_index += 1
            if batch:
                promises.append(self._async_submit_fill_bitmask(batch))

            # Wait for all bitmask filling tasks to complete.
            for promise in promises:
                promise.result()
        else:
            # Fallback to serial filling of bitmasks for small-batch-size cases
            for req_id, _ in ordered_seq:
                request = requests[req_id]
                structured_output_request = request.structured_output_request

                if TYPE_CHECKING:
                    assert structured_output_request is not None
                    assert structured_output_request.grammar is not None
                apply_bitmask = self.should_fill_bitmask(request)

                # Advance the grammar through each scheduled speculative
                # token to produce per-position masks, then roll the
                # grammar state back so scheduling stays side-effect free.
                state_advancements = 0
                req_tokens = scheduled_spec_decode_tokens.get(req_id, [])
                for i, token in enumerate(req_tokens + [None]):
                    self._fill_bitmasks([(structured_output_request.grammar,
                                          cumulative_index, apply_bitmask)])

                    if apply_bitmask and token is not None and \
                        not structured_output_request.grammar.is_terminated():
                        assert structured_output_request.grammar.accept_tokens(
                            req_id, [token])
                        state_advancements += 1
                    cumulative_index += 1
                if state_advancements > 0:
                    structured_output_request.grammar.rollback(
                        state_advancements)

        bitmask_tensor = self._grammar_bitmask
        if cumulative_index < bitmask_tensor.shape[0]:
            bitmask_tensor = bitmask_tensor[:cumulative_index]

        # After finishing with the xgrammar operations, we convert to
        # np.ndarray, because that is much more efficient for serialization
        # and deserialization when sending this to the GPU workers.
        return bitmask_tensor.numpy()

    def should_fill_bitmask(self, request: Request) -> bool:
        """Whether this request's output should be constrained right now.

        With a reasoning parser configured, constraints only apply once
        the reasoning section has ended; otherwise always True.
        """
        if self.reasoner is not None:
            assert request.structured_output_request is not None
            if request.structured_output_request.reasoning_ended is None:
                request.structured_output_request.reasoning_ended = \
                    self.reasoner.is_reasoning_end(request.prompt_token_ids)
            return request.structured_output_request.reasoning_ended
        return True

    def should_advance(self, request: Request) -> bool:
        """Whether the request's grammar FSM should consume new tokens."""
        if not request.use_structured_output:
            return False

        # To determine whether we can advance the FSM.
        # Supports thinking usage where we skip the reasoning components.
        if TYPE_CHECKING:
            assert request.structured_output_request is not None
            assert request.structured_output_request.grammar is not None
        # by default, we should always advance
        # for cases that doesn't uses thinking mode.
        if self.reasoner is not None:
            structured_req = request.structured_output_request

            if structured_req.reasoning_ended:
                return True

            # Check if reasoning ends in *this* step
            if self.reasoner.is_reasoning_end(request.all_token_ids):
                # Reasoning just ended, so we shouldn't advanced til
                # next pass
                structured_req.reasoning_ended = True

            return False
        else:
            return True

    def clear_backend(self) -> None:
        """Tear down the backend, releasing any resources it holds."""
        if self.backend is not None:
            self.backend.destroy()
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/backend_guidance.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import copy
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
from dataclasses import dataclass
|
| 10 |
+
from typing import TYPE_CHECKING, Any, Optional, Union
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
|
| 14 |
+
from vllm.logger import init_logger
|
| 15 |
+
from vllm.sampling_params import SamplingParams
|
| 16 |
+
from vllm.utils import LazyLoader
|
| 17 |
+
from vllm.v1.structured_output.backend_types import (StructuredOutputBackend,
|
| 18 |
+
StructuredOutputGrammar,
|
| 19 |
+
StructuredOutputOptions)
|
| 20 |
+
from vllm.v1.structured_output.request import get_structured_output_key
|
| 21 |
+
|
| 22 |
+
if TYPE_CHECKING:
|
| 23 |
+
import llguidance
|
| 24 |
+
import llguidance.hf as llguidance_hf
|
| 25 |
+
import llguidance.torch as llguidance_torch
|
| 26 |
+
else:
|
| 27 |
+
llguidance = LazyLoader("llguidance", globals(), "llguidance")
|
| 28 |
+
llguidance_hf = LazyLoader("llguidance.hf", globals(), "llguidance.hf")
|
| 29 |
+
llguidance_torch = LazyLoader("llguidance.torch", globals(),
|
| 30 |
+
"llguidance.torch")
|
| 31 |
+
|
| 32 |
+
logger = init_logger(__name__)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _walk_json_for_additional_properties(data: object):
    """Recursively mark JSON-schema objects as closed, in place.

    Every dict that declares 'properties' or 'patternProperties' but does
    not already set 'additionalProperties' gets
    ``additionalProperties: False``. Lists are traversed element-wise;
    other values are left untouched.
    """
    if isinstance(data, list):
        for element in data:
            _walk_json_for_additional_properties(element)
        return
    if not isinstance(data, dict):
        return
    # Visit children first, then decide about this schema object itself.
    for child in data.values():
        _walk_json_for_additional_properties(child)
    declares_props = 'properties' in data or 'patternProperties' in data
    if declares_props and 'additionalProperties' not in data:
        data['additionalProperties'] = False


def process_for_additional_properties(
        guide_json: Union[str, dict[str, Any]]) -> dict[str, Any]:
    """Return a schema dict with additionalProperties defaulted to False.

    Accepts either a JSON string or an already-parsed dict; the input dict
    is never mutated (a deep copy is modified instead).
    """
    if isinstance(guide_json, str):
        schema = json.loads(guide_json)
    else:
        # Deep-copy so the caller's schema object is left unmodified.
        schema = copy.deepcopy(guide_json)
    _walk_json_for_additional_properties(schema)
    return schema
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@dataclass
class GuidanceBackend(StructuredOutputBackend):
    """Structured-output backend built on llguidance's LLMatcher."""

    def __post_init__(self):
        # Pull the llguidance-related toggles from the engine's decoding
        # config so compile_grammar doesn't re-read config per call.
        self.disable_any_whitespace = \
            self.vllm_config.decoding_config.disable_any_whitespace
        self.disable_additional_properties = \
            self.vllm_config.decoding_config.disable_additional_properties

        # Wrap the HF tokenizer into llguidance's tokenizer abstraction.
        self.ll_tokenizer = llguidance_hf.from_tokenizer(
            self.tokenizer, self.vocab_size)

    def compile_grammar(self, request_type: StructuredOutputOptions,
                        grammar_spec: str) -> StructuredOutputGrammar:
        """Compile one grammar spec into a GuidanceGrammar matcher.

        Any compilation problem is surfaced via check_error() logging on
        the returned grammar rather than raised here.
        """
        self.serialized_grammar = serialize_guidance_grammar(
            request_type, grammar_spec, self.disable_any_whitespace,
            self.disable_additional_properties)

        # LLGUIDANCE_LOG_LEVEL defaults to 1 (warnings) when unset.
        ll_matcher = llguidance.LLMatcher(
            self.ll_tokenizer,
            self.serialized_grammar,
            log_level=int(os.environ.get("LLGUIDANCE_LOG_LEVEL", "1")),
        )

        r = GuidanceGrammar(
            ll_matcher=ll_matcher,
            ll_tokenizer=self.ll_tokenizer,
            vocab_size=self.vocab_size,
        )

        # Log any matcher construction error once, up front.
        r.check_error()
        return r

    def allocate_token_bitmask(self, max_num_seqs: int):
        """Allocate a token bitmask tensor with one row per sequence."""
        return llguidance_torch.allocate_token_bitmask(
            max_num_seqs, self.ll_tokenizer.vocab_size)

    def destroy(self):
        # Nothing to release: llguidance objects are plain GC-managed.
        pass
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@dataclass
class GuidanceGrammar(StructuredOutputGrammar):
    """Per-request grammar state backed by an llguidance LLMatcher."""

    # The compiled matcher that tracks the grammar state.
    ll_matcher: llguidance.LLMatcher
    # Tokenizer wrapper, used here only for its eos_token id.
    ll_tokenizer: llguidance.LLTokenizer
    vocab_size: int
    # True once a matcher error has been logged (log at most once).
    printed_error: bool = False
    # True once EOS has been consumed; see accept_tokens/is_terminated.
    terminated: bool = False

    def check_error(self):
        """Log the matcher's pending error, at most once per grammar."""
        if not self.printed_error:
            err = self.ll_matcher.get_error()
            if err:
                self.printed_error = True
                logger.warning("LLMatcher error: %s", err)

    def accept_tokens(self, request_id: str, tokens: list[int]) -> bool:
        """Accepts a list of tokens and advances the parser.

        Returns True if the parser was advanced successfully.
        Returns False if the parser failed to advance.
        """

        if self.ll_tokenizer.eos_token in tokens:
            self.terminated = True

        # A stopped matcher accepts anything further without advancing.
        if self.ll_matcher.is_stopped():
            return True

        # TODO - Add jump decoding support in the future:
        # self.ll_matcher.compute_ff_bytes() - this should always work
        # self.ll_matcher.compute_ff_tokens() - this only works for
        # "canonical" tokenizers
        # For conversion between the two, see
        # https://github.com/guidance-ai/llguidance/blob/main/docs/fast_forward.md

        r = self.ll_matcher.consume_tokens(tokens)

        self.check_error()

        return r

    def validate_tokens(self, tokens: list[int]) -> list[int]:
        """Checks if the list of tokens are accepted by the parser in sequence.
        Will not advance the parser.

        Returns the prefix list of tokens that are accepted by the parser.
        """
        if len(tokens) == 0:
            return []
        if self.ll_matcher.is_stopped():
            return []

        num_tokens = self.ll_matcher.validate_tokens(tokens)

        self.check_error()

        return tokens[:num_tokens]

    def rollback(self, num_tokens: int) -> None:
        """Rewind the matcher state by ``num_tokens`` consumed tokens."""
        self.ll_matcher.rollback(num_tokens)
        self.check_error()

    def fill_bitmask(self, bitmask: torch.Tensor, idx: int) -> None:
        """Write the allowed-next-token mask into row ``idx`` of ``bitmask``."""
        # this will automatically return [EOS] mask if the matcher is stopped
        # or otherwise in an error state
        llguidance_torch.fill_next_token_bitmask(self.ll_matcher, bitmask, idx)
        self.check_error()

    def is_terminated(self) -> bool:
        """True once EOS has been seen by accept_tokens()."""
        return self.terminated

    def reset(self):
        # This method may be not needed anymore? TODO
        self.ll_matcher.reset()
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def serialize_guidance_grammar(
    request_type: StructuredOutputOptions,
    grammar_spec: Union[str, dict[str, Any]],
    disable_any_whitespace: bool = False,
    disable_additional_properties: bool = False,
) -> str:
    """Convert a structured-output request into an llguidance grammar string.

    JSON / JSON_OBJECT requests are compiled from a JSON schema,
    STRUCTURAL_TAG builds a tag grammar from the spec's triggers and
    structures, and REGEX / GRAMMAR / CHOICE are forwarded to
    ``llguidance.grammar_from``.
    """

    def _schema_to_grammar(schema: Union[str, dict[str, Any]], ) -> str:
        # Optionally rewrite the schema to forbid additionalProperties
        # before handing it to llguidance.
        if disable_additional_properties:
            schema = process_for_additional_properties(schema)
        return llguidance.LLMatcher.grammar_from_json_schema(
            schema,
            defaults={
                "whitespace_flexible": not disable_any_whitespace,
            })

    if request_type == StructuredOutputOptions.JSON:
        return _schema_to_grammar(grammar_spec)

    if request_type == StructuredOutputOptions.JSON_OBJECT:
        return llguidance.LLMatcher.grammar_from_json_schema(
            '{"type": "object"}',
            defaults={
                "whitespace_flexible": not disable_any_whitespace,
            })

    if request_type == StructuredOutputOptions.STRUCTURAL_TAG:
        s_tag = (json.loads(grammar_spec)
                 if isinstance(grammar_spec, str) else grammar_spec)
        triggers: list[str] = s_tag["triggers"]
        tags: list[llguidance.StructTag] = []
        for structure in s_tag["structures"]:
            begin: str = structure["begin"]
            # Each structure must be reachable from one of the triggers.
            trigger = next((t for t in triggers if begin.startswith(t)), None)
            if trigger is None:
                raise ValueError(
                    f"Trigger {begin} not found in triggers {triggers}")
            tags.append(
                llguidance.StructTag(
                    trigger=trigger,
                    begin=begin,
                    grammar=_schema_to_grammar(structure["schema"]),
                    end=structure["end"],
                ))
        if not tags:
            raise ValueError(
                "No structural tags found in the grammar spec.")
        return llguidance.StructTag.to_grammar(tags)

    # Remaining request kinds map directly onto llguidance grammar types.
    simple_types = {
        StructuredOutputOptions.REGEX: "regex",
        StructuredOutputOptions.GRAMMAR: "grammar",
        StructuredOutputOptions.CHOICE: "choice",
    }
    tp = simple_types.get(request_type)
    if tp is None:
        logger.error("Validation should have already occurred. "
                     "Please file an issue.")
        raise ValueError("grammar is not of valid supported types. "
                         f"({request_type!s})")
    return llguidance.grammar_from(tp, grammar_spec)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def validate_guidance_grammar(
        sampling_params: SamplingParams,
        tokenizer: Optional[llguidance.LLTokenizer] = None) -> None:
    """Serialize the request's grammar and fail fast if llguidance
    rejects it.

    Raises:
        ValueError: if llguidance reports the grammar as invalid.
    """
    request_type, spec = get_structured_output_key(sampling_params)
    serialized = serialize_guidance_grammar(request_type, spec)
    err = llguidance.LLMatcher.validate_grammar(serialized, tokenizer)
    if err:
        raise ValueError(f"Grammar error: {err}")
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/backend_outlines.py
ADDED
|
@@ -0,0 +1,320 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright 2025-present the Outlines developers
|
| 3 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import ast
|
| 7 |
+
import importlib
|
| 8 |
+
import json
|
| 9 |
+
import sys
|
| 10 |
+
from dataclasses import dataclass, field
|
| 11 |
+
from typing import TYPE_CHECKING
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
from regex import escape as regex_escape
|
| 15 |
+
|
| 16 |
+
from vllm.sampling_params import SamplingParams
|
| 17 |
+
from vllm.utils import LazyLoader
|
| 18 |
+
from vllm.v1.structured_output.backend_types import (StructuredOutputBackend,
|
| 19 |
+
StructuredOutputGrammar,
|
| 20 |
+
StructuredOutputOptions)
|
| 21 |
+
from vllm.v1.structured_output.utils import (OutlinesVocabulary,
|
| 22 |
+
get_outlines_cache,
|
| 23 |
+
get_outlines_vocabulary)
|
| 24 |
+
|
| 25 |
+
if TYPE_CHECKING:
|
| 26 |
+
import outlines_core as oc
|
| 27 |
+
import outlines_core.json_schema as json_schema
|
| 28 |
+
else:
|
| 29 |
+
oc = LazyLoader("oc", globals(), "outlines_core")
|
| 30 |
+
json_schema = LazyLoader("json_schema", globals(),
|
| 31 |
+
"outlines_core.json_schema")
|
| 32 |
+
|
| 33 |
+
# Python 3.11+ sre_parse and sre_constants
|
| 34 |
+
# are deprecated, so we must import them from re
|
| 35 |
+
if sys.version_info >= (3, 11):
|
| 36 |
+
# Hack to get around pre-commit regex module rule
|
| 37 |
+
# because going through re is the only way to get sre_parse
|
| 38 |
+
# and sre_constants in Python 3.11+
|
| 39 |
+
_re = importlib.import_module("re")
|
| 40 |
+
sre_parse = _re._parser
|
| 41 |
+
sre_constants = _re._constants
|
| 42 |
+
else:
|
| 43 |
+
import sre_constants
|
| 44 |
+
import sre_parse
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@dataclass
class OutlinesBackend(StructuredOutputBackend):
    """Outlines-core implementation of the structured output backend."""

    def __post_init__(self):
        self.vocabulary = get_outlines_vocabulary(self.tokenizer)
        self.cache = get_outlines_cache()

    def _compile_index(self, regex_string: str,
                       vocabulary: OutlinesVocabulary) -> oc.Index:
        # Indexes are expensive to build, so key the cache on both the
        # vocabulary identity and the regex itself.
        cache_key = f"{vocabulary._hash}_{regex_string}"
        if cache_key in self.cache:
            return self.cache[cache_key]

        index = oc.Index(regex_string, vocabulary.inner)
        self.cache[cache_key] = index
        return index

    def compile_grammar(self, request_type: StructuredOutputOptions,
                        grammar_spec: str) -> StructuredOutputGrammar:
        if request_type == StructuredOutputOptions.JSON:
            pattern = json_schema.build_regex_from_schema(grammar_spec)
        elif request_type == StructuredOutputOptions.REGEX:
            pattern = grammar_spec
        elif request_type == StructuredOutputOptions.CHOICE:
            options = [
                regex_escape(c) for c in ast.literal_eval(grammar_spec)
            ]
            pattern = "(" + "|".join(options) + ")"
        else:
            raise ValueError(
                f"Invalid request type for Outlines backend ({request_type!s})"
            )
        index = self._compile_index(pattern, self.vocabulary)
        spec_config = self.vllm_config.speculative_config
        # Allow rolling back as many tokens as speculation may propose.
        max_rollback_tokens = (spec_config.num_speculative_tokens
                               if spec_config is not None else 0)
        return OutlinesGrammar(
            vocab_size=self.vocab_size,
            guide=oc.Guide(index, max_rollback=max_rollback_tokens))

    def allocate_token_bitmask(self, max_num_seqs: int) -> torch.Tensor:
        # One int32 holds mask bits for 32 vocab entries; -1 = all allowed.
        num_words = (self.vocab_size + 31) // 32
        return torch.full(
            (max_num_seqs, num_words),
            -1,
            dtype=torch.int32,
            pin_memory=torch.cuda.is_available(),
        )

    def destroy(self):
        # Nothing to clean up for Outlines.
        pass
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@dataclass
class OutlinesGrammar(StructuredOutputGrammar):
    """Per-request FSM wrapper around an outlines_core ``Guide``."""

    vocab_size: int
    guide: oc.Guide = field(hash=False)
    num_processed_tokens: int = field(default_factory=lambda: 0,
                                      repr=False,
                                      hash=False,
                                      init=False)

    # outlines_core signals done on DFA accept; vLLM expects done after EOS.
    # We delay the finished flag by one step so EOS can still be emitted.
    _prev_finished: bool = field(default=False,
                                 init=False,
                                 repr=False,
                                 hash=False)

    def accept_tokens(self, request_id: str, tokens: list[int]) -> bool:
        """Accepts a list of tokens and advances the FSM.

        Returns True if the FSM was advanced successfully.
        Returns False if the FSM failed to advance.
        """
        if not self.guide.accepts_tokens(tokens):
            return False
        # Advance cannot fail because accepts_tokens() already vetted them.
        for tok in tokens:
            self.guide.advance(tok)
            self.num_processed_tokens += 1
        return True

    def rollback(self, num_tokens: int) -> None:
        self.guide.rollback_state(num_tokens)
        self.num_processed_tokens -= num_tokens

    def validate_tokens(self, tokens: list[int]) -> list[int]:
        """Longest prefix of ``tokens`` the FSM accepts; FSM not advanced."""
        prefix: list[int] = []
        for tok in tokens:
            prefix.append(tok)
            if not self.guide.accepts_tokens(prefix):
                prefix.pop()
                break
        return prefix

    def fill_bitmask(self, bitmask: torch.Tensor, idx: int) -> None:
        row = bitmask[idx]
        self.guide.write_mask_into(row.data_ptr(), row.numel(),
                                   row.element_size())

    def is_terminated(self) -> bool:
        # Report last step's finished state so the EOS token is still
        # produced on the step where the DFA accepts.
        finished_now = self.guide.is_finished()
        was_finished = self._prev_finished
        self._prev_finished = finished_now
        return was_finished

    def reset(self):
        self.num_processed_tokens = 0
        self._prev_finished = False
        self.guide.reset()
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def validate_structured_output_request_outlines(params: SamplingParams):
    """Validate that the guided-decoding spec in ``params`` is something
    the Outlines backend can actually build.

    Raises:
        ValueError: on invalid JSON, an unbuildable regex, or a grammar
            spec (grammars are unsupported by this backend).
    """
    gd_params = params.guided_decoding
    if gd_params is None:
        return

    if gd_params.regex:
        validate_regex_is_buildable(gd_params.regex)
    elif gd_params.json:
        if isinstance(gd_params.json, str):
            try:
                # make sure schema is valid json
                json.loads(gd_params.json)
            except json.JSONDecodeError as e:
                raise ValueError("Invalid JSON grammar specification.") from e
            schema = gd_params.json
        else:
            try:
                schema = json.dumps(gd_params.json)
            except Exception as e:
                raise ValueError(
                    f"Error serializing guided decoding jsonschema: {e}"
                ) from e
        validate_regex_is_buildable(
            json_schema.build_regex_from_schema(schema))
    elif gd_params.choice:
        escaped = [regex_escape(str(choice)) for choice in gd_params.choice]
        validate_regex_is_buildable("(" + "|".join(escaped) + ")")
    elif gd_params.grammar:
        raise ValueError("Outlines guided decoding backend "
                         "does not support grammar specifications")
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def _prefix_needs_context(parsed) -> bool:
    """Return True if there's a look-around/anchor before any consumer."""

    def can_consume(sub) -> bool:
        """Return True if the subpattern can consume at least one char."""
        seq = sub.data if hasattr(sub, 'data') else sub
        for op, arg in seq:
            # literal, character class, or dot always consumes
            if op in (sre_parse.LITERAL, sre_parse.IN, sre_parse.ANY):
                return True
            # quantified subpattern: check inner pattern
            if op == sre_parse.MAX_REPEAT:
                _, max_rep, inner = arg
                if max_rep != 0 and can_consume(inner):
                    return True
            # alternation: if any branch consumes, the whole does
            elif op == sre_parse.BRANCH:
                if any(can_consume(b) for b in arg[1]):
                    return True
            # grouped subpattern: recurse into its contents
            elif op == sre_parse.SUBPATTERN and can_consume(arg[3]):
                return True
        # No consumers, return False
        return False

    seq = parsed.data if hasattr(parsed, 'data') else parsed
    for op, arg in seq:
        # Direct anchors or look-around
        if op == sre_parse.AT or op in (sre_constants.ASSERT,
                                        sre_constants.ASSERT_NOT):
            return True

        # Nested subpattern: arg is (group, add_flags, del_flags, subpattern)
        if op == sre_parse.SUBPATTERN:
            if _prefix_needs_context(arg[3]):
                return True
            if can_consume(arg[3]):
                return False

        # if any branch has a prefix anchor => True,
        # else if at least one branch consumes => prefix ends => False
        elif op == sre_parse.BRANCH:
            found_consumer = False
            for b in arg[1]:
                if _prefix_needs_context(b):
                    return True
                if can_consume(b):
                    found_consumer = True
            if found_consumer:
                return False

        # Immediate consumer tokens
        elif op in (sre_parse.LITERAL, sre_parse.IN, sre_parse.ANY):
            return False

        # if subpattern has anchor => True, if it can consume => stop
        elif op == sre_parse.MAX_REPEAT:
            if _prefix_needs_context(arg[2]):
                return True
            if can_consume(arg[2]):
                return False

    return False
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def _check_unsupported(parsed) -> None:
    """Raise ValueError on regex features regex-automata cannot handle."""
    seq = parsed.data if hasattr(parsed, 'data') else parsed
    for op, arg in seq:
        # backreference
        if op in (sre_parse.GROUPREF, sre_parse.GROUPREF_EXISTS):
            raise ValueError("Backreferences are unsupported.")

        # look-around assertion
        if op in (sre_constants.ASSERT, sre_constants.ASSERT_NOT):
            raise ValueError("Look-Around assertion are unsupported.")

        # unicode word boundaries
        if op == sre_parse.AT:
            if arg in (sre_constants.AT_BOUNDARY,
                       sre_constants.AT_NON_BOUNDARY):
                raise ValueError("Unicode word boundaries are unsupported.")

        # arg is (None, branches)
        elif op == sre_parse.BRANCH:
            for branch in arg[1]:
                _check_unsupported(branch)

        # arg is (min, max, subpattern)
        elif op == sre_parse.MAX_REPEAT:
            _check_unsupported(arg[2])
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def validate_regex_is_buildable(pattern: str) -> None:
    """
    Validates that the input regex is not using unsupported features
    of the `regex-automata` crate (outlines_core regex engine) and has a
    universal start state.
    definition of universal start state used can be found at:
    https://docs.rs/regex-automata/latest/regex_automata/dfa/trait.Automaton.html#method.universal_start_state

    Raises:
        ValueError: if the pattern fails to parse, uses an unsupported
            feature, or needs left context before the first token.
    """
    try:
        parsed = sre_parse.parse(pattern)
    except sre_constants.error as e:
        raise ValueError(f"Error parsing regex: {e}") from e

    try:
        _check_unsupported(parsed)
    except ValueError as e:
        raise ValueError(
            f"Regex uses unsupported feature for guided decoding: {e}. "
            "Only basic matching constructs are supported—lookarounds, "
            "backreferences, and unicode boundaries are not.") from e

    if _prefix_needs_context(parsed):
        # BUGFIX: the adjacent string literals previously lacked separators,
        # yielding "…start stateThis means…" and "…is matched.Guided…";
        # also "a anchored" -> "an anchored".
        raise ValueError(
            "Regex does not have an anchored universal start state. "
            "This means that the Regex uses anchors (^) or look-arounds "
            "in a way which requires context before any token is matched. "
            "Guided decoding needs regexes that can match without needing "
            "that context. Try rewriting the pattern without using these "
            f"constructs. Pattern:\n{pattern}")
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/backend_types.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import enum
|
| 7 |
+
from abc import ABC, abstractmethod
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from typing import TYPE_CHECKING
|
| 10 |
+
|
| 11 |
+
if TYPE_CHECKING:
|
| 12 |
+
import torch
|
| 13 |
+
|
| 14 |
+
from vllm.config import VllmConfig
|
| 15 |
+
from vllm.transformers_utils.tokenizer import AnyTokenizer
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class StructuredOutputOptions(enum.Enum):
    """Kinds of structured-output (guided decoding) requests."""
    JSON = enum.auto()
    JSON_OBJECT = enum.auto()
    REGEX = enum.auto()
    GRAMMAR = enum.auto()
    CHOICE = enum.auto()
    STRUCTURAL_TAG = enum.auto()


# Cache/compile key: the request kind plus its serialized specification.
StructuredOutputKey = tuple[StructuredOutputOptions, str]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class StructuredOutputGrammar(ABC):
    """Request-level backend for structured output requests."""

    @abstractmethod
    def accept_tokens(self, request_id: str, tokens: list[int]) -> bool:
        """
        Determines whether the provided tokens are accepted for the
        given request, advancing the grammar when they are.

        Args:
            request_id (str): The unique identifier for the request.
            tokens (list[int]): A list of token IDs to evaluate.

        Returns:
            bool: True if the tokens are accepted, False otherwise.
        """

    @abstractmethod
    def validate_tokens(self, tokens: list[int]) -> list[int]:
        """
        Validates the provided tokens against the grammar without
        advancing the FSM.

        Args:
            tokens (list[int]): A list of token IDs to validate.

        Returns:
            list[int]: The accepted prefix of the input tokens; empty if
                none are accepted.
        """

    @abstractmethod
    def rollback(self, num_tokens: int) -> None:
        """
        Rolls back the grammar state by the given number of tokens and
        reverts the processed-token counters accordingly.

        Args:
            num_tokens (int): The number of tokens to roll back.
        """

    @abstractmethod
    def fill_bitmask(self, bitmask: torch.Tensor, batch_index: int) -> None:
        """
        Fills the allowed-token bitmask row for one batch entry.

        Args:
            bitmask (torch.Tensor): The bitmask to fill
            batch_index (int): The index in the bitmask to fill
        """

    @abstractmethod
    def is_terminated(self) -> bool:
        """
        Checks whether the structured output process has terminated.

        Returns:
            bool: True if the process is terminated, False otherwise.
        """

    @abstractmethod
    def reset(self):
        """
        Resets the state of the structured output grammar.
        """
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@dataclass
class StructuredOutputBackend(ABC):
    """Engine-level backend for structured output requests."""

    # Engine configuration; speculative settings determine rollback depth.
    vllm_config: VllmConfig
    # Tokenizer used to derive the backend's vocabulary.
    tokenizer: AnyTokenizer
    # Size of the vocabulary the bitmask must cover.
    vocab_size: int

    @abstractmethod
    def compile_grammar(self, request_type: StructuredOutputOptions,
                        grammar_spec: str) -> StructuredOutputGrammar:
        """
        Compiles a grammar specification into a structured output grammar.

        Args:
            request_type (StructuredOutputOptions): The type of structured
                output request.
            grammar_spec (str): The grammar specification to compile.

        Returns:
            StructuredOutputGrammar: The compiled structured output grammar.
        """

    @abstractmethod
    def allocate_token_bitmask(self, max_num_seqs: int) -> torch.Tensor:
        """
        Allocates a token bitmask for the specified maximum number of
        sequences.

        Args:
            max_num_seqs (int): The maximum number of sequences for which
                to allocate the bitmask.
        """

    @abstractmethod
    def destroy(self):
        """
        Backend-specific cleanup.
        """
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/backend_xgrammar.py
ADDED
|
@@ -0,0 +1,323 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
from dataclasses import dataclass, field
|
| 8 |
+
from typing import TYPE_CHECKING, Any
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
import vllm.envs
|
| 13 |
+
from vllm.logger import init_logger
|
| 14 |
+
from vllm.sampling_params import SamplingParams
|
| 15 |
+
from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer
|
| 16 |
+
from vllm.utils import LazyLoader
|
| 17 |
+
from vllm.v1.structured_output.backend_types import (StructuredOutputBackend,
|
| 18 |
+
StructuredOutputGrammar,
|
| 19 |
+
StructuredOutputOptions)
|
| 20 |
+
from vllm.v1.structured_output.utils import (choice_as_grammar,
|
| 21 |
+
convert_lark_to_ebnf,
|
| 22 |
+
grammar_is_likely_lark)
|
| 23 |
+
|
| 24 |
+
if TYPE_CHECKING:
|
| 25 |
+
import xgrammar as xgr
|
| 26 |
+
else:
|
| 27 |
+
xgr = LazyLoader("xgr", globals(), "xgrammar")
|
| 28 |
+
|
| 29 |
+
logger = init_logger(__name__)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@dataclass
|
| 33 |
+
class XgrammarBackend(StructuredOutputBackend):
|
| 34 |
+
|
| 35 |
+
def __post_init__(self):
|
| 36 |
+
self.disable_any_whitespace = \
|
| 37 |
+
self.vllm_config.decoding_config.disable_any_whitespace
|
| 38 |
+
|
| 39 |
+
if isinstance(self.tokenizer, MistralTokenizer):
|
| 40 |
+
# NOTE: ideally, xgrammar should handle this accordingly.
|
| 41 |
+
# refer to https://github.com/mlc-ai/xgrammar/blob/d77c0a0173ef14779c918e3be7966ba852f7910f/python/xgrammar/tokenizer_info.py#L98
|
| 42 |
+
try:
|
| 43 |
+
if self.tokenizer.is_tekken:
|
| 44 |
+
encoded_vocab = self.tokenizer._vocab
|
| 45 |
+
else:
|
| 46 |
+
encoded_vocab = [
|
| 47 |
+
token for token, _ in sorted(
|
| 48 |
+
self.tokenizer.get_vocab().items(),
|
| 49 |
+
key=lambda x: x[1],
|
| 50 |
+
)
|
| 51 |
+
]
|
| 52 |
+
stop_token_ids = None
|
| 53 |
+
if (hasattr(
|
| 54 |
+
self.tokenizer,
|
| 55 |
+
"eos_token_id",
|
| 56 |
+
) and self.tokenizer.eos_token_id is not None):
|
| 57 |
+
stop_token_ids = [self.tokenizer.eos_token_id]
|
| 58 |
+
except AttributeError as e:
|
| 59 |
+
raise ValueError(
|
| 60 |
+
f"Cannot get the vocabulary of the tokenizer "
|
| 61 |
+
f"{type(self.tokenizer)}. The tokenizer should have a "
|
| 62 |
+
"get_vocab method.") from e
|
| 63 |
+
tokenizer_info = xgr.TokenizerInfo( # type: ignore
|
| 64 |
+
encoded_vocab=encoded_vocab,
|
| 65 |
+
# NOTE: https://github.com/mlc-ai/xgrammar/blob/5e141f6ff1ca02bc31f9e512e68b61f2a8ae88e5/tests/python/test_tokenizer_info.py#L43 # noqa: E501
|
| 66 |
+
vocab_type=xgr.VocabType.RAW
|
| 67 |
+
if self.tokenizer.is_tekken else xgr.VocabType.BYTE_FALLBACK,
|
| 68 |
+
vocab_size=self.vocab_size,
|
| 69 |
+
stop_token_ids=stop_token_ids,
|
| 70 |
+
add_prefix_space=True,
|
| 71 |
+
)
|
| 72 |
+
else:
|
| 73 |
+
tokenizer_info = xgr.TokenizerInfo.from_huggingface(
|
| 74 |
+
self.tokenizer,
|
| 75 |
+
vocab_size=self.vocab_size,
|
| 76 |
+
)
|
| 77 |
+
self.compiler = xgr.GrammarCompiler(
|
| 78 |
+
tokenizer_info,
|
| 79 |
+
max_threads=8,
|
| 80 |
+
cache_enabled=True,
|
| 81 |
+
cache_limit_bytes=vllm.envs.VLLM_XGRAMMAR_CACHE_MB * 1024 * 1024,
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
self.num_speculative_tokens = 0
|
| 85 |
+
if self.vllm_config.speculative_config is not None:
|
| 86 |
+
self.num_speculative_tokens = \
|
| 87 |
+
self.vllm_config.speculative_config.num_speculative_tokens
|
| 88 |
+
|
| 89 |
+
def compile_grammar(self, request_type: StructuredOutputOptions,
|
| 90 |
+
grammar_spec: str) -> StructuredOutputGrammar:
|
| 91 |
+
if request_type == StructuredOutputOptions.JSON:
|
| 92 |
+
ctx = self.compiler.compile_json_schema(
|
| 93 |
+
grammar_spec, any_whitespace=not self.disable_any_whitespace)
|
| 94 |
+
elif request_type == StructuredOutputOptions.JSON_OBJECT:
|
| 95 |
+
ctx = self.compiler.compile_json_schema(
|
| 96 |
+
'{"type": "object"}',
|
| 97 |
+
any_whitespace=not self.disable_any_whitespace)
|
| 98 |
+
elif request_type == StructuredOutputOptions.GRAMMAR:
|
| 99 |
+
ctx = self.compiler.compile_grammar(grammar_spec)
|
| 100 |
+
elif request_type == StructuredOutputOptions.REGEX:
|
| 101 |
+
ctx = self.compiler.compile_regex(grammar_spec)
|
| 102 |
+
elif request_type == StructuredOutputOptions.STRUCTURAL_TAG:
|
| 103 |
+
s_tag = json.loads(grammar_spec)
|
| 104 |
+
tags = [
|
| 105 |
+
xgr.StructuralTagItem(
|
| 106 |
+
begin=s["begin"],
|
| 107 |
+
schema=json.dumps(s["schema"]),
|
| 108 |
+
end=s["end"],
|
| 109 |
+
) for s in s_tag["structures"]
|
| 110 |
+
]
|
| 111 |
+
ctx = self.compiler.compile_structural_tag(tags, s_tag["triggers"])
|
| 112 |
+
else:
|
| 113 |
+
logger.error(
|
| 114 |
+
"Validation should have already occurred. Please file an issue."
|
| 115 |
+
)
|
| 116 |
+
raise ValueError(
|
| 117 |
+
f"grammar is not of valid supported types. ({request_type!s})")
|
| 118 |
+
|
| 119 |
+
return XgrammarGrammar(
|
| 120 |
+
matcher=xgr.GrammarMatcher(
|
| 121 |
+
ctx,
|
| 122 |
+
max_rollback_tokens=self.num_speculative_tokens,
|
| 123 |
+
),
|
| 124 |
+
vocab_size=self.vocab_size,
|
| 125 |
+
ctx=ctx,
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
def allocate_token_bitmask(self, max_num_seqs: int):
|
| 129 |
+
return xgr.allocate_token_bitmask(max_num_seqs, self.vocab_size)
|
| 130 |
+
|
| 131 |
+
    def destroy(self):
        # Drop the compiler reference so its resources can be reclaimed.
        del self.compiler
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
@dataclass
class XgrammarGrammar(StructuredOutputGrammar):
    # NOTE: This would be a generic-enough class for
    # supporting different backends, in the future.
    # For now, just xgrammar.
    #
    # https://xgrammar.mlc.ai/docs/api/python/index.html#xgrammar.GrammarMatcher.find_jump_forward_string
    # for jump-forward decoding

    # Size of the tokenizer vocabulary the token bitmask covers.
    vocab_size: int
    # Stateful matcher advanced token-by-token; excluded from hashing.
    matcher: xgr.GrammarMatcher = field(hash=False)
    # Compiled grammar the matcher was built from.
    ctx: xgr.CompiledGrammar = field(hash=False)
    # Number of tokens accepted so far; decremented on rollback.
    num_processed_tokens: int = field(default_factory=lambda: 0,
                                      repr=False,
                                      hash=False,
                                      init=False)
    # Cached terminal state of the matcher, refreshed after accept/rollback.
    _is_terminated: bool = field(default=False, repr=False, hash=False)

    def accept_tokens(self, request_id: str, tokens: list[int]) -> bool:
        """Accepts a list of tokens and advances the FSM.

        Returns True if the FSM was advanced successfully.
        Returns False if the FSM failed to advance.
        """
        if self._is_terminated:
            # Nothing can be accepted once the grammar has terminated.
            return False
        for token in tokens:
            if not self.matcher.accept_token(token):
                logger.error(
                    "Failed to advance FSM for request %s "
                    "for tokens %s. Please file an issue.", request_id, token)
                # NOTE(review): tokens accepted earlier in this loop are NOT
                # rolled back on failure — presumably the caller recovers;
                # confirm against call sites.
                return False
            self.num_processed_tokens += 1
        self._is_terminated = self.matcher.is_terminated()
        return True

    def validate_tokens(self, tokens: list[int]) -> list[int]:
        """Checks if the list of tokens are accepted by the FSM in sequence.
        Will not advance the FSM.

        Returns the prefix list of tokens that are accepted by the FSM.
        """
        accepted_tokens = []
        for token in tokens:
            if self.matcher.accept_token(token):
                accepted_tokens.append(token)
            else:
                break
        if len(accepted_tokens) > 0:
            # Rollback the FSM to the initial state
            self.matcher.rollback(len(accepted_tokens))
        return accepted_tokens

    def rollback(self, num_tokens: int) -> None:
        # Undo the last `num_tokens` accepted tokens (speculative decoding).
        self.matcher.rollback(num_tokens)
        self.num_processed_tokens -= num_tokens
        self._is_terminated = self.matcher.is_terminated()

    def fill_bitmask(self, bitmask: torch.Tensor, idx: int) -> None:
        # Write the allowed-next-token bitmask into row `idx` of `bitmask`.
        self.matcher.fill_next_token_bitmask(bitmask, idx)

    def is_terminated(self) -> bool:
        # Report the cached terminal state (no matcher call here).
        return self._is_terminated

    def reset(self):
        # Return the matcher to its initial state for request reuse.
        # NOTE(review): `_is_terminated` is not cleared here — presumably
        # callers only reset non-terminated grammars; confirm.
        self.num_processed_tokens = 0
        self.matcher.reset()
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def has_xgrammar_unsupported_json_features(schema: dict[str, Any]) -> bool:
    """Check if JSON schema contains features unsupported by xgrammar."""

    # Keywords xgrammar cannot handle, keyed by the declared "type".
    unsupported_keys_by_type = {
        "integer": ("multipleOf", ),
        "number": ("multipleOf", ),
        "array": ("uniqueItems", "contains", "minContains", "maxContains"),
        "string": ("format", ),
        "object": ("minProperties", "maxProperties", "propertyNames",
                   "patternProperties"),
    }

    def _walk(node: dict[str, Any]) -> bool:
        """Depth-first scan of a schema node; True on first unsupported key."""
        if not isinstance(node, dict):
            return False

        node_type = node.get("type")
        # Non-string "type" values (e.g. type lists) never match the table,
        # mirroring the membership checks this replaces.
        if isinstance(node_type, str):
            banned = unsupported_keys_by_type.get(node_type, ())
            if any(key in node for key in banned):
                return True

        # Recurse into nested schemas found in dict values and lists.
        for child in node.values():
            if isinstance(child, dict):
                if _walk(child):
                    return True
            elif isinstance(child, list):
                if any(isinstance(el, dict) and _walk(el) for el in child):
                    return True

        return False

    return _walk(schema)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def validate_xgrammar_grammar(sampling_params: SamplingParams) -> None:
    """Validate that the request is supported by structured output.

    May normalize ``sampling_params.guided_decoding`` in place: a ``choice``
    constraint is rewritten into an equivalent EBNF ``grammar``, and a Lark
    grammar is converted to EBNF (xgrammar consumes EBNF only).

    Raises:
        ValueError: If the request is not supported.
    """
    if sampling_params.guided_decoding is None:
        return

    gd_params = sampling_params.guided_decoding

    if gd_params.regex:
        try:
            xgr.Grammar.from_regex(gd_params.regex)
        except Exception as err:
            raise ValueError("Failed to transform regex into a grammar: "
                             f"{err}") from err

    if gd_params.choice:
        choice_grammar = choice_as_grammar(gd_params.choice)
        try:
            xgr.Grammar.from_ebnf(choice_grammar)
        except Exception as err:
            # BUGFIX: the message previously lacked the f-prefix, so it
            # printed the literal text "{err}" instead of the actual error.
            raise ValueError("Failed to transform choices into a grammar: "
                             f"{err}") from err
        # Replace the choice constraint with its grammar equivalent so the
        # backend only ever sees a grammar.
        gd_params.choice = None
        gd_params.grammar = choice_grammar
        return

    if gd_params.json:
        if isinstance(gd_params.json, str):
            try:
                schema = json.loads(gd_params.json)
            except json.JSONDecodeError as e:
                raise ValueError("Invalid JSON grammar specification.") from e
        else:
            schema = gd_params.json

        try:
            xgr.Grammar.from_json_schema(schema)
        except Exception as err:
            raise ValueError("Failed to transform json schema into a grammar: "
                             f"{err}") from err

        if has_xgrammar_unsupported_json_features(schema):
            raise ValueError("The provided JSON schema contains features not "
                             "supported by xgrammar.")
        return

    if gd_params.grammar:
        if grammar_is_likely_lark(gd_params.grammar):
            # xgrammar supports EBNF grammars only
            try:
                gd_params.grammar = convert_lark_to_ebnf(gd_params.grammar)
            except ValueError as e:
                raise ValueError(
                    "Failed to convert the grammar from Lark to EBNF. ") from e

        # Test parsing EBNF grammar, possibly already converted from Lark
        try:
            # parse the grammar, but we aren't compiling it.
            xgr.Grammar.from_ebnf(gd_params.grammar)
        except Exception as e:
            raise ValueError("Invalid grammar specification.") from e
        return

    if gd_params.structural_tag:
        try:
            s_tag = json.loads(gd_params.structural_tag)
            tags = [
                xgr.StructuralTagItem(
                    begin=s["begin"],
                    schema=json.dumps(s["schema"]),
                    end=s["end"],
                ) for s in s_tag["structures"]
            ]
            xgr.Grammar.from_structural_tag(tags, s_tag["triggers"])
        except Exception as e:
            raise ValueError("Invalid structural tag specification.") from e
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/request.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import dataclasses
|
| 6 |
+
import functools
|
| 7 |
+
import json
|
| 8 |
+
from concurrent.futures import Future
|
| 9 |
+
from concurrent.futures._base import TimeoutError
|
| 10 |
+
from typing import Optional, Union, cast
|
| 11 |
+
|
| 12 |
+
from vllm.sampling_params import SamplingParams
|
| 13 |
+
from vllm.v1.structured_output.backend_types import (StructuredOutputGrammar,
|
| 14 |
+
StructuredOutputKey,
|
| 15 |
+
StructuredOutputOptions)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclasses.dataclass
class StructuredOutputRequest:
    """Per-request structured-output state, holding the grammar (or the
    still-running compile Future that will produce it)."""

    sampling_params: SamplingParams
    # Either the resolved grammar or the Future produced by an async compile;
    # resolved lazily by `_check_grammar_completion`.
    _grammar: Optional[Union[Future[StructuredOutputGrammar],
                             StructuredOutputGrammar]] = None
    reasoning_ended: Optional[bool] = None

    def _check_grammar_completion(self) -> bool:
        """Poll the compile Future; return True once the grammar is resolved."""
        # NOTE: We have to lazy import to gate circular imports
        from vllm.v1.request import RequestStatus

        if isinstance(self._grammar, Future):
            try:
                # We will check whether the future is ready within 100 us
                self._grammar = self._grammar.result(timeout=0.0001)
                # NOTE(review): `status` is not declared on this dataclass;
                # presumably this object shares attributes with the owning
                # Request — confirm against call sites before relying on it.
                self.status = RequestStatus.WAITING
            except TimeoutError:
                return False
        return True

    @property
    def is_grammar_ready(self) -> bool:
        # Polling property: has the side effect of resolving the Future.
        return self._check_grammar_completion()

    @property
    def grammar(self) -> Optional[StructuredOutputGrammar]:
        # Returns None while compilation is still in flight.
        completed = self._check_grammar_completion()
        return cast(Optional[StructuredOutputGrammar],
                    self._grammar) if completed else None

    @grammar.setter
    def grammar(
        self, grammar: Union[StructuredOutputGrammar,
                             Future[StructuredOutputGrammar]]
    ) -> None:
        self._grammar = grammar

    @functools.cached_property
    def structured_output_key(self) -> StructuredOutputKey:
        # Cached: sampling_params is not expected to change after creation.
        return get_structured_output_key(self.sampling_params)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def get_structured_output_key(
        sampling_params: SamplingParams) -> StructuredOutputKey:
    """Derive the (option, spec-string) cache key for a guided request.

    Non-string JSON / choice payloads are serialized so the key is hashable.

    Raises:
        ValueError: If no guided-decoding constraint is set.
    """
    params = sampling_params.guided_decoding
    assert params is not None, "params can't be None."

    if params.json is not None:
        spec = params.json if isinstance(params.json,
                                         str) else json.dumps(params.json)
        return (StructuredOutputOptions.JSON, spec)

    if params.json_object:
        # The spec string is irrelevant for "any JSON object".
        return (StructuredOutputOptions.JSON_OBJECT, "")

    if params.regex is not None:
        return (StructuredOutputOptions.REGEX, params.regex)

    if params.choice is not None:
        spec = params.choice if isinstance(params.choice,
                                           str) else json.dumps(params.choice)
        return (StructuredOutputOptions.CHOICE, spec)

    if params.grammar is not None:
        return (StructuredOutputOptions.GRAMMAR, params.grammar)

    if params.structural_tag is not None:
        return (StructuredOutputOptions.STRUCTURAL_TAG, params.structural_tag)

    raise ValueError("No valid structured output parameter found")
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/structured_output/utils.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import hashlib
|
| 7 |
+
import importlib.metadata
|
| 8 |
+
import os
|
| 9 |
+
from typing import TYPE_CHECKING
|
| 10 |
+
|
| 11 |
+
import regex as re
|
| 12 |
+
from cachetools import LRUCache
|
| 13 |
+
from diskcache import Cache
|
| 14 |
+
|
| 15 |
+
import vllm.envs as envs
|
| 16 |
+
from vllm.logger import init_logger
|
| 17 |
+
from vllm.utils import LazyLoader
|
| 18 |
+
|
| 19 |
+
if TYPE_CHECKING:
|
| 20 |
+
import outlines_core as oc
|
| 21 |
+
import transformers.file_utils as file_utils
|
| 22 |
+
import transformers.models.gpt2.tokenization_gpt2 as tokenization_gpt2
|
| 23 |
+
|
| 24 |
+
from vllm.transformers_utils.tokenizer import AnyTokenizer
|
| 25 |
+
else:
|
| 26 |
+
oc = LazyLoader("oc", globals(), "outlines_core")
|
| 27 |
+
file_utils = LazyLoader("file_utils", globals(), "transformers.file_utils")
|
| 28 |
+
tokenization_gpt2 = LazyLoader(
|
| 29 |
+
"tokenization_gpt2",
|
| 30 |
+
globals(),
|
| 31 |
+
"transformers.models.gpt2.tokenization_gpt2",
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
logger = init_logger(__name__)
|
| 35 |
+
|
| 36 |
+
CACHE = None
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class OutlinesVocabulary:
    """
    Wrapper class for `outlines_core.Vocabulary`,
    which allows us to store a hash with the vocabulary
    """

    def __init__(self, vocabulary: oc.Vocabulary) -> None:
        # Keep a handle on the wrapped vocabulary object.
        self.inner = vocabulary
        # Python's builtin hash() can be negative (and is salted per
        # process), so derive a stable non-negative integer from the
        # SHA-256 of the repr instead.
        digest = hashlib.sha256(repr(vocabulary).encode("utf-8")).hexdigest()
        self._hash = int(digest, 16)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def get_outlines_cache_path() -> str:
    """Return the directory used to persist outlines' computed indices."""
    explicit_dir = os.getenv("OUTLINES_CACHE_DIR")
    if explicit_dir:
        # OUTLINES_CACHE_DIR always takes precedence.
        return explicit_dir

    xdg_cache_home = os.getenv("XDG_CACHE_HOME")
    if xdg_cache_home:
        return os.path.join(xdg_cache_home, ".cache", "outlines")

    home_dir = os.path.expanduser("~")
    # A home of "/" usually means we're inside a container, where writing
    # under root would be problematic; expanduser also doesn't guarantee
    # the path exists, so validate before using it.
    if os.path.isdir(home_dir) and home_dir != "/":
        # Default Unix fallback: ~/.cache/outlines
        return os.path.join(home_dir, ".cache", "outlines")

    import tempfile

    # Last resort (e.g. docker without a real user): the system temp dir.
    return os.path.join(tempfile.gettempdir(), ".cache", "outlines")
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def get_outlines_cache():
    """Get the Cache instance to be used for index caching"""

    cache_dir = get_outlines_cache_path()
    if envs.VLLM_V1_USE_OUTLINES_CACHE:
        logger.warning("Enabling outlines cache. This is an unbounded on-disk "
                       "cache. It may consume a lot of disk space and should "
                       "not be used with untrusted clients.")
        # Disk-backed cache with eviction disabled: it grows without bound.
        cache = Cache(cache_dir, eviction_policy="none", cull_limit=0)
        outlines_version = importlib.metadata.version("outlines_core")

        # Invalidate entries written by a different outlines_core version.
        cached_version = cache.get('__version__', None)
        if cached_version != outlines_version:
            cache.clear()
        cache.set('__version__', outlines_version)
        return cache
    else:
        # Default: small in-memory LRU; nothing persisted to disk.
        return LRUCache(maxsize=128)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
re_llama_byte_token = re.compile(r"^<0x[0-9A-F]{2}>$")
|
| 103 |
+
re_replacement_seq = re.compile(r"^.{0,6}�+.{0,6}$")
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def _reduced_vocabulary(
    tokenizer: AnyTokenizer,
    eos_token_id: int,
) -> dict[bytes, list[int]]:
    """Create a map from vocabulary tokens to lists of equivalent token ids.

    Special tokens are skipped, and the EOS token id is excluded from the
    returned mapping.

    Returns:
        A Dict of token string -> equivalent token ids
    """

    # Inverse of GPT-2's byte->unicode table: printable char -> raw byte.
    unicode_to_bytes = {
        v: k
        for k, v in tokenization_gpt2.bytes_to_unicode().items()
    }

    def convert_token_to_string(token: str) -> str:
        # Render a single token as the text it contributes when decoded.
        string = tokenizer.convert_tokens_to_string([token])

        # A hack to handle missing spaces to HF's Llama tokenizers
        if (type(token) is str
                and token.startswith(file_utils.SPIECE_UNDERLINE)
                or token == "<0x20>"):
            return " " + string

        return string

    vocabulary: dict[bytes, list[int]] = {}
    # NOTE(review): `empty_token_ids` is collected but never returned or
    # read — presumably leftover from an earlier return signature; confirm
    # before removing.
    empty_token_ids: list[int] = []
    for token, token_idx in tokenizer.get_vocab().items():
        if token in tokenizer.all_special_tokens:  # type: ignore
            continue

        token_str = convert_token_to_string(token)
        if token_str:
            if isinstance(token, (bytes, bytearray)):
                # For BPE tokenizers where tokens are stored as bytes.

                # safe to ignore since token_str is of type (bytearray, bytes)
                # by this point.
                token_bytes = bytes(token_str)  # type: ignore[arg-type]

            elif "\ufffd" in token_str and not re_replacement_seq.match(
                    token_str):
                # Handle tokens with invalid UTF-8 sequences.
                if re_llama_byte_token.match(token):
                    # Llama-like tokenizers use <0xXX> for incomplete sequences.
                    token_bytes = bytes([int(token[3:5], 16)])
                else:
                    # GPT2 tokenizers: map each byte back using unicode_to_bytes
                    byte_vals = [unicode_to_bytes.get(c) for c in token]
                    if None in byte_vals:
                        raise RuntimeError(
                            f"Cannot convert token `{token}`"
                            f" ({token_idx}) to bytes: {token_str}")
                    # safe to ignore, since if None in byte_vals,
                    # an error is thrown.
                    token_bytes = bytes(byte_vals)  # type: ignore[arg-type]
            else:
                token_bytes = token_str.encode('utf-8')

            if token_idx != eos_token_id:
                # Multiple token ids can decode to identical bytes.
                vocabulary.setdefault(token_bytes, []).append(token_idx)
            else:
                empty_token_ids.append(token_idx)

    return vocabulary
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def get_outlines_vocabulary(tokenizer: AnyTokenizer) -> oc.Vocabulary:
    """Get the `Vocabulary` object for a given tokenizer.

    The result is memoized on the tokenizer instance itself, so repeated
    calls with the same tokenizer are cheap.

    Note: the annotation says ``oc.Vocabulary`` but the function returns the
    ``OutlinesVocabulary`` wrapper around it.
    """
    if hasattr(tokenizer, "_outlines_vocabulary"):
        return tokenizer._outlines_vocabulary  # type: ignore

    try:
        if hasattr(
                tokenizer,
                "eos_token_id",
        ) and tokenizer.eos_token_id is not None:
            eos_token_id = tokenizer.eos_token_id
        else:
            raise ValueError(
                f"Error during structured outputs setup for outlines: Tokenizer ({type(tokenizer)}) has no `eos_token_id` property, but `eos_token_id` is required for structured outputs to work properly."  # noqa: E501
            )

        reduced_vocab = _reduced_vocabulary(
            tokenizer,
            eos_token_id  #type: ignore
        )
        vocabulary = OutlinesVocabulary(
            oc.Vocabulary(eos_token_id, reduced_vocab))
        # Cache for subsequent calls with the same tokenizer instance.
        tokenizer._outlines_vocabulary = vocabulary  # type: ignore

        return vocabulary
    except AttributeError as e:
        # e.g. tokenizer.get_vocab missing inside _reduced_vocabulary.
        raise ValueError(f"Cannot get the vocabulary of the tokenizer "
                         f"({type(tokenizer)}). The tokenizer should have a "
                         "get_vocab method.") from e
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def grammar_is_likely_lark(grammar_str: str) -> bool:
    """
    Check if grammar appears to use Lark syntax.

    Args:
        grammar_str: Input grammar string

    Returns:
        bool: True if grammar appears to be in Lark format, False otherwise

    Examples:
        >>> grammar_is_likely_lark("rule: 'abc'")
        True
        >>> grammar_is_likely_lark("rule ::= 'abc'")
        False
    """
    if not grammar_str or not isinstance(grammar_str, str):
        return False

    # EBNF rules are written with `::=` while Lark uses a bare `:`.
    # Strip both comment styles first so a commented-out `::=` doesn't
    # count, then flag the grammar as EBNF if any remaining non-empty
    # line defines a rule that way.
    meaningful_lines = (re.sub(r'(#|//).*$', '', raw).strip()
                        for raw in grammar_str.split('\n'))
    return not any('::=' in line for line in meaningful_lines if line)
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def convert_lark_to_ebnf(grammar_str: str) -> str:
    """
    Convert a Lark grammar string to EBNF format.

    EBNF reference:
    https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md
    Lark grammar reference:
    https://lark-parser.readthedocs.io/en/latest/grammar.html

    Args:
        grammar_str: Input grammar in Lark format

    Returns:
        str: Converted grammar in EBNF format

    Examples:
        >>> print(convert_lark_to_ebnf("rule: 'hello'"))
        root ::= rule
        rule ::= "hello"
    """
    if not isinstance(grammar_str, str):
        raise ValueError(f"Grammar must be a string, got {type(grammar_str)}")
    if not grammar_str.strip():
        raise ValueError("Grammar string cannot be empty")

    defined_rules = set()
    referenced_rules = set()
    output_lines = []

    def clean_line(line: str) -> str:
        """Remove comments and whitespace from line."""
        return re.sub(r'(#|//).*$', '', line).strip()

    def check_quotes(text: str, rule_name: str, line_num: int) -> None:
        """Validate quote matching in text."""
        # An odd count of either quote char means an unterminated literal.
        if text.count("'") % 2 != 0 or text.count('"') % 2 != 0:
            raise ValueError(
                f"Mismatched quotes in {rule_name} on line {line_num}")

    def extract_references(text: str) -> set[str]:
        """Extract rule references from text."""
        # Remove quoted strings and special characters
        text = re.sub(r'"[^"]*"', '', text)
        text = re.sub(r'[+*?()|\[\]{}]', ' ', text)
        return set(re.findall(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b', text))

    # First pass: Find root rule and validate rule definitions
    lines = [clean_line(line) for line in grammar_str.split('\n')]
    first_rule = None

    for line_num, line in enumerate(lines, 1):
        if not line or line.startswith('|'):
            continue

        if ':' in line:
            try:
                # Leading '?' (Lark's inline-rule marker) is stripped.
                name = line.split(':', 1)[0].strip().strip('?')
                defined_rules.add(name)
                if first_rule is None:
                    first_rule = name
                # An explicit 'start' rule overrides the first-seen rule.
                if name == 'start':
                    first_rule = 'start'
            except IndexError as e:
                raise ValueError(f"Invalid rule format on line {line_num}. "
                                 "Expected 'rule_name: definition'") from e

    if not defined_rules:
        raise ValueError("No valid rules found in grammar")

    # Add root rule
    output_lines.append(f"root ::= {first_rule}")

    # Second pass: Process rule definitions and alternatives
    current_rule = None
    current_definition = []

    for line_num, line in enumerate(lines, 1):
        if not line:
            continue

        try:
            if ':' in line and not line.startswith('|'):
                # Save previous rule if exists
                if current_rule:
                    output_lines.append(
                        f"{current_rule} ::= {' | '.join(current_definition)}")

                # Process new rule
                name, definition = line.split(':', 1)
                current_rule = name.strip().strip('?')

                check_quotes(definition, f"rule '{current_rule}'", line_num)
                # Normalize Lark's single-quoted terminals to EBNF's
                # double-quoted form.
                definition = re.sub(r"'([^']*)'", r'"\1"', definition)
                referenced_rules.update(extract_references(definition))
                current_definition = [definition.strip()]

            elif line.startswith('|'):
                # Continuation line: another alternative for the current rule.
                if not current_rule:
                    raise ValueError(f"Alternative '|' on line {line_num} "
                                     "without a preceding rule definition")

                alt_def = line[1:].strip()
                check_quotes(alt_def, f"alternative for rule '{current_rule}'",
                             line_num)
                alt_def = re.sub(r"'([^']*)'", r'"\1"', alt_def)
                referenced_rules.update(extract_references(alt_def))
                current_definition.append(alt_def)

        except ValueError as e:
            raise ValueError(f"Error on line {line_num}: {str(e)}") from e

    # Add final rule if exists
    if current_rule:
        output_lines.append(
            f"{current_rule} ::= {' | '.join(current_definition)}")

    # Validate all rules are defined
    undefined_rules = referenced_rules - defined_rules - {'root'}
    if undefined_rules:
        raise ValueError("Referenced rules are not defined: "
                         f"{', '.join(sorted(undefined_rules))}")

    return '\n'.join(output_lines)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def choice_as_grammar(choice: list[str]) -> str:
    """Build an EBNF grammar whose root matches exactly one of `choice`."""

    def _escape(text: str) -> str:
        """Escape special characters in a EBNF string."""
        # Backslash-escape double quotes and backslashes so each option can
        # be embedded inside a double-quoted EBNF terminal.
        return re.sub(r'(["\\])', r'\\\1', text)

    alternatives = ' | '.join(f'"{_escape(option)}"' for option in choice)
    return 'root ::= ' + alternatives
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/block_table.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from vllm.logger import init_logger
|
| 8 |
+
from vllm.utils import cdiv
|
| 9 |
+
|
| 10 |
+
logger = init_logger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class BlockTable:
|
| 14 |
+
|
| 15 |
+
    def __init__(
        self,
        block_size: int,
        max_num_reqs: int,
        max_num_blocks_per_req: int,
        max_num_batched_tokens: int,
        pin_memory: bool,
        device: torch.device,
    ):
        """Fixed-capacity request -> KV-cache-block mapping.

        Args:
            block_size: Number of token slots per KV-cache block.
            max_num_reqs: Max concurrent requests (rows in the table).
            max_num_blocks_per_req: Max blocks a single request may hold
                (columns in the table).
            max_num_batched_tokens: Max tokens per step; sizes the
                slot-mapping buffers.
            pin_memory: Whether the CPU tensors are pinned for faster
                host-to-device copies.
            device: Target device for the device-side tensors.
        """
        self.block_size = block_size
        self.max_num_reqs = max_num_reqs
        self.max_num_blocks_per_req = max_num_blocks_per_req
        self.max_num_batched_tokens = max_num_batched_tokens
        self.pin_memory = pin_memory
        self.device = device

        # Device-side copy of the table.
        self.block_table = torch.zeros(
            (max_num_reqs, max_num_blocks_per_req),
            device=self.device,
            dtype=torch.int32,
        )
        # Host-side staging copy, updated by the append/add/move/swap
        # methods below.
        self.block_table_cpu = torch.zeros(
            (max_num_reqs, max_num_blocks_per_req),
            device="cpu",
            dtype=torch.int32,
            pin_memory=pin_memory,
        )
        # NumPy view sharing storage with block_table_cpu.
        self.block_table_np = self.block_table_cpu.numpy()
        # Number of valid (filled) blocks in each row.
        self.num_blocks_per_row = np.zeros(max_num_reqs, dtype=np.int32)

        # Per-token slot indices (CPU staging + NumPy view + device copy).
        self.slot_mapping_cpu = torch.zeros(self.max_num_batched_tokens,
                                            dtype=torch.int64,
                                            device="cpu",
                                            pin_memory=self.pin_memory)
        self.slot_mapping_np = self.slot_mapping_cpu.numpy()
        self.slot_mapping = torch.zeros(self.max_num_batched_tokens,
                                        dtype=torch.int64,
                                        device=self.device)
|
| 53 |
+
|
| 54 |
+
def append_row(
|
| 55 |
+
self,
|
| 56 |
+
block_ids: list[int],
|
| 57 |
+
row_idx: int,
|
| 58 |
+
) -> None:
|
| 59 |
+
if not block_ids:
|
| 60 |
+
return
|
| 61 |
+
num_blocks = len(block_ids)
|
| 62 |
+
start = self.num_blocks_per_row[row_idx]
|
| 63 |
+
self.num_blocks_per_row[row_idx] += num_blocks
|
| 64 |
+
self.block_table_np[row_idx, start:start + num_blocks] = block_ids
|
| 65 |
+
|
| 66 |
+
def add_row(self, block_ids: list[int], row_idx: int) -> None:
|
| 67 |
+
self.num_blocks_per_row[row_idx] = 0
|
| 68 |
+
self.append_row(block_ids, row_idx)
|
| 69 |
+
|
| 70 |
+
def move_row(self, src: int, tgt: int) -> None:
|
| 71 |
+
num_blocks = self.num_blocks_per_row[src]
|
| 72 |
+
self.block_table_np[tgt, :num_blocks] = self.block_table_np[
|
| 73 |
+
src, :num_blocks]
|
| 74 |
+
self.num_blocks_per_row[tgt] = num_blocks
|
| 75 |
+
|
| 76 |
+
def swap_row(self, src: int, tgt: int) -> None:
|
| 77 |
+
num_blocks_src = self.num_blocks_per_row[src]
|
| 78 |
+
num_blocks_tgt = self.num_blocks_per_row[tgt]
|
| 79 |
+
self.num_blocks_per_row[src] = num_blocks_tgt
|
| 80 |
+
self.num_blocks_per_row[tgt] = num_blocks_src
|
| 81 |
+
|
| 82 |
+
self.block_table_np[[src, tgt]] = self.block_table_np[[tgt, src]]
|
| 83 |
+
|
| 84 |
+
def compute_slot_mapping(self, req_indices: np.ndarray,
|
| 85 |
+
positions: np.ndarray) -> None:
|
| 86 |
+
# E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
|
| 87 |
+
# -> [0, 0, K, K, K + 1, K + 1, K + 2, 2 * K, 2 * K, 2 * K + 1]
|
| 88 |
+
# where K is the max_num_blocks_per_req and the block size is 2.
|
| 89 |
+
# NOTE(woosuk): We can't simply use `token_indices // block_size`
|
| 90 |
+
# here because M (max_model_len) is not necessarily divisible by
|
| 91 |
+
# block_size.
|
| 92 |
+
block_table_indices = (req_indices * self.max_num_blocks_per_req +
|
| 93 |
+
positions // self.block_size)
|
| 94 |
+
block_numbers = self.block_table_np.ravel()[block_table_indices]
|
| 95 |
+
block_offsets = positions % self.block_size
|
| 96 |
+
np.add(block_numbers * self.block_size,
|
| 97 |
+
block_offsets,
|
| 98 |
+
out=self.slot_mapping_np[:req_indices.shape[0]])
|
| 99 |
+
|
| 100 |
+
def commit_block_table(self, num_reqs: int) -> None:
|
| 101 |
+
self.block_table[:num_reqs].copy_(self.block_table_cpu[:num_reqs],
|
| 102 |
+
non_blocking=True)
|
| 103 |
+
|
| 104 |
+
def commit_slot_mapping(self, num_tokens: int) -> None:
|
| 105 |
+
self.slot_mapping[:num_tokens].copy_(
|
| 106 |
+
self.slot_mapping_cpu[:num_tokens], non_blocking=True)
|
| 107 |
+
|
| 108 |
+
def clear(self) -> None:
|
| 109 |
+
self.block_table.fill_(0)
|
| 110 |
+
self.block_table_cpu.fill_(0)
|
| 111 |
+
|
| 112 |
+
def get_device_tensor(self) -> torch.Tensor:
|
| 113 |
+
"""Ruturns the device tensor of the block table."""
|
| 114 |
+
return self.block_table
|
| 115 |
+
|
| 116 |
+
def get_cpu_tensor(self) -> torch.Tensor:
|
| 117 |
+
"""Returns the CPU tensor of the block table."""
|
| 118 |
+
return self.block_table_cpu
|
| 119 |
+
|
| 120 |
+
def get_numpy_array(self) -> np.ndarray:
|
| 121 |
+
"""Returns the numpy array of the block table."""
|
| 122 |
+
return self.block_table_np
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class MultiGroupBlockTable:
    """A collection of per-KV-cache-group :class:`BlockTable` objects.

    Every mutation/commit operation is fanned out to each group's table.
    """

    def __init__(self, max_num_reqs: int, max_model_len: int,
                 max_num_batched_tokens: int, pin_memory: bool,
                 device: torch.device, block_sizes: list[int]) -> None:
        # One BlockTable per KV cache group; groups may use different block
        # sizes, so the per-request block capacity is derived per group.
        tables = []
        for group_block_size in block_sizes:
            blocks_per_req = cdiv(max_model_len, group_block_size)
            tables.append(
                BlockTable(group_block_size, max_num_reqs, blocks_per_req,
                           max_num_batched_tokens, pin_memory, device))
        self.block_tables = tables

    def append_row(self, block_ids: tuple[list[int], ...],
                   row_idx: int) -> None:
        """Append each group's new block IDs to row ``row_idx``."""
        for table, group_block_ids in zip(self.block_tables, block_ids):
            table.append_row(group_block_ids, row_idx)

    def add_row(self, block_ids: tuple[list[int], ...], row_idx: int) -> None:
        """Overwrite row ``row_idx`` of each group's table."""
        for table, group_block_ids in zip(self.block_tables, block_ids):
            table.add_row(group_block_ids, row_idx)

    def move_row(self, src: int, tgt: int) -> None:
        """Copy row ``src`` onto row ``tgt`` in every group's table."""
        for table in self.block_tables:
            table.move_row(src, tgt)

    def swap_row(self, src: int, tgt: int) -> None:
        """Swap rows ``src`` and ``tgt`` in every group's table."""
        for table in self.block_tables:
            table.swap_row(src, tgt)

    def compute_slot_mapping(self, req_indices: np.ndarray,
                             positions: np.ndarray) -> None:
        """Recompute the slot mapping in every group's table."""
        for table in self.block_tables:
            table.compute_slot_mapping(req_indices, positions)

    def commit_block_table(self, num_reqs: int) -> None:
        """Push the first ``num_reqs`` rows of every table to the device."""
        for table in self.block_tables:
            table.commit_block_table(num_reqs)

    def commit_slot_mapping(self, num_tokens: int) -> None:
        """Push the first ``num_tokens`` slot entries of every table."""
        for table in self.block_tables:
            table.commit_slot_mapping(num_tokens)

    def clear(self) -> None:
        """Zero out every group's block table."""
        for table in self.block_tables:
            table.clear()

    def __getitem__(self, idx: int) -> "BlockTable":
        """Returns the BlockTable for the i-th KV cache group."""
        return self.block_tables[idx]
|
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/cpu_model_runner.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
from typing import TYPE_CHECKING, Any
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
|
| 9 |
+
from vllm.config import VllmConfig
|
| 10 |
+
from vllm.logger import init_logger
|
| 11 |
+
from vllm.model_executor.model_loader import get_model
|
| 12 |
+
from vllm.v1.attention.backends.cpu_attn import TorchSDPAMetadataBuilderV1
|
| 13 |
+
from vllm.v1.worker.gpu_model_runner import GPUModelRunner
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from vllm.v1.core.sched.output import SchedulerOutput
|
| 17 |
+
|
| 18 |
+
logger = init_logger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class CPUModelRunner(GPUModelRunner):
    """Model runner for CPU devices, reusing the GPU runner's logic.

    After construction, all ``*_cpu`` staging tensors replace their "device"
    counterparts so that GPU code paths operate directly on host memory
    without extra copies.
    """

    def __init__(self, vllm_config: VllmConfig, device: torch.device):
        super().__init__(vllm_config, device)

        assert device == torch.device("cpu")
        assert self.speculative_config is None, "spec decode is not supported."

        # CUDA graphs and cascade attention are GPU-only features.
        self.use_cuda_graph = False
        self.cascade_attn_enabled = False

        self._postprocess_tensors()

    def _may_reorder_batch(self, scheduler_output: "SchedulerOutput") -> None:
        """
        Update the order of requests in the batch based on the attention
        backend's needs. For example, some attention backends (namely MLA) may
        want to separate requests based on if the attention computation will be
        compute-bound or memory-bound.

        Args:
            scheduler_output: The scheduler output.
        """
        # Attention free models have zero kv_cache_groups, however models
        # like Mamba are also attention free but use the kv_cache for
        # keeping its internal state. This is why we check the number
        # of kv_cache groups instead of solely checking
        # for self.model_config.is_attention_free.
        if len(self.kv_cache_config.kv_cache_groups) == 0:
            return

        if len(self.kv_cache_config.kv_cache_groups) > 1:
            # Fixed: the two string fragments previously concatenated
            # without a separating space ("notcurrently").
            raise ValueError("Multiple KVCacheGroups is not "
                             "currently supported with CPU model runner.")

        assert type(self.attn_groups[0]
                    [0].metadata_builder) is TorchSDPAMetadataBuilderV1

        self.attn_groups[0][0].metadata_builder.reorder_batch(
            self.input_batch, scheduler_output)

    def _postprocess_tensors(self) -> None:
        # Note: replace device tensors with cpu tensors
        def replace_tensor(obj: Any, cpu_attr_name: str,
                           device_attr_name: str) -> None:
            # Point the "device" attribute at the CPU tensor so both names
            # alias the same host memory.
            cpu_tensor = getattr(obj, cpu_attr_name, None)
            device_tensor = getattr(obj, device_attr_name, None)
            if cpu_tensor is not None and device_tensor is not None:
                assert isinstance(cpu_tensor, torch.Tensor)
                assert isinstance(device_tensor, torch.Tensor)
                setattr(obj, device_attr_name, cpu_tensor)

        # Runner attributes follow the "<name>_cpu" / "<name>" convention.
        for k, v in vars(self).items():
            if k.endswith("_cpu") and isinstance(v, torch.Tensor):
                replace_tensor(self, k, k[:-4])

        # Input batch uses "<name>_cpu_tensor" / "<name>" instead.
        for k, v in vars(self.input_batch).items():
            if k.endswith("_cpu_tensor") and isinstance(v, torch.Tensor):
                replace_tensor(self.input_batch, k, k[:-11])

        for block_table in self.input_batch.block_table.block_tables:
            for k, v in vars(block_table).items():
                if k.endswith("_cpu") and isinstance(v, torch.Tensor):
                    replace_tensor(block_table, k, k[:-4])

    def load_model(self, eep_scale_up: bool = False) -> None:
        """Load the model (and LoRA adapters, if configured) onto the CPU."""
        logger.info("Starting to load model %s...", self.model_config.model)
        self.model = get_model(vllm_config=self.vllm_config)

        if self.lora_config:
            self.model = self.load_lora_model(self.model, self.model_config,
                                              self.scheduler_config,
                                              self.lora_config, self.device)

    def get_model(self) -> nn.Module:
        """Return the loaded model module."""
        return self.model

    def warming_up_model(self) -> None:
        """Warm up the model by running the generic-shape compilation once."""
        logger.info("Warming up model for the compilation...")
        # Only generate graph for the generic shape
        with _set_global_compilation_settings(self.vllm_config):
            self._dummy_run(max(16, self.max_num_reqs))
        logger.info("Warming up done.")

    def _init_device_properties(self) -> None:
        # No device properties to query on CPU.
        pass

    def _sync_device(self) -> None:
        # CPU execution is synchronous; nothing to wait for.
        pass
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
@contextmanager
def _set_global_compilation_settings(config: VllmConfig):
    """Temporarily tweak global inductor settings for compilation.

    Enables parameter freezing when ``max_autotune`` is requested, and
    restores the previous value on exit.
    """
    import torch._inductor.config as inductor_global_config

    compile_config = config.compilation_config.inductor_compile_config
    try:
        # Note: The MKLDNN and CPPGEMM backend requires freezing parameters.
        saved_freezing = inductor_global_config.freezing
        if compile_config.get("max_autotune", False):
            inductor_global_config.freezing = True
        yield
    finally:
        inductor_global_config.freezing = saved_freezing
|