ADAPT-Chase committed on
Commit
8b6682c
·
verified ·
1 Parent(s): 32b73de

Add files using upload-large-folder tool

Browse files
Files changed (20) hide show
  1. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__pycache__/__init__.cpython-312.pyc +0 -0
  2. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__pycache__/metadata.cpython-312.pyc +0 -0
  3. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__pycache__/rejection_sampler.cpython-312.pyc +0 -0
  4. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__pycache__/sampler.cpython-312.pyc +0 -0
  5. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__pycache__/__init__.cpython-312.pyc +0 -0
  6. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__pycache__/builtin.cpython-312.pyc +0 -0
  7. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__pycache__/interface.cpython-312.pyc +0 -0
  8. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__pycache__/state.cpython-312.pyc +0 -0
  9. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/__init__.cpython-312.pyc +0 -0
  10. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/bad_words.cpython-312.pyc +0 -0
  11. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/logprobs.cpython-312.pyc +0 -0
  12. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/penalties.cpython-312.pyc +0 -0
  13. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/topk_topp_sampler.cpython-312.pyc +0 -0
  14. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/__pycache__/__init__.cpython-312.pyc +0 -0
  15. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/__pycache__/metadata.cpython-312.pyc +0 -0
  16. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/__pycache__/sampler.cpython-312.pyc +0 -0
  17. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/metadata.py +124 -0
  18. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/sampler.py +146 -0
  19. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/__pycache__/__init__.cpython-312.pyc +0 -0
  20. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/__pycache__/eagle.cpython-312.pyc +0 -0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (184 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__pycache__/metadata.cpython-312.pyc ADDED
Binary file (1.52 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__pycache__/rejection_sampler.cpython-312.pyc ADDED
Binary file (20.8 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/__pycache__/sampler.cpython-312.pyc ADDED
Binary file (8.76 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (7.19 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__pycache__/builtin.cpython-312.pyc ADDED
Binary file (13.3 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__pycache__/interface.cpython-312.pyc ADDED
Binary file (3.13 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/logits_processor/__pycache__/state.cpython-312.pyc ADDED
Binary file (7 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (188 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/bad_words.cpython-312.pyc ADDED
Binary file (1.5 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/logprobs.cpython-312.pyc ADDED
Binary file (1.33 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/penalties.cpython-312.pyc ADDED
Binary file (1.81 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/ops/__pycache__/topk_topp_sampler.cpython-312.pyc ADDED
Binary file (12.9 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (188 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/__pycache__/metadata.cpython-312.pyc ADDED
Binary file (5.16 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/__pycache__/sampler.cpython-312.pyc ADDED
Binary file (6.05 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/metadata.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ from dataclasses import dataclass, field
4
+ from typing import Optional
5
+
6
+ import torch
7
+
8
+ from vllm.v1.worker.tpu_input_batch import InputBatch
9
+
10
# Fill values written into the padded tail of each per-request sampling
# tensor (see `fill_slice` in TPUSupportedSamplingMetadata.from_input_batch:
# "Pad value is the default one.").
DEFAULT_SAMPLING_PARAMS = {
    "temperature": -1.0,
    "min_p": 0.0,
    # strictly disabled for now
    "top_k": 0,
    "top_p": 1.0,
    # "frequency_penalties": 0.0,
    # "presence_penalties": 0.0,
    # "repetition_penalties": 0.0,
}
20
+
21
+
22
@dataclass
class TPUSupportedSamplingMetadata:
    # This class exposes a more xla-friendly interface than SamplingMetadata
    # on TPU, in particular all arguments should be traceable and no optionals
    # are allowed, to avoid graph recompilation on Nones.
    #
    # NOTE(review): tensor fields are annotated `torch.Tensor` but default to
    # None; the None defaults are only used on the all-greedy early-return
    # path of `from_input_batch`, where no sampling tensors are traced.
    temperature: torch.Tensor = None

    min_p: torch.Tensor = None
    top_k: torch.Tensor = None
    top_p: torch.Tensor = None

    # True when every request in the batch uses greedy decoding; lets callers
    # skip the random-sampling tensors entirely.
    all_greedy: bool = True

    # Whether logprobs are to be gathered in this batch of request. To balance
    # out compile time and runtime, a fixed `max_number_logprobs` value is used
    # when gathering logprobs, regardless of the values specified in the batch.
    logprobs: bool = False

    # TODO No penalties for now
    no_penalties: bool = True
    # The four unannotated assignments below are plain class attributes, not
    # dataclass fields: no __init__ parameters are generated for them and they
    # are shared across instances (always None while penalties are disabled).
    prompt_token_ids = None
    frequency_penalties = None
    presence_penalties = None
    repetition_penalties = None
    # should use tensor
    output_token_ids: list[list[int]] = field(default_factory=lambda: list())

    min_tokens = None  # impl is not vectorized

    logit_bias: list[Optional[dict[int, float]]] = field(
        default_factory=lambda: list())

    allowed_token_ids_mask = None
    bad_words_token_ids = None

    # Generator not supported by xla
    _generators: dict[int,
                      torch.Generator] = field(default_factory=lambda: dict())

    @property
    def generators(self) -> dict[int, torch.Generator]:
        # Generator not supported by torch/xla. This field must be immutable.
        return self._generators

    @classmethod
    def from_input_batch(
        cls,
        input_batch: InputBatch,
        padded_num_reqs: int,
        xla_device: torch.device,
        generate_params_if_all_greedy: bool = False
    ) -> "TPUSupportedSamplingMetadata":
        """
        Copy sampling tensors slices from `input_batch` to on device tensors.

        `InputBatch._make_sampling_metadata` causes recompilation on XLA as it
        slices dynamic shapes on device tensors. This impl moves the dynamic
        ops to CPU and produces tensors of fixed `padded_num_reqs` size.

        Args:
            input_batch: The input batch containing sampling parameters.
            padded_num_reqs: The padded number of requests.
            xla_device: The XLA device.
            generate_params_if_all_greedy: If True, generate sampling parameters
                even if all requests are greedy. this is useful for cases where
                we want to pre-compile a graph with sampling parameters, even if
                they are not strictly needed for greedy decoding.
        """
        # max_num_logprobs may be None (falsy) — treat that as "no logprobs".
        needs_logprobs = input_batch.max_num_logprobs>0 if \
            input_batch.max_num_logprobs else False
        # Early return to avoid unnecessary cpu to tpu copy
        if (input_batch.all_greedy is True
                and generate_params_if_all_greedy is False):
            return cls(all_greedy=True, logprobs=needs_logprobs)

        num_reqs = input_batch.num_reqs

        # Mutates `cpu_tensor` in place; intentionally returns nothing.
        def fill_slice(cpu_tensor: torch.Tensor, fill_val) -> None:
            # Pad value is the default one.
            cpu_tensor[num_reqs:padded_num_reqs] = fill_val

        # Pad the tail [num_reqs, padded_num_reqs) of each CPU tensor with the
        # neutral default BEFORE slicing/copying to device below — the padded
        # entries correspond to non-existent requests.
        fill_slice(input_batch.temperature_cpu_tensor,
                   DEFAULT_SAMPLING_PARAMS["temperature"])
        fill_slice(input_batch.min_p_cpu_tensor,
                   DEFAULT_SAMPLING_PARAMS["min_p"])
        fill_slice(input_batch.top_k_cpu_tensor,
                   DEFAULT_SAMPLING_PARAMS["top_k"])
        fill_slice(input_batch.top_p_cpu_tensor,
                   DEFAULT_SAMPLING_PARAMS["top_p"])

        # Slice persistent device tensors to a fixed pre-compiled padded shape.
        return cls(
            temperature=input_batch.temperature_cpu_tensor[:padded_num_reqs].
            to(xla_device),
            all_greedy=input_batch.all_greedy,
            # TODO enable more and avoid returning None values
            top_p=input_batch.top_p_cpu_tensor[:padded_num_reqs].to(
                xla_device),
            top_k=input_batch.top_k_cpu_tensor[:padded_num_reqs].to(
                xla_device),
            min_p=input_batch.min_p_cpu_tensor[:padded_num_reqs].to(
                xla_device),
            logprobs=needs_logprobs)
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/sample/tpu/sampler.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """Sampler layer implementing TPU supported operations."""
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from vllm.v1.outputs import LogprobsTensors, SamplerOutput
9
+ from vllm.v1.sample.ops.topk_topp_sampler import TopKTopPSampler
10
+ from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata
11
+
12
+ _SAMPLING_EPS = 1e-5
13
+
14
+
15
class Sampler(nn.Module):
    """Sampler layer restricted to TPU/XLA-supported operations.

    Both the greedy and the random token are computed every step; the final
    choice per request is made with `torch.where` on the temperature, keeping
    the traced graph structure fixed regardless of the batch's mix of
    greedy/random requests.
    """

    def __init__(self):
        # TODO(houseroad): Add support for logprobs_mode.
        super().__init__()
        self.topk_topp_sampler = TopKTopPSampler()

    def forward(
        self,
        logits: torch.Tensor,
        sampling_metadata: TPUSupportedSamplingMetadata,
    ) -> SamplerOutput:
        # Work on float32 logits, then sample the next token.
        next_tokens = self.sample(logits.to(torch.float32), sampling_metadata)
        # These are TPU tensors. The sampled tokens are expanded to a 2D
        # tensor of shape [num_requests, 1]: one generated token per request.
        return SamplerOutput(sampled_token_ids=next_tokens.unsqueeze(-1),
                             logprobs_tensors=None)

    def apply_temperature(
        self,
        logits: torch.Tensor,
        temp: torch.Tensor,
    ) -> torch.Tensor:
        # In-place divide; `temp` is broadcast across the vocab dimension.
        return logits.div_(temp.unsqueeze(dim=1))

    def greedy_sample(self, logits: torch.Tensor) -> torch.Tensor:
        # Highest-logit token per row, flattened to a 1D tensor.
        return torch.argmax(logits, dim=-1).view(-1)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: TPUSupportedSamplingMetadata,
    ) -> torch.Tensor:
        # Greedy path is computed unconditionally (see class docstring).
        argmax_tokens = self.greedy_sample(logits)

        temperature = sampling_metadata.temperature
        assert temperature is not None

        # Scale logits by temperature (in place).
        scaled = self.apply_temperature(logits, temperature)

        # Optionally filter with min_p before top-k/top-p sampling.
        if sampling_metadata.min_p is not None:
            scaled = self.apply_min_p(scaled, sampling_metadata.min_p)

        # Apply top_k and/or top_p and draw the random sample.
        random_tokens = self.topk_topp_sampler(
            scaled,
            sampling_metadata.generators,
            sampling_metadata.top_k,
            sampling_metadata.top_p,
        )

        # Requests with (near-)zero temperature take the greedy token.
        return torch.where(temperature < _SAMPLING_EPS,
                           argmax_tokens, random_tokens)

    def compute_logprobs(self, logits: torch.Tensor) -> torch.Tensor:
        # Log-softmax over the vocab, accumulated in float32.
        return torch.log_softmax(logits, dim=-1, dtype=torch.float32)

    def gather_logprobs(
        self,
        logprobs: torch.Tensor,
        num_logprobs: int,
        token_ids: torch.Tensor,
    ) -> LogprobsTensors:
        """
        Gather logprobs for topk and sampled/prompt token.

        Args:
            logprobs: (num tokens) x (vocab) tensor of log-probabilities
            num_logprobs: minimum number of logprobs to
                          retain per token
            token_ids: prompt tokens (if prompt logprobs)
                       or sampled tokens (if sampled
                       logprobs); 1D token ID tensor
                       with (num tokens) elements

        Returns:
            Top-k int indices tensor, (num tokens) x (num_logprobs + 1)
            Top-k float logprobs tensor, (num tokens) x (num_logprobs + 1)
            Sampled token rank tensor, (num tokens)
        """
        # Top-k values and their vocab indices.
        top_vals, top_idx = logprobs.topk(num_logprobs, dim=-1)

        # Logprob of the prompt/sampled token itself.
        chosen = token_ids.unsqueeze(-1)
        chosen_logprobs = logprobs.gather(-1, chosen)

        # Rank = number of tokens whose logprob is >= the chosen token's.
        chosen_ranks = (logprobs >= chosen_logprobs).sum(-1)

        # Prepend the chosen token to the top-k columns.
        all_logprobs = torch.cat((chosen_logprobs, top_vals), dim=1)
        # Use int32 to reduce the tensor size.
        all_indices = torch.cat((chosen, top_idx), dim=1).to(torch.int32)

        return LogprobsTensors(all_indices, all_logprobs, chosen_ranks)

    def apply_min_p(
        self,
        logits: torch.Tensor,
        min_p: torch.Tensor,
    ) -> torch.Tensor:
        """
        Filters logits using adaptive probability thresholding.
        """
        # Probability distribution and its per-row peak.
        probs = logits.softmax(dim=-1)
        peak = probs.amax(dim=-1, keepdim=True)
        # Threshold scales with the per-row maximum probability.
        cutoff = min_p.unsqueeze(1) * peak
        keep = probs >= cutoff
        # In-place mask fill (xla friendly; no boolean indexing).
        logits.masked_fill_(~keep, -float("inf"))
        return logits
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (189 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/spec_decode/__pycache__/eagle.cpython-312.pyc ADDED
Binary file (26.5 kB). View file