ADAPT-Chase committed on
Commit
d61bae8
·
verified ·
1 Parent(s): 876e8ef

Add files using upload-large-folder tool

Browse files
Files changed (20) hide show
  1. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/lora_model_runner_mixin.cpython-312.pyc +0 -0
  2. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/tpu_input_batch.cpython-312.pyc +0 -0
  3. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/tpu_model_runner.cpython-312.pyc +0 -0
  4. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/tpu_worker.cpython-312.pyc +0 -0
  5. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/utils.cpython-312.pyc +0 -0
  6. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/worker_base.cpython-312.pyc +0 -0
  7. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/xpu_model_runner.cpython-312.pyc +0 -0
  8. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/xpu_worker.cpython-312.pyc +0 -0
  9. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/__pycache__/__init__.cpython-312.pyc +0 -0
  10. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/__pycache__/flash_attn_interface.cpython-312.pyc +0 -0
  11. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/layers/__init__.py +0 -0
  12. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/layers/__pycache__/__init__.cpython-312.pyc +0 -0
  13. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/layers/__pycache__/rotary.cpython-312.pyc +0 -0
  14. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/layers/rotary.py +530 -0
  15. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/ops/triton/__init__.py +1 -0
  16. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/ops/triton/__pycache__/__init__.cpython-312.pyc +0 -0
  17. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/ops/triton/__pycache__/rotary.cpython-312.pyc +0 -0
  18. tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/ops/triton/rotary.py +229 -0
  19. tool_server/.venv/lib/python3.12/site-packages/vllm/worker/__pycache__/__init__.cpython-312.pyc +0 -0
  20. tool_server/.venv/lib/python3.12/site-packages/vllm/worker/__pycache__/cache_engine.cpython-312.pyc +0 -0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/lora_model_runner_mixin.cpython-312.pyc ADDED
Binary file (7.81 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/tpu_input_batch.cpython-312.pyc ADDED
Binary file (27 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/tpu_model_runner.cpython-312.pyc ADDED
Binary file (88 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/tpu_worker.cpython-312.pyc ADDED
Binary file (14.5 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/utils.cpython-312.pyc ADDED
Binary file (12.3 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/worker_base.cpython-312.pyc ADDED
Binary file (2.63 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/xpu_model_runner.cpython-312.pyc ADDED
Binary file (1.5 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/worker/__pycache__/xpu_worker.cpython-312.pyc ADDED
Binary file (9.21 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (509 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/__pycache__/flash_attn_interface.cpython-312.pyc ADDED
Binary file (24.3 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/layers/__init__.py ADDED
File without changes
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/layers/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (197 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/layers/__pycache__/rotary.cpython-312.pyc ADDED
Binary file (21.8 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/layers/rotary.py ADDED
@@ -0,0 +1,530 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adapted from https://github.com/vllm-project/flash-attention/blob/main/flash_attn/layers/rotary.py
2
+ # Modified lines are marked with `# modified from original` comment
3
+ # Copyright (c) 2023, Tri Dao.
4
+
5
+ import math
6
+ from typing import Optional, Tuple, Union
7
+
8
+ import torch
9
+ from einops import rearrange, repeat
10
+ from ..ops.triton.rotary import apply_rotary # modified from original
11
+
12
+
13
def rotate_half(x, interleaved=False):
    """Rotate each feature pair of x by 90 degrees: (a, b) -> (-b, a).

    With interleaved=False (GPT-NeoX layout) the pairs are formed by the two
    halves of the last dimension; with interleaved=True (GPT-J layout) they
    are formed by adjacent even/odd positions.
    """
    if interleaved:
        even, odd = x[..., ::2], x[..., 1::2]
        # Stack (-odd, even) per pair, then flatten the pair axis back into
        # the feature dimension (same as rearrange "... d two -> ... (d two)").
        return torch.stack((-odd, even), dim=-1).flatten(-2)
    first_half, second_half = x.chunk(2, dim=-1)
    return torch.cat((-second_half, first_half), dim=-1)
20
+
21
+
22
def apply_rotary_emb_torch(x, cos, sin, interleaved=False):
    """Pure-PyTorch reference implementation of rotary embedding.

    x: (batch_size, seqlen, nheads, headdim)
    cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2)
    Only the leading rotary_dim features are rotated; the tail passes through.
    """
    ro_dim = cos.shape[-1] * 2
    assert ro_dim <= x.shape[-1]
    if interleaved:
        # Duplicate each frequency for its even/odd pair and insert a head
        # axis (equivalent to einops repeat "... d -> ... 1 (d 2)").
        cos = cos.repeat_interleave(2, dim=-1).unsqueeze(-2)
        sin = sin.repeat_interleave(2, dim=-1).unsqueeze(-2)
    else:
        # Tile the half-dim frequencies over both halves and insert a head
        # axis (equivalent to einops repeat "... d -> ... 1 (2 d)").
        cos = torch.cat((cos, cos), dim=-1).unsqueeze(-2)
        sin = torch.cat((sin, sin), dim=-1).unsqueeze(-2)
    rotated = x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin
    return torch.cat([rotated, x[..., ro_dim:]], dim=-1)
35
+
36
+
37
class ApplyRotaryEmb(torch.autograd.Function):
    """Autograd wrapper around the fused Triton rotary kernel.

    Forward rotates the first rotary_dim features of x; backward applies the
    conjugate rotation to the incoming gradient.
    """

    @staticmethod
    def forward(
        ctx,
        x,
        cos,
        sin,
        interleaved=False,
        inplace=False,
        seqlen_offsets: Union[int, torch.Tensor] = 0,
        cu_seqlens: Optional[torch.Tensor] = None,
        max_seqlen: Optional[int] = None,
    ):
        out = apply_rotary(
            x,
            cos,
            sin,
            seqlen_offsets=seqlen_offsets,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
            interleaved=interleaved,
            inplace=inplace,
        )
        # save_for_backward cannot hold plain ints, so an int offset is stashed
        # on ctx directly; ctx.seqlen_offsets doubles as the discriminator.
        if isinstance(seqlen_offsets, int):
            ctx.save_for_backward(cos, sin, cu_seqlens)
            ctx.seqlen_offsets = seqlen_offsets
        else:
            ctx.save_for_backward(cos, sin, cu_seqlens, seqlen_offsets)
            ctx.seqlen_offsets = None
        ctx.interleaved = interleaved
        ctx.inplace = inplace
        ctx.max_seqlen = max_seqlen
        return x if inplace else out

    @staticmethod
    def backward(ctx, do):
        seqlen_offsets = ctx.seqlen_offsets
        if seqlen_offsets is None:
            cos, sin, cu_seqlens, seqlen_offsets = ctx.saved_tensors
        else:
            cos, sin, cu_seqlens = ctx.saved_tensors
        # TD [2023-09-02]: Triton 2.0.0.post1 raises "[CUDA]: invalid device
        # context" unless the gradient is cloned here; Triton 2.1.0 is fine.
        if not ctx.interleaved and not ctx.inplace:
            do = do.clone()
        dx = apply_rotary(
            do,
            cos,
            sin,
            seqlen_offsets=seqlen_offsets,
            cu_seqlens=cu_seqlens,
            max_seqlen=ctx.max_seqlen,
            interleaved=ctx.interleaved,
            inplace=ctx.inplace,
            conjugate=True,
        )
        return dx, None, None, None, None, None, None, None
94
+
95
+
96
def apply_rotary_emb(
    x,
    cos,
    sin,
    interleaved=False,
    inplace=False,
    seqlen_offsets: Union[int, torch.Tensor] = 0,
    cu_seqlens: Optional[torch.Tensor] = None,
    max_seqlen: Optional[int] = None,
):
    """Apply rotary embedding to the first rotary_dim features of x.

    Arguments:
        x: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None,
            else (total_seqlen, nheads, headdim).
        cos, sin: (seqlen_rotary, rotary_dim / 2)
        interleaved: rotate even/odd pairs (GPT-J style) instead of the
            1st half / 2nd half (GPT-NeoX style).
        inplace: write the result back into x.
        seqlen_offsets: (batch_size,) or int; per-sequence shift, most commonly
            the KV-cache length during inference.
        cu_seqlens: (batch + 1,) or None
        max_seqlen: int
    Return:
        out: same shape as x; rotary_dim must be <= headdim.
    """
    return ApplyRotaryEmb.apply(
        x, cos, sin, interleaved, inplace, seqlen_offsets, cu_seqlens, max_seqlen
    )


# For backward compatibility
apply_rotary_emb_func = apply_rotary_emb
131
+
132
+
133
class ApplyRotaryEmbQKV_(torch.autograd.Function):
    """In-place rotary embedding over packed QKV.

    Forward rotates the first rotary_dim features of the Q and K slices of the
    packed qkv tensor (V is untouched); backward applies the conjugate
    rotation to the Q/K slices of the incoming gradient.
    """

    @staticmethod
    def forward(
        ctx,
        qkv,
        cos,
        sin,
        cos_k=None,
        sin_k=None,
        interleaved=False,
        seqlen_offsets: Union[int, torch.Tensor] = 0,
        num_heads_q: Optional[int] = None,
    ):
        if cos_k is None and sin_k is None and qkv.is_contiguous():
            # Call 1 kernel instead of 2 kernels.
            # We need qkv to be contiguous so that when we reshape to combine
            # the (3, nheads) dimensions, we get a view of the same tensor.
            if qkv.dim() == 5:
                batch, seqlen, three, nheads, headdim = qkv.shape
                assert three == 3
                # qk = rearrange(qkv[:, :, :2], "b s t h d -> b s (t h) d")
                qk = qkv[:, :, :2].reshape(batch, seqlen, -1, headdim)
            else:
                # MQA/GQA packed layout: [q heads | k heads | v heads].
                assert qkv.dim() == 4
                assert num_heads_q is not None
                num_heads_k = (qkv.shape[2] - num_heads_q) // 2
                assert qkv.shape[2] == num_heads_q + 2 * num_heads_k
                qk = qkv[:, :, :num_heads_q + num_heads_k]
            apply_rotary(
                qk, cos, sin, seqlen_offsets=seqlen_offsets, interleaved=interleaved, inplace=True
            )
        else:
            cos_k = cos if cos_k is None else cos_k
            sin_k = sin if sin_k is None else sin_k
            if qkv.dim() == 5:
                q, k = qkv[:, :, 0], qkv[:, :, 1]
            else:
                assert qkv.dim() == 4
                assert num_heads_q is not None
                num_heads_k = (qkv.shape[2] - num_heads_q) // 2
                assert qkv.shape[2] == num_heads_q + 2 * num_heads_k
                q, k = qkv[:, :, :num_heads_q], qkv[:, :, num_heads_q : num_heads_q + num_heads_k]
            apply_rotary(q, cos, sin, seqlen_offsets, interleaved=interleaved, inplace=True)
            apply_rotary(k, cos_k, sin_k, seqlen_offsets, interleaved=interleaved, inplace=True)
        # BUG FIX: the original also called ctx.save_for_backward(cos, sin,
        # cos_k, sin_k) unconditionally right here. save_for_backward retains
        # only the most recent call, so that first call was dead code; only the
        # branch-specific saves below are needed.
        if isinstance(seqlen_offsets, int):
            ctx.save_for_backward(cos, sin, cos_k, sin_k)  # ints can't be saved here
            ctx.seqlen_offsets = seqlen_offsets
        else:
            ctx.save_for_backward(cos, sin, cos_k, sin_k, seqlen_offsets)
            ctx.seqlen_offsets = None
        ctx.interleaved = interleaved
        ctx.num_heads_q = num_heads_q
        return qkv

    @staticmethod
    def backward(ctx, dqkv):
        seqlen_offsets = ctx.seqlen_offsets
        if seqlen_offsets is None:
            cos, sin, cos_k, sin_k, seqlen_offsets = ctx.saved_tensors
        else:
            cos, sin, cos_k, sin_k = ctx.saved_tensors
        if cos_k is None and sin_k is None and dqkv.is_contiguous():
            # Call 1 kernel instead of 2 kernels (see forward).
            if dqkv.dim() == 5:
                dqk = rearrange(dqkv[:, :, :2], "b s t h d -> b s (t h) d")
            else:
                assert dqkv.dim() == 4
                assert ctx.num_heads_q is not None
                num_heads_k = (dqkv.shape[2] - ctx.num_heads_q) // 2
                assert dqkv.shape[2] == ctx.num_heads_q + 2 * num_heads_k
                dqk = dqkv[:, :, : ctx.num_heads_q + num_heads_k]
            apply_rotary(
                dqk,
                cos,
                sin,
                seqlen_offsets=seqlen_offsets,
                interleaved=ctx.interleaved,
                inplace=True,
                conjugate=True,
            )
        else:
            cos_k = cos if cos_k is None else cos_k
            sin_k = sin if sin_k is None else sin_k
            if dqkv.dim() == 5:
                dq, dk = dqkv[:, :, 0], dqkv[:, :, 1]
            else:
                assert dqkv.dim() == 4
                assert ctx.num_heads_q is not None
                num_heads_k = (dqkv.shape[2] - ctx.num_heads_q) // 2
                assert dqkv.shape[2] == ctx.num_heads_q + 2 * num_heads_k
                dq = dqkv[:, :, : ctx.num_heads_q]
                dk = dqkv[:, :, ctx.num_heads_q : ctx.num_heads_q + num_heads_k]
            apply_rotary(
                dq,
                cos,
                sin,
                seqlen_offsets,
                interleaved=ctx.interleaved,
                inplace=True,
                conjugate=True,
            )
            apply_rotary(
                dk,
                cos_k,
                sin_k,
                seqlen_offsets,
                interleaved=ctx.interleaved,
                inplace=True,
                conjugate=True,
            )
        return dqkv, None, None, None, None, None, None, None
247
+
248
+
249
def apply_rotary_emb_qkv_(
    qkv,
    cos,
    sin,
    cos_k=None,
    sin_k=None,
    interleaved=False,
    seqlen_offsets: Union[int, torch.Tensor] = 0,
    num_heads_q: Optional[int] = None,
):
    """Apply rotary embedding *inplace* to the Q and K slices of packed qkv.

    Arguments:
        qkv: (batch_size, seqlen, 3, nheads, headdim) or
            (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim).
            The latter (MQA / GQA) layout requires num_heads_q.
        cos, sin: (seqlen, rotary_dim / 2)
        cos_k, sin_k: (seqlen, rotary_dim / 2), optional separate tables for K.
        interleaved: rotate even/odd pairs (GPT-J style) instead of the
            1st half / 2nd half (GPT-NeoX style).
        seqlen_offsets: (batch_size,) or int; per-sequence shift, most commonly
            the KV-cache length during inference.
    Return:
        qkv, mutated in place; rotary_dim must be <= headdim.
    """
    return ApplyRotaryEmbQKV_.apply(
        qkv, cos, sin, cos_k, sin_k, interleaved, seqlen_offsets, num_heads_q
    )
278
+
279
+
280
class ApplyRotaryEmbKV_(torch.autograd.Function):
    """In-place rotary embedding over packed KV (rotates K only)."""

    @staticmethod
    def forward(ctx, kv, cos, sin, interleaved=False, seqlen_offsets: Union[int, torch.Tensor] = 0):
        batch, seqlen, two, nheads, headdim = kv.shape
        assert two == 2
        k = kv[:, :, 0]
        apply_rotary(
            k, cos, sin, seqlen_offsets=seqlen_offsets, interleaved=interleaved, inplace=True
        )
        # save_for_backward cannot hold plain ints, so an int offset is stashed
        # on ctx directly; ctx.seqlen_offsets doubles as the discriminator.
        if isinstance(seqlen_offsets, int):
            ctx.save_for_backward(cos, sin)
            ctx.seqlen_offsets = seqlen_offsets
        else:
            ctx.save_for_backward(cos, sin, seqlen_offsets)
            ctx.seqlen_offsets = None
        ctx.interleaved = interleaved
        return kv

    @staticmethod
    def backward(ctx, dkv):
        seqlen_offsets = ctx.seqlen_offsets
        if seqlen_offsets is None:
            cos, sin, seqlen_offsets = ctx.saved_tensors
        else:
            cos, sin = ctx.saved_tensors
        # Conjugate rotation of dK inverts the forward rotation.
        apply_rotary(
            dkv[:, :, 0],
            cos,
            sin,
            seqlen_offsets=seqlen_offsets,
            interleaved=ctx.interleaved,
            inplace=True,
            conjugate=True,
        )
        return dkv, None, None, None, None
315
+
316
+
317
# NOTE: a module-level alias `apply_rotary_emb_kv_ = ApplyRotaryEmbKV_.apply`
# used to precede this function; it was dead code (immediately shadowed by the
# def below, which has the same name) and has been removed.
def apply_rotary_emb_kv_(
    kv,
    cos,
    sin,
    interleaved=False,
    seqlen_offsets: Union[int, torch.Tensor] = 0,
):
    """Apply rotary embedding *inplace* to the K slice of packed kv.

    Arguments:
        kv: (batch_size, seqlen, 2, nheads, headdim)
        cos, sin: (seqlen, rotary_dim / 2)
        interleaved: rotate even/odd pairs (GPT-J style) instead of the
            1st half / 2nd half (GPT-NeoX style).
        seqlen_offsets: (batch_size,) or int; per-sequence shift, most commonly
            the KV-cache length during inference.
    Return:
        kv, mutated in place; rotary_dim must be <= headdim.
    """
    return ApplyRotaryEmbKV_.apply(kv, cos, sin, interleaved, seqlen_offsets)
341
+
342
+
343
class RotaryEmbedding(torch.nn.Module):
    """Rotary position embedding (RoFormer, Su et al.).

    Queries and keys are transformed by position-dependent rotation matrices,
    so that their dot products depend on relative position.

    .. _RoFormer: https://arxiv.org/abs/2104.09864
    .. _repo: https://github.com/ZhuiyiTechnology/roformer
    .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox

    If scale_base is not None, this implements XPos
    (Sun et al., https://arxiv.org/abs/2212.10554); scale_base=512 is a
    commonly recommended value
    (https://github.com/HazyResearch/flash-attention/issues/96).
    Reference: https://github.com/sunyt32/torchscale/blob/main/torchscale/component/xpos_relative_position.py
    """

    def __init__(
        self,
        dim: int,
        base=10000.0,
        interleaved=False,
        scale_base=None,
        pos_idx_in_fp32=True,
        device=None,
    ):
        """
        interleaved: rotate even/odd pairs (GPT-J style) instead of the
            1st half / 2nd half (GPT-NeoX style).
        pos_idx_in_fp32: build the position indices [0.0, ..., seqlen - 1] in
            fp32 instead of self.inv_freq's dtype. In pure-bf16 training,
            inv_freq is bf16 and large position indices collapse (e.g. 1995.0
            rounds to 2000.0), making some positions coincide; this flag exists
            for compatibility with models trained that way (pre 2023-07-02).
        """
        super().__init__()
        self.dim = dim
        self.base = float(base)
        self.pos_idx_in_fp32 = pos_idx_in_fp32
        # Inverse frequencies are derived state; keep as a non-persistent buffer.
        self.register_buffer("inv_freq", self._compute_inv_freq(device), persistent=False)
        self.interleaved = interleaved
        self.scale_base = scale_base
        if scale_base is not None:
            scale = (
                torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim
            ) / (1.4 * dim)
        else:
            scale = None
        self.register_buffer("scale", scale, persistent=False)

        # Lazily grown cos/sin caches; see _update_cos_sin_cache.
        self._seq_len_cached = 0
        self._cos_cached = None
        self._sin_cached = None
        self._cos_k_cached = None
        self._sin_k_cached = None

    def _compute_inv_freq(self, device=None):
        # 1 / base^(2i/dim) for i = 0 .. dim/2 - 1, always computed in fp32.
        exponents = torch.arange(0, self.dim, 2, device=device, dtype=torch.float32)
        return 1.0 / (self.base ** (exponents / self.dim))

    def _update_cos_sin_cache(self, seqlen, device=None, dtype=None):
        # Rebuild when the cache is too short, lives on the wrong device or
        # dtype (e.g. after tracing), or was built under inference mode while
        # we are now training.
        cache_ok = (
            seqlen <= self._seq_len_cached
            and self._cos_cached is not None
            and self._cos_cached.device == device
            and self._cos_cached.dtype == dtype
            and not (self.training and self._cos_cached.is_inference())
        )
        if cache_ok:
            return
        self._seq_len_cached = seqlen
        # Positions and freqs are built in fp32 where possible: arange output
        # can be large, and bf16 would lose precision in the cos/sin tables.
        if self.pos_idx_in_fp32:
            t = torch.arange(seqlen, device=device, dtype=torch.float32)
            # Recompute inv_freq if the buffer was loaded in lower precision,
            # since it will be multiplied with t and the products are large.
            inv_freq = (
                self.inv_freq
                if self.inv_freq.dtype == torch.float32
                else self._compute_inv_freq(device=device)
            )
        else:
            t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
            inv_freq = self.inv_freq
        # torch.outer rather than einsum: einsum downcasts fp32 to fp16 under AMP.
        freqs = torch.outer(t, inv_freq)
        if self.scale is None:
            self._cos_cached = torch.cos(freqs).to(dtype)
            self._sin_cached = torch.sin(freqs).to(dtype)
        else:
            # XPos: exponential decay centered at seqlen // 2.
            power = (
                torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device)
                - seqlen // 2
            ) / self.scale_base
            scale = self.scale.to(device=power.device) ** power[:, None]
            # Multiply by scale in fp32 before casting to the working dtype.
            self._cos_cached = (torch.cos(freqs) * scale).to(dtype)
            self._sin_cached = (torch.sin(freqs) * scale).to(dtype)
            self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype)
            self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype)

    def forward(
        self,
        qkv: torch.Tensor,
        kv: Optional[torch.Tensor] = None,
        seqlen_offset: Union[int, torch.Tensor] = 0,
        max_seqlen: Optional[int] = None,
        num_heads_q: Optional[int] = None,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """
        qkv: (batch, seqlen, 3, nheads, headdim) or
            (batch, seqlen, num_heads_q + 2 * num_heads_k, headdim) if kv is
            None (the latter MQA / GQA layout requires num_heads_q); otherwise
            just q of shape (batch, seqlen, nheads, headdim).
        kv: (batch, seqlen, 2, nheads, headdim)
        seqlen_offset: (batch_size,) or int; per-sequence shift, most commonly
            the KV-cache length during inference. If a tensor is passed,
            max_seqlen should be supplied so the cos/sin cache can be sized.
        Apply rotary embedding *inplace* to qkv and / or kv.
        """
        seqlen = qkv.shape[1]
        if max_seqlen is not None:
            self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype)
        elif isinstance(seqlen_offset, int):
            self._update_cos_sin_cache(seqlen + seqlen_offset, device=qkv.device, dtype=qkv.dtype)
        if kv is None:
            # Packed QKV path: rotate Q and K inside qkv.
            if self.scale is None:
                return apply_rotary_emb_qkv_(
                    qkv,
                    self._cos_cached,
                    self._sin_cached,
                    interleaved=self.interleaved,
                    seqlen_offsets=seqlen_offset,
                    num_heads_q=num_heads_q,
                )
            return apply_rotary_emb_qkv_(
                qkv,
                self._cos_cached,
                self._sin_cached,
                self._cos_k_cached,
                self._sin_k_cached,
                interleaved=self.interleaved,
                seqlen_offsets=seqlen_offset,
                num_heads_q=num_heads_q,
            )
        # Split path: qkv is just q; rotate q and the K slice of kv.
        q = apply_rotary_emb_func(
            qkv,
            self._cos_cached,
            self._sin_cached,
            interleaved=self.interleaved,
            inplace=True,
            seqlen_offsets=seqlen_offset,
        )
        if self.scale is None:
            kv = apply_rotary_emb_kv_(
                kv,
                self._cos_cached,
                self._sin_cached,
                interleaved=self.interleaved,
                seqlen_offsets=seqlen_offset,
            )
        else:
            kv = apply_rotary_emb_kv_(
                kv,
                self._cos_k_cached,
                self._sin_k_cached,
                interleaved=self.interleaved,
                seqlen_offsets=seqlen_offset,
            )
        return q, kv
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/ops/triton/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/ops/triton/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (201 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/ops/triton/__pycache__/rotary.cpython-312.pyc ADDED
Binary file (10.1 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/vllm_flash_attn/ops/triton/rotary.py ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copy from https://github.com/vllm-project/flash-attention/blob/main/flash_attn/ops/triton/rotary.py
2
+ # Copyright (c) 2023, Tri Dao.
3
+
4
+ from typing import Optional, Union
5
+
6
+ import torch
7
+
8
+ import triton
9
+ import triton.language as tl
10
+
11
+
12
@triton.jit
def rotary_kernel(
    OUT,  # Pointers to matrices
    X,
    COS,
    SIN,
    CU_SEQLENS,
    SEQLEN_OFFSETS,  # this could be int or a pointer
    # Matrix dimensions
    seqlen,
    rotary_dim,
    seqlen_ro,
    # strides
    stride_out_batch,
    stride_out_seqlen,
    stride_out_nheads,
    stride_out_headdim,
    stride_x_batch,
    stride_x_seqlen,
    stride_x_nheads,
    stride_x_headdim,
    # Meta-parameters
    BLOCK_K: tl.constexpr,
    IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr,
    IS_VARLEN: tl.constexpr,
    INTERLEAVED: tl.constexpr,
    CONJUGATE: tl.constexpr,
    BLOCK_M: tl.constexpr,
):
    # Each program instance rotates a (BLOCK_M x BLOCK_K) tile of one head.
    # Grid: (cdiv(seqlen, BLOCK_M), nheads, batch).
    pid_m = tl.program_id(axis=0)
    pid_head = tl.program_id(axis=1)
    pid_batch = tl.program_id(axis=2)
    rotary_dim_half = rotary_dim // 2

    if not IS_VARLEN:
        # Dense layout: offset to this (batch, head) slice directly.
        X = X + pid_batch * stride_x_batch + pid_head * stride_x_nheads
        OUT = OUT + pid_batch * stride_out_batch + pid_head * stride_out_nheads
    else:
        # Varlen layout: sequences are packed; bounds come from CU_SEQLENS.
        start_idx = tl.load(CU_SEQLENS + pid_batch)
        seqlen = tl.load(CU_SEQLENS + pid_batch + 1) - start_idx
        X = X + start_idx * stride_x_seqlen + pid_head * stride_x_nheads
        OUT = OUT + start_idx * stride_out_seqlen + pid_head * stride_out_nheads

    if pid_m * BLOCK_M >= seqlen:
        return
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    # Row indices into the cos/sin tables, shifted by the scalar or per-batch offset.
    if not IS_SEQLEN_OFFSETS_TENSOR:
        rm_cs = rm + SEQLEN_OFFSETS
    else:
        rm_cs = rm + tl.load(SEQLEN_OFFSETS + pid_batch)
    rk = tl.arange(0, BLOCK_K)
    rk_half = tl.arange(0, BLOCK_K // 2)

    if not INTERLEAVED:
        # GPT-NeoX layout: load the 1st and 2nd halves of X, rotate, and store
        # back into the 1st and 2nd halves of OUT.
        X = X + (rm[:, None] * stride_x_seqlen + rk_half[None, :] * stride_x_headdim)
        COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :])
        SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :])
        cos = tl.load(
            COS, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=1.0
        ).to(tl.float32)
        sin = tl.load(
            SIN, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=0.0
        ).to(tl.float32)
        x0 = tl.load(
            X, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), other=0.0
        ).to(tl.float32)
        x1 = tl.load(
            X + rotary_dim_half * stride_x_headdim,
            mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half),
            other=0.0,
        ).to(tl.float32)
        if CONJUGATE:
            sin = -sin  # inverse rotation (used by the backward pass)
        o0 = x0 * cos - x1 * sin
        o1 = x0 * sin + x1 * cos
        # write back result
        OUT = OUT + (rm[:, None] * stride_out_seqlen + rk_half[None, :] * stride_out_headdim)
        tl.store(OUT, o0, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half))
        tl.store(
            OUT + rotary_dim_half * stride_out_headdim,
            o1,
            mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half),
        )
    else:
        # GPT-J layout. Loading X[0, 2, 4, ...] and X[1, 3, 5, ...] separately
        # would both be slow, so instead load x0 = X[0, 1, 2, 3, ...]
        # contiguously (fast) and x1 = X[1, 0, 3, 2, ...] (each lane's partner,
        # slow), plus cos = COS[0, 0, 1, 1, ...] and sin = SIN[0, 0, 1, 1, ...],
        # then pick the correct output for even vs. odd lanes with tl.where.
        rk_swap = rk + ((rk + 1) % 2) * 2 - 1  # 1, 0, 3, 2, 5, 4, ...
        rk_repeat = tl.arange(0, BLOCK_K) // 2
        X0 = X + (rm[:, None] * stride_x_seqlen + rk[None, :] * stride_x_headdim)
        X1 = X + (rm[:, None] * stride_x_seqlen + rk_swap[None, :] * stride_x_headdim)
        COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :])
        SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :])
        cos = tl.load(
            COS,
            mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half),
            other=1.0,
        ).to(tl.float32)
        sin = tl.load(
            SIN,
            mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half),
            other=0.0,
        ).to(tl.float32)
        x0 = tl.load(
            X0, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim), other=0.0
        ).to(tl.float32)
        x1 = tl.load(
            X1, mask=(rm[:, None] < seqlen) & (rk_swap[None, :] < rotary_dim), other=0.0
        ).to(tl.float32)
        if CONJUGATE:
            sin = -sin
        x0_cos = x0 * cos
        x1_sin = x1 * sin
        out = tl.where(rk[None, :] % 2 == 0, x0_cos - x1_sin, x0_cos + x1_sin)
        OUT = OUT + (rm[:, None] * stride_out_seqlen + rk[None, :] * stride_out_headdim)
        tl.store(OUT, out, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim))
132
+
133
+
134
def apply_rotary(
    x: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
    seqlen_offsets: Union[int, torch.Tensor] = 0,
    cu_seqlens: Optional[torch.Tensor] = None,
    max_seqlen: Optional[int] = None,
    interleaved=False,
    inplace=False,
    conjugate=False,
) -> torch.Tensor:
    """Launch the Triton rotary kernel over x.

    Arguments:
        x: (batch, seqlen, nheads, headdim) if cu_seqlens is None
            else (total_seqlen, nheads, headdim).
        cos: (seqlen_ro, rotary_dim / 2)
        sin: (seqlen_ro, rotary_dim / 2)
        seqlen_offsets: integer or integer tensor of size (batch,)
        cu_seqlens: (batch + 1,) or None
        max_seqlen: int
    Returns:
        y: (batch, seqlen, nheads, headdim)
    """
    is_varlen = cu_seqlens is not None
    if is_varlen:
        assert max_seqlen is not None, "If cu_seqlens is passed in, then max_seqlen must be passed"
        total_seqlen, nheads, headdim = x.shape
        batch = cu_seqlens.shape[0] - 1
        seqlen = max_seqlen
    else:
        batch, seqlen, nheads, headdim = x.shape
    seqlen_ro, rotary_dim = cos.shape
    assert sin.shape == cos.shape
    rotary_dim *= 2  # cos/sin hold one entry per rotated pair
    assert rotary_dim <= headdim, "rotary_dim must be <= headdim"
    assert headdim <= 256, "Only support headdim <= 256"
    assert seqlen_ro >= seqlen, "seqlen_ro must be >= seqlen"

    assert (
        cos.dtype == sin.dtype
    ), f"cos and sin must have the same dtype, got {cos.dtype} and {sin.dtype}"
    assert (
        x.dtype == cos.dtype
    ), f"Input and cos/sin must have the same dtype, got {x.dtype} and {cos.dtype}"

    cos, sin = cos.contiguous(), sin.contiguous()
    if isinstance(seqlen_offsets, torch.Tensor):
        assert seqlen_offsets.shape == (batch,)
        assert seqlen_offsets.dtype in [torch.int32, torch.int64]
        seqlen_offsets = seqlen_offsets.contiguous()
    else:
        assert seqlen_offsets + seqlen <= seqlen_ro

    output = x if inplace else torch.empty_like(x)
    if rotary_dim < headdim and not inplace:
        # The kernel only writes the rotated prefix; copy the tail through.
        output[..., rotary_dim:].copy_(x[..., rotary_dim:])

    # Smallest supported power-of-two tile width covering rotary_dim.
    if rotary_dim <= 32:
        BLOCK_K = 32
    elif rotary_dim <= 64:
        BLOCK_K = 64
    elif rotary_dim <= 128:
        BLOCK_K = 128
    else:
        BLOCK_K = 256
    grid = lambda META: (triton.cdiv(seqlen, META["BLOCK_M"]), nheads, batch)  # noqa
    BLOCK_M = 8 if not interleaved and rotary_dim <= 128 else 4

    # Need this, otherwise Triton tries to launch from cuda:0 and we get
    # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?)
    with torch.cuda.device(x.device.index):
        rotary_kernel[grid](
            output,  # data ptrs
            x,
            cos,
            sin,
            cu_seqlens,
            seqlen_offsets,
            seqlen,  # shapes
            rotary_dim,
            seqlen_ro,
            output.stride(0) if not is_varlen else 0,  # batch stride (0 for varlen)
            output.stride(-3),  # seqlen stride (or total_seqlen stride)
            output.stride(-2),  # nheads stride
            output.stride(-1),  # headdim stride
            x.stride(0) if not is_varlen else 0,  # batch stride (0 for varlen)
            x.stride(-3),  # seqlen stride (or total_seqlen stride)
            x.stride(-2),  # nheads stride
            x.stride(-1),  # headdim stride
            BLOCK_K,
            isinstance(seqlen_offsets, torch.Tensor),
            is_varlen,
            interleaved,
            conjugate,
            BLOCK_M,
            num_warps=2 if rotary_dim <= 64 else 4,
        )
    return output
tool_server/.venv/lib/python3.12/site-packages/vllm/worker/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (181 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/worker/__pycache__/cache_engine.cpython-312.pyc ADDED
Binary file (6.83 kB). View file