ZTWHHH committed on
Commit
c4e046e
·
verified ·
1 Parent(s): 7b9d32b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/INSTALLER +1 -0
  2. vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/LICENSE +21 -0
  3. vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/WHEEL +6 -0
  4. vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/top_level.txt +1 -0
  5. vllm/lib/python3.10/site-packages/gguf/__pycache__/__init__.cpython-310.pyc +0 -0
  6. vllm/lib/python3.10/site-packages/gguf/__pycache__/constants.cpython-310.pyc +0 -0
  7. vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf.cpython-310.pyc +0 -0
  8. vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_reader.cpython-310.pyc +0 -0
  9. vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_writer.cpython-310.pyc +0 -0
  10. vllm/lib/python3.10/site-packages/gguf/__pycache__/lazy.cpython-310.pyc +0 -0
  11. vllm/lib/python3.10/site-packages/gguf/__pycache__/metadata.cpython-310.pyc +0 -0
  12. vllm/lib/python3.10/site-packages/gguf/__pycache__/quants.cpython-310.pyc +0 -0
  13. vllm/lib/python3.10/site-packages/gguf/__pycache__/tensor_mapping.cpython-310.pyc +0 -0
  14. vllm/lib/python3.10/site-packages/gguf/__pycache__/utility.cpython-310.pyc +0 -0
  15. vllm/lib/python3.10/site-packages/gguf/__pycache__/vocab.cpython-310.pyc +0 -0
  16. vllm/lib/python3.10/site-packages/gguf/tensor_mapping.py +657 -0
  17. vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/INSTALLER +1 -0
  18. vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/LICENSE +46 -0
  19. vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/METADATA +32 -0
  20. vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/RECORD +19 -0
  21. vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/REQUESTED +0 -0
  22. vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/WHEEL +4 -0
  23. vllm/lib/python3.10/site-packages/outlines/__init__.py +20 -0
  24. vllm/lib/python3.10/site-packages/outlines/__pycache__/_version.cpython-310.pyc +0 -0
  25. vllm/lib/python3.10/site-packages/outlines/__pycache__/base.cpython-310.pyc +0 -0
  26. vllm/lib/python3.10/site-packages/outlines/__pycache__/function.cpython-310.pyc +0 -0
  27. vllm/lib/python3.10/site-packages/outlines/caching.py +179 -0
  28. vllm/lib/python3.10/site-packages/outlines/fsm/__init__.py +0 -0
  29. vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/__init__.cpython-310.pyc +0 -0
  30. vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/guide.cpython-310.pyc +0 -0
  31. vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/json_schema.cpython-310.pyc +0 -0
  32. vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/parsing.cpython-310.pyc +0 -0
  33. vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/types.cpython-310.pyc +0 -0
  34. vllm/lib/python3.10/site-packages/outlines/fsm/guide.py +276 -0
  35. vllm/lib/python3.10/site-packages/outlines/fsm/json_schema.py +83 -0
  36. vllm/lib/python3.10/site-packages/outlines/fsm/parsing.py +1127 -0
  37. vllm/lib/python3.10/site-packages/outlines/fsm/types.py +81 -0
  38. vllm/lib/python3.10/site-packages/outlines/function.py +117 -0
  39. vllm/lib/python3.10/site-packages/outlines/generate/choice.py +59 -0
  40. vllm/lib/python3.10/site-packages/outlines/generate/text.py +50 -0
  41. vllm/lib/python3.10/site-packages/outlines/grammars.py +14 -0
  42. vllm/lib/python3.10/site-packages/outlines/grammars/arithmetic.lark +18 -0
  43. vllm/lib/python3.10/site-packages/outlines/grammars/common.lark +83 -0
  44. vllm/lib/python3.10/site-packages/outlines/models/__init__.py +19 -0
  45. vllm/lib/python3.10/site-packages/outlines/models/__pycache__/__init__.cpython-310.pyc +0 -0
  46. vllm/lib/python3.10/site-packages/outlines/models/__pycache__/exllamav2.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/outlines/models/__pycache__/llamacpp.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/outlines/models/__pycache__/mlxlm.cpython-310.pyc +0 -0
  49. vllm/lib/python3.10/site-packages/outlines/models/__pycache__/openai.cpython-310.pyc +0 -0
  50. vllm/lib/python3.10/site-packages/outlines/models/__pycache__/tokenizer.cpython-310.pyc +0 -0
vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2015 Stanis Trendelenburg
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.36.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py2-none-any
5
+ Tag: py3-none-any
6
+
vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ braceexpand
vllm/lib/python3.10/site-packages/gguf/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (342 Bytes). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/constants.cpython-310.pyc ADDED
Binary file (25.3 kB). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf.cpython-310.pyc ADDED
Binary file (352 Bytes). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_reader.cpython-310.pyc ADDED
Binary file (9.31 kB). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_writer.cpython-310.pyc ADDED
Binary file (39.4 kB). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/lazy.cpython-310.pyc ADDED
Binary file (7.45 kB). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/metadata.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/quants.cpython-310.pyc ADDED
Binary file (48.4 kB). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/tensor_mapping.cpython-310.pyc ADDED
Binary file (16.8 kB). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/utility.cpython-310.pyc ADDED
Binary file (2.2 kB). View file
 
vllm/lib/python3.10/site-packages/gguf/__pycache__/vocab.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
vllm/lib/python3.10/site-packages/gguf/tensor_mapping.py ADDED
@@ -0,0 +1,657 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Sequence
4
+
5
+ from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES
6
+
7
+
8
+ class TensorNameMap:
9
+ mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
10
+ # Token embeddings
11
+ MODEL_TENSOR.TOKEN_EMBD: (
12
+ "gpt_neox.embed_in", # gptneox
13
+ "transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais exaone
14
+ "transformer.word_embeddings", # falcon
15
+ "word_embeddings", # bloom
16
+ "model.embed_tokens", # llama-hf nemotron
17
+ "tok_embeddings", # llama-pth
18
+ "embeddings.word_embeddings", # bert nomic-bert
19
+ "language_model.embedding.word_embeddings", # persimmon
20
+ "wte", # gpt2
21
+ "transformer.embd.wte", # phi2
22
+ "model.tok_embeddings", # internlm2
23
+ "model.embedding", # mamba-qbert
24
+ "backbone.embedding", # mamba
25
+ "backbone.embeddings", # mamba-hf
26
+ "transformer.in_out_embed", # Grok
27
+ "embedding.word_embeddings", # chatglm
28
+ "transformer.token_embeddings", # openelm
29
+ "shared", # t5
30
+ ),
31
+
32
+ # Token type embeddings
33
+ MODEL_TENSOR.TOKEN_TYPES: (
34
+ "embeddings.token_type_embeddings", # bert nomic-bert
35
+ ),
36
+
37
+ # Normalization of token embeddings
38
+ MODEL_TENSOR.TOKEN_EMBD_NORM: (
39
+ "word_embeddings_layernorm", # bloom
40
+ "embeddings.LayerNorm", # bert
41
+ "emb_ln", # nomic-bert
42
+ "transformer.norm", # openelm
43
+ ),
44
+
45
+ # Position embeddings
46
+ MODEL_TENSOR.POS_EMBD: (
47
+ "transformer.wpe", # gpt2
48
+ "embeddings.position_embeddings", # bert
49
+ "wpe", # gpt2
50
+ ),
51
+
52
+ # Output
53
+ MODEL_TENSOR.OUTPUT: (
54
+ "embed_out", # gptneox
55
+ "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone
56
+ "output", # llama-pth bloom internlm2
57
+ "word_embeddings_for_head", # persimmon
58
+ "lm_head.linear", # phi2
59
+ "output_layer", # chatglm
60
+ ),
61
+
62
+ # Output norm
63
+ MODEL_TENSOR.OUTPUT_NORM: (
64
+ "gpt_neox.final_layer_norm", # gptneox
65
+ "transformer.ln_f", # gpt2 gpt-j falcon jais exaone
66
+ "model.norm", # llama-hf baichuan internlm2
67
+ "norm", # llama-pth
68
+ "transformer.norm_f", # mpt dbrx
69
+ "ln_f", # refact bloom qwen gpt2
70
+ "language_model.encoder.final_layernorm", # persimmon
71
+ "model.final_layernorm", # persimmon
72
+ "lm_head.ln", # phi2
73
+ "model.norm_f", # mamba-qbert
74
+ "backbone.norm_f", # mamba
75
+ "transformer.rms_norm", # Grok
76
+ "encoder.final_layernorm", # chatglm
77
+ "transformer.norm", # openelm
78
+ "model.norm", # nemotron
79
+ ),
80
+
81
+ # Rope frequencies
82
+ MODEL_TENSOR.ROPE_FREQS: (
83
+ "rope.freqs", # llama-pth
84
+ "rotary_pos_emb.inv_freq", # chatglm
85
+ ),
86
+ }
87
+
88
+ block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
89
+ # Attention norm
90
+ MODEL_TENSOR.ATTN_NORM: (
91
+ "gpt_neox.layers.{bid}.input_layernorm", # gptneox
92
+ "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact qwen jais exaone
93
+ "transformer.blocks.{bid}.norm_1", # mpt
94
+ "transformer.h.{bid}.input_layernorm", # falcon7b
95
+ "h.{bid}.input_layernorm", # bloom
96
+ "transformer.h.{bid}.ln_mlp", # falcon40b
97
+ "model.layers.{bid}.input_layernorm", # llama-hf nemotron
98
+ "layers.{bid}.attention_norm", # llama-pth
99
+ "language_model.encoder.layers.{bid}.input_layernorm", # persimmon
100
+ "model.layers.{bid}.ln1", # yi
101
+ "h.{bid}.ln_1", # gpt2
102
+ "transformer.h.{bid}.ln", # phi2
103
+ "model.layers.layers.{bid}.norm", # plamo
104
+ "model.layers.{bid}.attention_norm", # internlm2
105
+ "model.layers.{bid}.norm", # mamba-qbert
106
+ "backbone.layers.{bid}.norm", # mamba
107
+ "transformer.decoder_layer.{bid}.rms_norm", # Grok
108
+ "transformer.blocks.{bid}.norm_attn_norm.norm_1", # dbrx
109
+ "encoder.layers.{bid}.input_layernorm", # chatglm
110
+ "transformer.layers.{bid}.attn_norm", # openelm
111
+ ),
112
+
113
+ # Attention norm 2
114
+ MODEL_TENSOR.ATTN_NORM_2: (
115
+ "transformer.h.{bid}.ln_attn", # falcon40b
116
+ "encoder.layer.{bid}.layer_norm_1", # jina-v2-code
117
+ ),
118
+
119
+ # Attention query-key-value
120
+ MODEL_TENSOR.ATTN_QKV: (
121
+ "gpt_neox.layers.{bid}.attention.query_key_value", # gptneox
122
+ "transformer.h.{bid}.attn.c_attn", # gpt2 qwen jais
123
+ "transformer.blocks.{bid}.attn.Wqkv", # mpt
124
+ "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv", # dbrx
125
+ "transformer.h.{bid}.self_attention.query_key_value", # falcon
126
+ "h.{bid}.self_attention.query_key_value", # bloom
127
+ "language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon
128
+ "model.layers.{bid}.self_attn.query_key_value", # persimmon
129
+ "h.{bid}.attn.c_attn", # gpt2
130
+ "transformer.h.{bid}.mixer.Wqkv", # phi2
131
+ "encoder.layers.{bid}.attn.Wqkv", # nomic-bert
132
+ "model.layers.{bid}.self_attn.qkv_proj", # phi3
133
+ "encoder.layers.{bid}.self_attention.query_key_value", # chatglm
134
+ "transformer.layers.{bid}.attn.qkv_proj", # openelm
135
+ ),
136
+
137
+ # Attention query
138
+ MODEL_TENSOR.ATTN_Q: (
139
+ "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron
140
+ "layers.{bid}.attention.wq", # llama-pth
141
+ "encoder.layer.{bid}.attention.self.query", # bert
142
+ "transformer.h.{bid}.attn.q_proj", # gpt-j
143
+ "model.layers.layers.{bid}.self_attn.q_proj", # plamo
144
+ "model.layers.{bid}.attention.wq", # internlm2
145
+ "transformer.decoder_layer.{bid}.multi_head_attention.query",# Grok
146
+ "transformer.h.{bid}.attn.attention.q_proj", # exaone
147
+ ),
148
+
149
+ # Attention key
150
+ MODEL_TENSOR.ATTN_K: (
151
+ "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron
152
+ "layers.{bid}.attention.wk", # llama-pth
153
+ "encoder.layer.{bid}.attention.self.key", # bert
154
+ "transformer.h.{bid}.attn.k_proj", # gpt-j
155
+ "transformer.h.{bid}.attn.k", # refact
156
+ "model.layers.layers.{bid}.self_attn.k_proj", # plamo
157
+ "model.layers.{bid}.attention.wk", # internlm2
158
+ "transformer.decoder_layer.{bid}.multi_head_attention.key",# Grok
159
+ "transformer.h.{bid}.attn.attention.k_proj", # exaone
160
+ ),
161
+
162
+ # Attention value
163
+ MODEL_TENSOR.ATTN_V: (
164
+ "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron
165
+ "layers.{bid}.attention.wv", # llama-pth
166
+ "encoder.layer.{bid}.attention.self.value", # bert
167
+ "transformer.h.{bid}.attn.v_proj", # gpt-j
168
+ "transformer.h.{bid}.attn.v", # refact
169
+ "model.layers.layers.{bid}.self_attn.v_proj", # plamo
170
+ "model.layers.{bid}.attention.wv", # internlm2
171
+ "transformer.decoder_layer.{bid}.multi_head_attention.value",# Grok
172
+ "transformer.h.{bid}.attn.attention.v_proj", # exaone
173
+ ),
174
+
175
+ # Attention output
176
+ MODEL_TENSOR.ATTN_OUT: (
177
+ "gpt_neox.layers.{bid}.attention.dense", # gptneox
178
+ "transformer.h.{bid}.attn.c_proj", # gpt2 refact qwen jais
179
+ "transformer.blocks.{bid}.attn.out_proj", # mpt
180
+ "transformer.h.{bid}.self_attention.dense", # falcon
181
+ "h.{bid}.self_attention.dense", # bloom
182
+ "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron
183
+ "layers.{bid}.attention.wo", # llama-pth
184
+ "encoder.layer.{bid}.attention.output.dense", # bert
185
+ "transformer.h.{bid}.attn.out_proj", # gpt-j
186
+ "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon
187
+ "model.layers.{bid}.self_attn.dense", # persimmon
188
+ "h.{bid}.attn.c_proj", # gpt2
189
+ "transformer.h.{bid}.mixer.out_proj", # phi2
190
+ "model.layers.layers.{bid}.self_attn.o_proj", # plamo
191
+ "model.layers.{bid}.attention.wo", # internlm2
192
+ "encoder.layers.{bid}.attn.out_proj", # nomic-bert
193
+ "transformer.decoder_layer.{bid}.multi_head_attention.linear", # Grok
194
+ "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj", # dbrx
195
+ "encoder.layers.{bid}.self_attention.dense", # chatglm
196
+ "transformer.layers.{bid}.attn.out_proj", # openelm
197
+ "transformer.h.{bid}.attn.attention.out_proj", # exaone
198
+ ),
199
+
200
+ # Attention output norm
201
+ MODEL_TENSOR.ATTN_OUT_NORM: (
202
+ "encoder.layer.{bid}.attention.output.LayerNorm", # bert
203
+ "encoder.layers.{bid}.norm1", # nomic-bert
204
+ "transformer.decoder_layer.{bid}.rms_norm_1", # Grok
205
+ "transformer.blocks.{bid}.norm_attn_norm.norm_2", # dbrx
206
+ ),
207
+
208
+ MODEL_TENSOR.ATTN_POST_NORM: (
209
+ "model.layers.{bid}.post_attention_layernorm", # gemma2
210
+ ),
211
+
212
+ # Rotary embeddings
213
+ MODEL_TENSOR.ATTN_ROT_EMBD: (
214
+ "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf
215
+ "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth
216
+ "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq", # plamo
217
+ "transformer.h.{bid}.attn.rotary_emb.inv_freq", # codeshell
218
+ ),
219
+
220
+ # Feed-forward norm
221
+ MODEL_TENSOR.FFN_NORM: (
222
+ "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox
223
+ "transformer.h.{bid}.ln_2", # gpt2 refact qwen jais exaone
224
+ "h.{bid}.post_attention_layernorm", # bloom
225
+ "transformer.blocks.{bid}.norm_2", # mpt
226
+ "model.layers.{bid}.post_attention_layernorm", # llama-hf nemotron
227
+ "layers.{bid}.ffn_norm", # llama-pth
228
+ "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon
229
+ "model.layers.{bid}.ln2", # yi
230
+ "h.{bid}.ln_2", # gpt2
231
+ "model.layers.{bid}.ffn_norm", # internlm2
232
+ "transformer.decoder_layer.{bid}.rms_norm_2", # Grok
233
+ "encoder.layers.{bid}.post_attention_layernorm", # chatglm
234
+ "transformer.layers.{bid}.ffn_norm", # openelm
235
+ ),
236
+
237
+ # Post feed-forward norm
238
+ MODEL_TENSOR.FFN_PRE_NORM: (
239
+ "model.layers.{bid}.pre_feedforward_layernorm", # gemma2
240
+ ),
241
+
242
+ # Post feed-forward norm
243
+ MODEL_TENSOR.FFN_POST_NORM: (
244
+ "model.layers.{bid}.post_feedforward_layernorm", # gemma2
245
+ ),
246
+
247
+ MODEL_TENSOR.FFN_GATE_INP: (
248
+ "layers.{bid}.feed_forward.gate", # mixtral
249
+ "model.layers.{bid}.block_sparse_moe.gate", # mixtral
250
+ "model.layers.{bid}.mlp.gate", # qwen2moe
251
+ "transformer.decoder_layer.{bid}.router", # Grok
252
+ "transformer.blocks.{bid}.ffn.router.layer", # dbrx
253
+ ),
254
+
255
+ MODEL_TENSOR.FFN_GATE_INP_SHEXP: (
256
+ "model.layers.{bid}.mlp.shared_expert_gate", # qwen2moe
257
+ ),
258
+
259
+ # Feed-forward up
260
+ MODEL_TENSOR.FFN_UP: (
261
+ "gpt_neox.layers.{bid}.mlp.dense_h_to_4h", # gptneox
262
+ "transformer.h.{bid}.mlp.c_fc", # gpt2 jais
263
+ "transformer.blocks.{bid}.ffn.up_proj", # mpt
264
+ "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon
265
+ "h.{bid}.mlp.dense_h_to_4h", # bloom
266
+ "model.layers.{bid}.mlp.up_proj", # llama-hf refact nemotron
267
+ "layers.{bid}.feed_forward.w3", # llama-pth
268
+ "encoder.layer.{bid}.intermediate.dense", # bert
269
+ "transformer.h.{bid}.mlp.fc_in", # gpt-j
270
+ "transformer.h.{bid}.mlp.linear_3", # refact
271
+ "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon
272
+ "model.layers.{bid}.mlp.dense_h_to_4h", # persimmon
273
+ "transformer.h.{bid}.mlp.w1", # qwen
274
+ "h.{bid}.mlp.c_fc", # gpt2
275
+ "transformer.h.{bid}.mlp.fc1", # phi2
276
+ "model.layers.{bid}.mlp.fc1", # phi2
277
+ "model.layers.{bid}.mlp.gate_up_proj", # phi3
278
+ "model.layers.layers.{bid}.mlp.up_proj", # plamo
279
+ "model.layers.{bid}.feed_forward.w3", # internlm2
280
+ "encoder.layers.{bid}.mlp.fc11", # nomic-bert
281
+ "model.layers.{bid}.mlp.c_fc", # starcoder2
282
+ "encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2
283
+ "model.layers.{bid}.residual_mlp.w3", # arctic
284
+ "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm
285
+ "transformer.h.{bid}.mlp.c_fc_1", # exaone
286
+ ),
287
+
288
+ MODEL_TENSOR.FFN_UP_EXP: (
289
+ "layers.{bid}.feed_forward.experts.w3", # mixtral (merged)
290
+ "transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged)
291
+ "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx
292
+ "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe (merged)
293
+ ),
294
+
295
+ MODEL_TENSOR.FFN_UP_SHEXP: (
296
+ "model.layers.{bid}.mlp.shared_expert.up_proj", # qwen2moe
297
+ "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek2
298
+ ),
299
+
300
+ # AWQ-activation gate
301
+ MODEL_TENSOR.FFN_ACT: (
302
+ "transformer.blocks.{bid}.ffn.act", # mpt
303
+ ),
304
+
305
+ # Feed-forward gate
306
+ MODEL_TENSOR.FFN_GATE: (
307
+ "model.layers.{bid}.mlp.gate_proj", # llama-hf refact
308
+ "layers.{bid}.feed_forward.w1", # llama-pth
309
+ "transformer.h.{bid}.mlp.w2", # qwen
310
+ "transformer.h.{bid}.mlp.c_fc2", # jais
311
+ "model.layers.layers.{bid}.mlp.gate_proj", # plamo
312
+ "model.layers.{bid}.feed_forward.w1", # internlm2
313
+ "encoder.layers.{bid}.mlp.fc12", # nomic-bert
314
+ "encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2
315
+ "transformer.h.{bid}.mlp.linear_1", # refact
316
+ "model.layers.{bid}.residual_mlp.w1", # arctic
317
+ "transformer.h.{bid}.mlp.c_fc_0", # exaone
318
+ ),
319
+
320
+ MODEL_TENSOR.FFN_GATE_EXP: (
321
+ "layers.{bid}.feed_forward.experts.w1", # mixtral (merged)
322
+ "transformer.decoder_layer.{bid}.moe.linear", # Grok (merged)
323
+ "transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx
324
+ "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe (merged)
325
+ ),
326
+
327
+ MODEL_TENSOR.FFN_GATE_SHEXP: (
328
+ "model.layers.{bid}.mlp.shared_expert.gate_proj", # qwen2moe
329
+ "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek2
330
+ ),
331
+
332
+ # Feed-forward down
333
+ MODEL_TENSOR.FFN_DOWN: (
334
+ "gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox
335
+ "transformer.h.{bid}.mlp.c_proj", # gpt2 refact qwen jais
336
+ "transformer.blocks.{bid}.ffn.down_proj", # mpt
337
+ "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon
338
+ "h.{bid}.mlp.dense_4h_to_h", # bloom
339
+ "model.layers.{bid}.mlp.down_proj", # llama-hf nemotron
340
+ "layers.{bid}.feed_forward.w2", # llama-pth
341
+ "encoder.layer.{bid}.output.dense", # bert
342
+ "transformer.h.{bid}.mlp.fc_out", # gpt-j
343
+ "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon
344
+ "model.layers.{bid}.mlp.dense_4h_to_h", # persimmon
345
+ "h.{bid}.mlp.c_proj", # gpt2
346
+ "transformer.h.{bid}.mlp.fc2", # phi2
347
+ "model.layers.{bid}.mlp.fc2", # phi2
348
+ "model.layers.layers.{bid}.mlp.down_proj", # plamo
349
+ "model.layers.{bid}.feed_forward.w2", # internlm2
350
+ "encoder.layers.{bid}.mlp.fc2", # nomic-bert
351
+ "model.layers.{bid}.mlp.c_proj", # starcoder2
352
+ "encoder.layer.{bid}.mlp.wo", # jina-bert-v2
353
+ "transformer.layers.{bid}.ffn.proj_2", # openelm
354
+ "model.layers.{bid}.residual_mlp.w2", # arctic
355
+ "encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2
356
+ "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm
357
+ "model.layers.h.{bid}.mlp.c_proj", # exaone
358
+ ),
359
+
360
+ MODEL_TENSOR.FFN_DOWN_EXP: (
361
+ "layers.{bid}.feed_forward.experts.w2", # mixtral (merged)
362
+ "transformer.decoder_layer.{bid}.moe.linear_1", # Grok (merged)
363
+ "transformer.blocks.{bid}.ffn.experts.mlp.w2", # dbrx
364
+ "model.layers.{bid}.mlp.experts.down_proj", # qwen2moe (merged)
365
+ ),
366
+
367
+ MODEL_TENSOR.FFN_DOWN_SHEXP: (
368
+ "model.layers.{bid}.mlp.shared_expert.down_proj", # qwen2moe
369
+ "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek2
370
+ ),
371
+
372
+ MODEL_TENSOR.ATTN_Q_NORM: (
373
+ "language_model.encoder.layers.{bid}.self_attention.q_layernorm",
374
+ "model.layers.{bid}.self_attn.q_layernorm", # persimmon
375
+ "model.layers.{bid}.self_attn.q_norm", # cohere
376
+ "transformer.blocks.{bid}.attn.q_ln", # sea-lion
377
+ "encoder.layer.{bid}.attention.self.layer_norm_q", # jina-bert-v2
378
+ "transformer.layers.{bid}.attn.q_norm", # openelm
379
+ ),
380
+
381
+ MODEL_TENSOR.ATTN_K_NORM: (
382
+ "language_model.encoder.layers.{bid}.self_attention.k_layernorm",
383
+ "model.layers.{bid}.self_attn.k_layernorm", # persimmon
384
+ "model.layers.{bid}.self_attn.k_norm", # cohere
385
+ "transformer.blocks.{bid}.attn.k_ln", # sea-lion
386
+ "encoder.layer.{bid}.attention.self.layer_norm_k", # jina-bert-v2
387
+ "transformer.layers.{bid}.attn.k_norm", # openelm
388
+ ),
389
+
390
+ MODEL_TENSOR.ROPE_FREQS: (
391
+ "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq", # persimmon
392
+ ),
393
+
394
+ MODEL_TENSOR.LAYER_OUT_NORM: (
395
+ "encoder.layer.{bid}.output.LayerNorm", # bert
396
+ "encoder.layers.{bid}.norm2", # nomic-bert
397
+ "transformer.decoder_layer.{bid}.rms_norm_3", # Grok
398
+ "encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2
399
+ "encoder.layer.{bid}.layer_norm_2" # jina-v2-code
400
+ ),
401
+
402
+ MODEL_TENSOR.SSM_IN: (
403
+ "model.layers.{bid}.in_proj",
404
+ "backbone.layers.{bid}.mixer.in_proj",
405
+ ),
406
+
407
+ MODEL_TENSOR.SSM_CONV1D: (
408
+ "model.layers.{bid}.conv1d",
409
+ "backbone.layers.{bid}.mixer.conv1d",
410
+ ),
411
+
412
+ MODEL_TENSOR.SSM_X: (
413
+ "model.layers.{bid}.x_proj",
414
+ "backbone.layers.{bid}.mixer.x_proj",
415
+ ),
416
+
417
+ MODEL_TENSOR.SSM_DT: (
418
+ "model.layers.{bid}.dt_proj",
419
+ "backbone.layers.{bid}.mixer.dt_proj",
420
+ ),
421
+
422
+ MODEL_TENSOR.SSM_A: (
423
+ "model.layers.{bid}.A_log",
424
+ "backbone.layers.{bid}.mixer.A_log",
425
+ ),
426
+
427
+ MODEL_TENSOR.SSM_D: (
428
+ "model.layers.{bid}.D",
429
+ "backbone.layers.{bid}.mixer.D",
430
+ ),
431
+
432
+ MODEL_TENSOR.SSM_OUT: (
433
+ "model.layers.{bid}.out_proj",
434
+ "backbone.layers.{bid}.mixer.out_proj",
435
+ ),
436
+
437
+ MODEL_TENSOR.ATTN_Q_A: (
438
+ "model.layers.{bid}.self_attn.q_a_proj", # deepseek2
439
+ ),
440
+
441
+ MODEL_TENSOR.ATTN_Q_B: (
442
+ "model.layers.{bid}.self_attn.q_b_proj", # deepseek2
443
+ ),
444
+
445
+ MODEL_TENSOR.ATTN_KV_A_MQA: (
446
+ "model.layers.{bid}.self_attn.kv_a_proj_with_mqa", # deepseek2
447
+ ),
448
+
449
+ MODEL_TENSOR.ATTN_KV_B: (
450
+ "model.layers.{bid}.self_attn.kv_b_proj", # deepseek2
451
+ ),
452
+
453
+ MODEL_TENSOR.ATTN_Q_A_NORM: (
454
+ "model.layers.{bid}.self_attn.q_a_layernorm", # deepseek2
455
+ ),
456
+
457
+ MODEL_TENSOR.ATTN_KV_A_NORM: (
458
+ "model.layers.{bid}.self_attn.kv_a_layernorm", # deepseek2
459
+ ),
460
+
461
+ MODEL_TENSOR.ATTN_SUB_NORM: (
462
+ "model.layers.{bid}.self_attn.inner_attn_ln", # bitnet
463
+ ),
464
+
465
+ MODEL_TENSOR.FFN_SUB_NORM: (
466
+ "model.layers.{bid}.mlp.ffn_layernorm", # bitnet
467
+ ),
468
+
469
+ MODEL_TENSOR.DEC_ATTN_NORM: (
470
+ "decoder.block.{bid}.layer.0.layer_norm", # t5
471
+ ),
472
+
473
+ MODEL_TENSOR.DEC_ATTN_Q: (
474
+ "decoder.block.{bid}.layer.0.SelfAttention.q", # t5
475
+ ),
476
+
477
+ MODEL_TENSOR.DEC_ATTN_K: (
478
+ "decoder.block.{bid}.layer.0.SelfAttention.k", # t5
479
+ ),
480
+
481
+ MODEL_TENSOR.DEC_ATTN_V: (
482
+ "decoder.block.{bid}.layer.0.SelfAttention.v", # t5
483
+ ),
484
+
485
+ MODEL_TENSOR.DEC_ATTN_OUT: (
486
+ "decoder.block.{bid}.layer.0.SelfAttention.o", # t5
487
+ ),
488
+
489
+ MODEL_TENSOR.DEC_ATTN_REL_B: (
490
+ "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
491
+ ),
492
+
493
+ MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
494
+ "decoder.block.{bid}.layer.1.layer_norm", # t5
495
+ ),
496
+
497
+ MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
498
+ "decoder.block.{bid}.layer.1.EncDecAttention.q", # t5
499
+ ),
500
+
501
+ MODEL_TENSOR.DEC_CROSS_ATTN_K: (
502
+ "decoder.block.{bid}.layer.1.EncDecAttention.k", # t5
503
+ ),
504
+
505
+ MODEL_TENSOR.DEC_CROSS_ATTN_V: (
506
+ "decoder.block.{bid}.layer.1.EncDecAttention.v", # t5
507
+ ),
508
+
509
+ MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
510
+ "decoder.block.{bid}.layer.1.EncDecAttention.o", # t5
511
+ ),
512
+
513
+ MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
514
+ "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias", # t5
515
+ ),
516
+
517
+ MODEL_TENSOR.DEC_FFN_NORM: (
518
+ "decoder.block.{bid}.layer.2.layer_norm", # t5
519
+ ),
520
+
521
+ MODEL_TENSOR.DEC_FFN_GATE: (
522
+ "decoder.block.{bid}.layer.2.DenseReluDense.wi_0", # flan-t5
523
+ ),
524
+
525
+ MODEL_TENSOR.DEC_FFN_UP: (
526
+ "decoder.block.{bid}.layer.2.DenseReluDense.wi", # t5
527
+ "decoder.block.{bid}.layer.2.DenseReluDense.wi_1", # flan-t5
528
+ ),
529
+
530
+ MODEL_TENSOR.DEC_FFN_DOWN: (
531
+ "decoder.block.{bid}.layer.2.DenseReluDense.wo", # t5
532
+ ),
533
+
534
+ MODEL_TENSOR.DEC_OUTPUT_NORM: (
535
+ "decoder.final_layer_norm", # t5
536
+ ),
537
+
538
+ MODEL_TENSOR.ENC_ATTN_NORM: (
539
+ "encoder.block.{bid}.layer.0.layer_norm", # t5
540
+ ),
541
+
542
+ MODEL_TENSOR.ENC_ATTN_Q: (
543
+ "encoder.block.{bid}.layer.0.SelfAttention.q", # t5
544
+ ),
545
+
546
+ MODEL_TENSOR.ENC_ATTN_K: (
547
+ "encoder.block.{bid}.layer.0.SelfAttention.k", # t5
548
+ ),
549
+
550
+ MODEL_TENSOR.ENC_ATTN_V: (
551
+ "encoder.block.{bid}.layer.0.SelfAttention.v", # t5
552
+ ),
553
+
554
+ MODEL_TENSOR.ENC_ATTN_OUT: (
555
+ "encoder.block.{bid}.layer.0.SelfAttention.o", # t5
556
+ ),
557
+
558
+ MODEL_TENSOR.ENC_ATTN_REL_B: (
559
+ "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
560
+ ),
561
+
562
+ MODEL_TENSOR.ENC_FFN_NORM: (
563
+ "encoder.block.{bid}.layer.1.layer_norm", # t5
564
+ ),
565
+
566
+ MODEL_TENSOR.ENC_FFN_GATE: (
567
+ "encoder.block.{bid}.layer.1.DenseReluDense.wi_0", # flan-t5
568
+ ),
569
+
570
+ MODEL_TENSOR.ENC_FFN_UP: (
571
+ "encoder.block.{bid}.layer.1.DenseReluDense.wi", # t5
572
+ "encoder.block.{bid}.layer.1.DenseReluDense.wi_1", # flan-t5
573
+ ),
574
+
575
+ MODEL_TENSOR.ENC_FFN_DOWN: (
576
+ "encoder.block.{bid}.layer.1.DenseReluDense.wo", # t5
577
+ ),
578
+
579
+ MODEL_TENSOR.ENC_OUTPUT_NORM: (
580
+ "encoder.final_layer_norm", # t5
581
+ ),
582
+ }
583
+
584
+ # architecture-specific block mappings
585
+ arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
586
+ MODEL_ARCH.ARCTIC: {
587
+ MODEL_TENSOR.FFN_NORM: (
588
+ "model.layers.{bid}.residual_layernorm",
589
+ ),
590
+ MODEL_TENSOR.FFN_NORM_EXP: (
591
+ "model.layers.{bid}.post_attention_layernorm",
592
+ ),
593
+ },
594
+ }
595
+
596
+ mapping: dict[str, tuple[MODEL_TENSOR, str]]
597
+
598
+ def __init__(self, arch: MODEL_ARCH, n_blocks: int):
599
+ self.mapping = {}
600
+ for tensor, keys in self.mappings_cfg.items():
601
+ if tensor not in MODEL_TENSORS[arch]:
602
+ continue
603
+ tensor_name = TENSOR_NAMES[tensor]
604
+ self.mapping[tensor_name] = (tensor, tensor_name)
605
+ for key in keys:
606
+ self.mapping[key] = (tensor, tensor_name)
607
+ if arch in self.arch_block_mappings_cfg:
608
+ self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch])
609
+ for bid in range(n_blocks):
610
+ for tensor, keys in self.block_mappings_cfg.items():
611
+ if tensor not in MODEL_TENSORS[arch]:
612
+ continue
613
+
614
+ tensor_name = TENSOR_NAMES[tensor].format(bid = bid)
615
+ self.mapping[tensor_name] = (tensor, tensor_name)
616
+ for key in keys:
617
+ key = key.format(bid = bid)
618
+ self.mapping[key] = (tensor, tensor_name)
619
+
620
+ def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
621
+ result = self.mapping.get(key)
622
+ if result is not None:
623
+ return result
624
+ for suffix in try_suffixes:
625
+ if key.endswith(suffix):
626
+ result = self.mapping.get(key[:-len(suffix)])
627
+ if result is not None:
628
+ return result[0], result[1] + suffix
629
+ return None
630
+
631
+ def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
632
+ result = self.get_type_and_name(key, try_suffixes = try_suffixes)
633
+ if result is None:
634
+ return None
635
+ return result[1]
636
+
637
+ def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None:
638
+ result = self.get_type_and_name(key, try_suffixes = try_suffixes)
639
+ if result is None:
640
+ return None
641
+ return result[0]
642
+
643
+ def __getitem__(self, key: str) -> str:
644
+ try:
645
+ return self.mapping[key][1]
646
+ except KeyError:
647
+ raise KeyError(key)
648
+
649
+ def __contains__(self, key: str) -> bool:
650
+ return key in self.mapping
651
+
652
+ def __repr__(self) -> str:
653
+ return repr(self.mapping)
654
+
655
+
656
def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
    """Convenience factory: build a :class:`TensorNameMap` for *arch* with *n_blocks* blocks."""
    return TensorNameMap(arch, n_blocks)
vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/LICENSE ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2015 Vitaly Puzrin, Alex Kocharin.
2
+ Copyright (c) 2021 Taneli Hukkinen
3
+
4
+ Permission is hereby granted, free of charge, to any person
5
+ obtaining a copy of this software and associated documentation
6
+ files (the "Software"), to deal in the Software without
7
+ restriction, including without limitation the rights to use,
8
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the
10
+ Software is furnished to do so, subject to the following
11
+ conditions:
12
+
13
+ The above copyright notice and this permission notice shall be
14
+ included in all copies or substantial portions of the Software.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
18
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
20
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23
+ OTHER DEALINGS IN THE SOFTWARE.
24
+
25
+ --------------------------------------------------------------------------------
26
+
27
+ .parse() is based on Joyent's node.js `url` code:
28
+
29
+ Copyright Joyent, Inc. and other Node contributors. All rights reserved.
30
+ Permission is hereby granted, free of charge, to any person obtaining a copy
31
+ of this software and associated documentation files (the "Software"), to
32
+ deal in the Software without restriction, including without limitation the
33
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
34
+ sell copies of the Software, and to permit persons to whom the Software is
35
+ furnished to do so, subject to the following conditions:
36
+
37
+ The above copyright notice and this permission notice shall be included in
38
+ all copies or substantial portions of the Software.
39
+
40
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
43
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
44
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
45
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
46
+ IN THE SOFTWARE.
vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/METADATA ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: mdurl
3
+ Version: 0.1.2
4
+ Summary: Markdown URL utilities
5
+ Keywords: markdown,commonmark
6
+ Author-email: Taneli Hukkinen <hukkin@users.noreply.github.com>
7
+ Requires-Python: >=3.7
8
+ Description-Content-Type: text/markdown
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Operating System :: MacOS
11
+ Classifier: Operating System :: Microsoft :: Windows
12
+ Classifier: Operating System :: POSIX :: Linux
13
+ Classifier: Programming Language :: Python :: 3 :: Only
14
+ Classifier: Programming Language :: Python :: 3.7
15
+ Classifier: Programming Language :: Python :: 3.8
16
+ Classifier: Programming Language :: Python :: 3.9
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: Implementation :: CPython
19
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
20
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
21
+ Classifier: Typing :: Typed
22
+ Project-URL: Homepage, https://github.com/executablebooks/mdurl
23
+
24
+ # mdurl
25
+
26
+ [![Build Status](https://github.com/executablebooks/mdurl/workflows/Tests/badge.svg?branch=master)](https://github.com/executablebooks/mdurl/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush)
27
+ [![codecov.io](https://codecov.io/gh/executablebooks/mdurl/branch/master/graph/badge.svg)](https://codecov.io/gh/executablebooks/mdurl)
28
+ [![PyPI version](https://img.shields.io/pypi/v/mdurl)](https://pypi.org/project/mdurl)
29
+
30
+ This is a Python port of the JavaScript [mdurl](https://www.npmjs.com/package/mdurl) package.
31
+ See the [upstream README.md file](https://github.com/markdown-it/mdurl/blob/master/README.md) for API documentation.
32
+
vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ mdurl-0.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ mdurl-0.1.2.dist-info/LICENSE,sha256=fGBd9uKGZ6lgMRjpgnT2SknOPu0NJvzM6VNKNF4O-VU,2338
3
+ mdurl-0.1.2.dist-info/METADATA,sha256=tTsp1I9Jk2cFP9o8gefOJ9JVg4Drv4PmYCOwLrfd0l0,1638
4
+ mdurl-0.1.2.dist-info/RECORD,,
5
+ mdurl-0.1.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ mdurl-0.1.2.dist-info/WHEEL,sha256=4TfKIB_xu-04bc2iKz6_zFt-gEFEEDU_31HGhqzOCE8,81
7
+ mdurl/__init__.py,sha256=1vpE89NyXniIRZNC_4f6BPm3Ub4bPntjfyyhLRR7opU,547
8
+ mdurl/__pycache__/__init__.cpython-310.pyc,,
9
+ mdurl/__pycache__/_decode.cpython-310.pyc,,
10
+ mdurl/__pycache__/_encode.cpython-310.pyc,,
11
+ mdurl/__pycache__/_format.cpython-310.pyc,,
12
+ mdurl/__pycache__/_parse.cpython-310.pyc,,
13
+ mdurl/__pycache__/_url.cpython-310.pyc,,
14
+ mdurl/_decode.py,sha256=3Q_gDQqU__TvDbu7x-b9LjbVl4QWy5g_qFwljcuvN_Y,3004
15
+ mdurl/_encode.py,sha256=goJLUFt1h4rVZNqqm9t15Nw2W-bFXYQEy3aR01ImWvs,2602
16
+ mdurl/_format.py,sha256=xZct0mdePXA0H3kAqxjGtlB5O86G35DAYMGkA44CmB4,626
17
+ mdurl/_parse.py,sha256=ezZSkM2_4NQ2Zx047sEdcJG7NYQRFHiZK7Y8INHFzwY,11374
18
+ mdurl/_url.py,sha256=5kQnRQN2A_G4svLnRzZcG0bfoD9AbBrYDXousDHZ3z0,284
19
+ mdurl/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26
vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/REQUESTED ADDED
File without changes
vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: flit 3.7.1
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
vllm/lib/python3.10/site-packages/outlines/__init__.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Outlines is a Generative Model Programming Framework."""
2
+ import outlines.generate
3
+ import outlines.grammars
4
+ import outlines.models
5
+ import outlines.processors
6
+ import outlines.types
7
+ from outlines.base import vectorize
8
+ from outlines.caching import clear_cache, disable_cache, get_cache
9
+ from outlines.function import Function
10
+ from outlines.prompts import prompt
11
+
12
+ __all__ = [
13
+ "clear_cache",
14
+ "disable_cache",
15
+ "get_cache",
16
+ "Function",
17
+ "prompt",
18
+ "vectorize",
19
+ "grammars",
20
+ ]
vllm/lib/python3.10/site-packages/outlines/__pycache__/_version.cpython-310.pyc ADDED
Binary file (482 Bytes). View file
 
vllm/lib/python3.10/site-packages/outlines/__pycache__/base.cpython-310.pyc ADDED
Binary file (9.51 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/__pycache__/function.cpython-310.pyc ADDED
Binary file (3.94 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/caching.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import contextlib
3
+ import functools
4
+ import os
5
+ from typing import Callable, Optional
6
+
7
+ import cloudpickle
8
+ from diskcache import Cache, Disk
9
+ from diskcache.core import ENOVAL, UNKNOWN, args_to_key, full_name
10
+
11
+ _caching_enabled = True
12
+
13
+
14
+ class CloudpickleDisk(Disk):
15
+ def __init__(self, directory, compress_level=1, **kwargs):
16
+ self.compress_level = compress_level
17
+ super().__init__(directory, **kwargs)
18
+
19
+ def put(self, key):
20
+ data = cloudpickle.dumps(key)
21
+ return super().put(data)
22
+
23
+ def get(self, key, raw):
24
+ data = super().get(key, raw)
25
+ return cloudpickle.loads(data)
26
+
27
+ def store(self, value, read, key=UNKNOWN):
28
+ if not read:
29
+ value = cloudpickle.dumps(value)
30
+ return super().store(value, read, key=key)
31
+
32
+ def fetch(self, mode, filename, value, read):
33
+ data = super().fetch(mode, filename, value, read)
34
+ if not read:
35
+ data = cloudpickle.loads(data)
36
+ return data
37
+
38
+
39
+ @functools.lru_cache(1)
40
+ def get_cache():
41
+ """Get the context object that contains previously-computed return values.
42
+
43
+ The cache is used to avoid unnecessary computations and API calls, which can
44
+ be long and expensive for large models.
45
+
46
+ The cache directory defaults to `HOMEDIR/.cache/outlines`, but this choice
47
+ can be overridden by the user by setting the value of the `OUTLINES_CACHE_DIR`
48
+ environment variable.
49
+
50
+ """
51
+ from outlines._version import __version__ as outlines_version # type: ignore
52
+
53
+ home_dir = os.path.expanduser("~")
54
+ cache_dir = os.environ.get("OUTLINES_CACHE_DIR", f"{home_dir}/.cache/outlines")
55
+ memory = Cache(
56
+ cache_dir,
57
+ eviction_policy="none",
58
+ cull_limit=0,
59
+ disk=CloudpickleDisk,
60
+ )
61
+
62
+ # ensure if version upgrade occurs, old cache is pruned
63
+ if outlines_version != memory.get("__version__"):
64
+ memory.clear()
65
+ memory["__version__"] = outlines_version
66
+
67
+ return memory
68
+
69
+
70
+ def cache(expire: Optional[float] = None, typed=False, ignore=()):
71
+ """Caching decorator for memoizing function calls.
72
+
73
+ The cache key is created based on the values returned by the key_function callable
74
+ if provided or based on the arguments of the decorated function directly otherwise
75
+
76
+ This is based on `diskcache`'s `memoize`.
77
+
78
+ Parameters
79
+ ----------
80
+ expire
81
+ Seconds until arguments expire.
82
+ typed
83
+ Cache different types separately.
84
+ ignore
85
+ Positional or keyword arguments to ignore.
86
+
87
+ Returns
88
+ -------
89
+ A decorator function that can be applied to other functions.
90
+ """
91
+
92
+ def decorator(cached_function: Callable):
93
+ memory = get_cache()
94
+
95
+ base = (full_name(cached_function),)
96
+
97
+ if asyncio.iscoroutinefunction(cached_function):
98
+
99
+ async def wrapper(*args, **kwargs):
100
+ if not _caching_enabled:
101
+ return await cached_function(*args, **kwargs)
102
+
103
+ cache_key = wrapper.__cache_key__(*args, **kwargs)
104
+ result = wrapper.__memory__.get(cache_key, default=ENOVAL, retry=True)
105
+
106
+ if result is ENOVAL:
107
+ result = await cached_function(*args, **kwargs)
108
+ wrapper.__memory__.set(cache_key, result, expire, retry=True)
109
+
110
+ return result
111
+
112
+ else:
113
+
114
+ def wrapper(*args, **kwargs):
115
+ if not _caching_enabled:
116
+ return cached_function(*args, **kwargs)
117
+
118
+ cache_key = wrapper.__cache_key__(*args, **kwargs)
119
+ result = wrapper.__memory__.get(cache_key, default=ENOVAL, retry=True)
120
+
121
+ if result is ENOVAL:
122
+ result = cached_function(*args, **kwargs)
123
+ wrapper.__memory__.set(cache_key, result, expire, retry=True)
124
+
125
+ return result
126
+
127
+ def __cache_key__(*args, **kwargs):
128
+ """Make key for cache given function arguments."""
129
+ return args_to_key(base, args, kwargs, typed, ignore)
130
+
131
+ wrapper.__cache_key__ = __cache_key__ # type: ignore
132
+ wrapper.__memory__ = memory # type: ignore
133
+ wrapper.__wrapped__ = cached_function # type: ignore
134
+
135
+ return wrapper
136
+
137
+ return decorator
138
+
139
+
140
+ def disable_cache():
141
+ """Disable the cache for this session.
142
+
143
+ Generative models output different results each time they are called when
144
+ sampling. This can be a desirable property for some workflows, in which case
145
+ one can call `outlines.call.disable` to disable the cache for the session.
146
+
147
+ This function does not delete the cache, call `outlines.cache.clear`
148
+ instead. It also does not overwrite the cache with the values returned
149
+ during the session.
150
+
151
+ Example
152
+ -------
153
+
154
+ `outlines.cache.disable` should be called right after importing outlines:
155
+
156
+ >>> import outlines.caching as cache
157
+ >>> cache.disable_cache()
158
+
159
+ """
160
+ global _caching_enabled
161
+ _caching_enabled = False
162
+
163
+
164
+ def clear_cache():
165
+ """Erase the cache completely."""
166
+ memory = get_cache()
167
+ memory.clear()
168
+
169
+
170
+ @contextlib.contextmanager
171
+ def cache_disabled():
172
+ # outlines.caching._caching_enabled
173
+ global _caching_enabled
174
+ original_state = _caching_enabled
175
+ _caching_enabled = False
176
+ try:
177
+ yield
178
+ finally:
179
+ _caching_enabled = original_state
vllm/lib/python3.10/site-packages/outlines/fsm/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (162 Bytes). View file
 
vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/guide.cpython-310.pyc ADDED
Binary file (8.65 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/json_schema.cpython-310.pyc ADDED
Binary file (2.7 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/parsing.cpython-310.pyc ADDED
Binary file (29.5 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/types.cpython-310.pyc ADDED
Binary file (3.13 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/fsm/guide.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import copy
3
+ import warnings
4
+ from typing import TYPE_CHECKING, Any, Generator, Union
5
+
6
+ import torch
7
+ from lark.indenter import DedentError
8
+ from lark.lexer import UnexpectedCharacters, UnexpectedToken
9
+ from outlines_core.fsm.guide import Generate
10
+ from outlines_core.fsm.guide import Guide as CoreGuide
11
+ from outlines_core.fsm.guide import RegexGuide as CoreRegexGuide
12
+ from outlines_core.fsm.guide import Write
13
+ from outlines_core.fsm.guide import (
14
+ create_states_mapping as uncached_create_states_mapping,
15
+ )
16
+
17
+ from outlines import grammars
18
+ from outlines.fsm.parsing import PartialLark, PartialParserState
19
+
20
+ if TYPE_CHECKING:
21
+ from outlines.models.tokenizer import Tokenizer
22
+
23
+
24
+ Instruction = Union[Write, Generate]
25
+
26
+
27
+ class Guide(CoreGuide):
28
+ """Base definition of a generation guide.
29
+
30
+ A generation guide defines the behavior of a finite-state machine that guides
31
+ a text generation procedure. Unlike the DFAs built from regular expressions
32
+ guides can also emit a `Write` instructions which tells the model that it can
33
+ append a sequence of tokens (or token word) instead of generating it.
34
+
35
+ """
36
+
37
+ initial_state: Any
38
+
39
+
40
+ class StopAtEOSGuide(Guide):
41
+ """Guide to generate tokens until the EOS token has been generated."""
42
+
43
+ final_state = 1
44
+ start_state = 0 # TODO: remove start_state, use only initial_state
45
+ initial_state = 0
46
+
47
+ def __init__(self, tokenizer: "Tokenizer"):
48
+ """Initialize the generation guide.
49
+
50
+ model
51
+ The logit generator used to generate the next token.
52
+
53
+ """
54
+ self.eos_token_id = tokenizer.eos_token_id
55
+ self.vocabulary = tokenizer.vocabulary.values()
56
+
57
+ def get_next_instruction(self, state: int) -> Instruction:
58
+ if self.is_final_state(state):
59
+ return Write([self.eos_token_id])
60
+ return Generate(None)
61
+
62
+ def get_next_state(self, state: int, token_id: int) -> int:
63
+ if token_id == self.eos_token_id or state == self.final_state:
64
+ return self.final_state
65
+
66
+ return self.initial_state
67
+
68
+ def is_final_state(self, state: int):
69
+ return state == self.final_state
70
+
71
+ def copy(self):
72
+ return self
73
+
74
+
75
+ def cached_create_states_mapping(regex_string, tokenizer, *args, **kwargs):
76
+ return uncached_create_states_mapping(regex_string, tokenizer, *args, **kwargs)
77
+
78
+
79
+ class RegexGuide(CoreRegexGuide):
80
+ """
81
+ Guide to generate text in the language of a regular expression.
82
+ CoreRegexGuide with outlines cache
83
+ """
84
+
85
+ @classmethod
86
+ def from_regex(
87
+ cls,
88
+ regex_string: str,
89
+ tokenizer,
90
+ **kwargs,
91
+ ):
92
+ return super().from_regex(
93
+ regex_string,
94
+ tokenizer,
95
+ _create_states_mapping=cached_create_states_mapping,
96
+ **kwargs,
97
+ )
98
+
99
+
100
+ CFGState = collections.namedtuple("CFGState", ["parser_state", "prev_token"])
101
+
102
+
103
+ class CFGGuide(Guide):
104
+ """Guide to generate text that is in the language of a context-free Lark grammar."""
105
+
106
+ def __init__(self, cfg_string: str, tokenizer):
107
+ """
108
+ Construct the PartialLark parser and set the empty initial_state (PartialParserState)
109
+ """
110
+ warnings.warn(
111
+ "Outlines' public *community-contributed* CFG structured generation is experimental. "
112
+ "Please review https://dottxt-ai.github.io/outlines/latest/reference/generation/cfg#disclaimer"
113
+ )
114
+
115
+ self.cfg_string = cfg_string
116
+ self.tokenizer = tokenizer
117
+ self.eos_token_id = self.tokenizer.eos_token_id
118
+ self.parser = PartialLark(
119
+ cfg_string,
120
+ parser="lalr",
121
+ import_paths=[grammars.GRAMMAR_PATH],
122
+ )
123
+ self.initial_state = CFGState(
124
+ parser_state=self.parser.parse(""), prev_token=None
125
+ )
126
+
127
+ def get_next_instruction(self, state: CFGState) -> Instruction:
128
+ """Return the next instruction for guided generation.
129
+
130
+ Current lazy approach:
131
+ - For each token in the vocabulary
132
+ - create a copy of the parsers state
133
+ - add the tokens to the parsers input text
134
+ - if valid, add token to returned tokens
135
+
136
+ Further refinements are necessary for performant text processing.
137
+
138
+ Parameters
139
+ ----------
140
+ state
141
+ The guides current PartialParserState, or None if complete
142
+
143
+ Returns
144
+ -------
145
+ A `Generate` instance that contains the model and the allowed token ids.
146
+
147
+ """
148
+
149
+ if state.parser_state is None:
150
+ return Write(torch.tensor([self.eos_token_id]))
151
+
152
+ valid_tokens = list(
153
+ self.iter_valid_token_ids(state, self.tokenizer.vocabulary.values())
154
+ )
155
+ if len(valid_tokens) == 1:
156
+ return Write(torch.tensor(valid_tokens))
157
+ return Generate(torch.tensor(valid_tokens))
158
+
159
+ def iter_valid_token_ids(
160
+ self, state: CFGState, candidate_token_ids: list
161
+ ) -> Generator[int, None, None]:
162
+ """
163
+ Iterate over the given token_ids and yield those that are valid for the current parser state.
164
+
165
+ Parameters
166
+ ----------
167
+ parser_state
168
+ The current state of the parser, or None if complete.
169
+ token_ids
170
+ The list of token ids to check for validity.
171
+
172
+ Yields
173
+ ------
174
+ int
175
+ Valid token ids.
176
+ """
177
+ if state.parser_state is None:
178
+ yield self.eos_token_id
179
+ return
180
+
181
+ for token_id in candidate_token_ids:
182
+ if token_id == self.eos_token_id:
183
+ if self.can_terminate_state(state):
184
+ yield token_id
185
+ else:
186
+ try:
187
+ self._get_parser_state_token_applied(state, int(token_id))
188
+ yield token_id
189
+ except (
190
+ ValueError,
191
+ EOFError,
192
+ UnexpectedToken,
193
+ UnexpectedCharacters,
194
+ DedentError,
195
+ ):
196
+ pass
197
+
198
+ def get_next_state(self, state: CFGState, token_id: int) -> CFGState:
199
+ """
200
+ Update the state of the guide.
201
+ Decode the token_id, and calculate the new parser_state with the token applied.
202
+
203
+ Parameters
204
+ ----------
205
+ state
206
+ The guides current PartialParserState, or None if complete
207
+ token_id
208
+ The id of the token that was just generated.
209
+
210
+ Returns
211
+ -------
212
+ The guides new PartialParserState
213
+
214
+ """
215
+ if state.parser_state is None or token_id == self.eos_token_id:
216
+ parser_state = None
217
+ else:
218
+ parser_state = self._get_parser_state_token_applied(state, int(token_id))
219
+ return CFGState(parser_state=parser_state, prev_token=token_id)
220
+
221
+ def _get_parser_state_token_applied(
222
+ self, state: CFGState, token_id: int
223
+ ) -> PartialParserState:
224
+ """
225
+ Don't mutate `parser_state`, copy to protect
226
+
227
+ Get the token string
228
+ - if first token in generation: tokenizer.decode (no leading whitespace)
229
+ - else: normalized (with possibly leading whitespace)
230
+
231
+ Don't allow empty ("") tokens, raise ValueError
232
+ """
233
+ parser_state = copy.copy(state.parser_state) # prevent side effects
234
+
235
+ # normalize
236
+ if state.prev_token is None:
237
+ new_token_str = self.tokenizer.decode([token_id])[0]
238
+ else:
239
+ prev_token_str = self.tokenizer.decode([[state.prev_token]])[0]
240
+ combined_token_str = self.tokenizer.decode([[state.prev_token, token_id]])[
241
+ 0
242
+ ]
243
+ new_token_str = combined_token_str[len(prev_token_str) :]
244
+
245
+ if new_token_str == "":
246
+ raise ValueError("empty next token")
247
+
248
+ # update parser with new token
249
+ parser_state.lexer.state.text += new_token_str
250
+ self.parser.parse_from_state(parser_state, is_end=False)
251
+
252
+ return parser_state
253
+
254
+ def is_final_state(self, state: CFGState) -> bool:
255
+ # TODO: remove this method, use can_terminate_state and must_terminate_state
256
+ # here and in RegexGuide per https://github.com/dottxt-ai/outlines/issues/885
257
+ return self.can_terminate_state(state)
258
+
259
+ def can_terminate_state(self, state: CFGState) -> bool:
260
+ """Generation is allowed to terminate"""
261
+ if state.parser_state is not None:
262
+ try:
263
+ copy.copy(state.parser_state).feed_eof()
264
+ except UnexpectedToken:
265
+ return False
266
+ return True
267
+
268
+ def must_terminate_state(self, state: CFGState) -> bool:
269
+ """Generation must terminate, no legal continuations"""
270
+ return state.parser_state is None or set(state.parser_state.accepts()).issubset(
271
+ {"$END"}
272
+ )
273
+
274
+ def copy(self) -> "CFGGuide":
275
+ """Create a copy of the Guide."""
276
+ return CFGGuide(self.cfg_string, self.tokenizer)
vllm/lib/python3.10/site-packages/outlines/fsm/json_schema.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import json
3
+ import warnings
4
+ from enum import Enum
5
+ from typing import Callable, Type, Union
6
+
7
+ from pydantic import BaseModel, create_model
8
+
9
+
10
+ def convert_json_schema_to_str(json_schema: Union[dict, str, Type[BaseModel]]) -> str:
11
+ """Convert a JSON schema to a string.
12
+
13
+ Parameters
14
+ ----------
15
+ json_schema
16
+ The JSON schema.
17
+
18
+ Returns
19
+ -------
20
+ str
21
+ The JSON schema converted to a string.
22
+
23
+ Raises
24
+ ------
25
+ ValueError
26
+ If the schema is not a dictionary, a string or a Pydantic class.
27
+ """
28
+ if isinstance(json_schema, dict):
29
+ schema_str = json.dumps(json_schema)
30
+ elif isinstance(json_schema, str):
31
+ schema_str = json_schema
32
+ elif issubclass(json_schema, BaseModel):
33
+ schema_str = json.dumps(json_schema.model_json_schema())
34
+ else:
35
+ raise ValueError(
36
+ f"Cannot parse schema {json_schema}. The schema must be either "
37
+ + "a Pydantic class, a dictionary or a string that contains the JSON "
38
+ + "schema specification"
39
+ )
40
+ return schema_str
41
+
42
+
43
+ def get_schema_from_signature(fn: Callable) -> dict:
44
+ """Turn a function signature into a JSON schema.
45
+
46
+ Every JSON object valid to the output JSON Schema can be passed
47
+ to `fn` using the ** unpacking syntax.
48
+
49
+ """
50
+ signature = inspect.signature(fn)
51
+ arguments = {}
52
+ for name, arg in signature.parameters.items():
53
+ if arg.annotation == inspect._empty:
54
+ raise ValueError("Each argument must have a type annotation")
55
+ else:
56
+ arguments[name] = (arg.annotation, ...)
57
+
58
+ try:
59
+ fn_name = fn.__name__
60
+ except Exception as e:
61
+ fn_name = "Arguments"
62
+ warnings.warn(
63
+ f"The function name could not be determined. Using default name 'Arguments' instead. For debugging, here is exact error:\n{e}",
64
+ category=UserWarning,
65
+ )
66
+ model = create_model(fn_name, **arguments)
67
+
68
+ return model.model_json_schema()
69
+
70
+
71
+ def get_schema_from_enum(myenum: type[Enum]) -> dict:
72
+ if len(myenum) == 0:
73
+ raise ValueError(
74
+ f"Your enum class {myenum.__name__} has 0 members. If you are working with an enum of functions, do not forget to register them as callable (using `partial` for instance)"
75
+ )
76
+ choices = [
77
+ get_schema_from_signature(elt.value.func)
78
+ if callable(elt.value)
79
+ else {"const": elt.value}
80
+ for elt in myenum
81
+ ]
82
+ schema = {"title": myenum.__name__, "oneOf": choices}
83
+ return schema
vllm/lib/python3.10/site-packages/outlines/fsm/parsing.py ADDED
@@ -0,0 +1,1127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from copy import copy, deepcopy
2
+ from dataclasses import dataclass
3
+ from functools import lru_cache
4
+ from typing import (
5
+ Any,
6
+ Dict,
7
+ FrozenSet,
8
+ Generator,
9
+ Iterator,
10
+ List,
11
+ Optional,
12
+ Sequence,
13
+ Set,
14
+ Tuple,
15
+ Union,
16
+ )
17
+
18
+ import interegular
19
+ from interegular.fsm import FSM, Alphabet, OblivionError
20
+ from interegular.patterns import Unsupported
21
+ from lark import Lark, Token
22
+ from lark.common import LexerConf, ParserConf
23
+ from lark.exceptions import LexError, UnexpectedInput
24
+ from lark.indenter import Indenter
25
+ from lark.lexer import (
26
+ BasicLexer,
27
+ ContextualLexer,
28
+ LexerState,
29
+ LexerThread,
30
+ Scanner,
31
+ UnexpectedCharacters,
32
+ UnexpectedToken,
33
+ _create_unless,
34
+ )
35
+ from lark.parser_frontends import (
36
+ ParsingFrontend,
37
+ PostLexConnector,
38
+ _validate_frontend_args,
39
+ )
40
+ from lark.parsers.lalr_analysis import (
41
+ Action,
42
+ IntParseTable,
43
+ LALR_Analyzer,
44
+ ParseTable,
45
+ Shift,
46
+ )
47
+ from lark.parsers.lalr_interactive_parser import InteractiveParser
48
+ from lark.parsers.lalr_parser import LALR_Parser, ParseConf, ParserState, _Parser
49
+ from outlines_core.fsm.regex import (
50
+ BetterFSM,
51
+ get_token_transition_keys,
52
+ make_deterministic_fsm,
53
+ )
54
+
55
+ PartialParseState = Tuple[str, int]
56
+ ParseStateType = Union[int, FrozenSet]
57
+
58
+
59
+ @dataclass
60
+ class PartialTerminalInfo:
61
+ priority: int
62
+ terminal_name: str
63
+ can_transition: bool
64
+ is_final: bool
65
+
66
+
67
+ @dataclass
68
+ class PartialTokensInfo:
69
+ fsm_state_seq: Tuple[int, ...]
70
+ is_not_finished: bool
71
+ terminals_and_info: Tuple[PartialTerminalInfo, ...]
72
+ final_terminals_and_info: Tuple[PartialTerminalInfo, ...]
73
+
74
+
75
+ class PartialParserConf(ParserConf):
76
+ __serialize_fields__ = (
77
+ "rules",
78
+ "start",
79
+ "parser_type",
80
+ "deterministic",
81
+ "use_value_stack",
82
+ )
83
+
84
+ def __init__(self, rules, callbacks, start, deterministic, use_value_stack):
85
+ super().__init__(rules, callbacks, start)
86
+ self.deterministic = deterministic
87
+ self.use_value_stack = use_value_stack
88
+
89
+
90
+ class PartialLark(Lark):
91
+ __serialize_fields__ = (
92
+ "parser",
93
+ "rules",
94
+ "options",
95
+ "deterministic",
96
+ "use_value_stack",
97
+ )
98
+
99
+ def __init__(self, grammar, **options):
100
+ # TODO: Could've extended `LarkOptions`, but all these extensions are
101
+ # already way too much (and brittle). This library really needs a
102
+ # complete refactoring.
103
+ self.deterministic = options.pop("deterministic", False)
104
+ self.use_value_stack = options.pop("use_value_stack", False)
105
+ options["regex"] = True
106
+ super().__init__(grammar, **options)
107
+ assert self.options.parser == "lalr"
108
+
109
+ def _build_lexer(self, dont_ignore: bool = False) -> "PartialBasicLexer":
110
+ lexer_conf = self.lexer_conf
111
+ if dont_ignore:
112
+ from copy import copy
113
+
114
+ lexer_conf = copy(lexer_conf)
115
+ lexer_conf.ignore = ()
116
+
117
+ return PartialBasicLexer(lexer_conf)
118
+
119
+ def _build_parser(self) -> "PartialParsingFrontend":
120
+ self._prepare_callbacks()
121
+ _validate_frontend_args(self.options.parser, self.options.lexer)
122
+ parser_conf = PartialParserConf(
123
+ self.rules,
124
+ self._callbacks,
125
+ self.options.start,
126
+ self.deterministic,
127
+ self.use_value_stack,
128
+ )
129
+
130
+ # This is `_construct_parsing_frontend` expanded/inlined
131
+ parser_type = self.options.parser
132
+ lexer_type = self.options.lexer
133
+ lexer_conf = self.lexer_conf
134
+
135
+ assert isinstance(lexer_conf, LexerConf)
136
+ assert isinstance(parser_conf, ParserConf)
137
+ parser_conf.parser_type = parser_type
138
+ self.lexer_conf.lexer_type = lexer_type
139
+ return PartialParsingFrontend(lexer_conf, parser_conf, self.options)
140
+
141
+ def __repr__(self):
142
+ return "{}(open({!r}), parser={!r}, lexer={!r}, ...)".format(
143
+ type(self).__name__,
144
+ self.source_path,
145
+ self.options.parser,
146
+ self.options.lexer,
147
+ )
148
+
149
+ def parse_from_state(self, parse_state: "PartialParseState", is_end=False):
150
+ return self.parser.parser.parser.parse_from_state(parse_state, is_end=is_end)
151
+
152
+
153
+ class PartialLexerThread(LexerThread):
154
+ def __copy__(self):
155
+ return type(self)(copy(self.lexer), copy(self.state))
156
+
157
+ def __repr__(self):
158
+ return f"{type(self).__name__}(lexer={self.lexer!r}, state={self.state!r})"
159
+
160
+
161
+ class PartialPostLexConnector(PostLexConnector):
162
+ def __copy__(self):
163
+ return type(self)(self.lexer, copy(self.postlexer))
164
+
165
+ def __repr__(self):
166
+ return (
167
+ f"{type(self).__name__}(lexer={self.lexer!r}, postlexer={self.postlexer!r})"
168
+ )
169
+
170
+
171
+ class PartialParsingFrontend(ParsingFrontend):
172
+ def __init__(self, lexer_conf, parser_conf, options, parser=None):
173
+ assert parser_conf.parser_type == "lalr"
174
+
175
+ options._plugins["LALR_Parser"] = PartialLALRParser
176
+ options._plugins["BasicLexer"] = PartialBasicLexer
177
+ options._plugins["ContextualLexer"] = PartialContextualLexer
178
+ options._plugins["LexerThread"] = PartialLexerThread
179
+
180
+ super().__init__(lexer_conf, parser_conf, options, parser=parser)
181
+
182
+ if lexer_conf.postlex:
183
+ self.lexer = PartialPostLexConnector(self.lexer.lexer, lexer_conf.postlex)
184
+
185
+ self._termset_fsm_info = None
186
+ self._symbols_to_states: Optional[
187
+ Dict[str, Set[Tuple[ParseStateType, Action]]]
188
+ ] = None
189
+ self._reverse_shifts: Optional[
190
+ Dict[ParseStateType, Dict[str, Set[ParseStateType]]]
191
+ ] = None
192
+ # self._state_transition_map: Optional[
193
+ # Dict[Tuple[ParseStateType, str], Set[ParseStateType]]
194
+ # ] = None
195
+
196
+ def _compute_maps(
197
+ self,
198
+ ):
199
+ """Compute state transition and symbols-to-states maps."""
200
+ self._reverse_shifts = {}
201
+ self._symbols_to_states = {}
202
+
203
+ parse_table = self.parser.parser.parse_table
204
+
205
+ for from_state, symbols_to_ops in parse_table.states.items():
206
+ for symbol, op in symbols_to_ops.items():
207
+ if op[0] == Shift:
208
+ symbols_to_from_states = self._reverse_shifts.setdefault(op[1], {})
209
+ symbols_to_from_states.setdefault(symbol, set()).add(from_state)
210
+ self._symbols_to_states.setdefault(symbol, set()).add((from_state, op))
211
+
212
+ # # TODO: This approach is very wasteful.
213
+ # context_lexer = get_contextual_lexer(self)
214
+ # self._state_transition_map = {}
215
+ #
216
+ # for from_state, transitions in parse_table.states.items():
217
+ # for symbol, action in transitions.items():
218
+ # # TODO: Filter non-terminals
219
+ # if symbol not in context_lexer.root_lexer.terminals_by_name:
220
+ # continue
221
+ #
222
+ # if action[0] is Shift:
223
+ # self._state_transition_map.setdefault(
224
+ # (from_state, symbol), set()
225
+ # ).add(action[1])
226
+ # continue
227
+ #
228
+ # antecedent_state_seqs = parse_to_terminal(self, [(from_state,)], symbol)
229
+ #
230
+ # for antecedent_state_seq in antecedent_state_seqs:
231
+ # antecedent_state = antecedent_state_seq[-1]
232
+ # self._state_transition_map.setdefault(
233
+ # (from_state, symbol), set()
234
+ # ).add(antecedent_state)
235
+
236
+ def _compute_termset_fsm_info(self):
237
+ """Collect and return information about terminal symbol sets and their FSMs.
238
+
239
+ Terminal symbol sets (or "termsets") are ordered sequences of terminal
240
+ symbols that are used by each parser state. Associated with each is a
241
+ collection of FSMs for each terminal and a single parse state FSM that is
242
+ the union of each terminal's FSM.
243
+
244
+ This constructs a list of tuples containing the termset, the set of
245
+ parse states that use the termsets, parse state FSMs, and information
246
+ mapping the components of the parse state FSMs to their terminal symbol
247
+ FSMs.
248
+
249
+ """
250
+ context_lexer = get_contextual_lexer(self)
251
+ termsets_to_fsms = {}
252
+ termsets_to_parse_states: Dict[Tuple[str, ...], Set[ParseStateType]] = {}
253
+ for parse_state, lexer in context_lexer.lexers.items():
254
+ scanner = lexer.scanner
255
+ key = tuple(term.name for term in scanner.terminals)
256
+ termsets_to_fsms[key] = (scanner.fsm, scanner.fsms_to_trans_finals)
257
+ termsets_to_parse_states.setdefault(key, set()).add(parse_state)
258
+
259
+ self._termset_fsm_info = [
260
+ (
261
+ termset,
262
+ frozenset(termsets_to_parse_states[termset]),
263
+ fsm,
264
+ fsms_to_trans_finals,
265
+ )
266
+ for termset, (fsm, fsms_to_trans_finals) in termsets_to_fsms.items()
267
+ ]
268
+
269
+ @property
270
+ def termset_fsm_info(self):
271
+ if self._termset_fsm_info is None:
272
+ self._compute_termset_fsm_info()
273
+ return self._termset_fsm_info
274
+
275
+ @property
276
+ def symbols_to_states(self):
277
+ if self._symbols_to_states is None:
278
+ self._compute_maps()
279
+ return self._symbols_to_states
280
+
281
+ @property
282
+ def reverse_shifts(self):
283
+ if self._reverse_shifts is None:
284
+ self._compute_maps()
285
+ return self._reverse_shifts
286
+
287
+ # @property
288
+ # def state_transition_map(self):
289
+ # if self._state_transition_map is None:
290
+ # self._compute_maps()
291
+ # return self._state_transition_map
292
+
293
+
294
+ class PartialLALRParser(LALR_Parser):
295
+ def __init__(self, parser_conf, debug=False, strict=False):
296
+ analysis = LALR_Analyzer(
297
+ parser_conf, debug=debug if not parser_conf.deterministic else True
298
+ )
299
+ analysis.compute_lalr()
300
+ callbacks = parser_conf.callbacks
301
+
302
+ self.parser_conf = parser_conf
303
+ self._parse_table = analysis.parse_table
304
+
305
+ if parser_conf.deterministic:
306
+ old_to_new = {}
307
+
308
+ def to_tuple(v):
309
+ new = old_to_new.get(v)
310
+ if new is None:
311
+ new = tuple(sorted(v, key=lambda y: str(y)))
312
+ old_to_new[v] = new
313
+ return new
314
+
315
+ enum = sorted(
316
+ self._parse_table.states.keys(),
317
+ key=lambda x: str(sorted(x, key=lambda y: str(y))),
318
+ )
319
+
320
+ new_states = {}
321
+ for s in enum:
322
+ transitions = {
323
+ term: op if op[0] is not Shift else (op[0], to_tuple(op[1]))
324
+ for term, op in self._parse_table.states[s].items()
325
+ }
326
+ new_states[to_tuple(s)] = transitions
327
+
328
+ self._parse_table = type(self._parse_table)(
329
+ new_states,
330
+ {k: to_tuple(v) for k, v in self._parse_table.start_states.items()},
331
+ {k: to_tuple(v) for k, v in self._parse_table.end_states.items()},
332
+ )
333
+
334
+ if not debug:
335
+ self._parse_table = IntParseTable.from_ParseTable(self._parse_table)
336
+ self.states_to_rulesets = dict(
337
+ zip(self._parse_table.states.keys(), new_states.keys())
338
+ )
339
+
340
+ self.parser = PartialParser(
341
+ self._parse_table,
342
+ callbacks,
343
+ debug,
344
+ use_value_stack=parser_conf.use_value_stack,
345
+ )
346
+
347
+ @classmethod
348
+ def deserialize(cls, data, memo, callbacks, debug=False):
349
+ inst = cls.__new__(cls)
350
+ inst._parse_table = ParseTable.deserialize(data, memo)
351
+ inst.parser = PartialParser(inst._parse_table, callbacks, debug)
352
+ return inst
353
+
354
+
355
+ class PartialParserState(ParserState):
356
+ __slots__ = "use_value_stack"
357
+
358
+ def __init__(
359
+ self,
360
+ parse_conf,
361
+ lexer,
362
+ state_stack=None,
363
+ value_stack=None,
364
+ use_value_stack=False,
365
+ ):
366
+ super().__init__(
367
+ parse_conf, lexer, state_stack=state_stack, value_stack=value_stack
368
+ )
369
+ self.use_value_stack = use_value_stack
370
+
371
+ def feed_token(self, token, is_end=False):
372
+ if token.type == "partial":
373
+ # If none of the potential terminals can transition, we need to know now
374
+ current_state = self.state_stack[-1]
375
+ current_lexer = get_contextual_lexer(self.lexer).lexers[current_state]
376
+
377
+ # We have to feed the token and determine whether or not at least
378
+ # one terminal is consistent with the stack; otherwise, we'll miss
379
+ # invalid REDUCE cases.
380
+ # TODO: We should track separate parses conditional on possible
381
+ # token/symbol types, then we can coherently reuse the following
382
+ # results instead of recomputing it later.
383
+ can_transition = False
384
+ for terminal_info in token.value.terminals_and_info:
385
+ if terminal_info.terminal_name not in current_lexer.ignore_types:
386
+ test_token = Token.new_borrow_pos(
387
+ terminal_info.terminal_name, "", token
388
+ )
389
+
390
+ stack = copy(self.state_stack)
391
+ try:
392
+ self.feed_token_no_stack(test_token, is_end=is_end)
393
+ can_transition = True
394
+ break
395
+ except UnexpectedToken:
396
+ continue
397
+ finally:
398
+ self.state_stack = stack
399
+ else:
400
+ can_transition = True
401
+
402
+ if not can_transition:
403
+ expected = {
404
+ s
405
+ for s in self.parse_conf.states[current_state].keys()
406
+ if s.isupper()
407
+ }
408
+ raise UnexpectedToken(
409
+ token, expected, state=self, interactive_parser=None
410
+ )
411
+
412
+ elif self.use_value_stack:
413
+ super().feed_token(token, is_end=is_end)
414
+ else:
415
+ self.feed_token_no_stack(token, is_end=is_end)
416
+
417
+ def feed_token_no_stack(self, token, is_end=False):
418
+ """
419
+ This is a copy of `ParserState.feed_token` with all the value stack
420
+ steps removed. Since we're not exactly parsing in order to obtain a
421
+ CST or anything similar, we can avoid the growing expense of tracking
422
+ the parse tree.
423
+ """
424
+ state_stack = self.state_stack
425
+ states = self.parse_conf.states
426
+ end_state = self.parse_conf.end_state
427
+
428
+ while True:
429
+ state = state_stack[-1]
430
+ try:
431
+ action, arg = states[state][token.type]
432
+ except KeyError:
433
+ expected = {s for s in states[state].keys() if s.isupper()}
434
+ raise UnexpectedToken(
435
+ token, expected, state=self, interactive_parser=None
436
+ )
437
+
438
+ assert arg != end_state
439
+
440
+ if action is Shift:
441
+ # shift once and return
442
+ assert not is_end
443
+ state_stack.append(arg)
444
+ return
445
+ else:
446
+ # reduce+shift as many times as necessary
447
+ rule = arg
448
+ size = len(rule.expansion)
449
+ if size:
450
+ del state_stack[-size:]
451
+
452
+ _action, new_state = states[state_stack[-1]][rule.origin.name]
453
+ assert _action is Shift
454
+ state_stack.append(new_state)
455
+
456
+ if is_end and state_stack[-1] == end_state:
457
+ return
458
+
459
+ def feed_eof(self):
460
+ last_token = self.lexer.state.last_token
461
+
462
+ if last_token is None:
463
+ eof_token = self.lexer._Token("$END", "", 0, 1, 1)
464
+ else:
465
+ eof_token = Token.new_borrow_pos("$END", "", last_token)
466
+
467
+ new_token_is_legal = (
468
+ last_token is None
469
+ or last_token.type != "partial"
470
+ or any(ti.is_final for ti in last_token.value.terminals_and_info)
471
+ )
472
+ if new_token_is_legal:
473
+ self.feed_token(eof_token, is_end=True)
474
+ else:
475
+ raise UnexpectedToken(eof_token, [], state=self, interactive_parser=None)
476
+
477
+ def choices(self):
478
+ return self.parse_conf.parse_table.states[self.position]
479
+
480
+ def accepts(self):
481
+ """
482
+ Adapted from https://github.com/lark-parser/lark/blob/be542c2ff6d968817df019b8bf03f37b3111c08c/lark/parsers/lalr_interactive_parser.py#L95
483
+ Returns the set of possible tokens that will advance the parser into a new valid state.
484
+ """
485
+ accepts = set()
486
+ conf_no_callbacks = copy(self.parse_conf)
487
+ # We don't want to call callbacks here since those might have arbitrary side effects
488
+ # and are unnecessarily slow.
489
+ conf_no_callbacks.callbacks = {}
490
+ for t in self.choices():
491
+ if t.isupper(): # is terminal?
492
+ new_state = copy(self)
493
+ new_state.parse_conf = conf_no_callbacks
494
+ try:
495
+ new_state.feed_token(new_state.lexer._Token(t, ""))
496
+ except UnexpectedToken:
497
+ pass
498
+ else:
499
+ accepts.add(t)
500
+ return accepts
501
+
502
+ def __copy__(self):
503
+ return type(self)(
504
+ self.parse_conf,
505
+ copy(self.lexer),
506
+ copy(self.state_stack),
507
+ deepcopy(self.value_stack),
508
+ use_value_stack=self.use_value_stack,
509
+ )
510
+
511
+ def __repr__(self):
512
+ return f"{type(self).__name__}(lexer={self.lexer!r}, state_stack={self.state_stack!r})"
513
+
514
+
515
+ class PartialParser(_Parser):
516
+ def __init__(self, parse_table, callbacks, debug=False, use_value_stack=False):
517
+ super().__init__(parse_table, callbacks, debug=debug)
518
+ self.use_value_stack = use_value_stack
519
+
520
+ def parse(
521
+ self, lexer, start, value_stack=None, state_stack=None, start_interactive=False
522
+ ):
523
+ parse_conf = ParseConf(self.parse_table, self.callbacks, start)
524
+ parser_state = PartialParserState(
525
+ parse_conf, copy(lexer), state_stack, value_stack, self.use_value_stack
526
+ )
527
+ if start_interactive:
528
+ return InteractiveParser(self, parser_state, parser_state.lexer)
529
+ return self.parse_from_state(parser_state)
530
+
531
+ def parse_from_state(self, state, last_token=None, is_end=False):
532
+ try:
533
+ token = last_token
534
+ for token in state.lexer.lex(state):
535
+ state.feed_token(token)
536
+
537
+ if is_end and (not token or token.type != "partial"):
538
+ state.feed_eof()
539
+
540
+ return state
541
+ except UnexpectedInput as e:
542
+ try:
543
+ e.interactive_parser = InteractiveParser(self, state, state.lexer)
544
+ except NameError:
545
+ pass
546
+ raise e
547
+ except Exception:
548
+ if self.debug:
549
+ print("")
550
+ print("STATE STACK DUMP")
551
+ print("----------------")
552
+ for i, s in enumerate(state.state_stack):
553
+ print("%d)" % i, s)
554
+ print("")
555
+
556
+ raise
557
+
558
+
559
+ class PartialScanner(Scanner):
560
+ @classmethod
561
+ @lru_cache
562
+ def construct_terminal_fsm(cls, terminal):
563
+ # TODO: This should really be done at the lexer/parser level so that
564
+ # the lifetime of these objects is tied to the parser itself.
565
+ regex_str = terminal.pattern.to_regexp()
566
+ pattern = interegular.parse_pattern(regex_str)
567
+ fsm, _ = make_deterministic_fsm(pattern.to_fsm().reduce())
568
+ return fsm, pattern.prefix_postfix
569
+
570
+ def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False):
571
+ self.terminals = terminals
572
+ self.g_regex_flags = g_regex_flags
573
+ self.use_bytes = use_bytes
574
+ self.match_whole = match_whole
575
+ self.allowed_types = {t.name for t in self.terminals}
576
+ self._mres = None
577
+
578
+ fsms = []
579
+ for t in self.terminals:
580
+ fsm, prefix_postfix = self.construct_terminal_fsm(t)
581
+
582
+ # TODO FIXME: We don't support this right now.
583
+ assert prefix_postfix == (0, 0)
584
+
585
+ fsms.append(fsm)
586
+
587
+ self.fsm, self.fsms_to_trans_finals = fsm_union(fsms)
588
+
589
+ def get_terminals_info(
590
+ self, fsm_state_seq
591
+ ) -> Tuple[Tuple[PartialTerminalInfo, ...], Tuple[PartialTerminalInfo, ...]]:
592
+ """Get the possible terminal symbols for an FSM state sequence."""
593
+ terminals_and_info: Tuple[PartialTerminalInfo, ...] = ()
594
+ final_terminals_and_info: Tuple[PartialTerminalInfo, ...] = ()
595
+ for i, (fsm_id, fsm_reads_more, in_final) in enumerate(
596
+ get_sub_fsms_from_seq(fsm_state_seq, self.fsms_to_trans_finals)
597
+ ):
598
+ terminal_name = self.terminals[fsm_id].name
599
+ info = PartialTerminalInfo(i, terminal_name, fsm_reads_more, in_final)
600
+ terminals_and_info += (info,)
601
+ if in_final:
602
+ final_terminals_and_info += (info,)
603
+
604
+ return terminals_and_info, final_terminals_and_info
605
+
606
+ def match(self, text, pos, last_fsm_state_seq: Optional[Tuple[int, ...]] = None):
607
+ """Determine an FSM match over `text` starting at `pos` and continuing `last_fsm_state_seq`."""
608
+
609
+ start_pos = pos
610
+
611
+ if last_fsm_state_seq:
612
+ assert len(last_fsm_state_seq) > 1
613
+ start_pos += len(last_fsm_state_seq) - 1
614
+ start_state = last_fsm_state_seq[-1]
615
+ else:
616
+ start_state = self.fsm.initial
617
+
618
+ text_part = text[start_pos:]
619
+
620
+ text_transitions = get_token_transition_keys(
621
+ self.fsm.fsm_info.alphabet_symbol_mapping,
622
+ self.fsm.fsm_info.alphabet_anything_value,
623
+ text_part,
624
+ )
625
+
626
+ state_seq = walk_fsm(
627
+ self.fsm,
628
+ text_transitions,
629
+ start_state,
630
+ full_match=self.match_whole,
631
+ )
632
+
633
+ if not state_seq:
634
+ return None
635
+
636
+ if last_fsm_state_seq:
637
+ res = last_fsm_state_seq + tuple(state_seq)
638
+ else:
639
+ res = (start_state,) + tuple(state_seq)
640
+
641
+ return res
642
+
643
+
644
+ class PartialContextualLexer(ContextualLexer):
645
+ def __init__(self, conf: "LexerConf", states, always_accept=()):
646
+ terminals = list(conf.terminals)
647
+ terminals_by_name = conf.terminals_by_name
648
+
649
+ trad_conf = copy(conf)
650
+ trad_conf.terminals = terminals
651
+
652
+ lexer_by_symbols: Dict = {}
653
+ self.lexers = {}
654
+ for state, accepts in states.items():
655
+ key = frozenset(accepts)
656
+ try:
657
+ lexer = lexer_by_symbols[key]
658
+ except KeyError:
659
+ accepts = set(accepts) | set(conf.ignore) | set(always_accept)
660
+ lexer_conf = copy(trad_conf)
661
+ lexer_conf.terminals = [
662
+ terminals_by_name[n] for n in accepts if n in terminals_by_name
663
+ ]
664
+ if not lexer_conf.terminals:
665
+ continue
666
+ lexer = PartialBasicLexer(lexer_conf)
667
+ lexer_by_symbols[key] = lexer
668
+
669
+ self.lexers[state] = lexer
670
+
671
+ assert trad_conf.terminals is terminals
672
+ self.root_lexer = PartialBasicLexer(trad_conf)
673
+
674
+ def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]:
675
+ try:
676
+ while True:
677
+ lexer = self.lexers[parser_state.position]
678
+ next_tok = lexer.next_token(lexer_state, parser_state)
679
+ yield next_tok
680
+ except EOFError:
681
+ pass
682
+ except KeyError:
683
+ if len(lexer_state.text) > lexer_state.line_ctr.char_pos:
684
+ raise UnexpectedCharacters(
685
+ lexer_state.text,
686
+ lexer_state.line_ctr.char_pos,
687
+ lexer_state.line_ctr.line,
688
+ lexer_state.line_ctr.column,
689
+ allowed=False,
690
+ token_history=lexer_state.last_token and [lexer_state.last_token],
691
+ state=parser_state,
692
+ terminals_by_name=self.root_lexer.terminals,
693
+ )
694
+
695
+
696
+ class PartialBasicLexer(BasicLexer):
697
+ def __init__(self, conf: "LexerConf"):
698
+ super().__init__(conf)
699
+ # Eagerly construct the scanner
700
+ self._build_scanner()
701
+
702
+ def _build_scanner(self):
703
+ # This seems incredibly convoluted: `lark` creates callback-triggered
704
+ # nested scanners for regex-defined terminals that overlap with
705
+ # string-defined terminals when both types of terminals have the same
706
+ # priority. Unless I'm missing something important, why not simply
707
+ # reorder the terminals so that the string-defined ones come before the
708
+ # regex-defined ones?
709
+ terminals, self.callback = _create_unless(
710
+ self.terminals, self.g_regex_flags, self.re, self.use_bytes
711
+ )
712
+
713
+ # We can't let people arbitrarily mess with the scanning process.
714
+ assert not self.user_callbacks
715
+ # for type_, f in self.user_callbacks.items():
716
+ # if type_ in self.callback:
717
+ # # Already a callback there, probably UnlessCallback
718
+ # self.callback[type_] = CallChain(
719
+ # self.callback[type_], f, lambda t: t.type == type_
720
+ # )
721
+ # else:
722
+ # self.callback[type_] = f
723
+
724
+ # We used the "callback" results to reorder the terminals (see the
725
+ # comments above).
726
+ for terminal_name, callback in self.callback.items():
727
+ terminal = self.terminals_by_name[terminal_name]
728
+ for sub_terminal in callback.scanner.terminals:
729
+ self.terminals.remove(sub_terminal)
730
+ idx = self.terminals.index(terminal)
731
+ self.terminals.insert(idx, sub_terminal)
732
+
733
+ self._scanner = PartialScanner(
734
+ self.terminals, self.g_regex_flags, self.re, self.use_bytes
735
+ )
736
+
737
+ def match(self, text, pos, last_fsm_state_seq=None):
738
+ return self.scanner.match(text, pos, last_fsm_state_seq)
739
+
740
+ def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token:
741
+ last_token = lex_state.last_token
742
+
743
+ last_fsm_state_seq = None
744
+ if last_token and last_token.type == "partial":
745
+ # Continue from last partial lexer state
746
+ last_fsm_state_seq = last_token.value.fsm_state_seq
747
+
748
+ line_ctr = lex_state.line_ctr
749
+ end_pos = line_ctr.char_pos + (
750
+ len(last_fsm_state_seq) - 1 if last_fsm_state_seq else 0
751
+ )
752
+ while end_pos < len(lex_state.text):
753
+ res = self.match(lex_state.text, line_ctr.char_pos, last_fsm_state_seq)
754
+
755
+ if not res:
756
+ if (
757
+ not last_fsm_state_seq
758
+ or last_fsm_state_seq[-1] not in self.scanner.fsm.finals
759
+ ):
760
+ allowed = self.scanner.allowed_types - self.ignore_types
761
+ if not allowed:
762
+ allowed = {"<END-OF-FILE>"}
763
+ raise UnexpectedCharacters(
764
+ lex_state.text,
765
+ line_ctr.char_pos,
766
+ line_ctr.line,
767
+ line_ctr.column,
768
+ allowed=allowed,
769
+ token_history=lex_state.last_token and [lex_state.last_token],
770
+ state=parser_state,
771
+ terminals_by_name=self.terminals_by_name,
772
+ )
773
+
774
+ # The partial match might be complete now
775
+ fsm_state_seq = last_token.value.fsm_state_seq
776
+ terminals_and_info = last_token.value.terminals_and_info
777
+ final_terminals_and_info = last_token.value.final_terminals_and_info
778
+ else:
779
+ fsm_state_seq = res
780
+ (
781
+ terminals_and_info,
782
+ final_terminals_and_info,
783
+ ) = self.scanner.get_terminals_info(fsm_state_seq)
784
+
785
+ priority_terminal_info = (
786
+ final_terminals_and_info[0]
787
+ if final_terminals_and_info
788
+ else terminals_and_info[0]
789
+ )
790
+
791
+ is_not_finished = (
792
+ not priority_terminal_info.is_final
793
+ or priority_terminal_info.can_transition
794
+ or len(terminals_and_info) > 1
795
+ )
796
+
797
+ start_pos = line_ctr.char_pos
798
+ end_pos = start_pos + len(fsm_state_seq) - 1
799
+
800
+ if end_pos >= len(lex_state.text) and is_not_finished:
801
+ type_name = "partial"
802
+ token_value = PartialTokensInfo(
803
+ fsm_state_seq,
804
+ is_not_finished,
805
+ terminals_and_info,
806
+ final_terminals_and_info,
807
+ )
808
+ # Don't update the line counter states until we've finished
809
+ value = ""
810
+ else:
811
+ type_name = priority_terminal_info.terminal_name
812
+ # The token value should contain all partial scan parts in this
813
+ # case
814
+ value = token_value = lex_state.text[start_pos:end_pos]
815
+
816
+ assert isinstance(self.callback, Dict)
817
+
818
+ if type_name not in self.ignore_types:
819
+ t = Token(
820
+ type_name,
821
+ token_value,
822
+ line_ctr.char_pos,
823
+ line_ctr.line,
824
+ line_ctr.column,
825
+ )
826
+
827
+ line_ctr.feed(value, type_name in self.newline_types)
828
+
829
+ t.end_line = line_ctr.line
830
+ t.end_column = line_ctr.column
831
+ t.end_pos = line_ctr.char_pos
832
+ if t.type in self.callback:
833
+ t = self.callback[t.type](t)
834
+ if not isinstance(t, Token):
835
+ raise LexError(
836
+ "Callbacks must return a token (returned %r)" % t
837
+ )
838
+ lex_state.last_token = t
839
+ return t
840
+
841
+ if type_name in self.callback:
842
+ t2 = Token(
843
+ type_name, value, line_ctr.char_pos, line_ctr.line, line_ctr.column
844
+ )
845
+ self.callback[type_name](t2)
846
+
847
+ line_ctr.feed(value, type_name in self.newline_types)
848
+
849
+ last_fsm_state_seq = None
850
+
851
+ raise EOFError(self)
852
+
853
+
854
+ class PartialIndenter(Indenter):
855
+ """An `Indenter` that doesn't reset its state every time `process` is called."""
856
+
857
+ def process(self, stream):
858
+ return self._process(stream)
859
+
860
+ def _process(self, stream):
861
+ for token in stream:
862
+ # These were previously *after* the `yield`, but that makes the
863
+ # state tracking unnecessarily convoluted.
864
+ if token.type in self.OPEN_PAREN_types:
865
+ self.paren_level += 1
866
+ elif token.type in self.CLOSE_PAREN_types:
867
+ self.paren_level -= 1
868
+ if self.paren_level < 0:
869
+ raise UnexpectedToken(token, [])
870
+
871
+ if token.type == self.NL_type:
872
+ yield from self.handle_NL(token)
873
+ else:
874
+ yield token
875
+
876
+ # TODO: What do we want to do here?
877
+ # while len(self.indent_level) > 1:
878
+ # self.indent_level.pop()
879
+ # yield Token(self.DEDENT_type, "")
880
+
881
+ def accepts_token_type(self, token_type):
882
+ if token_type in self.CLOSE_PAREN_types and self.paren_level - 1 < 0:
883
+ return False
884
+
885
+ # TODO:
886
+ # if token_type == self.NL_type and self.paren_level == 0:
887
+ # ...
888
+ # return False
889
+
890
+ return True
891
+
892
+ def __copy__(self):
893
+ res = type(self)()
894
+ res.paren_level = self.paren_level
895
+ res.indent_level = copy(self.indent_level)
896
+ return res
897
+
898
+ def __repr__(self):
899
+ return f"{type(self).__name__}(paren_level={self.paren_level!r}, indent_level={self.indent_level!r})"
900
+
901
+
902
+ class PartialPythonIndenter(PartialIndenter):
903
+ NL_type = "_NEWLINE"
904
+ OPEN_PAREN_types = ["LPAR", "LSQB", "LBRACE"]
905
+ CLOSE_PAREN_types = ["RPAR", "RSQB", "RBRACE"]
906
+ INDENT_type = "_INDENT"
907
+ DEDENT_type = "_DEDENT"
908
+ tab_len = 8
909
+
910
+
911
+ def get_contextual_lexer(x: Union[PartialLexerThread, PartialParsingFrontend]):
912
+ if isinstance(x.lexer, ContextualLexer):
913
+ return x.lexer
914
+ else:
915
+ return x.lexer.lexer
916
+
917
+
918
+ def terminals_to_fsms(lp: PartialLark) -> Dict[str, FSM]:
919
+ """Construct a ``dict`` mapping terminal symbol names to their finite state machines."""
920
+
921
+ symbol_names_and_fsms = {}
922
+ for terminal in lp.terminals:
923
+ pattern = interegular.parse_pattern(terminal.pattern.to_regexp())
924
+ # TODO: Use `pyparser.terminals[0].pattern.flags`?
925
+ try:
926
+ fsm, _ = make_deterministic_fsm(pattern.to_fsm().reduce())
927
+ except Unsupported:
928
+ fsm = None
929
+
930
+ symbol_names_and_fsms[terminal.name] = fsm
931
+
932
+ return symbol_names_and_fsms
933
+
934
+
935
+ def fsm_union(
936
+ fsms: Sequence[FSM],
937
+ ) -> Tuple[FSM, Dict[int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]]]]:
938
+ """Construct an FSM representing the union of the FSMs in `fsms`.
939
+
940
+ This is an updated version of `interegular.fsm.FSM.union` made to return an
941
+ extra map of component FSMs to the sets of state transitions that
942
+ correspond to them in the new FSM.
943
+
944
+ """
945
+
946
+ alphabet, new_to_old = Alphabet.union(*[fsm.alphabet for fsm in fsms])
947
+
948
+ indexed_fsms = tuple(enumerate(fsms))
949
+
950
+ initial = {i: fsm.initial for (i, fsm) in indexed_fsms}
951
+
952
+ # Dedicated function accepting a "superset" and returning the next
953
+ # "superset" obtained by following this transition in the new FSM
954
+ def follow(current_state, new_transition: int):
955
+ next = {}
956
+ for i, f in indexed_fsms:
957
+ old_transition = new_to_old[i][new_transition]
958
+ if (
959
+ i in current_state
960
+ and current_state[i] in f.map
961
+ and old_transition in f.map[current_state[i]]
962
+ ):
963
+ next[i] = f.map[current_state[i]][old_transition]
964
+ if not next:
965
+ raise OblivionError
966
+ return next
967
+
968
+ states = [initial]
969
+ finals: Set[int] = set()
970
+ map: Dict[int, Dict[int, int]] = {}
971
+
972
+ # Map component FSMs to their new state-to-state transitions, finals, and a
973
+ # map translating component FSM states to aggregate FSM states
974
+ fsms_to_trans_finals: Dict[
975
+ int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]]
976
+ ] = {}
977
+
978
+ i = 0
979
+ while i < len(states):
980
+ state = states[i]
981
+
982
+ # Add to the finals of the aggregate FSM whenever we hit a final in a
983
+ # component FSM
984
+ if any(state.get(j, -1) in fsm.finals for (j, fsm) in indexed_fsms):
985
+ finals.add(i)
986
+
987
+ # Compute the map for this state
988
+ map[i] = {}
989
+ for transition in alphabet.by_transition:
990
+ try:
991
+ next = follow(state, transition)
992
+ except OblivionError:
993
+ # Reached an oblivion state; don't list it
994
+ continue
995
+ else:
996
+ try:
997
+ # TODO: Seems like this could--and should--be avoided
998
+ j = states.index(next)
999
+ except ValueError:
1000
+ j = len(states)
1001
+ states.append(next)
1002
+
1003
+ map[i][transition] = j
1004
+
1005
+ for fsm_id, fsm_state in next.items():
1006
+ (
1007
+ fsm_transitions,
1008
+ fsm_finals,
1009
+ fsm_old_to_new,
1010
+ ) = fsms_to_trans_finals.setdefault(fsm_id, (set(), set(), {}))
1011
+ old_from = state[fsm_id]
1012
+ old_to = fsm_state
1013
+ fsm_old_to_new.setdefault(old_from, set()).add(i)
1014
+ fsm_old_to_new.setdefault(old_to, set()).add(j)
1015
+ fsm_transitions.add((i, j))
1016
+ if fsm_state in fsms[fsm_id].finals:
1017
+ fsm_finals.add(j)
1018
+
1019
+ i += 1
1020
+
1021
+ fsm = FSM(
1022
+ alphabet=alphabet,
1023
+ states=range(len(states)),
1024
+ initial=0,
1025
+ finals=finals,
1026
+ map=map,
1027
+ __no_validation__=True,
1028
+ )
1029
+
1030
+ fsm, old_to_new_states = make_deterministic_fsm(fsm)
1031
+ _fsms_to_trans_finals = {
1032
+ fsm_id: (
1033
+ {(old_to_new_states[s1], old_to_new_states[s2]) for s1, s2 in transitions},
1034
+ {old_to_new_states[s] for s in finals},
1035
+ {
1036
+ old_state: {old_to_new_states[new_state] for new_state in new_states}
1037
+ for old_state, new_states in old_to_new.items()
1038
+ },
1039
+ )
1040
+ for fsm_id, (transitions, finals, old_to_new) in sorted(
1041
+ fsms_to_trans_finals.items(), key=lambda x: x[0]
1042
+ )
1043
+ }
1044
+
1045
+ return (
1046
+ fsm,
1047
+ _fsms_to_trans_finals,
1048
+ )
1049
+
1050
+
1051
+ def get_sub_fsms_from_seq(
1052
+ state_seq: Sequence[int],
1053
+ fsms_to_trans_finals: Dict[
1054
+ int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]]
1055
+ ],
1056
+ ) -> Generator[Tuple[int, bool, bool], None, None]:
1057
+ """Get the indices of the sub-FSMs in `fsm` that could have matched the state sequence `state_seq`.
1058
+
1059
+ Parameters
1060
+ ----------
1061
+ state_seq
1062
+ A state sequence.
1063
+ fsms_to_trans_finals
1064
+ A map from FSM indices to tuples containing sets of their state transitions
1065
+ and sets of the final/accept states.
1066
+
1067
+ Returns
1068
+ -------
1069
+ A generator returning tuples containing each sub-FSM index (in the order
1070
+ they were union-ed to construct `fsm`) and booleans indicating whether or
1071
+ not there is another valid transition from the last state in the sequence
1072
+ for the associated sub-FSM (i.e. if the FSM can continue
1073
+ accepting/matching) and whether or not the sequence ends in a final state
1074
+ of the sub-FSM.
1075
+ """
1076
+ state_seq_transitions = set(zip(state_seq[:-1], state_seq[1:]))
1077
+ last_fsm_state = state_seq[-1]
1078
+ yield from (
1079
+ (
1080
+ # The sub-FMS index
1081
+ fsm_idx,
1082
+ # Is there another possible transition in this sub-FSM?
1083
+ any(last_fsm_state == from_s for (from_s, to_s) in transitions),
1084
+ # Is this sub-FSM in a final state?
1085
+ state_seq[-1] in finals,
1086
+ )
1087
+ for fsm_idx, (transitions, finals, _) in fsms_to_trans_finals.items()
1088
+ if state_seq_transitions.issubset(transitions)
1089
+ )
1090
+
1091
+
1092
+ def walk_fsm(
1093
+ fsm: BetterFSM,
1094
+ token_transition_keys: Sequence[int],
1095
+ start_state: int,
1096
+ full_match: bool = True,
1097
+ ) -> List[int]:
1098
+ fsm_finals = fsm.finals
1099
+
1100
+ state = start_state
1101
+ accepted_states: List[int] = []
1102
+ last_final_idx: int = 0
1103
+
1104
+ fsm_transitions = fsm.flat_transition_map
1105
+
1106
+ # Iterate over token transition key sequence. The transition key
1107
+ # sequence represents the FSM traversal rules of the tokens symbols.
1108
+ for i, trans_key in enumerate(token_transition_keys):
1109
+ new_state = fsm_transitions.get((state, trans_key))
1110
+
1111
+ if new_state is None:
1112
+ if not full_match and last_final_idx > 0:
1113
+ return accepted_states[:last_final_idx]
1114
+
1115
+ return []
1116
+
1117
+ state = new_state
1118
+
1119
+ if state in fsm_finals:
1120
+ last_final_idx = i + 1
1121
+
1122
+ accepted_states.append(state)
1123
+
1124
+ if full_match and last_final_idx - 1 != i:
1125
+ return []
1126
+
1127
+ return accepted_states
vllm/lib/python3.10/site-packages/outlines/fsm/types.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+ from enum import EnumMeta
3
+ from typing import Any, Protocol, Tuple, Type
4
+
5
+ from typing_extensions import _AnnotatedAlias, get_args
6
+
7
# Regular expressions used to constrain generation to a given Python type.
INTEGER = r"[+-]?(0|[1-9][0-9]*)"  # optional sign, no leading zeros
BOOLEAN = "(True|False)"  # Python literal capitalization
# NOTE(review): the exponent sign below is mandatory ([eE][+-]), so e.g. "1e5"
# is not matched even though float() accepts it — confirm this is intentional.
FLOAT = rf"{INTEGER}(\.[0-9]+)?([eE][+-][0-9]+)?"
DATE = r"(\d{4})-(0[1-9]|1[0-2])-([0-2][0-9]|3[0-1])"  # YYYY-MM-DD
TIME = r"([0-1][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])"  # HH:MM:SS, 24-hour
DATETIME = rf"({DATE})(\s)({TIME})"  # date and time joined by one whitespace char
13
+
14
+
15
class FormatFunction(Protocol):
    """Callable that converts a generated string into a Python value."""

    def __call__(self, sequence: str) -> Any:
        ...
18
+
19
+
20
def python_types_to_regex(python_type: Type) -> Tuple[str, FormatFunction]:
    """Map a Python type to a regex and a string-to-value parsing function.

    Parameters
    ----------
    python_type
        The type to constrain generation to.  Supported: ``Annotated`` custom
        types carrying a JSON-schema ``pattern``, ``Enum`` subclasses,
        ``float``, ``int``, ``bool``, ``datetime.date``, ``datetime.time``
        and ``datetime.datetime``.

    Returns
    -------
    A tuple ``(regex_str, format_fn)`` where ``regex_str`` constrains
    generation and ``format_fn`` converts a generated string into a value of
    ``python_type``.

    Raises
    ------
    NotImplementedError
        If ``python_type`` is not one of the supported types.
    """
    # If it is a custom type: `Annotated[cls, metadata]` where the metadata
    # carries a JSON schema with a `pattern` key.
    if isinstance(python_type, _AnnotatedAlias):
        json_schema = get_args(python_type)[1].json_schema
        type_class = get_args(python_type)[0]

        custom_regex_str = json_schema["pattern"]

        def custom_format_fn(sequence: str) -> Any:
            return type_class(sequence)

        return custom_regex_str, custom_format_fn

    if isinstance(python_type, EnumMeta):
        # Alternation over the enum's member *names*.
        values = python_type.__members__.keys()
        enum_regex_str: str = "(" + "|".join(values) + ")"

        def enum_format_fn(sequence: str) -> str:
            return str(sequence)

        return enum_regex_str, enum_format_fn

    if python_type == float:

        def float_format_fn(sequence: str) -> float:
            return float(sequence)

        return FLOAT, float_format_fn
    elif python_type == int:

        def int_format_fn(sequence: str) -> int:
            return int(sequence)

        return INTEGER, int_format_fn
    elif python_type == bool:

        def bool_format_fn(sequence: str) -> bool:
            # Bug fix: `bool(sequence)` is True for ANY non-empty string,
            # including "False".  The BOOLEAN regex guarantees `sequence` is
            # exactly "True" or "False", so compare against the literal.
            return sequence == "True"

        return BOOLEAN, bool_format_fn
    elif python_type == datetime.date:

        def date_format_fn(sequence: str) -> datetime.date:
            return datetime.datetime.strptime(sequence, "%Y-%m-%d").date()

        return DATE, date_format_fn
    elif python_type == datetime.time:

        def time_format_fn(sequence: str) -> datetime.time:
            return datetime.datetime.strptime(sequence, "%H:%M:%S").time()

        return TIME, time_format_fn
    elif python_type == datetime.datetime:

        def datetime_format_fn(sequence: str) -> datetime.datetime:
            return datetime.datetime.strptime(sequence, "%Y-%m-%d %H:%M:%S")

        return DATETIME, datetime_format_fn
    else:
        raise NotImplementedError(
            f"The Python type {python_type} is not supported. Please open an issue."
        )
vllm/lib/python3.10/site-packages/outlines/function.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib.util
2
+ from dataclasses import dataclass
3
+ from typing import TYPE_CHECKING, Callable, Optional, Tuple, Union
4
+
5
+ import requests
6
+
7
+ from outlines import generate, models
8
+
9
+ if TYPE_CHECKING:
10
+ from outlines.generate.api import SequenceGenerator
11
+ from outlines.prompts import Prompt
12
+
13
+
14
@dataclass
class Function:
    """Represents an Outlines function.

    Functions are a convenient way to encapsulate a prompt template, a language
    model and a Pydantic model that define the output structure. Once defined,
    the function can be called with arguments that will be used to render the
    prompt template.

    """

    prompt_template: "Prompt"
    schema: Union[str, Callable, object]
    model_name: str
    generator: Optional["SequenceGenerator"] = None

    @classmethod
    def from_github(cls, program_path: str, function_name: str = "fn"):
        """Load a function stored on GitHub"""
        source = download_from_github(program_path)
        return extract_function_from_file(source, function_name)

    def init_generator(self):
        """Load the model and initialize the generator."""
        loaded_model = models.transformers(self.model_name)
        self.generator = generate.json(loaded_model, self.schema)

    def __call__(self, *args, **kwargs):
        """Call the function.

        .. warning::

            This currently does not support batching.

        Parameters
        ----------
        args
            Values to pass to the prompt template as positional arguments.
        kwargs
            Values to pass to the prompt template as keyword arguments.

        """
        # Lazily load the model the first time the function is called.
        if self.generator is None:
            self.init_generator()

        rendered_prompt = self.prompt_template(*args, **kwargs)
        return self.generator(rendered_prompt)
63
+
64
+
65
def download_from_github(short_path: str):
    """Download the file in which the function is stored on GitHub.

    ``short_path`` has the form ``{USERNAME}/{REPO_NAME}/{PATH_TO_FILE}``,
    without the ``.py`` extension; the file is fetched from the ``main``
    branch via the raw-content endpoint.
    """
    base_url = "https://raw.githubusercontent.com"
    branch = "main"

    segments = short_path.split("/")
    if len(segments) < 3:
        raise ValueError(
            "Please provide a valid path in the form {USERNAME}/{REPO_NAME}/{PATH_TO_FILE}."
        )
    if short_path.endswith(".py"):
        raise ValueError("Do not append the `.py` extension to the program name.")

    username, repo, *path_to_file = segments
    url = "/".join([base_url, username, repo, branch, *path_to_file]) + ".py"

    response = requests.get(url)
    if response.status_code == 200:
        return response.text
    if response.status_code == 404:
        raise ValueError(
            f"Program could not be found at {url}. Please make sure you entered the GitHub username, repository name and path to the program correctly."
        )
    # Any other status (5xx, 403, ...) is surfaced as an HTTPError.
    response.raise_for_status()
93
+
94
+
95
def extract_function_from_file(content: str, function_name: str) -> Callable:
    """Extract a function object from a downloaded file.

    Parameters
    ----------
    content
        The Python source of the remote program, as a string.
    function_name
        Name of the module-level variable holding the `outlines.Function`.

    Returns
    -------
    The `outlines.Function` instance bound to ``function_name``.
    (Fix: the previous annotation `Tuple[Callable]` was wrong — a single
    callable is returned, not a tuple.)

    Raises
    ------
    ImportError
        If a module spec could not be created for the remote program.
    AttributeError
        If ``function_name`` is not defined in the remote program.
    TypeError
        If the extracted object is not an `outlines.Function` instance.
    """
    spec = importlib.util.spec_from_loader(
        "outlines_function", loader=None, origin="github"
    )
    # Fix: the original only built `module` when `spec` was not None but used
    # it unconditionally below, which would raise a confusing NameError.
    if spec is None:
        raise ImportError("Could not create a module spec for the remote program.")

    module = importlib.util.module_from_spec(spec)
    # SECURITY: this executes arbitrary downloaded code in-process. Only load
    # programs from sources you trust.
    exec(content, module.__dict__)

    try:
        fn = getattr(module, function_name)
    except AttributeError:
        # Suppress the uninformative original traceback context.
        raise AttributeError(
            "Could not find an `outlines.Function` instance in the remote file. Make sure that the path you specified is correct."
        ) from None

    if not isinstance(fn, module.outlines.Function):
        raise TypeError(
            f"The `{function_name}` variable in the program must be an instance of `outlines.Function`"
        )

    return fn
vllm/lib/python3.10/site-packages/outlines/generate/choice.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json as pyjson
2
+ import re
3
+ from enum import Enum
4
+ from functools import singledispatch
5
+ from typing import Callable, List, Union
6
+
7
+ from outlines_core.fsm.json_schema import build_regex_from_schema
8
+
9
+ from outlines.fsm.json_schema import get_schema_from_enum
10
+ from outlines.generate.api import SequenceGeneratorAdapter
11
+ from outlines.models import OpenAI
12
+ from outlines.samplers import Sampler, multinomial
13
+
14
+ from .json import json
15
+ from .regex import regex
16
+
17
+
18
@singledispatch
def choice(
    model, choices: Union[List[str], type[Enum]], sampler: Sampler = multinomial()
) -> SequenceGeneratorAdapter:
    """Constrain generation to one of a fixed set of choices.

    ``choices`` is either a list of literal strings (matched verbatim) or an
    `Enum` class, in which case the output is constrained by the enum's JSON
    schema and decoded from JSON.
    """
    is_enum = isinstance(choices, type(Enum))

    if is_enum:
        schema = pyjson.dumps(get_schema_from_enum(choices))
        regex_str = build_regex_from_schema(schema)
    else:
        escaped = [re.escape(choice) for choice in choices]  # type: ignore
        regex_str = r"(" + r"|".join(escaped) + r")"

    generator = regex(model, regex_str, sampler)
    # Enum outputs are JSON-encoded and must be decoded; plain string choices
    # are returned as-is.
    generator.format_sequence = (
        (lambda x: pyjson.loads(x)) if is_enum else (lambda x: x)
    )

    return generator
35
+
36
+
37
@choice.register(OpenAI)
def choice_openai(
    model: OpenAI, choices: List[str], sampler: Sampler = multinomial()
) -> Callable:
    """
    Call OpenAI API with response_format of a dict:
    {"result": <one of choices>}
    """
    schema = {
        "type": "object",
        "properties": {"result": {"type": "string", "enum": choices}},
        "additionalProperties": False,
        "required": ["result"],
    }
    generator = json(model, pyjson.dumps(schema), sampler)

    def generate_choice(*args, **kwargs):
        # Unwrap the {"result": ...} envelope produced by the JSON generator.
        return generator(*args, **kwargs)["result"]

    return generate_choice
vllm/lib/python3.10/site-packages/outlines/generate/text.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import singledispatch
2
+
3
+ from outlines.generate.api import (
4
+ SequenceGeneratorAdapter,
5
+ VisionSequenceGeneratorAdapter,
6
+ )
7
+ from outlines.models import OpenAI, TransformersVision
8
+ from outlines.samplers import Sampler, multinomial
9
+
10
+
11
@singledispatch
def text(model, sampler: Sampler = multinomial()) -> SequenceGeneratorAdapter:
    """Generate text with a `Transformer` model.

    Note
    ----
    Python 3.11 allows dispatching on Union types and
    this should greatly simplify the code.

    Arguments
    ---------
    model:
        An instance of `Transformer` that represents a model from the
        `transformers` library.
    sampler:
        The sampling algorithm to use to generate token ids from the logits
        distribution.

    Returns
    -------
    A `SequenceGeneratorAdapter` instance that generates text.

    """
    # Unconstrained generation: no regex/FSM guide is attached.
    adapter = SequenceGeneratorAdapter(model, None, sampler)
    return adapter
35
+
36
+
37
@text.register(TransformersVision)
def text_vision(model, sampler: Sampler = multinomial()):
    """Unconstrained text generation for vision-language models."""
    adapter = VisionSequenceGeneratorAdapter(model, None, sampler)
    return adapter
40
+
41
+
42
@text.register(OpenAI)
def text_openai(model: OpenAI, sampler: Sampler = multinomial()) -> OpenAI:
    """Route unconstrained generation straight to the OpenAI model.

    Only the multinomial sampler is accepted, since the OpenAI API exposes no
    other sampling algorithm.
    """
    if isinstance(sampler, multinomial):
        return model

    raise NotImplementedError(
        "The OpenAI API does not support any other sampling algorithm "
        "than the multinomial sampler."
    )
vllm/lib/python3.10/site-packages/outlines/grammars.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+
3
# Directory bundled with the package that holds the built-in `.lark` grammars.
GRAMMAR_PATH = Path(__file__).parent / "grammars"


def read_grammar(grammar_file_name, base_grammar_path=GRAMMAR_PATH):
    """Read grammar file from default grammar path.

    Parameters
    ----------
    grammar_file_name
        Name of the `.lark` file to read.
    base_grammar_path
        Directory containing the grammar files; defaults to the grammars
        bundled with the package.

    Returns
    -------
    The grammar source as a string.
    """
    full_path = base_grammar_path / grammar_file_name
    # Fix: read with an explicit encoding — the bundled grammars are ASCII/UTF-8
    # but `open()` without `encoding=` uses the locale-dependent default.
    return full_path.read_text(encoding="utf-8")
11
+
12
+
13
# Grammar sources bundled with the package, loaded once at import time.
arithmetic = read_grammar("arithmetic.lark")
# Note: `json` shadows the stdlib module name inside this module's namespace.
json = read_grammar("json.lark")
vllm/lib/python3.10/site-packages/outlines/grammars/arithmetic.lark ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Grammar for basic arithmetic expressions: +, -, *, /, unary minus and
// parentheses.  Rules prefixed with `?` are inlined by lark when they reduce
// to a single child.
?start: sum

// Addition/subtraction, left-associative.
?sum: product
    | sum "+" product -> add
    | sum "-" product -> sub

// Multiplication/division binds tighter than sum.
?product: atom
    | product "*" atom -> mul
    | product "/" atom -> div

?atom: NUMBER -> number
    | "-" atom -> neg
    | "(" sum ")"

%import common.NUMBER
%import common.WS_INLINE

%ignore WS_INLINE
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Adapted from https://github.com/lark-parser/lark/blob/master/lark/grammars/common.lark

// Lark License:
// Copyright © 2017 Erez Shinan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


// Basic terminals for common use
// Terminals whose names start with `_` are inlined/filtered by lark.


//
// Numbers
//

DIGIT: "0".."9"
HEXDIGIT: "a".."f"|"A".."F"|DIGIT

INT: DIGIT+
SIGNED_INT: ["+"|"-"] INT
DECIMAL: INT "." INT? | "." INT

// float = /-?\d+(\.\d+)?([eE][+-]?\d+)?/
_EXP: ("e"|"E") SIGNED_INT
FLOAT: INT _EXP | DECIMAL _EXP?
SIGNED_FLOAT: ["+"|"-"] FLOAT

NUMBER: FLOAT | INT
SIGNED_NUMBER: ["+"|"-"] NUMBER

UNESCAPED_STRING: /\"[^"]*\"/

// based on `outlines/fsm/json_schema.py`
_NON_CONTROL_CHAR: /([^"\\\x00-\x1F\x7F-\x9F])/
_ESCAPED_CHAR: /\\/ (_NON_CONTROL_CHAR | /\\/ | /"/)
ESCAPED_STRING_INNER: _NON_CONTROL_CHAR | _ESCAPED_CHAR
ESCAPED_STRING: /"/ ESCAPED_STRING_INNER* /"/



//
// Names (Variables)
//
LCASE_LETTER: "a".."z"
UCASE_LETTER: "A".."Z"

LETTER: UCASE_LETTER | LCASE_LETTER
WORD: LETTER+

// C-style identifier.
CNAME: ("_"|LETTER) ("_"|LETTER|DIGIT)*


//
// Whitespace
//
WS_INLINE: (" "|/\t/)+
WS: /[ \t\f\r\n]/+

CR : /\r/
LF : /\n/
NEWLINE: (CR? LF)+


// Comments
SH_COMMENT: /#[^\n]*/
CPP_COMMENT: /\/\/[^\n]*/
C_COMMENT: "/*" /(.|\n)*?/ "*/"
SQL_COMMENT: /--[^\n]*/
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Module that contains all the models integrated in outlines.

We group the models in submodules by provider instead of theme (completion, chat
completion, diffusers, etc.) and use routing functions everywhere else in the
codebase.

"""

from typing import Union

from .exllamav2 import ExLlamaV2Model, exl2
from .llamacpp import LlamaCpp, llamacpp
from .mlxlm import MLXLM, mlxlm
from .openai import OpenAI, azure_openai, openai
from .transformers import Transformers, TransformerTokenizer, mamba, transformers
from .transformers_vision import TransformersVision, transformers_vision
from .vllm import VLLM, vllm

# Union of every model type that can produce logits/sequences; used for type
# annotations elsewhere in the codebase.
LogitsGenerator = Union[Transformers, LlamaCpp, OpenAI, ExLlamaV2Model, MLXLM, VLLM]
vllm/lib/python3.10/site-packages/outlines/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (937 Bytes). View file
 
vllm/lib/python3.10/site-packages/outlines/models/__pycache__/exllamav2.cpython-310.pyc ADDED
Binary file (9.55 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/models/__pycache__/llamacpp.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/models/__pycache__/mlxlm.cpython-310.pyc ADDED
Binary file (7.13 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/models/__pycache__/openai.cpython-310.pyc ADDED
Binary file (9.2 kB). View file
 
vllm/lib/python3.10/site-packages/outlines/models/__pycache__/tokenizer.cpython-310.pyc ADDED
Binary file (1.63 kB). View file