diff --git a/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/LICENSE b/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..97a60b0a4e4a0e2269033e0386ec33d4dadcfbb8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Stanis Trendelenburg + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
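For orientation, the hunks around this point vendor the `braceexpand` package wholesale; the dist-info files above carry only packaging metadata. A minimal sketch of what the library itself does (illustrative only, not part of this diff; the filename pattern is made up):

    from braceexpand import braceexpand

    # Bash-style brace expansion, returned lazily as an iterator.
    print(list(braceexpand("shard_{1..3}.bin")))
    # ['shard_1.bin', 'shard_2.bin', 'shard_3.bin']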
diff --git a/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..01b8fc7d4a10cb8b4f1d21f11d3398d07d6b3478 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/top_level.txt b/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..81ab0d77aa235d52280323722c495c6ffe22c4b7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/braceexpand-0.1.7.dist-info/top_level.txt @@ -0,0 +1 @@ +braceexpand diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdbf6157988fe093e18a6d1de60aa643b4d58fc7 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/constants.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8325db600ed174f9b1b1bb0fde2b443484f0836c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/constants.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb2e5b69c762579a17fef8a06a127f8637567822 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_reader.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_reader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4edd5c1c84e738d140c6fbab29ffac021d304bb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_reader.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_writer.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_writer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..198f96e51fcf8f6591b48db94c1d4e869e866bcb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/gguf_writer.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/lazy.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/lazy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5942975520699d2ef1346e0f0650eea8aea2e45 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/lazy.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/metadata.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8667b8e3eac6cded547e5d3b356a03dcc9cd9421 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/gguf/__pycache__/metadata.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/quants.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/quants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a574ab537daa5f49843b5e25255a48eb7cabfa00 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/quants.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/tensor_mapping.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/tensor_mapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9397c9a2f09bd39abbb39a828db1c212c0238f1d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/tensor_mapping.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/utility.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/utility.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66a28650be80ffbbdb21c3352be567d96cbc1343 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/utility.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/__pycache__/vocab.cpython-310.pyc b/vllm/lib/python3.10/site-packages/gguf/__pycache__/vocab.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..472a3f83332286f98e8585b881e65d5791aabac5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/gguf/__pycache__/vocab.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/gguf/tensor_mapping.py b/vllm/lib/python3.10/site-packages/gguf/tensor_mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f185c0658a34b14abd6893c16e5bf8216258af --- /dev/null +++ b/vllm/lib/python3.10/site-packages/gguf/tensor_mapping.py @@ -0,0 +1,657 @@ +from __future__ import annotations + +from typing import Sequence + +from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES + + +class TensorNameMap: + mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = { + # Token embeddings + MODEL_TENSOR.TOKEN_EMBD: ( + "gpt_neox.embed_in", # gptneox + "transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais exaone + "transformer.word_embeddings", # falcon + "word_embeddings", # bloom + "model.embed_tokens", # llama-hf nemotron + "tok_embeddings", # llama-pth + "embeddings.word_embeddings", # bert nomic-bert + "language_model.embedding.word_embeddings", # persimmon + "wte", # gpt2 + "transformer.embd.wte", # phi2 + "model.tok_embeddings", # internlm2 + "model.embedding", # mamba-qbert + "backbone.embedding", # mamba + "backbone.embeddings", # mamba-hf + "transformer.in_out_embed", # Grok + "embedding.word_embeddings", # chatglm + "transformer.token_embeddings", # openelm + "shared", # t5 + ), + + # Token type embeddings + MODEL_TENSOR.TOKEN_TYPES: ( + "embeddings.token_type_embeddings", # bert nomic-bert + ), + + # Normalization of token embeddings + MODEL_TENSOR.TOKEN_EMBD_NORM: ( + "word_embeddings_layernorm", # bloom + "embeddings.LayerNorm", # bert + "emb_ln", # nomic-bert + "transformer.norm", # openelm + ), + + # Position embeddings + MODEL_TENSOR.POS_EMBD: ( + "transformer.wpe", # gpt2 + "embeddings.position_embeddings", # bert + "wpe", # gpt2 + ), + + # Output + MODEL_TENSOR.OUTPUT: ( + "embed_out", # gptneox + "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais 
nemotron exaone + "output", # llama-pth bloom internlm2 + "word_embeddings_for_head", # persimmon + "lm_head.linear", # phi2 + "output_layer", # chatglm + ), + + # Output norm + MODEL_TENSOR.OUTPUT_NORM: ( + "gpt_neox.final_layer_norm", # gptneox + "transformer.ln_f", # gpt2 gpt-j falcon jais exaone + "model.norm", # llama-hf baichuan internlm2 + "norm", # llama-pth + "transformer.norm_f", # mpt dbrx + "ln_f", # refact bloom qwen gpt2 + "language_model.encoder.final_layernorm", # persimmon + "model.final_layernorm", # persimmon + "lm_head.ln", # phi2 + "model.norm_f", # mamba-qbert + "backbone.norm_f", # mamba + "transformer.rms_norm", # Grok + "encoder.final_layernorm", # chatglm + "transformer.norm", # openelm + "model.norm", # nemotron + ), + + # Rope frequencies + MODEL_TENSOR.ROPE_FREQS: ( + "rope.freqs", # llama-pth + "rotary_pos_emb.inv_freq", # chatglm + ), + } + + block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = { + # Attention norm + MODEL_TENSOR.ATTN_NORM: ( + "gpt_neox.layers.{bid}.input_layernorm", # gptneox + "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact qwen jais exaone + "transformer.blocks.{bid}.norm_1", # mpt + "transformer.h.{bid}.input_layernorm", # falcon7b + "h.{bid}.input_layernorm", # bloom + "transformer.h.{bid}.ln_mlp", # falcon40b + "model.layers.{bid}.input_layernorm", # llama-hf nemotron + "layers.{bid}.attention_norm", # llama-pth + "language_model.encoder.layers.{bid}.input_layernorm", # persimmon + "model.layers.{bid}.ln1", # yi + "h.{bid}.ln_1", # gpt2 + "transformer.h.{bid}.ln", # phi2 + "model.layers.layers.{bid}.norm", # plamo + "model.layers.{bid}.attention_norm", # internlm2 + "model.layers.{bid}.norm", # mamba-qbert + "backbone.layers.{bid}.norm", # mamba + "transformer.decoder_layer.{bid}.rms_norm", # Grok + "transformer.blocks.{bid}.norm_attn_norm.norm_1", # dbrx + "encoder.layers.{bid}.input_layernorm", # chatglm + "transformer.layers.{bid}.attn_norm", # openelm + ), + + # Attention norm 2 + MODEL_TENSOR.ATTN_NORM_2: ( + "transformer.h.{bid}.ln_attn", # falcon40b + "encoder.layer.{bid}.layer_norm_1", # jina-v2-code + ), + + # Attention query-key-value + MODEL_TENSOR.ATTN_QKV: ( + "gpt_neox.layers.{bid}.attention.query_key_value", # gptneox + "transformer.h.{bid}.attn.c_attn", # gpt2 qwen jais + "transformer.blocks.{bid}.attn.Wqkv", # mpt + "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv", # dbrx + "transformer.h.{bid}.self_attention.query_key_value", # falcon + "h.{bid}.self_attention.query_key_value", # bloom + "language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon + "model.layers.{bid}.self_attn.query_key_value", # persimmon + "h.{bid}.attn.c_attn", # gpt2 + "transformer.h.{bid}.mixer.Wqkv", # phi2 + "encoder.layers.{bid}.attn.Wqkv", # nomic-bert + "model.layers.{bid}.self_attn.qkv_proj", # phi3 + "encoder.layers.{bid}.self_attention.query_key_value", # chatglm + "transformer.layers.{bid}.attn.qkv_proj", # openelm + ), + + # Attention query + MODEL_TENSOR.ATTN_Q: ( + "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron + "layers.{bid}.attention.wq", # llama-pth + "encoder.layer.{bid}.attention.self.query", # bert + "transformer.h.{bid}.attn.q_proj", # gpt-j + "model.layers.layers.{bid}.self_attn.q_proj", # plamo + "model.layers.{bid}.attention.wq", # internlm2 + "transformer.decoder_layer.{bid}.multi_head_attention.query",# Grok + "transformer.h.{bid}.attn.attention.q_proj", # exaone + ), + + # Attention key + MODEL_TENSOR.ATTN_K: ( + "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron + 
"layers.{bid}.attention.wk", # llama-pth + "encoder.layer.{bid}.attention.self.key", # bert + "transformer.h.{bid}.attn.k_proj", # gpt-j + "transformer.h.{bid}.attn.k", # refact + "model.layers.layers.{bid}.self_attn.k_proj", # plamo + "model.layers.{bid}.attention.wk", # internlm2 + "transformer.decoder_layer.{bid}.multi_head_attention.key",# Grok + "transformer.h.{bid}.attn.attention.k_proj", # exaone + ), + + # Attention value + MODEL_TENSOR.ATTN_V: ( + "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron + "layers.{bid}.attention.wv", # llama-pth + "encoder.layer.{bid}.attention.self.value", # bert + "transformer.h.{bid}.attn.v_proj", # gpt-j + "transformer.h.{bid}.attn.v", # refact + "model.layers.layers.{bid}.self_attn.v_proj", # plamo + "model.layers.{bid}.attention.wv", # internlm2 + "transformer.decoder_layer.{bid}.multi_head_attention.value",# Grok + "transformer.h.{bid}.attn.attention.v_proj", # exaone + ), + + # Attention output + MODEL_TENSOR.ATTN_OUT: ( + "gpt_neox.layers.{bid}.attention.dense", # gptneox + "transformer.h.{bid}.attn.c_proj", # gpt2 refact qwen jais + "transformer.blocks.{bid}.attn.out_proj", # mpt + "transformer.h.{bid}.self_attention.dense", # falcon + "h.{bid}.self_attention.dense", # bloom + "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron + "layers.{bid}.attention.wo", # llama-pth + "encoder.layer.{bid}.attention.output.dense", # bert + "transformer.h.{bid}.attn.out_proj", # gpt-j + "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon + "model.layers.{bid}.self_attn.dense", # persimmon + "h.{bid}.attn.c_proj", # gpt2 + "transformer.h.{bid}.mixer.out_proj", # phi2 + "model.layers.layers.{bid}.self_attn.o_proj", # plamo + "model.layers.{bid}.attention.wo", # internlm2 + "encoder.layers.{bid}.attn.out_proj", # nomic-bert + "transformer.decoder_layer.{bid}.multi_head_attention.linear", # Grok + "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj", # dbrx + "encoder.layers.{bid}.self_attention.dense", # chatglm + "transformer.layers.{bid}.attn.out_proj", # openelm + "transformer.h.{bid}.attn.attention.out_proj", # exaone + ), + + # Attention output norm + MODEL_TENSOR.ATTN_OUT_NORM: ( + "encoder.layer.{bid}.attention.output.LayerNorm", # bert + "encoder.layers.{bid}.norm1", # nomic-bert + "transformer.decoder_layer.{bid}.rms_norm_1", # Grok + "transformer.blocks.{bid}.norm_attn_norm.norm_2", # dbrx + ), + + MODEL_TENSOR.ATTN_POST_NORM: ( + "model.layers.{bid}.post_attention_layernorm", # gemma2 + ), + + # Rotary embeddings + MODEL_TENSOR.ATTN_ROT_EMBD: ( + "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf + "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth + "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq", # plamo + "transformer.h.{bid}.attn.rotary_emb.inv_freq", # codeshell + ), + + # Feed-forward norm + MODEL_TENSOR.FFN_NORM: ( + "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox + "transformer.h.{bid}.ln_2", # gpt2 refact qwen jais exaone + "h.{bid}.post_attention_layernorm", # bloom + "transformer.blocks.{bid}.norm_2", # mpt + "model.layers.{bid}.post_attention_layernorm", # llama-hf nemotron + "layers.{bid}.ffn_norm", # llama-pth + "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon + "model.layers.{bid}.ln2", # yi + "h.{bid}.ln_2", # gpt2 + "model.layers.{bid}.ffn_norm", # internlm2 + "transformer.decoder_layer.{bid}.rms_norm_2", # Grok + "encoder.layers.{bid}.post_attention_layernorm", # chatglm + "transformer.layers.{bid}.ffn_norm", # openelm + 
), + + # Post feed-forward norm + MODEL_TENSOR.FFN_PRE_NORM: ( + "model.layers.{bid}.pre_feedforward_layernorm", # gemma2 + ), + + # Post feed-forward norm + MODEL_TENSOR.FFN_POST_NORM: ( + "model.layers.{bid}.post_feedforward_layernorm", # gemma2 + ), + + MODEL_TENSOR.FFN_GATE_INP: ( + "layers.{bid}.feed_forward.gate", # mixtral + "model.layers.{bid}.block_sparse_moe.gate", # mixtral + "model.layers.{bid}.mlp.gate", # qwen2moe + "transformer.decoder_layer.{bid}.router", # Grok + "transformer.blocks.{bid}.ffn.router.layer", # dbrx + ), + + MODEL_TENSOR.FFN_GATE_INP_SHEXP: ( + "model.layers.{bid}.mlp.shared_expert_gate", # qwen2moe + ), + + # Feed-forward up + MODEL_TENSOR.FFN_UP: ( + "gpt_neox.layers.{bid}.mlp.dense_h_to_4h", # gptneox + "transformer.h.{bid}.mlp.c_fc", # gpt2 jais + "transformer.blocks.{bid}.ffn.up_proj", # mpt + "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon + "h.{bid}.mlp.dense_h_to_4h", # bloom + "model.layers.{bid}.mlp.up_proj", # llama-hf refact nemotron + "layers.{bid}.feed_forward.w3", # llama-pth + "encoder.layer.{bid}.intermediate.dense", # bert + "transformer.h.{bid}.mlp.fc_in", # gpt-j + "transformer.h.{bid}.mlp.linear_3", # refact + "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon + "model.layers.{bid}.mlp.dense_h_to_4h", # persimmon + "transformer.h.{bid}.mlp.w1", # qwen + "h.{bid}.mlp.c_fc", # gpt2 + "transformer.h.{bid}.mlp.fc1", # phi2 + "model.layers.{bid}.mlp.fc1", # phi2 + "model.layers.{bid}.mlp.gate_up_proj", # phi3 + "model.layers.layers.{bid}.mlp.up_proj", # plamo + "model.layers.{bid}.feed_forward.w3", # internlm2 + "encoder.layers.{bid}.mlp.fc11", # nomic-bert + "model.layers.{bid}.mlp.c_fc", # starcoder2 + "encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2 + "model.layers.{bid}.residual_mlp.w3", # arctic + "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm + "transformer.h.{bid}.mlp.c_fc_1", # exaone + ), + + MODEL_TENSOR.FFN_UP_EXP: ( + "layers.{bid}.feed_forward.experts.w3", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx + "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe (merged) + ), + + MODEL_TENSOR.FFN_UP_SHEXP: ( + "model.layers.{bid}.mlp.shared_expert.up_proj", # qwen2moe + "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek2 + ), + + # AWQ-activation gate + MODEL_TENSOR.FFN_ACT: ( + "transformer.blocks.{bid}.ffn.act", # mpt + ), + + # Feed-forward gate + MODEL_TENSOR.FFN_GATE: ( + "model.layers.{bid}.mlp.gate_proj", # llama-hf refact + "layers.{bid}.feed_forward.w1", # llama-pth + "transformer.h.{bid}.mlp.w2", # qwen + "transformer.h.{bid}.mlp.c_fc2", # jais + "model.layers.layers.{bid}.mlp.gate_proj", # plamo + "model.layers.{bid}.feed_forward.w1", # internlm2 + "encoder.layers.{bid}.mlp.fc12", # nomic-bert + "encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2 + "transformer.h.{bid}.mlp.linear_1", # refact + "model.layers.{bid}.residual_mlp.w1", # arctic + "transformer.h.{bid}.mlp.c_fc_0", # exaone + ), + + MODEL_TENSOR.FFN_GATE_EXP: ( + "layers.{bid}.feed_forward.experts.w1", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx + "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe (merged) + ), + + MODEL_TENSOR.FFN_GATE_SHEXP: ( + "model.layers.{bid}.mlp.shared_expert.gate_proj", # qwen2moe + "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek2 + ), + + # Feed-forward down + MODEL_TENSOR.FFN_DOWN: ( + 
"gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox + "transformer.h.{bid}.mlp.c_proj", # gpt2 refact qwen jais + "transformer.blocks.{bid}.ffn.down_proj", # mpt + "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon + "h.{bid}.mlp.dense_4h_to_h", # bloom + "model.layers.{bid}.mlp.down_proj", # llama-hf nemotron + "layers.{bid}.feed_forward.w2", # llama-pth + "encoder.layer.{bid}.output.dense", # bert + "transformer.h.{bid}.mlp.fc_out", # gpt-j + "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon + "model.layers.{bid}.mlp.dense_4h_to_h", # persimmon + "h.{bid}.mlp.c_proj", # gpt2 + "transformer.h.{bid}.mlp.fc2", # phi2 + "model.layers.{bid}.mlp.fc2", # phi2 + "model.layers.layers.{bid}.mlp.down_proj", # plamo + "model.layers.{bid}.feed_forward.w2", # internlm2 + "encoder.layers.{bid}.mlp.fc2", # nomic-bert + "model.layers.{bid}.mlp.c_proj", # starcoder2 + "encoder.layer.{bid}.mlp.wo", # jina-bert-v2 + "transformer.layers.{bid}.ffn.proj_2", # openelm + "model.layers.{bid}.residual_mlp.w2", # arctic + "encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2 + "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm + "model.layers.h.{bid}.mlp.c_proj", # exaone + ), + + MODEL_TENSOR.FFN_DOWN_EXP: ( + "layers.{bid}.feed_forward.experts.w2", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear_1", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.w2", # dbrx + "model.layers.{bid}.mlp.experts.down_proj", # qwen2moe (merged) + ), + + MODEL_TENSOR.FFN_DOWN_SHEXP: ( + "model.layers.{bid}.mlp.shared_expert.down_proj", # qwen2moe + "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek2 + ), + + MODEL_TENSOR.ATTN_Q_NORM: ( + "language_model.encoder.layers.{bid}.self_attention.q_layernorm", + "model.layers.{bid}.self_attn.q_layernorm", # persimmon + "model.layers.{bid}.self_attn.q_norm", # cohere + "transformer.blocks.{bid}.attn.q_ln", # sea-lion + "encoder.layer.{bid}.attention.self.layer_norm_q", # jina-bert-v2 + "transformer.layers.{bid}.attn.q_norm", # openelm + ), + + MODEL_TENSOR.ATTN_K_NORM: ( + "language_model.encoder.layers.{bid}.self_attention.k_layernorm", + "model.layers.{bid}.self_attn.k_layernorm", # persimmon + "model.layers.{bid}.self_attn.k_norm", # cohere + "transformer.blocks.{bid}.attn.k_ln", # sea-lion + "encoder.layer.{bid}.attention.self.layer_norm_k", # jina-bert-v2 + "transformer.layers.{bid}.attn.k_norm", # openelm + ), + + MODEL_TENSOR.ROPE_FREQS: ( + "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq", # persimmon + ), + + MODEL_TENSOR.LAYER_OUT_NORM: ( + "encoder.layer.{bid}.output.LayerNorm", # bert + "encoder.layers.{bid}.norm2", # nomic-bert + "transformer.decoder_layer.{bid}.rms_norm_3", # Grok + "encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2 + "encoder.layer.{bid}.layer_norm_2" # jina-v2-code + ), + + MODEL_TENSOR.SSM_IN: ( + "model.layers.{bid}.in_proj", + "backbone.layers.{bid}.mixer.in_proj", + ), + + MODEL_TENSOR.SSM_CONV1D: ( + "model.layers.{bid}.conv1d", + "backbone.layers.{bid}.mixer.conv1d", + ), + + MODEL_TENSOR.SSM_X: ( + "model.layers.{bid}.x_proj", + "backbone.layers.{bid}.mixer.x_proj", + ), + + MODEL_TENSOR.SSM_DT: ( + "model.layers.{bid}.dt_proj", + "backbone.layers.{bid}.mixer.dt_proj", + ), + + MODEL_TENSOR.SSM_A: ( + "model.layers.{bid}.A_log", + "backbone.layers.{bid}.mixer.A_log", + ), + + MODEL_TENSOR.SSM_D: ( + "model.layers.{bid}.D", + "backbone.layers.{bid}.mixer.D", + ), + + MODEL_TENSOR.SSM_OUT: ( + "model.layers.{bid}.out_proj", + "backbone.layers.{bid}.mixer.out_proj", + ), + + 
MODEL_TENSOR.ATTN_Q_A: ( + "model.layers.{bid}.self_attn.q_a_proj", # deepseek2 + ), + + MODEL_TENSOR.ATTN_Q_B: ( + "model.layers.{bid}.self_attn.q_b_proj", # deepseek2 + ), + + MODEL_TENSOR.ATTN_KV_A_MQA: ( + "model.layers.{bid}.self_attn.kv_a_proj_with_mqa", # deepseek2 + ), + + MODEL_TENSOR.ATTN_KV_B: ( + "model.layers.{bid}.self_attn.kv_b_proj", # deepseek2 + ), + + MODEL_TENSOR.ATTN_Q_A_NORM: ( + "model.layers.{bid}.self_attn.q_a_layernorm", # deepseek2 + ), + + MODEL_TENSOR.ATTN_KV_A_NORM: ( + "model.layers.{bid}.self_attn.kv_a_layernorm", # deepseek2 + ), + + MODEL_TENSOR.ATTN_SUB_NORM: ( + "model.layers.{bid}.self_attn.inner_attn_ln", # bitnet + ), + + MODEL_TENSOR.FFN_SUB_NORM: ( + "model.layers.{bid}.mlp.ffn_layernorm", # bitnet + ), + + MODEL_TENSOR.DEC_ATTN_NORM: ( + "decoder.block.{bid}.layer.0.layer_norm", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_Q: ( + "decoder.block.{bid}.layer.0.SelfAttention.q", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_K: ( + "decoder.block.{bid}.layer.0.SelfAttention.k", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_V: ( + "decoder.block.{bid}.layer.0.SelfAttention.v", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_OUT: ( + "decoder.block.{bid}.layer.0.SelfAttention.o", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_REL_B: ( + "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_NORM: ( + "decoder.block.{bid}.layer.1.layer_norm", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_Q: ( + "decoder.block.{bid}.layer.1.EncDecAttention.q", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_K: ( + "decoder.block.{bid}.layer.1.EncDecAttention.k", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_V: ( + "decoder.block.{bid}.layer.1.EncDecAttention.v", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_OUT: ( + "decoder.block.{bid}.layer.1.EncDecAttention.o", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: ( + "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias", # t5 + ), + + MODEL_TENSOR.DEC_FFN_NORM: ( + "decoder.block.{bid}.layer.2.layer_norm", # t5 + ), + + MODEL_TENSOR.DEC_FFN_GATE: ( + "decoder.block.{bid}.layer.2.DenseReluDense.wi_0", # flan-t5 + ), + + MODEL_TENSOR.DEC_FFN_UP: ( + "decoder.block.{bid}.layer.2.DenseReluDense.wi", # t5 + "decoder.block.{bid}.layer.2.DenseReluDense.wi_1", # flan-t5 + ), + + MODEL_TENSOR.DEC_FFN_DOWN: ( + "decoder.block.{bid}.layer.2.DenseReluDense.wo", # t5 + ), + + MODEL_TENSOR.DEC_OUTPUT_NORM: ( + "decoder.final_layer_norm", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_NORM: ( + "encoder.block.{bid}.layer.0.layer_norm", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_Q: ( + "encoder.block.{bid}.layer.0.SelfAttention.q", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_K: ( + "encoder.block.{bid}.layer.0.SelfAttention.k", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_V: ( + "encoder.block.{bid}.layer.0.SelfAttention.v", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_OUT: ( + "encoder.block.{bid}.layer.0.SelfAttention.o", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_REL_B: ( + "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5 + ), + + MODEL_TENSOR.ENC_FFN_NORM: ( + "encoder.block.{bid}.layer.1.layer_norm", # t5 + ), + + MODEL_TENSOR.ENC_FFN_GATE: ( + "encoder.block.{bid}.layer.1.DenseReluDense.wi_0", # flan-t5 + ), + + MODEL_TENSOR.ENC_FFN_UP: ( + "encoder.block.{bid}.layer.1.DenseReluDense.wi", # t5 + "encoder.block.{bid}.layer.1.DenseReluDense.wi_1", # flan-t5 + ), + + MODEL_TENSOR.ENC_FFN_DOWN: ( + "encoder.block.{bid}.layer.1.DenseReluDense.wo", # t5 + ), + + MODEL_TENSOR.ENC_OUTPUT_NORM: ( + "encoder.final_layer_norm", # t5 + ), + } + + # 
architecture-specific block mappings + arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = { + MODEL_ARCH.ARCTIC: { + MODEL_TENSOR.FFN_NORM: ( + "model.layers.{bid}.residual_layernorm", + ), + MODEL_TENSOR.FFN_NORM_EXP: ( + "model.layers.{bid}.post_attention_layernorm", + ), + }, + } + + mapping: dict[str, tuple[MODEL_TENSOR, str]] + + def __init__(self, arch: MODEL_ARCH, n_blocks: int): + self.mapping = {} + for tensor, keys in self.mappings_cfg.items(): + if tensor not in MODEL_TENSORS[arch]: + continue + tensor_name = TENSOR_NAMES[tensor] + self.mapping[tensor_name] = (tensor, tensor_name) + for key in keys: + self.mapping[key] = (tensor, tensor_name) + if arch in self.arch_block_mappings_cfg: + self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch]) + for bid in range(n_blocks): + for tensor, keys in self.block_mappings_cfg.items(): + if tensor not in MODEL_TENSORS[arch]: + continue + + tensor_name = TENSOR_NAMES[tensor].format(bid = bid) + self.mapping[tensor_name] = (tensor, tensor_name) + for key in keys: + key = key.format(bid = bid) + self.mapping[key] = (tensor, tensor_name) + + def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None: + result = self.mapping.get(key) + if result is not None: + return result + for suffix in try_suffixes: + if key.endswith(suffix): + result = self.mapping.get(key[:-len(suffix)]) + if result is not None: + return result[0], result[1] + suffix + return None + + def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None: + result = self.get_type_and_name(key, try_suffixes = try_suffixes) + if result is None: + return None + return result[1] + + def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None: + result = self.get_type_and_name(key, try_suffixes = try_suffixes) + if result is None: + return None + return result[0] + + def __getitem__(self, key: str) -> str: + try: + return self.mapping[key][1] + except KeyError: + raise KeyError(key) + + def __contains__(self, key: str) -> bool: + return key in self.mapping + + def __repr__(self) -> str: + return repr(self.mapping) + + +def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap: + return TensorNameMap(arch, n_blocks) diff --git a/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/LICENSE b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2a920c59d8abdd485a774087915986448495fd7c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/LICENSE @@ -0,0 +1,46 @@ +Copyright (c) 2015 Vitaly Puzrin, Alex Kocharin. 
+Copyright (c) 2021 Taneli Hukkinen + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +.parse() is based on Joyent's node.js `url` code: + +Copyright Joyent, Inc. and other Node contributors. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. 
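Returning briefly to the `gguf/tensor_mapping.py` file added above: `TensorNameMap` resolves checkpoint-specific tensor names (HF, llama-pth, GGML-style, etc.) to canonical GGUF names via `get_type_and_name`/`get_name`, optionally retrying with suffixes such as `.weight`. A small usage sketch (illustrative; the resolved string comes from `TENSOR_NAMES` in `gguf.constants`, which is not shown in this diff, so the expected output is an assumption):

    from gguf.constants import MODEL_ARCH
    from gguf.tensor_mapping import get_tensor_name_map

    # Build the lookup table for a 32-block LLaMA-style model.
    tmap = get_tensor_name_map(MODEL_ARCH.LLAMA, n_blocks=32)

    # Hugging Face name -> canonical GGUF name (expected: "blk.0.attn_q.weight").
    name = tmap.get_name("model.layers.0.self_attn.q_proj.weight",
                         try_suffixes=(".weight", ".bias"))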
diff --git a/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/METADATA b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b4670e86b6dc207c944c55c5d3b84911fb41157a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/METADATA @@ -0,0 +1,32 @@ +Metadata-Version: 2.1 +Name: mdurl +Version: 0.1.2 +Summary: Markdown URL utilities +Keywords: markdown,commonmark +Author-email: Taneli Hukkinen +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Typing :: Typed +Project-URL: Homepage, https://github.com/executablebooks/mdurl + +# mdurl + +[![Build Status](https://github.com/executablebooks/mdurl/workflows/Tests/badge.svg?branch=master)](https://github.com/executablebooks/mdurl/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush) +[![codecov.io](https://codecov.io/gh/executablebooks/mdurl/branch/master/graph/badge.svg)](https://codecov.io/gh/executablebooks/mdurl) +[![PyPI version](https://img.shields.io/pypi/v/mdurl)](https://pypi.org/project/mdurl) + +This is a Python port of the JavaScript [mdurl](https://www.npmjs.com/package/mdurl) package. +See the [upstream README.md file](https://github.com/markdown-it/mdurl/blob/master/README.md) for API documentation. 
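A quick sketch of the vendored `mdurl` API described above (illustrative; it assumes the Python package re-exports `encode`/`decode` at the top level, mirroring the upstream JavaScript helpers):

    import mdurl

    # Percent-encode a markdown URL, then round-trip it back.
    encoded = mdurl.encode("https://example.com/a b")
    decoded = mdurl.decode(encoded)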
+ diff --git a/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/RECORD b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..5ddbed7bd0213fb53481397443b146f128b7484e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/RECORD @@ -0,0 +1,19 @@ +mdurl-0.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +mdurl-0.1.2.dist-info/LICENSE,sha256=fGBd9uKGZ6lgMRjpgnT2SknOPu0NJvzM6VNKNF4O-VU,2338 +mdurl-0.1.2.dist-info/METADATA,sha256=tTsp1I9Jk2cFP9o8gefOJ9JVg4Drv4PmYCOwLrfd0l0,1638 +mdurl-0.1.2.dist-info/RECORD,, +mdurl-0.1.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +mdurl-0.1.2.dist-info/WHEEL,sha256=4TfKIB_xu-04bc2iKz6_zFt-gEFEEDU_31HGhqzOCE8,81 +mdurl/__init__.py,sha256=1vpE89NyXniIRZNC_4f6BPm3Ub4bPntjfyyhLRR7opU,547 +mdurl/__pycache__/__init__.cpython-310.pyc,, +mdurl/__pycache__/_decode.cpython-310.pyc,, +mdurl/__pycache__/_encode.cpython-310.pyc,, +mdurl/__pycache__/_format.cpython-310.pyc,, +mdurl/__pycache__/_parse.cpython-310.pyc,, +mdurl/__pycache__/_url.cpython-310.pyc,, +mdurl/_decode.py,sha256=3Q_gDQqU__TvDbu7x-b9LjbVl4QWy5g_qFwljcuvN_Y,3004 +mdurl/_encode.py,sha256=goJLUFt1h4rVZNqqm9t15Nw2W-bFXYQEy3aR01ImWvs,2602 +mdurl/_format.py,sha256=xZct0mdePXA0H3kAqxjGtlB5O86G35DAYMGkA44CmB4,626 +mdurl/_parse.py,sha256=ezZSkM2_4NQ2Zx047sEdcJG7NYQRFHiZK7Y8INHFzwY,11374 +mdurl/_url.py,sha256=5kQnRQN2A_G4svLnRzZcG0bfoD9AbBrYDXousDHZ3z0,284 +mdurl/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 diff --git a/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..668ba4d0151c5c76ed6e758061daa8c1b0bf5d21 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mdurl-0.1.2.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.7.1 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/vllm/lib/python3.10/site-packages/outlines/__init__.py b/vllm/lib/python3.10/site-packages/outlines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..307d2ba6f484f7b4c416189374462ed762789d25 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/__init__.py @@ -0,0 +1,20 @@ +"""Outlines is a Generative Model Programming Framework.""" +import outlines.generate +import outlines.grammars +import outlines.models +import outlines.processors +import outlines.types +from outlines.base import vectorize +from outlines.caching import clear_cache, disable_cache, get_cache +from outlines.function import Function +from outlines.prompts import prompt + +__all__ = [ + "clear_cache", + "disable_cache", + "get_cache", + "Function", + "prompt", + "vectorize", + "grammars", +] diff --git a/vllm/lib/python3.10/site-packages/outlines/__pycache__/_version.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bc75d16b3c68c4ebfd0a9aec18cc566b6c82aa7 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/__pycache__/_version.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/outlines/__pycache__/base.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e69b189cfbe1aa36199bbe22689faa8d017cd942 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/__pycache__/base.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/__pycache__/function.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/__pycache__/function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb8967305173ea1c1b2faadebdaf047d65ef7dc2 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/__pycache__/function.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/caching.py b/vllm/lib/python3.10/site-packages/outlines/caching.py new file mode 100644 index 0000000000000000000000000000000000000000..6fdda6214b06bbe996f23b5e64a642f5d6aceecb --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/caching.py @@ -0,0 +1,179 @@ +import asyncio +import contextlib +import functools +import os +from typing import Callable, Optional + +import cloudpickle +from diskcache import Cache, Disk +from diskcache.core import ENOVAL, UNKNOWN, args_to_key, full_name + +_caching_enabled = True + + +class CloudpickleDisk(Disk): + def __init__(self, directory, compress_level=1, **kwargs): + self.compress_level = compress_level + super().__init__(directory, **kwargs) + + def put(self, key): + data = cloudpickle.dumps(key) + return super().put(data) + + def get(self, key, raw): + data = super().get(key, raw) + return cloudpickle.loads(data) + + def store(self, value, read, key=UNKNOWN): + if not read: + value = cloudpickle.dumps(value) + return super().store(value, read, key=key) + + def fetch(self, mode, filename, value, read): + data = super().fetch(mode, filename, value, read) + if not read: + data = cloudpickle.loads(data) + return data + + +@functools.lru_cache(1) +def get_cache(): + """Get the context object that contains previously-computed return values. + + The cache is used to avoid unnecessary computations and API calls, which can + be long and expensive for large models. + + The cache directory defaults to `HOMEDIR/.cache/outlines`, but this choice + can be overridden by the user by setting the value of the `OUTLINES_CACHE_DIR` + environment variable. + + """ + from outlines._version import __version__ as outlines_version # type: ignore + + home_dir = os.path.expanduser("~") + cache_dir = os.environ.get("OUTLINES_CACHE_DIR", f"{home_dir}/.cache/outlines") + memory = Cache( + cache_dir, + eviction_policy="none", + cull_limit=0, + disk=CloudpickleDisk, + ) + + # ensure if version upgrade occurs, old cache is pruned + if outlines_version != memory.get("__version__"): + memory.clear() + memory["__version__"] = outlines_version + + return memory + + +def cache(expire: Optional[float] = None, typed=False, ignore=()): + """Caching decorator for memoizing function calls. + + The cache key is created based on the values returned by the key_function callable + if provided or based on the arguments of the decorated function directly otherwise + + This is based on `diskcache`'s `memoize`. + + Parameters + ---------- + expire + Seconds until arguments expire. + typed + Cache different types separately. + ignore + Positional or keyword arguments to ignore. + + Returns + ------- + A decorator function that can be applied to other functions. 
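+
+    Examples
+    --------
+    A minimal illustrative sketch (the decorated function and the expiry value
+    are made up for demonstration); the cache key is derived from the
+    function's fully-qualified name and its arguments:
+
+    >>> from outlines.caching import cache
+    >>> @cache(expire=3600)
+    ... def square(x):
+    ...     return x * x
+    >>> square(4)  # a repeat call with the same argument is served from disk
+    16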
+    """
+
+    def decorator(cached_function: Callable):
+        memory = get_cache()
+
+        base = (full_name(cached_function),)
+
+        if asyncio.iscoroutinefunction(cached_function):
+
+            async def wrapper(*args, **kwargs):
+                if not _caching_enabled:
+                    return await cached_function(*args, **kwargs)
+
+                cache_key = wrapper.__cache_key__(*args, **kwargs)
+                result = wrapper.__memory__.get(cache_key, default=ENOVAL, retry=True)
+
+                if result is ENOVAL:
+                    result = await cached_function(*args, **kwargs)
+                    wrapper.__memory__.set(cache_key, result, expire, retry=True)
+
+                return result
+
+        else:
+
+            def wrapper(*args, **kwargs):
+                if not _caching_enabled:
+                    return cached_function(*args, **kwargs)
+
+                cache_key = wrapper.__cache_key__(*args, **kwargs)
+                result = wrapper.__memory__.get(cache_key, default=ENOVAL, retry=True)
+
+                if result is ENOVAL:
+                    result = cached_function(*args, **kwargs)
+                    wrapper.__memory__.set(cache_key, result, expire, retry=True)
+
+                return result
+
+        def __cache_key__(*args, **kwargs):
+            """Make key for cache given function arguments."""
+            return args_to_key(base, args, kwargs, typed, ignore)
+
+        wrapper.__cache_key__ = __cache_key__  # type: ignore
+        wrapper.__memory__ = memory  # type: ignore
+        wrapper.__wrapped__ = cached_function  # type: ignore
+
+        return wrapper
+
+    return decorator
+
+
+def disable_cache():
+    """Disable the cache for this session.
+
+    Generative models output different results each time they are called when
+    sampling. This can be a desirable property for some workflows, in which case
+    one can call `outlines.caching.disable_cache` to disable the cache for the
+    session.
+
+    This function does not delete the cache; call `outlines.caching.clear_cache`
+    instead. It also does not overwrite the cache with the values returned
+    during the session.
+
+    Example
+    -------
+
+    `disable_cache` should be called right after importing outlines:
+
+    >>> import outlines.caching as cache
+    >>> cache.disable_cache()
+
+    """
+    global _caching_enabled
+    _caching_enabled = False
+
+
+def clear_cache():
+    """Erase the cache completely."""
+    memory = get_cache()
+    memory.clear()
+
+
+@contextlib.contextmanager
+def cache_disabled():
+    # Temporarily flip outlines.caching._caching_enabled off for the duration
+    # of the `with` block, restoring the previous state afterwards.
+    global _caching_enabled
+    original_state = _caching_enabled
+    _caching_enabled = False
+    try:
+        yield
+    finally:
+        _caching_enabled = original_state
diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/__init__.py b/vllm/lib/python3.10/site-packages/outlines/fsm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2478a529168ef06bbe231b13969175dc2170f829
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/guide.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/guide.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd4c2dd23bc4c548e3a2941e299773e6c937a712
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/guide.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/json_schema.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/json_schema.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..492487d7d76f1269a7463a7cf87aaa7c2b60655d
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/json_schema.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/parsing.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/parsing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c748d267b9657a13675de1be2d161437565fef4f
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/parsing.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/types.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..596d4e11f7e4788b770001b3fed812a8e5e7a36a
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/fsm/__pycache__/types.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/guide.py b/vllm/lib/python3.10/site-packages/outlines/fsm/guide.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b97d7729ddf5a7b035afec0ef19cdbca50afdac
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/outlines/fsm/guide.py
@@ -0,0 +1,276 @@
+import collections
+import copy
+import warnings
+from typing import TYPE_CHECKING, Any, Generator, Union
+
+import torch
+from lark.indenter import DedentError
+from lark.lexer import UnexpectedCharacters, UnexpectedToken
+from outlines_core.fsm.guide import Generate
+from outlines_core.fsm.guide import Guide as CoreGuide
+from outlines_core.fsm.guide import RegexGuide as CoreRegexGuide
+from outlines_core.fsm.guide import Write
+from outlines_core.fsm.guide import (
+    create_states_mapping as uncached_create_states_mapping,
+)
+
+from outlines import grammars
+from outlines.fsm.parsing import PartialLark, PartialParserState
+
+if TYPE_CHECKING:
+    from outlines.models.tokenizer import Tokenizer
+
+
+Instruction = Union[Write, Generate]
+
+
+class Guide(CoreGuide):
+    """Base definition of a generation guide.
+
+    A generation guide defines the behavior of a finite-state machine that guides
+    a text generation procedure. Unlike the DFAs built from regular expressions,
+    guides can also emit `Write` instructions, which tell the model that it can
+    append a whole sequence of tokens (for example an entire word) instead of
+    generating it one token at a time.
+
+    """
+
+    initial_state: Any
+
+
+class StopAtEOSGuide(Guide):
+    """Guide to generate tokens until the EOS token has been generated."""
+
+    final_state = 1
+    start_state = 0  # TODO: remove start_state, use only initial_state
+    initial_state = 0
+
+    def __init__(self, tokenizer: "Tokenizer"):
+        """Initialize the generation guide.
+
+        tokenizer
+            The tokenizer used by the model; the guide reads its EOS token id
+            and vocabulary.
+ + """ + self.eos_token_id = tokenizer.eos_token_id + self.vocabulary = tokenizer.vocabulary.values() + + def get_next_instruction(self, state: int) -> Instruction: + if self.is_final_state(state): + return Write([self.eos_token_id]) + return Generate(None) + + def get_next_state(self, state: int, token_id: int) -> int: + if token_id == self.eos_token_id or state == self.final_state: + return self.final_state + + return self.initial_state + + def is_final_state(self, state: int): + return state == self.final_state + + def copy(self): + return self + + +def cached_create_states_mapping(regex_string, tokenizer, *args, **kwargs): + return uncached_create_states_mapping(regex_string, tokenizer, *args, **kwargs) + + +class RegexGuide(CoreRegexGuide): + """ + Guide to generate text in the language of a regular expression. + CoreRegexGuide with outlines cache + """ + + @classmethod + def from_regex( + cls, + regex_string: str, + tokenizer, + **kwargs, + ): + return super().from_regex( + regex_string, + tokenizer, + _create_states_mapping=cached_create_states_mapping, + **kwargs, + ) + + +CFGState = collections.namedtuple("CFGState", ["parser_state", "prev_token"]) + + +class CFGGuide(Guide): + """Guide to generate text that is in the language of a context-free Lark grammar.""" + + def __init__(self, cfg_string: str, tokenizer): + """ + Construct the PartialLark parser and set the empty initial_state (PartialParserState) + """ + warnings.warn( + "Outlines' public *community-contributed* CFG structured generation is experimental. " + "Please review https://dottxt-ai.github.io/outlines/latest/reference/generation/cfg#disclaimer" + ) + + self.cfg_string = cfg_string + self.tokenizer = tokenizer + self.eos_token_id = self.tokenizer.eos_token_id + self.parser = PartialLark( + cfg_string, + parser="lalr", + import_paths=[grammars.GRAMMAR_PATH], + ) + self.initial_state = CFGState( + parser_state=self.parser.parse(""), prev_token=None + ) + + def get_next_instruction(self, state: CFGState) -> Instruction: + """Return the next instruction for guided generation. + + Current lazy approach: + - For each token in the vocabulary + - create a copy of the parsers state + - add the tokens to the parsers input text + - if valid, add token to returned tokens + + Further refinements are necessary for performant text processing. + + Parameters + ---------- + state + The guides current PartialParserState, or None if complete + + Returns + ------- + A `Generate` instance that contains the model and the allowed token ids. + + """ + + if state.parser_state is None: + return Write(torch.tensor([self.eos_token_id])) + + valid_tokens = list( + self.iter_valid_token_ids(state, self.tokenizer.vocabulary.values()) + ) + if len(valid_tokens) == 1: + return Write(torch.tensor(valid_tokens)) + return Generate(torch.tensor(valid_tokens)) + + def iter_valid_token_ids( + self, state: CFGState, candidate_token_ids: list + ) -> Generator[int, None, None]: + """ + Iterate over the given token_ids and yield those that are valid for the current parser state. + + Parameters + ---------- + parser_state + The current state of the parser, or None if complete. + token_ids + The list of token ids to check for validity. + + Yields + ------ + int + Valid token ids. 
+ """ + if state.parser_state is None: + yield self.eos_token_id + return + + for token_id in candidate_token_ids: + if token_id == self.eos_token_id: + if self.can_terminate_state(state): + yield token_id + else: + try: + self._get_parser_state_token_applied(state, int(token_id)) + yield token_id + except ( + ValueError, + EOFError, + UnexpectedToken, + UnexpectedCharacters, + DedentError, + ): + pass + + def get_next_state(self, state: CFGState, token_id: int) -> CFGState: + """ + Update the state of the guide. + Decode the token_id, and calculate the new parser_state with the token applied. + + Parameters + ---------- + state + The guides current PartialParserState, or None if complete + token_id + The id of the token that was just generated. + + Returns + ------- + The guides new PartialParserState + + """ + if state.parser_state is None or token_id == self.eos_token_id: + parser_state = None + else: + parser_state = self._get_parser_state_token_applied(state, int(token_id)) + return CFGState(parser_state=parser_state, prev_token=token_id) + + def _get_parser_state_token_applied( + self, state: CFGState, token_id: int + ) -> PartialParserState: + """ + Don't mutate `parser_state`, copy to protect + + Get the token string + - if first token in generation: tokenizer.decode (no leading whitespace) + - else: normalized (with possibly leading whitespace) + + Don't allow empty ("") tokens, raise ValueError + """ + parser_state = copy.copy(state.parser_state) # prevent side effects + + # normalize + if state.prev_token is None: + new_token_str = self.tokenizer.decode([token_id])[0] + else: + prev_token_str = self.tokenizer.decode([[state.prev_token]])[0] + combined_token_str = self.tokenizer.decode([[state.prev_token, token_id]])[ + 0 + ] + new_token_str = combined_token_str[len(prev_token_str) :] + + if new_token_str == "": + raise ValueError("empty next token") + + # update parser with new token + parser_state.lexer.state.text += new_token_str + self.parser.parse_from_state(parser_state, is_end=False) + + return parser_state + + def is_final_state(self, state: CFGState) -> bool: + # TODO: remove this method, use can_terminate_state and must_terminate_state + # here and in RegexGuide per https://github.com/dottxt-ai/outlines/issues/885 + return self.can_terminate_state(state) + + def can_terminate_state(self, state: CFGState) -> bool: + """Generation is allowed to terminate""" + if state.parser_state is not None: + try: + copy.copy(state.parser_state).feed_eof() + except UnexpectedToken: + return False + return True + + def must_terminate_state(self, state: CFGState) -> bool: + """Generation must terminate, no legal continuations""" + return state.parser_state is None or set(state.parser_state.accepts()).issubset( + {"$END"} + ) + + def copy(self) -> "CFGGuide": + """Create a copy of the Guide.""" + return CFGGuide(self.cfg_string, self.tokenizer) diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/json_schema.py b/vllm/lib/python3.10/site-packages/outlines/fsm/json_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..578ee762661b6861c37de07c3cad0f42e4f5ff92 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/fsm/json_schema.py @@ -0,0 +1,83 @@ +import inspect +import json +import warnings +from enum import Enum +from typing import Callable, Type, Union + +from pydantic import BaseModel, create_model + + +def convert_json_schema_to_str(json_schema: Union[dict, str, Type[BaseModel]]) -> str: + """Convert a JSON schema to a string. 
+ + Parameters + ---------- + json_schema + The JSON schema. + + Returns + ------- + str + The JSON schema converted to a string. + + Raises + ------ + ValueError + If the schema is not a dictionary, a string or a Pydantic class. + """ + if isinstance(json_schema, dict): + schema_str = json.dumps(json_schema) + elif isinstance(json_schema, str): + schema_str = json_schema + elif issubclass(json_schema, BaseModel): + schema_str = json.dumps(json_schema.model_json_schema()) + else: + raise ValueError( + f"Cannot parse schema {json_schema}. The schema must be either " + + "a Pydantic class, a dictionary or a string that contains the JSON " + + "schema specification" + ) + return schema_str + + +def get_schema_from_signature(fn: Callable) -> dict: + """Turn a function signature into a JSON schema. + + Every JSON object valid to the output JSON Schema can be passed + to `fn` using the ** unpacking syntax. + + """ + signature = inspect.signature(fn) + arguments = {} + for name, arg in signature.parameters.items(): + if arg.annotation == inspect._empty: + raise ValueError("Each argument must have a type annotation") + else: + arguments[name] = (arg.annotation, ...) + + try: + fn_name = fn.__name__ + except Exception as e: + fn_name = "Arguments" + warnings.warn( + f"The function name could not be determined. Using default name 'Arguments' instead. For debugging, here is exact error:\n{e}", + category=UserWarning, + ) + model = create_model(fn_name, **arguments) + + return model.model_json_schema() + + +def get_schema_from_enum(myenum: type[Enum]) -> dict: + if len(myenum) == 0: + raise ValueError( + f"Your enum class {myenum.__name__} has 0 members. If you are working with an enum of functions, do not forget to register them as callable (using `partial` for instance)" + ) + choices = [ + get_schema_from_signature(elt.value.func) + if callable(elt.value) + else {"const": elt.value} + for elt in myenum + ] + schema = {"title": myenum.__name__, "oneOf": choices} + return schema diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/parsing.py b/vllm/lib/python3.10/site-packages/outlines/fsm/parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..e48fb69e49f130562904880f7913353535788b3a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/fsm/parsing.py @@ -0,0 +1,1127 @@ +from copy import copy, deepcopy +from dataclasses import dataclass +from functools import lru_cache +from typing import ( + Any, + Dict, + FrozenSet, + Generator, + Iterator, + List, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import interegular +from interegular.fsm import FSM, Alphabet, OblivionError +from interegular.patterns import Unsupported +from lark import Lark, Token +from lark.common import LexerConf, ParserConf +from lark.exceptions import LexError, UnexpectedInput +from lark.indenter import Indenter +from lark.lexer import ( + BasicLexer, + ContextualLexer, + LexerState, + LexerThread, + Scanner, + UnexpectedCharacters, + UnexpectedToken, + _create_unless, +) +from lark.parser_frontends import ( + ParsingFrontend, + PostLexConnector, + _validate_frontend_args, +) +from lark.parsers.lalr_analysis import ( + Action, + IntParseTable, + LALR_Analyzer, + ParseTable, + Shift, +) +from lark.parsers.lalr_interactive_parser import InteractiveParser +from lark.parsers.lalr_parser import LALR_Parser, ParseConf, ParserState, _Parser +from outlines_core.fsm.regex import ( + BetterFSM, + get_token_transition_keys, + make_deterministic_fsm, +) + +PartialParseState = Tuple[str, int] 
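Stepping back briefly to the `json_schema.py` helpers added just above, a short sketch of how they fit together (illustrative only; the `add` function and `User` model are made-up examples, not part of the diff):

    from pydantic import BaseModel
    from outlines.fsm.json_schema import (
        convert_json_schema_to_str,
        get_schema_from_signature,
    )

    def add(a: int, b: int) -> int:
        return a + b

    # JSON schema whose valid objects can be splatted into add(**kwargs).
    call_schema = get_schema_from_signature(add)

    class User(BaseModel):
        name: str
        age: int

    # Accepts a dict, a JSON string, or a pydantic model class.
    schema_str = convert_json_schema_to_str(User)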
+ParseStateType = Union[int, FrozenSet] + + +@dataclass +class PartialTerminalInfo: + priority: int + terminal_name: str + can_transition: bool + is_final: bool + + +@dataclass +class PartialTokensInfo: + fsm_state_seq: Tuple[int, ...] + is_not_finished: bool + terminals_and_info: Tuple[PartialTerminalInfo, ...] + final_terminals_and_info: Tuple[PartialTerminalInfo, ...] + + +class PartialParserConf(ParserConf): + __serialize_fields__ = ( + "rules", + "start", + "parser_type", + "deterministic", + "use_value_stack", + ) + + def __init__(self, rules, callbacks, start, deterministic, use_value_stack): + super().__init__(rules, callbacks, start) + self.deterministic = deterministic + self.use_value_stack = use_value_stack + + +class PartialLark(Lark): + __serialize_fields__ = ( + "parser", + "rules", + "options", + "deterministic", + "use_value_stack", + ) + + def __init__(self, grammar, **options): + # TODO: Could've extended `LarkOptions`, but all these extensions are + # already way too much (and brittle). This library really needs a + # complete refactoring. + self.deterministic = options.pop("deterministic", False) + self.use_value_stack = options.pop("use_value_stack", False) + options["regex"] = True + super().__init__(grammar, **options) + assert self.options.parser == "lalr" + + def _build_lexer(self, dont_ignore: bool = False) -> "PartialBasicLexer": + lexer_conf = self.lexer_conf + if dont_ignore: + from copy import copy + + lexer_conf = copy(lexer_conf) + lexer_conf.ignore = () + + return PartialBasicLexer(lexer_conf) + + def _build_parser(self) -> "PartialParsingFrontend": + self._prepare_callbacks() + _validate_frontend_args(self.options.parser, self.options.lexer) + parser_conf = PartialParserConf( + self.rules, + self._callbacks, + self.options.start, + self.deterministic, + self.use_value_stack, + ) + + # This is `_construct_parsing_frontend` expanded/inlined + parser_type = self.options.parser + lexer_type = self.options.lexer + lexer_conf = self.lexer_conf + + assert isinstance(lexer_conf, LexerConf) + assert isinstance(parser_conf, ParserConf) + parser_conf.parser_type = parser_type + self.lexer_conf.lexer_type = lexer_type + return PartialParsingFrontend(lexer_conf, parser_conf, self.options) + + def __repr__(self): + return "{}(open({!r}), parser={!r}, lexer={!r}, ...)".format( + type(self).__name__, + self.source_path, + self.options.parser, + self.options.lexer, + ) + + def parse_from_state(self, parse_state: "PartialParseState", is_end=False): + return self.parser.parser.parser.parse_from_state(parse_state, is_end=is_end) + + +class PartialLexerThread(LexerThread): + def __copy__(self): + return type(self)(copy(self.lexer), copy(self.state)) + + def __repr__(self): + return f"{type(self).__name__}(lexer={self.lexer!r}, state={self.state!r})" + + +class PartialPostLexConnector(PostLexConnector): + def __copy__(self): + return type(self)(self.lexer, copy(self.postlexer)) + + def __repr__(self): + return ( + f"{type(self).__name__}(lexer={self.lexer!r}, postlexer={self.postlexer!r})" + ) + + +class PartialParsingFrontend(ParsingFrontend): + def __init__(self, lexer_conf, parser_conf, options, parser=None): + assert parser_conf.parser_type == "lalr" + + options._plugins["LALR_Parser"] = PartialLALRParser + options._plugins["BasicLexer"] = PartialBasicLexer + options._plugins["ContextualLexer"] = PartialContextualLexer + options._plugins["LexerThread"] = PartialLexerThread + + super().__init__(lexer_conf, parser_conf, options, parser=parser) + + if lexer_conf.postlex: + 
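+            # Wrap the post-lexer in a copyable connector, presumably so that post-lex
+            # state (for example a PartialIndenter's paren/indent levels) survives
+            # copies of in-progress parser states (see PartialPostLexConnector.__copy__
+            # above).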
self.lexer = PartialPostLexConnector(self.lexer.lexer, lexer_conf.postlex) + + self._termset_fsm_info = None + self._symbols_to_states: Optional[ + Dict[str, Set[Tuple[ParseStateType, Action]]] + ] = None + self._reverse_shifts: Optional[ + Dict[ParseStateType, Dict[str, Set[ParseStateType]]] + ] = None + # self._state_transition_map: Optional[ + # Dict[Tuple[ParseStateType, str], Set[ParseStateType]] + # ] = None + + def _compute_maps( + self, + ): + """Compute state transition and symbols-to-states maps.""" + self._reverse_shifts = {} + self._symbols_to_states = {} + + parse_table = self.parser.parser.parse_table + + for from_state, symbols_to_ops in parse_table.states.items(): + for symbol, op in symbols_to_ops.items(): + if op[0] == Shift: + symbols_to_from_states = self._reverse_shifts.setdefault(op[1], {}) + symbols_to_from_states.setdefault(symbol, set()).add(from_state) + self._symbols_to_states.setdefault(symbol, set()).add((from_state, op)) + + # # TODO: This approach is very wasteful. + # context_lexer = get_contextual_lexer(self) + # self._state_transition_map = {} + # + # for from_state, transitions in parse_table.states.items(): + # for symbol, action in transitions.items(): + # # TODO: Filter non-terminals + # if symbol not in context_lexer.root_lexer.terminals_by_name: + # continue + # + # if action[0] is Shift: + # self._state_transition_map.setdefault( + # (from_state, symbol), set() + # ).add(action[1]) + # continue + # + # antecedent_state_seqs = parse_to_terminal(self, [(from_state,)], symbol) + # + # for antecedent_state_seq in antecedent_state_seqs: + # antecedent_state = antecedent_state_seq[-1] + # self._state_transition_map.setdefault( + # (from_state, symbol), set() + # ).add(antecedent_state) + + def _compute_termset_fsm_info(self): + """Collect and return information about terminal symbol sets and their FSMs. + + Terminal symbol sets (or "termsets") are ordered sequences of terminal + symbols that are used by each parser state. Associated with each is a + collection of FSMs for each terminal and a single parse state FSM that is + the union of each terminal's FSM. + + This constructs a list of tuples containing the termset, the set of + parse states that use the termsets, parse state FSMs, and information + mapping the components of the parse state FSMs to their terminal symbol + FSMs. 
+ + """ + context_lexer = get_contextual_lexer(self) + termsets_to_fsms = {} + termsets_to_parse_states: Dict[Tuple[str, ...], Set[ParseStateType]] = {} + for parse_state, lexer in context_lexer.lexers.items(): + scanner = lexer.scanner + key = tuple(term.name for term in scanner.terminals) + termsets_to_fsms[key] = (scanner.fsm, scanner.fsms_to_trans_finals) + termsets_to_parse_states.setdefault(key, set()).add(parse_state) + + self._termset_fsm_info = [ + ( + termset, + frozenset(termsets_to_parse_states[termset]), + fsm, + fsms_to_trans_finals, + ) + for termset, (fsm, fsms_to_trans_finals) in termsets_to_fsms.items() + ] + + @property + def termset_fsm_info(self): + if self._termset_fsm_info is None: + self._compute_termset_fsm_info() + return self._termset_fsm_info + + @property + def symbols_to_states(self): + if self._symbols_to_states is None: + self._compute_maps() + return self._symbols_to_states + + @property + def reverse_shifts(self): + if self._reverse_shifts is None: + self._compute_maps() + return self._reverse_shifts + + # @property + # def state_transition_map(self): + # if self._state_transition_map is None: + # self._compute_maps() + # return self._state_transition_map + + +class PartialLALRParser(LALR_Parser): + def __init__(self, parser_conf, debug=False, strict=False): + analysis = LALR_Analyzer( + parser_conf, debug=debug if not parser_conf.deterministic else True + ) + analysis.compute_lalr() + callbacks = parser_conf.callbacks + + self.parser_conf = parser_conf + self._parse_table = analysis.parse_table + + if parser_conf.deterministic: + old_to_new = {} + + def to_tuple(v): + new = old_to_new.get(v) + if new is None: + new = tuple(sorted(v, key=lambda y: str(y))) + old_to_new[v] = new + return new + + enum = sorted( + self._parse_table.states.keys(), + key=lambda x: str(sorted(x, key=lambda y: str(y))), + ) + + new_states = {} + for s in enum: + transitions = { + term: op if op[0] is not Shift else (op[0], to_tuple(op[1])) + for term, op in self._parse_table.states[s].items() + } + new_states[to_tuple(s)] = transitions + + self._parse_table = type(self._parse_table)( + new_states, + {k: to_tuple(v) for k, v in self._parse_table.start_states.items()}, + {k: to_tuple(v) for k, v in self._parse_table.end_states.items()}, + ) + + if not debug: + self._parse_table = IntParseTable.from_ParseTable(self._parse_table) + self.states_to_rulesets = dict( + zip(self._parse_table.states.keys(), new_states.keys()) + ) + + self.parser = PartialParser( + self._parse_table, + callbacks, + debug, + use_value_stack=parser_conf.use_value_stack, + ) + + @classmethod + def deserialize(cls, data, memo, callbacks, debug=False): + inst = cls.__new__(cls) + inst._parse_table = ParseTable.deserialize(data, memo) + inst.parser = PartialParser(inst._parse_table, callbacks, debug) + return inst + + +class PartialParserState(ParserState): + __slots__ = "use_value_stack" + + def __init__( + self, + parse_conf, + lexer, + state_stack=None, + value_stack=None, + use_value_stack=False, + ): + super().__init__( + parse_conf, lexer, state_stack=state_stack, value_stack=value_stack + ) + self.use_value_stack = use_value_stack + + def feed_token(self, token, is_end=False): + if token.type == "partial": + # If none of the potential terminals can transition, we need to know now + current_state = self.state_stack[-1] + current_lexer = get_contextual_lexer(self.lexer).lexers[current_state] + + # We have to feed the token and determine whether or not at least + # one terminal is consistent with the stack; 
otherwise, we'll miss + # invalid REDUCE cases. + # TODO: We should track separate parses conditional on possible + # token/symbol types, then we can coherently reuse the following + # results instead of recomputing it later. + can_transition = False + for terminal_info in token.value.terminals_and_info: + if terminal_info.terminal_name not in current_lexer.ignore_types: + test_token = Token.new_borrow_pos( + terminal_info.terminal_name, "", token + ) + + stack = copy(self.state_stack) + try: + self.feed_token_no_stack(test_token, is_end=is_end) + can_transition = True + break + except UnexpectedToken: + continue + finally: + self.state_stack = stack + else: + can_transition = True + + if not can_transition: + expected = { + s + for s in self.parse_conf.states[current_state].keys() + if s.isupper() + } + raise UnexpectedToken( + token, expected, state=self, interactive_parser=None + ) + + elif self.use_value_stack: + super().feed_token(token, is_end=is_end) + else: + self.feed_token_no_stack(token, is_end=is_end) + + def feed_token_no_stack(self, token, is_end=False): + """ + This is a copy of `ParserState.feed_token` with all the value stack + steps removed. Since we're not exactly parsing in order to obtain a + CST or anything similar, we can avoid the growing expense of tracking + the parse tree. + """ + state_stack = self.state_stack + states = self.parse_conf.states + end_state = self.parse_conf.end_state + + while True: + state = state_stack[-1] + try: + action, arg = states[state][token.type] + except KeyError: + expected = {s for s in states[state].keys() if s.isupper()} + raise UnexpectedToken( + token, expected, state=self, interactive_parser=None + ) + + assert arg != end_state + + if action is Shift: + # shift once and return + assert not is_end + state_stack.append(arg) + return + else: + # reduce+shift as many times as necessary + rule = arg + size = len(rule.expansion) + if size: + del state_stack[-size:] + + _action, new_state = states[state_stack[-1]][rule.origin.name] + assert _action is Shift + state_stack.append(new_state) + + if is_end and state_stack[-1] == end_state: + return + + def feed_eof(self): + last_token = self.lexer.state.last_token + + if last_token is None: + eof_token = self.lexer._Token("$END", "", 0, 1, 1) + else: + eof_token = Token.new_borrow_pos("$END", "", last_token) + + new_token_is_legal = ( + last_token is None + or last_token.type != "partial" + or any(ti.is_final for ti in last_token.value.terminals_and_info) + ) + if new_token_is_legal: + self.feed_token(eof_token, is_end=True) + else: + raise UnexpectedToken(eof_token, [], state=self, interactive_parser=None) + + def choices(self): + return self.parse_conf.parse_table.states[self.position] + + def accepts(self): + """ + Adapted from https://github.com/lark-parser/lark/blob/be542c2ff6d968817df019b8bf03f37b3111c08c/lark/parsers/lalr_interactive_parser.py#L95 + Returns the set of possible tokens that will advance the parser into a new valid state. + """ + accepts = set() + conf_no_callbacks = copy(self.parse_conf) + # We don't want to call callbacks here since those might have arbitrary side effects + # and are unnecessarily slow. + conf_no_callbacks.callbacks = {} + for t in self.choices(): + if t.isupper(): # is terminal? 
+ new_state = copy(self) + new_state.parse_conf = conf_no_callbacks + try: + new_state.feed_token(new_state.lexer._Token(t, "")) + except UnexpectedToken: + pass + else: + accepts.add(t) + return accepts + + def __copy__(self): + return type(self)( + self.parse_conf, + copy(self.lexer), + copy(self.state_stack), + deepcopy(self.value_stack), + use_value_stack=self.use_value_stack, + ) + + def __repr__(self): + return f"{type(self).__name__}(lexer={self.lexer!r}, state_stack={self.state_stack!r})" + + +class PartialParser(_Parser): + def __init__(self, parse_table, callbacks, debug=False, use_value_stack=False): + super().__init__(parse_table, callbacks, debug=debug) + self.use_value_stack = use_value_stack + + def parse( + self, lexer, start, value_stack=None, state_stack=None, start_interactive=False + ): + parse_conf = ParseConf(self.parse_table, self.callbacks, start) + parser_state = PartialParserState( + parse_conf, copy(lexer), state_stack, value_stack, self.use_value_stack + ) + if start_interactive: + return InteractiveParser(self, parser_state, parser_state.lexer) + return self.parse_from_state(parser_state) + + def parse_from_state(self, state, last_token=None, is_end=False): + try: + token = last_token + for token in state.lexer.lex(state): + state.feed_token(token) + + if is_end and (not token or token.type != "partial"): + state.feed_eof() + + return state + except UnexpectedInput as e: + try: + e.interactive_parser = InteractiveParser(self, state, state.lexer) + except NameError: + pass + raise e + except Exception: + if self.debug: + print("") + print("STATE STACK DUMP") + print("----------------") + for i, s in enumerate(state.state_stack): + print("%d)" % i, s) + print("") + + raise + + +class PartialScanner(Scanner): + @classmethod + @lru_cache + def construct_terminal_fsm(cls, terminal): + # TODO: This should really be done at the lexer/parser level so that + # the lifetime of these objects is tied to the parser itself. + regex_str = terminal.pattern.to_regexp() + pattern = interegular.parse_pattern(regex_str) + fsm, _ = make_deterministic_fsm(pattern.to_fsm().reduce()) + return fsm, pattern.prefix_postfix + + def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): + self.terminals = terminals + self.g_regex_flags = g_regex_flags + self.use_bytes = use_bytes + self.match_whole = match_whole + self.allowed_types = {t.name for t in self.terminals} + self._mres = None + + fsms = [] + for t in self.terminals: + fsm, prefix_postfix = self.construct_terminal_fsm(t) + + # TODO FIXME: We don't support this right now. + assert prefix_postfix == (0, 0) + + fsms.append(fsm) + + self.fsm, self.fsms_to_trans_finals = fsm_union(fsms) + + def get_terminals_info( + self, fsm_state_seq + ) -> Tuple[Tuple[PartialTerminalInfo, ...], Tuple[PartialTerminalInfo, ...]]: + """Get the possible terminal symbols for an FSM state sequence.""" + terminals_and_info: Tuple[PartialTerminalInfo, ...] = () + final_terminals_and_info: Tuple[PartialTerminalInfo, ...] 
= () + for i, (fsm_id, fsm_reads_more, in_final) in enumerate( + get_sub_fsms_from_seq(fsm_state_seq, self.fsms_to_trans_finals) + ): + terminal_name = self.terminals[fsm_id].name + info = PartialTerminalInfo(i, terminal_name, fsm_reads_more, in_final) + terminals_and_info += (info,) + if in_final: + final_terminals_and_info += (info,) + + return terminals_and_info, final_terminals_and_info + + def match(self, text, pos, last_fsm_state_seq: Optional[Tuple[int, ...]] = None): + """Determine an FSM match over `text` starting at `pos` and continuing `last_fsm_state_seq`.""" + + start_pos = pos + + if last_fsm_state_seq: + assert len(last_fsm_state_seq) > 1 + start_pos += len(last_fsm_state_seq) - 1 + start_state = last_fsm_state_seq[-1] + else: + start_state = self.fsm.initial + + text_part = text[start_pos:] + + text_transitions = get_token_transition_keys( + self.fsm.fsm_info.alphabet_symbol_mapping, + self.fsm.fsm_info.alphabet_anything_value, + text_part, + ) + + state_seq = walk_fsm( + self.fsm, + text_transitions, + start_state, + full_match=self.match_whole, + ) + + if not state_seq: + return None + + if last_fsm_state_seq: + res = last_fsm_state_seq + tuple(state_seq) + else: + res = (start_state,) + tuple(state_seq) + + return res + + +class PartialContextualLexer(ContextualLexer): + def __init__(self, conf: "LexerConf", states, always_accept=()): + terminals = list(conf.terminals) + terminals_by_name = conf.terminals_by_name + + trad_conf = copy(conf) + trad_conf.terminals = terminals + + lexer_by_symbols: Dict = {} + self.lexers = {} + for state, accepts in states.items(): + key = frozenset(accepts) + try: + lexer = lexer_by_symbols[key] + except KeyError: + accepts = set(accepts) | set(conf.ignore) | set(always_accept) + lexer_conf = copy(trad_conf) + lexer_conf.terminals = [ + terminals_by_name[n] for n in accepts if n in terminals_by_name + ] + if not lexer_conf.terminals: + continue + lexer = PartialBasicLexer(lexer_conf) + lexer_by_symbols[key] = lexer + + self.lexers[state] = lexer + + assert trad_conf.terminals is terminals + self.root_lexer = PartialBasicLexer(trad_conf) + + def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: + try: + while True: + lexer = self.lexers[parser_state.position] + next_tok = lexer.next_token(lexer_state, parser_state) + yield next_tok + except EOFError: + pass + except KeyError: + if len(lexer_state.text) > lexer_state.line_ctr.char_pos: + raise UnexpectedCharacters( + lexer_state.text, + lexer_state.line_ctr.char_pos, + lexer_state.line_ctr.line, + lexer_state.line_ctr.column, + allowed=False, + token_history=lexer_state.last_token and [lexer_state.last_token], + state=parser_state, + terminals_by_name=self.root_lexer.terminals, + ) + + +class PartialBasicLexer(BasicLexer): + def __init__(self, conf: "LexerConf"): + super().__init__(conf) + # Eagerly construct the scanner + self._build_scanner() + + def _build_scanner(self): + # This seems incredibly convoluted: `lark` creates callback-triggered + # nested scanners for regex-defined terminals that overlap with + # string-defined terminals when both types of terminals have the same + # priority. Unless I'm missing something important, why not simply + # reorder the terminals so that the string-defined ones come before the + # regex-defined ones? + terminals, self.callback = _create_unless( + self.terminals, self.g_regex_flags, self.re, self.use_bytes + ) + + # We can't let people arbitrarily mess with the scanning process. 
+ assert not self.user_callbacks + # for type_, f in self.user_callbacks.items(): + # if type_ in self.callback: + # # Already a callback there, probably UnlessCallback + # self.callback[type_] = CallChain( + # self.callback[type_], f, lambda t: t.type == type_ + # ) + # else: + # self.callback[type_] = f + + # We used the "callback" results to reorder the terminals (see the + # comments above). + for terminal_name, callback in self.callback.items(): + terminal = self.terminals_by_name[terminal_name] + for sub_terminal in callback.scanner.terminals: + self.terminals.remove(sub_terminal) + idx = self.terminals.index(terminal) + self.terminals.insert(idx, sub_terminal) + + self._scanner = PartialScanner( + self.terminals, self.g_regex_flags, self.re, self.use_bytes + ) + + def match(self, text, pos, last_fsm_state_seq=None): + return self.scanner.match(text, pos, last_fsm_state_seq) + + def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: + last_token = lex_state.last_token + + last_fsm_state_seq = None + if last_token and last_token.type == "partial": + # Continue from last partial lexer state + last_fsm_state_seq = last_token.value.fsm_state_seq + + line_ctr = lex_state.line_ctr + end_pos = line_ctr.char_pos + ( + len(last_fsm_state_seq) - 1 if last_fsm_state_seq else 0 + ) + while end_pos < len(lex_state.text): + res = self.match(lex_state.text, line_ctr.char_pos, last_fsm_state_seq) + + if not res: + if ( + not last_fsm_state_seq + or last_fsm_state_seq[-1] not in self.scanner.fsm.finals + ): + allowed = self.scanner.allowed_types - self.ignore_types + if not allowed: + allowed = {""} + raise UnexpectedCharacters( + lex_state.text, + line_ctr.char_pos, + line_ctr.line, + line_ctr.column, + allowed=allowed, + token_history=lex_state.last_token and [lex_state.last_token], + state=parser_state, + terminals_by_name=self.terminals_by_name, + ) + + # The partial match might be complete now + fsm_state_seq = last_token.value.fsm_state_seq + terminals_and_info = last_token.value.terminals_and_info + final_terminals_and_info = last_token.value.final_terminals_and_info + else: + fsm_state_seq = res + ( + terminals_and_info, + final_terminals_and_info, + ) = self.scanner.get_terminals_info(fsm_state_seq) + + priority_terminal_info = ( + final_terminals_and_info[0] + if final_terminals_and_info + else terminals_and_info[0] + ) + + is_not_finished = ( + not priority_terminal_info.is_final + or priority_terminal_info.can_transition + or len(terminals_and_info) > 1 + ) + + start_pos = line_ctr.char_pos + end_pos = start_pos + len(fsm_state_seq) - 1 + + if end_pos >= len(lex_state.text) and is_not_finished: + type_name = "partial" + token_value = PartialTokensInfo( + fsm_state_seq, + is_not_finished, + terminals_and_info, + final_terminals_and_info, + ) + # Don't update the line counter states until we've finished + value = "" + else: + type_name = priority_terminal_info.terminal_name + # The token value should contain all partial scan parts in this + # case + value = token_value = lex_state.text[start_pos:end_pos] + + assert isinstance(self.callback, Dict) + + if type_name not in self.ignore_types: + t = Token( + type_name, + token_value, + line_ctr.char_pos, + line_ctr.line, + line_ctr.column, + ) + + line_ctr.feed(value, type_name in self.newline_types) + + t.end_line = line_ctr.line + t.end_column = line_ctr.column + t.end_pos = line_ctr.char_pos + if t.type in self.callback: + t = self.callback[t.type](t) + if not isinstance(t, Token): + raise LexError( + "Callbacks must 
return a token (returned %r)" % t + ) + lex_state.last_token = t + return t + + if type_name in self.callback: + t2 = Token( + type_name, value, line_ctr.char_pos, line_ctr.line, line_ctr.column + ) + self.callback[type_name](t2) + + line_ctr.feed(value, type_name in self.newline_types) + + last_fsm_state_seq = None + + raise EOFError(self) + + +class PartialIndenter(Indenter): + """An `Indenter` that doesn't reset its state every time `process` is called.""" + + def process(self, stream): + return self._process(stream) + + def _process(self, stream): + for token in stream: + # These were previously *after* the `yield`, but that makes the + # state tracking unnecessarily convoluted. + if token.type in self.OPEN_PAREN_types: + self.paren_level += 1 + elif token.type in self.CLOSE_PAREN_types: + self.paren_level -= 1 + if self.paren_level < 0: + raise UnexpectedToken(token, []) + + if token.type == self.NL_type: + yield from self.handle_NL(token) + else: + yield token + + # TODO: What do we want to do here? + # while len(self.indent_level) > 1: + # self.indent_level.pop() + # yield Token(self.DEDENT_type, "") + + def accepts_token_type(self, token_type): + if token_type in self.CLOSE_PAREN_types and self.paren_level - 1 < 0: + return False + + # TODO: + # if token_type == self.NL_type and self.paren_level == 0: + # ... + # return False + + return True + + def __copy__(self): + res = type(self)() + res.paren_level = self.paren_level + res.indent_level = copy(self.indent_level) + return res + + def __repr__(self): + return f"{type(self).__name__}(paren_level={self.paren_level!r}, indent_level={self.indent_level!r})" + + +class PartialPythonIndenter(PartialIndenter): + NL_type = "_NEWLINE" + OPEN_PAREN_types = ["LPAR", "LSQB", "LBRACE"] + CLOSE_PAREN_types = ["RPAR", "RSQB", "RBRACE"] + INDENT_type = "_INDENT" + DEDENT_type = "_DEDENT" + tab_len = 8 + + +def get_contextual_lexer(x: Union[PartialLexerThread, PartialParsingFrontend]): + if isinstance(x.lexer, ContextualLexer): + return x.lexer + else: + return x.lexer.lexer + + +def terminals_to_fsms(lp: PartialLark) -> Dict[str, FSM]: + """Construct a ``dict`` mapping terminal symbol names to their finite state machines.""" + + symbol_names_and_fsms = {} + for terminal in lp.terminals: + pattern = interegular.parse_pattern(terminal.pattern.to_regexp()) + # TODO: Use `pyparser.terminals[0].pattern.flags`? + try: + fsm, _ = make_deterministic_fsm(pattern.to_fsm().reduce()) + except Unsupported: + fsm = None + + symbol_names_and_fsms[terminal.name] = fsm + + return symbol_names_and_fsms + + +def fsm_union( + fsms: Sequence[FSM], +) -> Tuple[FSM, Dict[int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]]]]: + """Construct an FSM representing the union of the FSMs in `fsms`. + + This is an updated version of `interegular.fsm.FSM.union` made to return an + extra map of component FSMs to the sets of state transitions that + correspond to them in the new FSM. 
+ + """ + + alphabet, new_to_old = Alphabet.union(*[fsm.alphabet for fsm in fsms]) + + indexed_fsms = tuple(enumerate(fsms)) + + initial = {i: fsm.initial for (i, fsm) in indexed_fsms} + + # Dedicated function accepting a "superset" and returning the next + # "superset" obtained by following this transition in the new FSM + def follow(current_state, new_transition: int): + next = {} + for i, f in indexed_fsms: + old_transition = new_to_old[i][new_transition] + if ( + i in current_state + and current_state[i] in f.map + and old_transition in f.map[current_state[i]] + ): + next[i] = f.map[current_state[i]][old_transition] + if not next: + raise OblivionError + return next + + states = [initial] + finals: Set[int] = set() + map: Dict[int, Dict[int, int]] = {} + + # Map component FSMs to their new state-to-state transitions, finals, and a + # map translating component FSM states to aggregate FSM states + fsms_to_trans_finals: Dict[ + int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]] + ] = {} + + i = 0 + while i < len(states): + state = states[i] + + # Add to the finals of the aggregate FSM whenever we hit a final in a + # component FSM + if any(state.get(j, -1) in fsm.finals for (j, fsm) in indexed_fsms): + finals.add(i) + + # Compute the map for this state + map[i] = {} + for transition in alphabet.by_transition: + try: + next = follow(state, transition) + except OblivionError: + # Reached an oblivion state; don't list it + continue + else: + try: + # TODO: Seems like this could--and should--be avoided + j = states.index(next) + except ValueError: + j = len(states) + states.append(next) + + map[i][transition] = j + + for fsm_id, fsm_state in next.items(): + ( + fsm_transitions, + fsm_finals, + fsm_old_to_new, + ) = fsms_to_trans_finals.setdefault(fsm_id, (set(), set(), {})) + old_from = state[fsm_id] + old_to = fsm_state + fsm_old_to_new.setdefault(old_from, set()).add(i) + fsm_old_to_new.setdefault(old_to, set()).add(j) + fsm_transitions.add((i, j)) + if fsm_state in fsms[fsm_id].finals: + fsm_finals.add(j) + + i += 1 + + fsm = FSM( + alphabet=alphabet, + states=range(len(states)), + initial=0, + finals=finals, + map=map, + __no_validation__=True, + ) + + fsm, old_to_new_states = make_deterministic_fsm(fsm) + _fsms_to_trans_finals = { + fsm_id: ( + {(old_to_new_states[s1], old_to_new_states[s2]) for s1, s2 in transitions}, + {old_to_new_states[s] for s in finals}, + { + old_state: {old_to_new_states[new_state] for new_state in new_states} + for old_state, new_states in old_to_new.items() + }, + ) + for fsm_id, (transitions, finals, old_to_new) in sorted( + fsms_to_trans_finals.items(), key=lambda x: x[0] + ) + } + + return ( + fsm, + _fsms_to_trans_finals, + ) + + +def get_sub_fsms_from_seq( + state_seq: Sequence[int], + fsms_to_trans_finals: Dict[ + int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]] + ], +) -> Generator[Tuple[int, bool, bool], None, None]: + """Get the indices of the sub-FSMs in `fsm` that could have matched the state sequence `state_seq`. + + Parameters + ---------- + state_seq + A state sequence. + fsms_to_trans_finals + A map from FSM indices to tuples containing sets of their state transitions + and sets of the final/accept states. + + Returns + ------- + A generator returning tuples containing each sub-FSM index (in the order + they were union-ed to construct `fsm`) and booleans indicating whether or + not there is another valid transition from the last state in the sequence + for the associated sub-FSM (i.e. 
if the FSM can continue + accepting/matching) and whether or not the sequence ends in a final state + of the sub-FSM. + """ + state_seq_transitions = set(zip(state_seq[:-1], state_seq[1:])) + last_fsm_state = state_seq[-1] + yield from ( + ( + # The sub-FMS index + fsm_idx, + # Is there another possible transition in this sub-FSM? + any(last_fsm_state == from_s for (from_s, to_s) in transitions), + # Is this sub-FSM in a final state? + state_seq[-1] in finals, + ) + for fsm_idx, (transitions, finals, _) in fsms_to_trans_finals.items() + if state_seq_transitions.issubset(transitions) + ) + + +def walk_fsm( + fsm: BetterFSM, + token_transition_keys: Sequence[int], + start_state: int, + full_match: bool = True, +) -> List[int]: + fsm_finals = fsm.finals + + state = start_state + accepted_states: List[int] = [] + last_final_idx: int = 0 + + fsm_transitions = fsm.flat_transition_map + + # Iterate over token transition key sequence. The transition key + # sequence represents the FSM traversal rules of the tokens symbols. + for i, trans_key in enumerate(token_transition_keys): + new_state = fsm_transitions.get((state, trans_key)) + + if new_state is None: + if not full_match and last_final_idx > 0: + return accepted_states[:last_final_idx] + + return [] + + state = new_state + + if state in fsm_finals: + last_final_idx = i + 1 + + accepted_states.append(state) + + if full_match and last_final_idx - 1 != i: + return [] + + return accepted_states diff --git a/vllm/lib/python3.10/site-packages/outlines/fsm/types.py b/vllm/lib/python3.10/site-packages/outlines/fsm/types.py new file mode 100644 index 0000000000000000000000000000000000000000..5695dee0733946f1a6334a1242d575df65458c05 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/fsm/types.py @@ -0,0 +1,81 @@ +import datetime +from enum import EnumMeta +from typing import Any, Protocol, Tuple, Type + +from typing_extensions import _AnnotatedAlias, get_args + +INTEGER = r"[+-]?(0|[1-9][0-9]*)" +BOOLEAN = "(True|False)" +FLOAT = rf"{INTEGER}(\.[0-9]+)?([eE][+-][0-9]+)?" +DATE = r"(\d{4})-(0[1-9]|1[0-2])-([0-2][0-9]|3[0-1])" +TIME = r"([0-1][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])" +DATETIME = rf"({DATE})(\s)({TIME})" + + +class FormatFunction(Protocol): + def __call__(self, sequence: str) -> Any: + ... 
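+
+# A minimal usage sketch (illustrative example only): the regex constants above are
+# ordinary Python regular expressions, so a candidate string can be checked with
+# `re.fullmatch` before it is converted by the matching FormatFunction, e.g.
+#
+#     import re
+#     re.fullmatch(INTEGER, "-42")                    # matches
+#     re.fullmatch(FLOAT, "3.14e+10")                 # matches
+#     re.fullmatch(DATETIME, "2024-01-31 08:15:00")   # matches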
+ + +def python_types_to_regex(python_type: Type) -> Tuple[str, FormatFunction]: + # If it is a custom type + if isinstance(python_type, _AnnotatedAlias): + json_schema = get_args(python_type)[1].json_schema + type_class = get_args(python_type)[0] + + custom_regex_str = json_schema["pattern"] + + def custom_format_fn(sequence: str) -> Any: + return type_class(sequence) + + return custom_regex_str, custom_format_fn + + if isinstance(python_type, EnumMeta): + values = python_type.__members__.keys() + enum_regex_str: str = "(" + "|".join(values) + ")" + + def enum_format_fn(sequence: str) -> str: + return str(sequence) + + return enum_regex_str, enum_format_fn + + if python_type == float: + + def float_format_fn(sequence: str) -> float: + return float(sequence) + + return FLOAT, float_format_fn + elif python_type == int: + + def int_format_fn(sequence: str) -> int: + return int(sequence) + + return INTEGER, int_format_fn + elif python_type == bool: + + def bool_format_fn(sequence: str) -> bool: + return bool(sequence) + + return BOOLEAN, bool_format_fn + elif python_type == datetime.date: + + def date_format_fn(sequence: str) -> datetime.date: + return datetime.datetime.strptime(sequence, "%Y-%m-%d").date() + + return DATE, date_format_fn + elif python_type == datetime.time: + + def time_format_fn(sequence: str) -> datetime.time: + return datetime.datetime.strptime(sequence, "%H:%M:%S").time() + + return TIME, time_format_fn + elif python_type == datetime.datetime: + + def datetime_format_fn(sequence: str) -> datetime.datetime: + return datetime.datetime.strptime(sequence, "%Y-%m-%d %H:%M:%S") + + return DATETIME, datetime_format_fn + else: + raise NotImplementedError( + f"The Python type {python_type} is not supported. Please open an issue." + ) diff --git a/vllm/lib/python3.10/site-packages/outlines/function.py b/vllm/lib/python3.10/site-packages/outlines/function.py new file mode 100644 index 0000000000000000000000000000000000000000..48577be8f7d4db1050545ac4036c8063bc206bb1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/function.py @@ -0,0 +1,117 @@ +import importlib.util +from dataclasses import dataclass +from typing import TYPE_CHECKING, Callable, Optional, Tuple, Union + +import requests + +from outlines import generate, models + +if TYPE_CHECKING: + from outlines.generate.api import SequenceGenerator + from outlines.prompts import Prompt + + +@dataclass +class Function: + """Represents an Outlines function. + + Functions are a convenient way to encapsulate a prompt template, a language + model and a Pydantic model that define the output structure. Once defined, + the function can be called with arguments that will be used to render the + prompt template. + + """ + + prompt_template: "Prompt" + schema: Union[str, Callable, object] + model_name: str + generator: Optional["SequenceGenerator"] = None + + @classmethod + def from_github(cls, program_path: str, function_name: str = "fn"): + """Load a function stored on GitHub""" + program_content = download_from_github(program_path) + function = extract_function_from_file(program_content, function_name) + + return function + + def init_generator(self): + """Load the model and initialize the generator.""" + model = models.transformers(self.model_name) + self.generator = generate.json(model, self.schema) + + def __call__(self, *args, **kwargs): + """Call the function. + + .. warning:: + + This currently does not support batching. + + Parameters + ---------- + args + Values to pass to the prompt template as positional arguments. 
+ kwargs + Values to pass to the prompt template as keyword arguments. + + """ + if self.generator is None: + self.init_generator() + + prompt = self.prompt_template(*args, **kwargs) + return self.generator(prompt) + + +def download_from_github(short_path: str): + """Download the file in which the function is stored on GitHub.""" + GITHUB_BASE_URL = "https://raw.githubusercontent.com" + BRANCH = "main" + + path = short_path.split("/") + if len(path) < 3: + raise ValueError( + "Please provide a valid path in the form {USERNAME}/{REPO_NAME}/{PATH_TO_FILE}." + ) + elif short_path[-3:] == ".py": + raise ValueError("Do not append the `.py` extension to the program name.") + + username = path[0] + repo = path[1] + path_to_file = path[2:] + + url = "/".join([GITHUB_BASE_URL, username, repo, BRANCH] + path_to_file) + ".py" + result = requests.get(url) + + if result.status_code == 200: + return result.text + elif result.status_code == 404: + raise ValueError( + f"Program could not be found at {url}. Please make sure you entered the GitHub username, repository name and path to the program correctly." + ) + else: + result.raise_for_status() + + +def extract_function_from_file(content: str, function_name: str) -> Tuple[Callable]: + """Extract a function object from a downloaded file.""" + + spec = importlib.util.spec_from_loader( + "outlines_function", loader=None, origin="github" + ) + if spec is not None: + module = importlib.util.module_from_spec(spec) + exec(content, module.__dict__) + + try: + fn = getattr(module, function_name) + except AttributeError: + raise AttributeError( + "Could not find an `outlines.Function` instance in the remote file. Make sure that the path you specified is correct." + ) + + if not isinstance(fn, module.outlines.Function): + raise TypeError( + f"The `{function_name}` variable in the program must be an instance of `outlines.Function`" + ) + + return fn diff --git a/vllm/lib/python3.10/site-packages/outlines/generate/choice.py b/vllm/lib/python3.10/site-packages/outlines/generate/choice.py new file mode 100644 index 0000000000000000000000000000000000000000..afb998f52abae6bf53fc3efbfd9b7122d741a4f6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/generate/choice.py @@ -0,0 +1,59 @@ +import json as pyjson +import re +from enum import Enum +from functools import singledispatch +from typing import Callable, List, Union + +from outlines_core.fsm.json_schema import build_regex_from_schema + +from outlines.fsm.json_schema import get_schema_from_enum +from outlines.generate.api import SequenceGeneratorAdapter +from outlines.models import OpenAI +from outlines.samplers import Sampler, multinomial + +from .json import json +from .regex import regex + + +@singledispatch +def choice( + model, choices: Union[List[str], type[Enum]], sampler: Sampler = multinomial() +) -> SequenceGeneratorAdapter: + if isinstance(choices, type(Enum)): + regex_str = build_regex_from_schema(pyjson.dumps(get_schema_from_enum(choices))) + else: + choices = [re.escape(choice) for choice in choices] # type: ignore + regex_str = r"(" + r"|".join(choices) + r")" + + generator = regex(model, regex_str, sampler) + if isinstance(choices, type(Enum)): + generator.format_sequence = lambda x: pyjson.loads(x) + else: + generator.format_sequence = lambda x: x + + return generator + + +@choice.register(OpenAI) +def choice_openai( + model: OpenAI, choices: List[str], sampler: Sampler = multinomial() +) -> Callable: + """ + Call OpenAI API with response_format of a dict: + {"result": } + """ + + 
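+    # The allowed choices are encoded as a JSON schema enum on a single required
+    # "result" property; generate_choice() below unwraps {"result": ...} so callers
+    # get the bare choice string back.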
choices_schema = pyjson.dumps( + { + "type": "object", + "properties": {"result": {"type": "string", "enum": choices}}, + "additionalProperties": False, + "required": ["result"], + } + ) + generator = json(model, choices_schema, sampler) + + def generate_choice(*args, **kwargs): + return generator(*args, **kwargs)["result"] + + return generate_choice diff --git a/vllm/lib/python3.10/site-packages/outlines/generate/text.py b/vllm/lib/python3.10/site-packages/outlines/generate/text.py new file mode 100644 index 0000000000000000000000000000000000000000..32530d0c49c693ec499a067ab751ef834536716b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/generate/text.py @@ -0,0 +1,50 @@ +from functools import singledispatch + +from outlines.generate.api import ( + SequenceGeneratorAdapter, + VisionSequenceGeneratorAdapter, +) +from outlines.models import OpenAI, TransformersVision +from outlines.samplers import Sampler, multinomial + + +@singledispatch +def text(model, sampler: Sampler = multinomial()) -> SequenceGeneratorAdapter: + """Generate text with a `Transformer` model. + + Note + ---- + Python 3.11 allows dispatching on Union types and + this should greatly simplify the code. + + Arguments + --------- + model: + An instance of `Transformer` that represents a model from the + `transformers` library. + sampler: + The sampling algorithm to use to generate token ids from the logits + distribution. + + Returns + ------- + A `SequenceGeneratorAdapter` instance that generates text. + + """ + return SequenceGeneratorAdapter(model, None, sampler) + + +@text.register(TransformersVision) +def text_vision(model, sampler: Sampler = multinomial()): + return VisionSequenceGeneratorAdapter(model, None, sampler) + + +@text.register(OpenAI) +def text_openai(model: OpenAI, sampler: Sampler = multinomial()) -> OpenAI: + if not isinstance(sampler, multinomial): + raise NotImplementedError( + r"The OpenAI API does not support any other sampling algorithm " + + "than the multinomial sampler." 
+ ) + + return model diff --git a/vllm/lib/python3.10/site-packages/outlines/grammars.py b/vllm/lib/python3.10/site-packages/outlines/grammars.py new file mode 100644 index 0000000000000000000000000000000000000000..f0c122964786e8cb42cec595cff04a823f2c1958 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/grammars.py @@ -0,0 +1,14 @@ +from pathlib import Path + +GRAMMAR_PATH = Path(__file__).parent / "grammars" + + +def read_grammar(grammar_file_name, base_grammar_path=GRAMMAR_PATH): + """Read grammar file from default grammar path""" + full_path = base_grammar_path / grammar_file_name + with open(full_path) as file: + return file.read() + + +arithmetic = read_grammar("arithmetic.lark") +json = read_grammar("json.lark") diff --git a/vllm/lib/python3.10/site-packages/outlines/grammars/arithmetic.lark b/vllm/lib/python3.10/site-packages/outlines/grammars/arithmetic.lark new file mode 100644 index 0000000000000000000000000000000000000000..2332650c63c02b5f3ded849dc61542170c922038 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/grammars/arithmetic.lark @@ -0,0 +1,18 @@ +?start: sum + +?sum: product +| sum "+" product -> add +| sum "-" product -> sub + +?product: atom +| product "*" atom -> mul +| product "/" atom -> div + +?atom: NUMBER -> number +| "-" atom -> neg +| "(" sum ")" + +%import common.NUMBER +%import common.WS_INLINE + +%ignore WS_INLINE diff --git a/vllm/lib/python3.10/site-packages/outlines/grammars/common.lark b/vllm/lib/python3.10/site-packages/outlines/grammars/common.lark new file mode 100644 index 0000000000000000000000000000000000000000..ee5e00c500093e8c095c83cc7d383ebc82592a6f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/grammars/common.lark @@ -0,0 +1,83 @@ +// Adapted from https://github.com/lark-parser/lark/blob/master/lark/grammars/common.lark + +// Lark License: +// Copyright © 2017 Erez Shinan +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +// Basic terminals for common use + + +// +// Numbers +// + +DIGIT: "0".."9" +HEXDIGIT: "a".."f"|"A".."F"|DIGIT + +INT: DIGIT+ +SIGNED_INT: ["+"|"-"] INT +DECIMAL: INT "." INT? | "." INT + +// float = /-?\d+(\.\d+)?([eE][+-]?\d+)?/ +_EXP: ("e"|"E") SIGNED_INT +FLOAT: INT _EXP | DECIMAL _EXP? 
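+// Note: FLOAT requires a decimal point or an exponent, so a bare integer is matched
+// by INT rather than FLOAT; NUMBER below accepts either form.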
+SIGNED_FLOAT: ["+"|"-"] FLOAT + +NUMBER: FLOAT | INT +SIGNED_NUMBER: ["+"|"-"] NUMBER + +UNESCAPED_STRING: /\"[^"]*\"/ + +// based on `outlines/fsm/json_schema.py` +_NON_CONTROL_CHAR: /([^"\\\x00-\x1F\x7F-\x9F])/ +_ESCAPED_CHAR: /\\/ (_NON_CONTROL_CHAR | /\\/ | /"/) +ESCAPED_STRING_INNER: _NON_CONTROL_CHAR | _ESCAPED_CHAR +ESCAPED_STRING: /"/ ESCAPED_STRING_INNER* /"/ + + + +// +// Names (Variables) +// +LCASE_LETTER: "a".."z" +UCASE_LETTER: "A".."Z" + +LETTER: UCASE_LETTER | LCASE_LETTER +WORD: LETTER+ + +CNAME: ("_"|LETTER) ("_"|LETTER|DIGIT)* + + +// +// Whitespace +// +WS_INLINE: (" "|/\t/)+ +WS: /[ \t\f\r\n]/+ + +CR : /\r/ +LF : /\n/ +NEWLINE: (CR? LF)+ + + +// Comments +SH_COMMENT: /#[^\n]*/ +CPP_COMMENT: /\/\/[^\n]*/ +C_COMMENT: "/*" /(.|\n)*?/ "*/" +SQL_COMMENT: /--[^\n]*/ diff --git a/vllm/lib/python3.10/site-packages/outlines/models/__init__.py b/vllm/lib/python3.10/site-packages/outlines/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe6f861acc5c5a1c12273d64065d95ee4fa4be21 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/models/__init__.py @@ -0,0 +1,19 @@ +"""Module that contains all the models integrated in outlines. + +We group the models in submodules by provider instead of theme (completion, chat +completion, diffusers, etc.) and use routing functions everywhere else in the +codebase. + +""" + +from typing import Union + +from .exllamav2 import ExLlamaV2Model, exl2 +from .llamacpp import LlamaCpp, llamacpp +from .mlxlm import MLXLM, mlxlm +from .openai import OpenAI, azure_openai, openai +from .transformers import Transformers, TransformerTokenizer, mamba, transformers +from .transformers_vision import TransformersVision, transformers_vision +from .vllm import VLLM, vllm + +LogitsGenerator = Union[Transformers, LlamaCpp, OpenAI, ExLlamaV2Model, MLXLM, VLLM] diff --git a/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4e90195779d38c80faf298c25f88078529d10b9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/exllamav2.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/exllamav2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..319ea50793184bb8188ae0a66f02b6c90859f8ed Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/exllamav2.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/llamacpp.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/llamacpp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4669730b1736291c56f963a5bc2ddc3aea149432 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/llamacpp.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/mlxlm.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/mlxlm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16994a1ca3b319c1bb226da6604d40d3b86180c3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/mlxlm.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/openai.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/openai.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d392c228345e50a765df2706d61af4ff7085c3b9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/openai.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/tokenizer.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/tokenizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..010b71fd84042467a23b3cf0b03f6fdf03cbcea8 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/tokenizer.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/transformers.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/transformers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..522b979595c014e97035d2253bae814bc7eca3c4 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/transformers.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/transformers_vision.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/transformers_vision.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7af97c8124c1305cc33bf3a5c931da4ca8c56142 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/transformers_vision.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/vllm.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/vllm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a51b3afaa6ba40d4ae9942aea266f639bae41b1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/models/__pycache__/vllm.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/models/exllamav2.py b/vllm/lib/python3.10/site-packages/outlines/models/exllamav2.py new file mode 100644 index 0000000000000000000000000000000000000000..d2aa84b0dbbcdcc3c337dfad6fafc092f98b64c2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/models/exllamav2.py @@ -0,0 +1,357 @@ +import dataclasses +from typing import TYPE_CHECKING, Iterator, List, Optional, Tuple, TypedDict, Union + +import torch +from typing_extensions import Unpack + +from outlines.generate.api import GenerationParameters, SamplingParameters + +if TYPE_CHECKING: + import torch.LongTensor + from exllamav2.generator import ExLlamaV2DynamicGenerator, ExLlamaV2Sampler + + +class ExllamaV2Params(TypedDict, total=False): + max_tokens: int + stop_conditions: Optional[List[Union[int, str]]] + seed: Optional[int] + gen_settings: "ExLlamaV2Sampler.Settings" + max_new_tokens: List[int] + + +class OutlinesExLlamaV2Tokenizer: + def __init__(self, tokenizer): + self.exl2_tokenizer = tokenizer + self.vocabulary = self.exl2_tokenizer.get_piece_to_id_dict() + self.special_tokens = set(self.exl2_tokenizer.extended_piece_to_id) + self.eos_token_id = self.exl2_tokenizer.eos_token_id + + def convert_token_to_string(self, token): + return token + + def decode(self, token_ids: "torch.LongTensor") -> List[str]: + decoded = self.exl2_tokenizer.decode( + torch.tensor(token_ids), + 
decode_special_tokens=False, + ) + if isinstance(decoded, str): + return [decoded] + return decoded + + +class ExLlamaV2Model: + """Represents a `exl2` model.""" + + def __init__( + self, + generator: "ExLlamaV2DynamicGenerator", + tokenizer: "OutlinesExLlamaV2Tokenizer", + max_seq_len: int, + ): + self.generator = generator + self.tokenizer = tokenizer + self.max_seq_len = max_seq_len + + def prepare_generation_parameters( + self, + prompts: Union[str, List[str]], + generation_parameters: GenerationParameters, + sampling_parameters: SamplingParameters, + structure_logits_processor, + **exllamav2_params: Unpack[ExllamaV2Params], + ) -> Tuple[ExllamaV2Params, Union[str, List[str]]]: + """Prepare the generation parameters. + + `exllamav2` uses different default values + + """ + from exllamav2.generator import ExLlamaV2Sampler + + if isinstance(prompts, str): + prompts = [prompts] + max_tokens, stop_at, seed = dataclasses.astuple(generation_parameters) + + if max_tokens is None: + max_tokens = [] + for prompt in prompts: + ids = self.generator.tokenizer.encode( + prompt, encode_special_tokens=True + ) + prompt_tokens = ids.shape[-1] + max_tokens.append(self.max_seq_len - prompt_tokens) + exllamav2_params["max_new_tokens"] = max_tokens + else: + exllamav2_params["max_new_tokens"] = [ + max_tokens for _ in range(len(prompts)) + ] + + stop_conditions = [self.generator.tokenizer.eos_token_id] + if isinstance(generation_parameters.stop_at, str): + stop_conditions.append(generation_parameters.stop_at) + elif isinstance(generation_parameters.stop_at, list): + for stop_at in generation_parameters.stop_at: + stop_conditions.append(stop_at) + exllamav2_params["stop_conditions"] = stop_conditions + exllamav2_params["seed"] = seed + + gen_settings = ExLlamaV2Sampler.Settings() + if sampling_parameters.temperature is not None: + gen_settings.temperature = sampling_parameters.temperature + if sampling_parameters.top_p is not None: + gen_settings.top_p = sampling_parameters.top_p + if sampling_parameters.top_k is not None: + gen_settings.top_k = sampling_parameters.top_k + gen_settings.logits_processor = structure_logits_processor + exllamav2_params["gen_settings"] = gen_settings + if sampling_parameters.num_samples > 1: + prompts = prompts * sampling_parameters.num_samples + exllamav2_params["max_new_tokens"] = ( + exllamav2_params["max_new_tokens"] * sampling_parameters.num_samples + ) + + if len(prompts) == 1: + prompts = prompts[0] + + return exllamav2_params, prompts + + def reformat_output( + self, output: Union[str, List[str]], sampling_parameters: SamplingParameters + ): + """ + The purpose of this function is to reformat the output from exllamav2's output format to outline's output format + For exllamav2, it mainly accepts only a list or a string(they also do cfg sampling with tuples but we will ignore this for now) + The exllamav2's logic is + 1. If the prompt is a string, return a string. This is the same as outlines + 2. If a prompt is a list, return a list. This is not the same as outlines output in that if the list is only one element, the string is expected to be outputted. + 3. There is no such thing as num_samples, so the prompts had to be duplicated by num_samples times. 
Then, we had the function output a list of lists + """ + if isinstance(output, str): + return output + if len(output) == 1: + return output[0] + if sampling_parameters.num_samples > 1: + if len(output) == sampling_parameters.num_samples: + return output + assert len(output) % sampling_parameters.num_samples == 0 + num_items_per_sample = len(output) // sampling_parameters.num_samples + new_output = [] + for i in range(sampling_parameters.num_samples): + curr_sample = [] + for j in range(num_items_per_sample): + curr_sample.append(output[i * num_items_per_sample + j]) + new_output.append(curr_sample) + return new_output + return output + + def generate( + self, + prompts: Union[str, List[str]], + generation_parameters: GenerationParameters, + structure_logits_processor, + sampling_parameters: SamplingParameters, + **exllamav2_params: Unpack[ExllamaV2Params], + ) -> Union[str, List[str]]: + exllamav2_params, prompts = self.prepare_generation_parameters( + prompts, + generation_parameters, + sampling_parameters, + structure_logits_processor, + ) + """ + In exllamav2, it needs the max amount of new tokens generated. + The reason exllamav2_params["max_new_tokens"] is a list is because in prepare_generation_parameters + the max amount of tokens that can be generated by the model for each prompt(by encoding with tokenizer) is calculated. + The minimum is picked because otherwise it might be possible for one of the + prompts to exceed the max sequence length. + """ + output = self.generator.generate( + prompt=prompts, + gen_settings=exllamav2_params["gen_settings"], + max_new_tokens=min(exllamav2_params["max_new_tokens"]), + completion_only=True, + encode_special_tokens=True, + stop_conditions=exllamav2_params["stop_conditions"], + add_bos=False, + seed=exllamav2_params["seed"], + ) + + return self.reformat_output(output, sampling_parameters) + + def stream( + self, + prompts: Union[str, List[str]], + generation_parameters: GenerationParameters, + structure_logits_processor, + sampling_parameters: SamplingParameters, + **exllamav2_params: Unpack[ExllamaV2Params], + ) -> Iterator[Union[str, List[str]]]: + from exllamav2.generator import ExLlamaV2DynamicJob + + exllamav2_params, prompts = self.prepare_generation_parameters( + prompts, + generation_parameters, + sampling_parameters, + structure_logits_processor, + ) + + order = {} + if isinstance(prompts, str): + prompts = [prompts] + batch_size = len(prompts) + seed = exllamav2_params["seed"] + for idx, p in enumerate(prompts): + input_ids = self.generator.tokenizer.encode( + p, encode_special_tokens=True, add_bos=False + ) + + job = ExLlamaV2DynamicJob( + input_ids=input_ids, + max_new_tokens=exllamav2_params["max_new_tokens"][idx], + min_new_tokens=0, + seed=seed, + stop_conditions=exllamav2_params["stop_conditions"], + gen_settings=exllamav2_params["gen_settings"], + token_healing=False, + decode_special_tokens=False, + ) + + if seed is not None: + seed += 1 + + serial = self.generator.enqueue(job) + order[serial] = idx + + # Collect outputs until all jobs finish + + next_text = [""] * batch_size + + def token_generator() -> Iterator[str]: + while self.generator.num_remaining_jobs(): + results = self.generator.iterate() + for r in results: + idx = order[r["serial"]] + if r["stage"] == "streaming": + text = r.get("text", "") + next_text[idx] = text + if r["eos"]: + next_text[idx] = "" + yield self.reformat_output(next_text, sampling_parameters) + return + + return token_generator() + + +def exl2( + model_path: str, + draft_model_path: Optional[str] 
= None, + max_seq_len: Optional[int] = None, + cache_q4: bool = False, + paged: bool = True, + max_chunk_size: Optional[int] = None, +) -> ExLlamaV2Model: + """ + Load an ExLlamaV2 model. + + Parameters + ---------- + model_path (str) + Path to the model directory. + device (str) + Device to load the model on. Pass in 'cuda' for GPU or 'cpu' for CPU + max_seq_len (Optional[int], optional) + Maximum sequence length. Defaults to None. + scale_pos_emb (Optional[float], optional) + Scale factor for positional embeddings. Defaults to None. + scale_alpha_value (Optional[float], optional) + Scale alpha value. Defaults to None. + no_flash_attn (Optional[bool], optional) + Disable flash attention. Defaults to None. + num_experts_per_token (Optional[int], optional) + Number of experts per token. Defaults to None. + cache_q4 (bool, optional) + Use Q4 cache. Defaults to False. + tokenizer_kwargs (dict, optional) + Additional keyword arguments for the tokenizer. Defaults to {}. + gpu_split (str) + \"auto\", or VRAM allocation per GPU in GB. Auto will use exllama's autosplit feature + low_mem (bool, optional) + Enable VRAM optimizations, potentially trading off speed + verbose (bool, optional) + Enable if you want debugging statements + + Returns + ------- + An `ExLlamaV2Model` instance. + + Raises + ------ + `ImportError` if the `exllamav2` library is not installed. + + """ + try: + from exllamav2 import ( + ExLlamaV2, + ExLlamaV2Cache, + ExLlamaV2Cache_Q4, + ExLlamaV2Config, + ExLlamaV2Tokenizer, + ) + from exllamav2.generator import ExLlamaV2DynamicGenerator + + except ImportError: + raise ImportError( + "The `exllamav2`, `transformers` and `torch` libraries needs to be installed in order to use `exllamav2` models. " + "Please run `pip install transformers torch git+https://github.com/lapp0/exllamav2@sampler-logits-processor` " + "Documentation: https://dottxt-ai.github.io/outlines/latest/reference/models/exllamav2/" + ) + config = ExLlamaV2Config(model_path) + if max_chunk_size is not None: + config.max_input_len = max_chunk_size + config.max_attention_size = max_chunk_size**2 + + config.arch_compat_overrides() + model = ExLlamaV2(config) + if max_seq_len is None: + max_seq_len = -1 + if cache_q4: + cache = ExLlamaV2Cache_Q4(model, max_seq_len=max_seq_len, lazy=True) + else: + cache = ExLlamaV2Cache(model, max_seq_len=max_seq_len, lazy=True) + model.load_autosplit(cache, progress=True) + + print("Loading tokenizer...") + tokenizer = ExLlamaV2Tokenizer(config) + max_batch_size = 4 if paged else 1 + + draft_model = None + draft_cache = None + if draft_model_path is not None: + draft_config = ExLlamaV2Config(draft_model_path) + draft_model = ExLlamaV2(draft_config) + + if cache_q4: + draft_cache = ExLlamaV2Cache_Q4( + draft_model, max_seq_len=max_seq_len, lazy=True + ) + else: + draft_cache = ExLlamaV2Cache( + draft_model, max_seq_len=max_seq_len, lazy=True + ) + + # Initialize the generator with all default parameters + generator = ExLlamaV2DynamicGenerator( + model=model, + cache=cache, + draft_model=draft_model, + draft_cache=draft_cache, + tokenizer=tokenizer, + max_batch_size=max_batch_size, + use_ngram_draft=False, + max_chunk_size=max_chunk_size, + paged=paged, + ) + max_seq_len = cache.max_seq_len + + outlines_tokenizer = OutlinesExLlamaV2Tokenizer(tokenizer) + outlines_exl2_model = ExLlamaV2Model(generator, outlines_tokenizer, max_seq_len) + return outlines_exl2_model diff --git a/vllm/lib/python3.10/site-packages/outlines/models/llamacpp.py 
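A minimal usage sketch for the `exl2` loader above. The model directory, sequence length and cache settings are placeholders rather than values taken from this diff, and `exllamav2` must be installed as the ImportError message describes.

```python
from outlines.models.exllamav2 import exl2

# Hypothetical local path to an EXL2-quantised model directory.
model = exl2(
    model_path="/models/Mistral-7B-exl2",
    max_seq_len=4096,   # forwarded to the KV cache
    cache_q4=True,      # use ExLlamaV2Cache_Q4 instead of the FP16 cache
    paged=True,         # dynamic generator with max_batch_size=4
)
# `model` is an ExLlamaV2Model wrapping the dynamic generator, the
# Outlines tokenizer adapter and the cache's max_seq_len.
```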
b/vllm/lib/python3.10/site-packages/outlines/models/llamacpp.py new file mode 100644 index 0000000000000000000000000000000000000000..904b193c457d9d965aeda023f6b02d2860940eba --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/models/llamacpp.py @@ -0,0 +1,407 @@ +import dataclasses +import pickle +import warnings +from typing import ( + TYPE_CHECKING, + Dict, + Iterator, + List, + Optional, + Set, + Tuple, + TypedDict, + Union, +) + +from typing_extensions import Unpack + +from outlines.generate.api import GenerationParameters, SamplingParameters +from outlines.models.tokenizer import Tokenizer + +if TYPE_CHECKING: + from llama_cpp import Llama, LogitsProcessorList + + +class LlamaCppTokenizer(Tokenizer): + def __init__(self, model: "Llama"): + self.eos_token_id = model.token_eos() + self.eos_token = model.tokenizer().decode([self.eos_token_id]) + self.pad_token_id = self.eos_token_id + self.special_tokens: Set[str] = set() + + self.vocabulary: Dict[str, int] = dict() + + self.tokenizer = model.tokenizer() + + # TODO: Remove when https://github.com/ggerganov/llama.cpp/pull/5613 is resolved + self._hf_tokenizer = None + try: + self.vocabulary = model.tokenizer_.hf_tokenizer.get_vocab() + self._hf_tokenizer = model.tokenizer_.hf_tokenizer + except AttributeError: + # ### + for t in range(model.n_vocab()): + token_piece = model.tokenizer().decode([t]) + self.vocabulary[token_piece] = t + + # ensure stable ordering of vocabulary + self.vocabulary = { + tok: tok_id + for tok, tok_id in sorted(self.vocabulary.items(), key=lambda x: x[1]) + } + + self._hash = None + + def decode(self, token_ids: List[int]) -> List[str]: + decoded_bytes = self.tokenizer.detokenize(token_ids) + return [decoded_bytes.decode("utf-8", errors="ignore")] + + def encode( + self, prompt: Union[str, List[str]], add_bos: bool = True, special: bool = True + ) -> Tuple[List[int], List[int]]: + if isinstance(prompt, list): + raise NotImplementedError( + "llama-cpp-python tokenizer doesn't support batch tokenization" + ) + token_ids = self.tokenizer.tokenize( + prompt.encode("utf-8", errors="ignore"), add_bos=add_bos, special=special + ) + # generate attention mask, missing from llama-cpp-python + attention_mask = [ + 1 if token_id != self.pad_token_id else 0 for token_id in token_ids + ] + return token_ids, attention_mask + + def convert_token_to_string(self, token: str) -> str: + if self._hf_tokenizer is not None: + from transformers.file_utils import SPIECE_UNDERLINE + + token_str = self._hf_tokenizer.convert_tokens_to_string([token]) + if token.startswith(SPIECE_UNDERLINE) or token == "<0x20>": + token_str = " " + token_str + return token_str + else: + return token + + def __eq__(self, other): + if not isinstance(other, LlamaCppTokenizer): + return False + return self.__getstate__() == other.__getstate__() + + def __hash__(self): + if self._hash is None: + self._hash = hash(pickle.dumps(self)) + return self._hash + + def __getstate__(self): + """Create a stable representation for outlines.caching""" + return ( + self.vocabulary, + self.eos_token_id, + self.eos_token, + self.pad_token_id, + sorted(self.special_tokens), + ) + + def __setstate__(self, state): + raise NotImplementedError("Cannot load a pickled llamacpp tokenizer") + + +class LlamaCppParams(TypedDict, total=False): + suffix: Optional[str] + temperature: float + top_p: float + min_p: float + typical_p: float + seed: int + max_tokens: int + logits_processor: "LogitsProcessorList" + stop: Optional[Union[str, List[str]]] + frequence_penalty: float + 
presence_penalty: float + repeat_penalty: float + top_k: int + tfs_z: float + mirostat_mode: int + mirostat_tau: float + mirostat_eta: float + stream: bool + + +class LlamaCpp: + """Represents a model provided by the `llama-cpp-python` library. + + We wrap models from model providing libraries in order to give all of + them the same interface in Outlines and allow users to easily switch + between providers. This class wraps the `llama_cpp.Llama` class from the + `llama-cpp-python` library. + + """ + + def __init__(self, model: "Llama"): + self.model = model + + @property + def tokenizer(self): + return LlamaCppTokenizer(self.model) + + def prepare_generation_parameters( + self, + generation_parameters: GenerationParameters, + sampling_parameters: SamplingParameters, + structure_logits_processor, + **llama_cpp_params: Unpack[LlamaCppParams], + ): + """Prepare the generation parameters. + + `llama-cpp-python` uses different default values + + """ + from llama_cpp import LogitsProcessorList + + max_tokens, stop_at, seed = dataclasses.astuple(generation_parameters) + + # We update `llama_cpp_params` with the values the user passed to the + # generator. + if "stop" not in llama_cpp_params: + llama_cpp_params["stop"] = stop_at + if "seed" not in llama_cpp_params: + llama_cpp_params["seed"] = seed + + # Somehow `llama-cpp-python` generates `max_tokens + 1` tokens + if "max_tokens" not in llama_cpp_params: + if max_tokens is None: + llama_cpp_params["max_tokens"] = -1 # indicates unlimited tokens + else: + llama_cpp_params["max_tokens"] = max_tokens - 1 + else: + llama_cpp_params["max_tokens"] = llama_cpp_params["max_tokens"] - 1 + + sampler, num_samples, top_p, top_k, temperature = dataclasses.astuple( + sampling_parameters + ) + + # We update the `llama_cpp_params` with the sampling values that + # were specified by the user via the `Sampler` class, unless they + # are also specified in `llama_cpp_params`. We also disable other + # sampling methods that are enabled by default and reset the temperature + # value. + # + # See https://github.com/ggerganov/llama.cpp/blob/e11a8999b5690f810c2c99c14347f0834e68c524/common/sampling.h#L22 + # for the default values in `llama.cpp` and indications to disable the sampling modes. + # Mirostat sampling, tail-free sampling and all penalties are disabled by default. + # + # See https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.__call__ + # for default values in `llama-cpp-python` + if sampler == "beam_search": + raise NotImplementedError( + "The `llama_cpp_python` library does not support Beam Search." + ) + if num_samples != 1: + raise NotImplementedError( + "The `llama_cpp_python` library does not allow to take several samples." 
+ ) + if "top_p" not in llama_cpp_params: + if top_p is not None: + llama_cpp_params["top_p"] = top_p + else: + llama_cpp_params["top_p"] = 1.0 + + if "min_p" not in llama_cpp_params: + llama_cpp_params["min_p"] = 0.0 + + if "top_k" not in llama_cpp_params: + if top_k is not None: + llama_cpp_params["top_k"] = top_k + else: + llama_cpp_params["top_k"] = -1 + + if "temperature" not in llama_cpp_params: + if temperature is not None: + llama_cpp_params["temperature"] = temperature + else: + llama_cpp_params["temperature"] = 1.0 + + if "repeat_penalty" not in llama_cpp_params: + llama_cpp_params["repeat_penalty"] = 1.0 + + # The choice to stream or not should happen via the high-level API + llama_cpp_params["stream"] = False + + if structure_logits_processor is not None: + if "logits_processor" in llama_cpp_params: + llama_cpp_params["logits_processor"].append(structure_logits_processor) + else: + llama_cpp_params["logits_processor"] = LogitsProcessorList( + [structure_logits_processor] + ) + + return llama_cpp_params + + def generate( + self, + prompts: Union[str, List[str]], + generation_parameters: GenerationParameters, + structure_logits_processor, + sampling_parameters: SamplingParameters, + **llama_cpp_params: Unpack[LlamaCppParams], + ) -> str: + """Generate text using `llama-cpp-python`. + + Arguments + --------- + prompts + A prompt or list of prompts. + generation_parameters + An instance of `GenerationParameters` that contains the prompt, + the maximum number of tokens, stop sequences and seed. All the + arguments to `SequenceGeneratorAdapter`'s `__cal__` method. + logits_processor + The logits processor to use when generating text. + sampling_parameters + An instance of `SamplingParameters`, a dataclass that contains + the name of the sampler to use and related parameters as available + in Outlines. + llama_cpp_params + Keyword arguments that can be passed to + `llama_cpp_python.Llama.__call__`. The values in `llama_cpp_params` + supersede the values of the parameters in `generation_parameters` and + `sampling_parameters`. See the `llama_cpp_python` documentation for + a list of possible values: https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.__call__ + + Returns + ------- + The generated text. + + """ + if not isinstance(prompts, str): + raise NotImplementedError( + "The `llama-cpp-python` library does not support batch inference." + ) + + llama_cpp_params = self.prepare_generation_parameters( + generation_parameters, + sampling_parameters, + structure_logits_processor, + **llama_cpp_params, + ) + completion = self.model(prompts, **llama_cpp_params) + result = completion["choices"][0]["text"] + + self.model.reset() + + return result + + def stream( + self, + prompts: Union[str, List[str]], + generation_parameters: GenerationParameters, + structure_logits_processor, + sampling_parameters: SamplingParameters, + **llama_cpp_params: Unpack[LlamaCppParams], + ) -> Iterator[str]: + """Stream text using `llama-cpp-python`. + + Arguments + --------- + prompts + A prompt or list of prompts. + generation_parameters + An instance of `GenerationParameters` that contains the prompt, + the maximum number of tokens, stop sequences and seed. All the + arguments to `SequenceGeneratorAdapter`'s `__cal__` method. + logits_processor + The logits processor to use when generating text. + sampling_parameters + An instance of `SamplingParameters`, a dataclass that contains + the name of the sampler to use and related parameters as available + in Outlines. 
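To illustrate the precedence rule documented in `generate()` above, values passed through `**llama_cpp_params` supersede those carried by the parameter dataclasses. The repository, file glob and numeric values below are placeholders; the loader used here is the `llamacpp()` helper defined further down in this file.

```python
from outlines.generate.api import GenerationParameters, SamplingParameters
from outlines.models.llamacpp import llamacpp

# Hypothetical GGUF repository and file glob.
model = llamacpp("Qwen/Qwen2-0.5B-Instruct-GGUF", "*q4_k_m.gguf")

gen_params = GenerationParameters(64, None, None)  # max_tokens, stop_at, seed
samp_params = SamplingParameters("multinomial", 1, None, None, 0.7)  # sampler, n, top_p, top_k, temperature

text = model.generate(
    "Q: What is the capital of France?\nA:",
    gen_params,
    None,             # no structured-generation logits processor
    samp_params,
    temperature=0.2,  # supersedes the 0.7 set in SamplingParameters
    top_p=0.9,
)
```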
+ llama_cpp_params + Keyword arguments that can be passed to + `llama_cpp_python.Llama.__call__`. The values in `llama_cpp_params` + supersede the values of the parameters in `generation_parameters` and + `sampling_parameters`. See the `llama_cpp_python` documentation for + a list of possible values: https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.__call__ + + Returns + ------- + A generator that return strings. + + """ + + if not isinstance(prompts, str): + raise NotImplementedError( + "The `llama-cpp-python` library does not support batch inference." + ) + + llama_cpp_params = self.prepare_generation_parameters( + generation_parameters, + sampling_parameters, + structure_logits_processor, + **llama_cpp_params, + ) + llama_cpp_params["stream"] = True + generator = self.model(prompts, **llama_cpp_params) + + def token_generator() -> Iterator[str]: + while True: + try: + result = next(generator) + yield result["choices"][0]["text"] + except StopIteration: + self.model.reset() + return + + return token_generator() + + def load_lora(self, adapter_path: str): + if self.model._model.apply_lora_from_file( + adapter_path, + 1.0, + ): + raise RuntimeError(f"Failed to apply LoRA from lora path: {adapter_path}") + + +def llamacpp( + repo_id: str, filename: Optional[str] = None, **llamacpp_model_params +) -> LlamaCpp: + """Load a model from the `llama-cpp-python` library. + + We use the `Llama.from_pretrained` classmethod that downloads models + directly from the HuggingFace hub, instead of asking users to specify + a path to the downloaded model. One can still load a local model + by initializing `llama_cpp.Llama` directly. + + Arguments + --------- + repo_id + The name of the model repository. + filename: + A filename of glob pattern to match the model file in the repo. + llama_cpp_model_params + Llama-specific model parameters. 
See the `llama-cpp-python` documentation + for the full list: https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.__init__ + + """ + from llama_cpp import Llama + + # Default to using the model's full context length + if "n_ctx" not in llamacpp_model_params: + llamacpp_model_params["n_ctx"] = 0 + + if "verbose" not in llamacpp_model_params: + llamacpp_model_params["verbose"] = False + + # TODO: Remove when https://github.com/ggerganov/llama.cpp/pull/5613 is resolved + if "tokenizer" not in llamacpp_model_params: + warnings.warn( + "The pre-tokenizer in `llama.cpp` handles unicode improperly " + + "(https://github.com/ggerganov/llama.cpp/pull/5613)\n" + + "Outlines may raise a `RuntimeError` when building the regex index.\n" + + "To circumvent this error when using `models.llamacpp()` you may pass the argument" + + "`tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained()`\n" + ) + + model = Llama.from_pretrained(repo_id, filename, **llamacpp_model_params) + + return LlamaCpp(model) diff --git a/vllm/lib/python3.10/site-packages/outlines/models/mlxlm.py b/vllm/lib/python3.10/site-packages/outlines/models/mlxlm.py new file mode 100644 index 0000000000000000000000000000000000000000..d8b7e032c49a505d3e2483cf81b6e1020752e915 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/models/mlxlm.py @@ -0,0 +1,242 @@ +import dataclasses +from typing import TYPE_CHECKING, Generator, Iterator, List, Optional, Tuple, Union + +from .transformers import TransformerTokenizer + +if TYPE_CHECKING: + import mlx.core as mx + import mlx.nn as nn + from transformers import PreTrainedTokenizer + + from outlines.generate.api import GenerationParameters, SamplingParameters + from outlines.processors import OutlinesLogitsProcessor + + +class MLXLM: + """ + Represents an `mlx_lm` model + """ + + def __init__( + self, + model: "nn.Module", + tokenizer: "PreTrainedTokenizer", + ): + self.model = model + self.mlx_tokenizer = tokenizer # returns mlx tensors, used for encode() + self.tokenizer = TransformerTokenizer( + tokenizer._tokenizer + ) # _tokenizer is HF Tokenizer + + def generate( + self, + prompts: Union[str, List[str]], + generation_parameters: "GenerationParameters", + logits_processor, + sampling_parameters: "SamplingParameters", + ) -> str: + streamer = self.stream( + prompts, generation_parameters, logits_processor, sampling_parameters + ) + return "".join(list(streamer)) + + def stream( + self, + prompts: Union[str, List[str]], + generation_parameters: "GenerationParameters", + logits_processor, + sampling_parameters: "SamplingParameters", + ) -> Iterator[str]: + """Generate text using `mlx_lm`. + + Arguments + --------- + prompts + A prompt or list of prompts. + generation_parameters + An instance of `GenerationParameters` that contains the prompt, + the maximum number of tokens, stop sequences and seed. All the + arguments to `SequenceGeneratorAdapter`'s `__cal__` method. + logits_processor + The logits processor to use when generating text. + sampling_parameters + An instance of `SamplingParameters`, a dataclass that contains + the name of the sampler to use and related parameters as available + in Outlines. + Returns + ------- + The generated text. 
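The `llamacpp()` loader above downloads a GGUF file with `Llama.from_pretrained` and, unless overridden, uses the model's full context (`n_ctx=0`). A sketch with hypothetical repository and file names that also applies the pre-tokenizer workaround mentioned in the warning:

```python
import llama_cpp
from outlines.models.llamacpp import llamacpp

model = llamacpp(
    "TheBloke/Mistral-7B-Instruct-v0.2-GGUF",   # placeholder repository
    "mistral-7b-instruct-v0.2.Q4_K_M.gguf",     # placeholder file name
    n_ctx=4096,
    # Work around the llama.cpp pre-tokenizer unicode issue noted above:
    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained(
        "mistralai/Mistral-7B-Instruct-v0.2"
    ),
)
```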
+ """ + import mlx.core as mx + + max_tokens, stop_at, seed = dataclasses.astuple(generation_parameters) + sampler, num_samples, top_p, top_k, temperature = dataclasses.astuple( + sampling_parameters + ) + if max_tokens is None: + max_tokens = int(1e9) + + if not isinstance(prompts, str): + raise NotImplementedError( + "The `mlx-lm` library does not support batch inference." + ) + if sampler == "beam_search": + raise NotImplementedError( + "The `mlx-lm` library does not support Beam Search." + ) + if num_samples != 1: + raise NotImplementedError( + "The `mlx-lm` library does not allow to take several samples." + ) + if top_k is not None: + raise NotImplementedError("The `mlx-lm` library does not support top_k.") + if seed is not None: + raise NotImplementedError("The `mlx-lm` library does not support seed.") + if stop_at is not None: + raise NotImplementedError("The `mlx-lm` library does not support stop_at.") + + generate_kwargs = { + "temp": temperature, + "top_p": top_p, + "sampler": sampler, + "logits_processor": logits_processor, + } + + # Adapted from + # https://github.com/ml-explore/mlx-examples/blob/4872727/llms/mlx_lm/utils.py#L267 + prompt_tokens = mx.array(self.mlx_tokenizer.encode(prompts)) + + detokenizer = self.mlx_tokenizer.detokenizer + detokenizer.reset() + + for (token, prob), n in zip( + self.generate_step(prompt_tokens, **generate_kwargs), + range(max_tokens), + ): + if token == self.tokenizer.eos_token_id: + break + detokenizer.add_token(token) + yield detokenizer.last_segment + + detokenizer.finalize() + yield detokenizer.last_segment + + def generate_step( + self, + prompt: "mx.array", + temp: Optional[float], + top_p: Optional[float], + sampler: str, + logits_processor: "OutlinesLogitsProcessor", + ) -> Generator[Tuple[int, float], None, None]: + """ + Adapted from + https://github.com/ml-explore/mlx-examples/blob/4872727/llms/mlx_lm/utils.py#L129 + + A generator producing token ids based on the given prompt from the model. + + Args: + prompt (mx.array): The input prompt. + temp (float): The temperature for sampling, if 0 the argmax is used. + Default: ``0``. + top_p (float, optional): Nulceus sampling, higher means model considers + more less likely words. + sampler (str): The sampler string defined by SequenceGeneratorAdapter + logits_processor (OutlinesLogitsProcessor): Augment logits before sampling. 
+ """ + import mlx.core as mx + import mlx_lm + + temperature: float = temp or 1.0 + + def sample(logits: "mx.array") -> Tuple["mx.array", float]: + softmax_logits = mx.softmax(logits) + + if temperature == 0.0 or sampler == "greedy": + token = mx.argmax(logits, axis=-1) + elif sampler == "multinomial": + if top_p is not None and top_p > 0 and top_p < 1.0: + token = mlx_lm.sample_utils.top_p_sampling( + logits, top_p, temperature + ) + else: + token = mx.random.categorical(logits * (1 / temperature)) + else: + raise ValueError(f"Invalid mlx-lm sampler: `{sampler}`") + + prob = softmax_logits[0, token] + return token, prob + + cache = mlx_lm.models.cache.make_prompt_cache(self.model) + + # kv cache contains processed input IDs, we pass the unprocessed inputs and cache to model() + unprocessed_input_ids = prompt + generated_ids: List[int] = [] + + while True: + logits = self.model(unprocessed_input_ids[None], cache=cache) + logits = logits[:, -1, :] + + if logits_processor is not None: + # convert to logits_processor 1d expectation, apply, then convert back + logits_1d = logits.reshape(-1) + logits_1d = logits_processor(generated_ids, logits_1d) + logits = logits_1d.reshape(1, -1) + + new_token_single, prob = sample(logits) + new_token = new_token_single.item() + yield new_token, prob + + generated_ids.append(new_token) + unprocessed_input_ids = new_token_single + + +def mlxlm( + model_name: str, + tokenizer_config: dict = {}, + model_config: dict = {}, + adapter_path: Optional[str] = None, + lazy: bool = False, +): + """Instantiate a model from the `mlx_lm` library and its tokenizer. + + Signature adapted from + https://github.com/ml-explore/mlx-examples/blob/4872727/llms/mlx_lm/utils.py#L422 + + Parameters + ---------- + Args: + path_or_hf_repo (Path): The path or the huggingface repository to load the model from. + tokenizer_config (dict, optional): Configuration parameters specifically for the tokenizer. + Defaults to an empty dictionary. + model_config(dict, optional): Configuration parameters specifically for the model. + Defaults to an empty dictionary. + adapter_path (str, optional): Path to the LoRA adapters. If provided, applies LoRA layers + to the model. Default: ``None``. + lazy (bool): If False eval the model parameters to make sure they are + loaded in memory before returning, otherwise they will be loaded + when needed. Default: ``False`` + + Returns + ------- + A `MLXLM` model instance. + + """ + try: + import mlx.core as mx + import mlx_lm + except ImportError: + raise ImportError( + "The `mlx_lm` library needs to be installed in order to use `mlx_lm` models." 
+ ) + if not mx.metal.is_available(): + raise RuntimeError("You cannot use `mlx_lm` without Apple Silicon (Metal)") + + model, tokenizer = mlx_lm.load( + model_name, + tokenizer_config=tokenizer_config, + model_config=model_config, + adapter_path=adapter_path, + lazy=lazy, + ) + return MLXLM(model, tokenizer) diff --git a/vllm/lib/python3.10/site-packages/outlines/models/openai.py b/vllm/lib/python3.10/site-packages/outlines/models/openai.py new file mode 100644 index 0000000000000000000000000000000000000000..89c26f217938f5cff6883da719dfa5e0b19e3f88 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/models/openai.py @@ -0,0 +1,297 @@ +"""Integration with OpenAI's API.""" +import copy +import functools +from dataclasses import asdict, dataclass, field, replace +from typing import Callable, Dict, List, Optional, Tuple, Union + +import numpy as np + +from outlines.base import vectorize +from outlines.caching import cache + +__all__ = ["OpenAI", "openai", "azure_openai"] + + +@dataclass(frozen=True) +class OpenAIConfig: + """Represents the parameters of the OpenAI API. + + The information was last fetched on 2023/11/20. We document below the + properties that are specific to the OpenAI API. Not all these properties are + supported by Outlines. + + Properties + ---------- + model + The name of the model. Available models can be found on OpenAI's website. + frequence_penalty + Number between 2.0 and -2.0. Positive values penalize new tokens based on + their existing frequency in the text, + logit_bias + Modifies the likelihood of specified tokens to appear in the completion. + Number between -100 (forbid) and +100 (only allows). + n + The number of completions to return for each prompt. + presence_penalty + Similar to frequency penalty. + response_format + Specifies the format the model must output. `{"type": "json_object"}` + enables JSON mode. + seed + Two completions with the same `seed` value should return the same + completion. This is however not guaranteed. + stop + Up to 4 words where the API will stop the completion. + temperature + Number between 0 and 2. Higher values make the output more random, while + lower values make it more deterministic. + top_p + Number between 0 and 1. Parameter for nucleus sampling. + user + A unique identifier for the end-user. + + """ + + model: str = "" + frequency_penalty: float = 0 + logit_bias: Dict[int, int] = field(default_factory=dict) + max_tokens: Optional[int] = None + n: int = 1 + presence_penalty: float = 0 + response_format: Optional[Dict[str, str]] = None + seed: Optional[int] = None + stop: Optional[Union[str, List[str]]] = None + temperature: float = 1.0 + top_p: int = 1 + user: str = field(default_factory=str) + + +class OpenAI: + """An object that represents the OpenAI API.""" + + def __init__( + self, + client, + config, + system_prompt: Optional[str] = None, + ): + """Create an `OpenAI` instance. + + This class supports the standard OpenAI API, the Azure OpeanAI API as + well as compatible APIs that rely on the OpenAI client. + + Parameters + ---------- + client + An instance of the API's async client. + config + An instance of `OpenAIConfig`. Can be useful to specify some + parameters that cannot be set by calling this class' methods. + """ + + self.client = client + self.config = config + + # We count the total number of prompt and generated tokens as returned + # by the OpenAI API, summed over all the requests performed with this + # model instance. 
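Since `OpenAIConfig` is a frozen dataclass, per-request overrides are applied with `dataclasses.replace` rather than by mutation. A small sketch with placeholder values:

```python
from dataclasses import replace

from outlines.models.openai import OpenAIConfig

config = OpenAIConfig(model="gpt-4o-mini", temperature=0.3, max_tokens=256)

# replace() returns a new config; the original stays untouched.
deterministic = replace(config, temperature=0.0, seed=42)
```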
+ self.prompt_tokens = 0 + self.completion_tokens = 0 + + self.format_sequence = lambda seq: seq + + def __call__( + self, + prompt: Union[str, List[str]], + max_tokens: Optional[int] = None, + stop_at: Optional[Union[List[str], str]] = None, + *, + system_prompt: Optional[str] = None, + temperature: Optional[float] = None, + samples: Optional[int] = None, + ) -> np.ndarray: + """Call the OpenAI API to generate text. + + Parameters + ---------- + prompt + A string or list of strings that will be used to prompt the model + max_tokens + The maximum number of tokens to generate + stop_at + A string or array of strings which, such that the generation stops + when they are generated. + system_prompt + The content of the system message that precedes the user's prompt. + temperature + The value of the temperature used to sample tokens + samples + The number of completions to generate for each prompt + stop_at + Up to 4 words where the API will stop the completion. + + """ + if max_tokens is None: + max_tokens = self.config.max_tokens + if stop_at is None: + stop_at = self.config.stop + if temperature is None: + temperature = self.config.temperature + if samples is None: + samples = self.config.n + + config = replace(self.config, max_tokens=max_tokens, temperature=temperature, n=samples, stop=stop_at) # type: ignore + + response, prompt_tokens, completion_tokens = generate_chat( + prompt, system_prompt, self.client, config + ) + self.prompt_tokens += prompt_tokens + self.completion_tokens += completion_tokens + + return self.format_sequence(response) + + def stream(self, *args, **kwargs): + raise NotImplementedError( + "Streaming is currently not supported for the OpenAI API" + ) + + def new_with_replacements(self, **kwargs): + new_instance = copy.copy(self) + new_instance.config = replace(new_instance.config, **kwargs) + return new_instance + + def __str__(self): + return self.__class__.__name__ + " API" + + def __repr__(self): + return str(self.config) + + +@functools.partial(vectorize, signature="(),(),(),()->(s),(),()") +async def generate_chat( + prompt: str, + system_prompt: Union[str, None], + client, + config: OpenAIConfig, +) -> Tuple[np.ndarray, int, int]: + """Call OpenAI's Chat Completion API. + + Parameters + ---------- + prompt + The prompt we use to start the generation. Passed to the model + with the "user" role. + system_prompt + The system prompt, passed to the model with the "system" role + before the prompt. + client + The API client + config + An `OpenAIConfig` instance. + + Returns + ------- + A tuple that contains the model's response(s) and usage statistics. 
+ + """ + + @error_handler + @cache() + async def call_api(prompt, system_prompt, config): + responses = await client.chat.completions.create( + messages=system_message + user_message, + **asdict(config), # type: ignore + ) + return responses.model_dump() + + system_message = ( + [{"role": "system", "content": system_prompt}] if system_prompt else [] + ) + user_message = [{"role": "user", "content": prompt}] + + responses = await call_api(prompt, system_prompt, config) + + results = np.array( + [responses["choices"][i]["message"]["content"] for i in range(config.n)] + ) + usage = responses["usage"] + + return results, usage["prompt_tokens"], usage["completion_tokens"] + + +def error_handler(api_call_fn: Callable) -> Callable: + """Handle OpenAI API errors and missing API key.""" + + def call(*args, **kwargs): + import openai + + try: + return api_call_fn(*args, **kwargs) + except ( + openai.APITimeoutError, + openai.InternalServerError, + openai.RateLimitError, + ) as e: + raise OSError(f"Could not connect to the OpenAI API: {e}") + except ( + openai.AuthenticationError, + openai.BadRequestError, + openai.ConflictError, + openai.PermissionDeniedError, + openai.NotFoundError, + openai.UnprocessableEntityError, + ) as e: + raise e + + return call + + +@functools.singledispatch +def openai(model_or_client, *args, **kwargs): + return OpenAI(model_or_client, *args, **kwargs) + + +@openai.register(str) +def openai_model( + model_name: str, + config: Optional[OpenAIConfig] = None, + **openai_client_params, +): + try: + from openai import AsyncOpenAI + except ImportError: + raise ImportError( + "The `openai` library needs to be installed in order to use Outlines' OpenAI integration." + ) + + if config is not None: + config = replace(config, model=model_name) # type: ignore + else: + config = OpenAIConfig(model=model_name) + + client = AsyncOpenAI(**openai_client_params) + + return OpenAI(client, config) + + +def azure_openai( + deployment_name: str, + model_name: Optional[str] = None, + config: Optional[OpenAIConfig] = None, + **azure_openai_client_params, +): + try: + from openai import AsyncAzureOpenAI + except ImportError: + raise ImportError( + "The `openai` library needs to be installed in order to use Outlines' Azure OpenAI integration." + ) + + if config is not None: + config = replace(config, model=deployment_name) # type: ignore + if config is None: + config = OpenAIConfig(model=deployment_name) + + client = AsyncAzureOpenAI(**azure_openai_client_params) + + return OpenAI(client, config) diff --git a/vllm/lib/python3.10/site-packages/outlines/models/tokenizer.py b/vllm/lib/python3.10/site-packages/outlines/models/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..1a5708d85fdb6a6ddcb92f924464e1077b035f2d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/models/tokenizer.py @@ -0,0 +1,31 @@ +from typing import Dict, Hashable, List, Protocol, Set, Tuple, Union + +import numpy as np +from numpy.typing import NDArray + + +class Tokenizer(Hashable, Protocol): + eos_token: str + eos_token_id: int + pad_token_id: int + vocabulary: Dict[str, int] + special_tokens: Set[str] + + def encode( + self, prompt: Union[str, List[str]] + ) -> Tuple[NDArray[np.int64], NDArray[np.int64]]: + """Translate the input prompts into arrays of token ids and attention mask.""" + ... + + def decode(self, token_ids: NDArray[np.int64]) -> List[str]: + """Translate an array of token ids to a string or list of strings.""" + ... 
+ + def convert_token_to_string(self, token: str) -> str: + """Convert a token to its equivalent string. + + This is for instance useful for BPE tokenizers where whitespaces are + represented by the special characted `Ġ`. This prevents matching a raw + token that includes `Ġ` with a string. + """ + ... diff --git a/vllm/lib/python3.10/site-packages/outlines/models/transformers.py b/vllm/lib/python3.10/site-packages/outlines/models/transformers.py new file mode 100644 index 0000000000000000000000000000000000000000..444492500e7508963953174dbd3b9d0f36853ffe --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/models/transformers.py @@ -0,0 +1,458 @@ +import dataclasses +import inspect +from typing import TYPE_CHECKING, Iterator, List, Optional, Tuple, Union + +from outlines.generate.api import GenerationParameters, SamplingParameters +from outlines.models.tokenizer import Tokenizer + +if TYPE_CHECKING: + import torch + from transformers import PreTrainedModel, PreTrainedTokenizer + + from outlines.processors import OutlinesLogitsProcessor + +__all__ = ["transformers"] + + +KVCacheType = Tuple[Tuple["torch.DoubleTensor", "torch.DoubleTensor"], ...] + + +def get_llama_tokenizer_types(): + """Get all the Llama tokenizer types/classes that need work-arounds. + + When they can't be imported, a dummy class is created. + + """ + try: + from transformers.models.llama import LlamaTokenizer + except ImportError: + + class LlamaTokenizer: # type: ignore + pass + + try: + from transformers.models.llama import LlamaTokenizerFast + except ImportError: + + class LlamaTokenizerFast: # type: ignore + pass + + try: + from transformers.models.code_llama import CodeLlamaTokenizer + except ImportError: + + class CodeLlamaTokenizer: # type: ignore + pass + + try: + from transformers.models.code_llama import CodeLlamaTokenizerFast + except ImportError: + + class CodeLlamaTokenizerFast: # type: ignore + pass + + return ( + LlamaTokenizer, + LlamaTokenizerFast, + CodeLlamaTokenizer, + CodeLlamaTokenizerFast, + ) + + +class TransformerTokenizer(Tokenizer): + """Represents a tokenizer for models in the `transformers` library.""" + + def __init__(self, tokenizer: "PreTrainedTokenizer", **kwargs): + self.tokenizer = tokenizer + self.eos_token_id = self.tokenizer.eos_token_id + self.eos_token = self.tokenizer.eos_token + + if self.tokenizer.pad_token_id is None: + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + self.pad_token_id = self.eos_token_id + else: + self.pad_token_id = self.tokenizer.pad_token_id + self.pad_token = self.tokenizer.pad_token + + self.special_tokens = set(self.tokenizer.all_special_tokens) + + self.vocabulary = self.tokenizer.get_vocab() + self.is_llama = isinstance(self.tokenizer, get_llama_tokenizer_types()) + + def encode( + self, prompt: Union[str, List[str]], **kwargs + ) -> Tuple["torch.LongTensor", "torch.LongTensor"]: + kwargs["padding"] = True + kwargs["return_tensors"] = "pt" + output = self.tokenizer(prompt, **kwargs) + return output["input_ids"], output["attention_mask"] + + def decode(self, token_ids: "torch.LongTensor") -> List[str]: + text = self.tokenizer.batch_decode(token_ids, skip_special_tokens=True) + return text + + def convert_token_to_string(self, token: str) -> str: + from transformers.file_utils import SPIECE_UNDERLINE + + string = self.tokenizer.convert_tokens_to_string([token]) + + if self.is_llama: + # A hack to handle missing spaces to HF's Llama tokenizers + if token.startswith(SPIECE_UNDERLINE) or token == "<0x20>": + return " " + string + + 
return string + + def __eq__(self, other): + if isinstance(other, type(self)): + if hasattr(self, "model_name") and hasattr(self, "kwargs"): + return ( + other.model_name == self.model_name and other.kwargs == self.kwargs + ) + else: + return other.tokenizer == self.tokenizer + return NotImplemented + + def __hash__(self): + from datasets.fingerprint import Hasher + + return hash(Hasher.hash(self.tokenizer)) + + def __getstate__(self): + state = {"tokenizer": self.tokenizer} + return state + + def __setstate__(self, state): + self.__init__(state["tokenizer"]) + + +class Transformers: + """Represents a `transformers` model.""" + + def __init__( + self, + model: "PreTrainedModel", + tokenizer: "PreTrainedTokenizer", + ): + self.model = model + self.tokenizer = TransformerTokenizer(tokenizer) + + def forward( + self, + input_ids: "torch.LongTensor", + attention_mask: "torch.LongTensor", + past_key_values: Optional[Tuple] = None, + ) -> Tuple["torch.FloatTensor", Optional[KVCacheType]]: + """Compute a forward pass through the transformer model. + + Parameters + ---------- + input_ids + The input token ids. Must be one or two dimensional. + attention_mask + The attention mask. Must be one or two dimensional. + past_key_values + A tuple of tuples containing the cached key and value tensors for each + attention head. + + Returns + ------- + The computed logits and the new cached key and value tensors. + + """ + try: + import torch + except ImportError: + ImportError( + "The `torch` library needs to be installed to use `transformers` models." + ) + assert 0 < input_ids.ndim < 3 + + if past_key_values: + input_ids = input_ids[..., -1].unsqueeze(-1) + + with torch.inference_mode(): + output = self.model( + input_ids, + attention_mask=attention_mask, + return_dict=True, + output_attentions=False, + output_hidden_states=False, + past_key_values=past_key_values, + ) + + return output.logits, output.past_key_values + + def __call__( + self, + input_ids: "torch.LongTensor", + attention_mask: "torch.LongTensor", + past_key_values: Optional[Tuple] = None, + ) -> "torch.FloatTensor": + logits, kv_cache = self.forward(input_ids, attention_mask, past_key_values) + next_token_logits = logits[..., -1, :] + + return next_token_logits, kv_cache + + def generate( + self, + prompts: Union[str, List[str]], + generation_parameters: GenerationParameters, + logits_processor: Optional["OutlinesLogitsProcessor"], + sampling_parameters: SamplingParameters, + ) -> Union[str, List[str], List[List[str]]]: + """Generate text using `transformers`. + + Arguments + --------- + prompts + A prompt or list of prompts. + generation_parameters + An instance of `GenerationParameters` that contains the prompt, + the maximum number of tokens, stop sequences and seed. All the + arguments to `SequenceGeneratorAdapter`'s `__cal__` method. + logits_processor + The logits processor to use when generating text. + sampling_parameters + An instance of `SamplingParameters`, a dataclass that contains + the name of the sampler to use and related parameters as available + in Outlines. 
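`forward()` and `__call__` above expose a single-step interface: given padded input ids and an attention mask they return next-token logits plus the updated KV cache. A sketch with a small placeholder checkpoint:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from outlines.models.transformers import Transformers

name = "gpt2"  # placeholder; any causal LM checkpoint works
model = Transformers(
    AutoModelForCausalLM.from_pretrained(name),
    AutoTokenizer.from_pretrained(name),
)

input_ids, attention_mask = model.tokenizer.encode(["The capital of France is"])
next_token_logits, kv_cache = model(input_ids, attention_mask)
next_token_id = torch.argmax(next_token_logits, dim=-1)  # greedy pick, shape (1,)
```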
+ + Returns + ------- + The generated text + """ + if isinstance(prompts, str): + # convert to 2d + input_ids, attention_mask = self.tokenizer.encode([prompts]) + else: + input_ids, attention_mask = self.tokenizer.encode(prompts) + + inputs = { + "input_ids": input_ids.to(self.model.device), + "attention_mask": attention_mask.to(self.model.device), + } + if ( + "attention_mask" + not in inspect.signature(self.model.forward).parameters.keys() + ): + del inputs["attention_mask"] + + generation_kwargs = self._get_generation_kwargs( + prompts, + generation_parameters, + logits_processor, + sampling_parameters, + ) + generated_ids = self._generate_output_seq(prompts, inputs, **generation_kwargs) + + # if single str input and single sample per input, convert to a 1D output + if isinstance(prompts, str): + generated_ids = generated_ids.squeeze(0) + + return self._decode_generation(generated_ids) + + def stream( + self, + prompts: Union[str, List[str]], + generation_parameters: GenerationParameters, + logits_processor: Optional["OutlinesLogitsProcessor"], + sampling_parameters: SamplingParameters, + ) -> Iterator[Union[str, List[str]]]: + """ + Temporary stream stand-in which implements stream() signature + and equivalent behaviour but isn't yielded until generation completes. + + TODO: implement following completion of https://github.com/huggingface/transformers/issues/30810 + """ + if isinstance(prompts, str): + # convert to 2d + input_ids, attention_mask = self.tokenizer.encode([prompts]) + else: + input_ids, attention_mask = self.tokenizer.encode(prompts) + inputs = { + "input_ids": input_ids.to(self.model.device), + "attention_mask": attention_mask.to(self.model.device), + } + if ( + "attention_mask" + not in inspect.signature(self.model.forward).parameters.keys() + ): + del inputs["attention_mask"] + + generation_kwargs = self._get_generation_kwargs( + prompts, + generation_parameters, + logits_processor, + sampling_parameters, + ) + generated_ids = self._generate_output_seq(prompts, inputs, **generation_kwargs) + + # if single str input and single sample per input, convert to a 1D output + if isinstance(prompts, str): + generated_ids = generated_ids.squeeze(0) + + for i in range(generated_ids.size(-1)): + output_group_ids = generated_ids.select(-1, i).unsqueeze(-1) + yield self._decode_generation(output_group_ids) + + def _get_generation_kwargs( + self, + prompts: Union[str, List[str]], + generation_parameters: GenerationParameters, + logits_processor: Optional["OutlinesLogitsProcessor"], + sampling_parameters: SamplingParameters, + ) -> dict: + """ + Conert outlines generation parameters into model.generate kwargs + """ + from transformers import GenerationConfig, LogitsProcessorList, set_seed + + max_new_tokens, stop_at, seed = dataclasses.astuple(generation_parameters) + sampler, num_samples, top_p, top_k, temperature = dataclasses.astuple( + sampling_parameters + ) + if max_new_tokens is None: + max_new_tokens = int(2**30) + + # global seed, not desirable + if seed is not None: + set_seed(seed) + + if logits_processor is not None: + logits_processor_list = LogitsProcessorList([logits_processor]) + else: + logits_processor_list = None + + generation_config = GenerationConfig( + max_new_tokens=max_new_tokens, + stop_strings=stop_at, + num_return_sequences=(num_samples or 1), + top_p=top_p, + top_k=top_k, + temperature=temperature, + do_sample=(sampler == "multinomial"), + num_beams=(num_samples if sampler == "beam_search" else 1), + eos_token_id=self.tokenizer.eos_token_id, + 
pad_token_id=self.tokenizer.pad_token_id, + ) + + return dict( + logits_processor=logits_processor_list, + generation_config=generation_config, + tokenizer=self.tokenizer.tokenizer, + ) + + def _generate_output_seq( + self, prompts, inputs, generation_config, **generation_kwargs + ): + input_ids = inputs["input_ids"] + output_ids = self.model.generate( + **inputs, generation_config=generation_config, **generation_kwargs + ) + + # encoder-decoder returns output_ids only, decoder-only returns full seq ids + if self.model.config.is_encoder_decoder: + generated_ids = output_ids + else: + generated_ids = output_ids[:, input_ids.shape[1] :] + + # if batch list inputs AND multiple samples per input, convert generated_id to 3D view + num_samples = generation_config.num_return_sequences or 1 + + if num_samples > 1 and isinstance(prompts, list): + batch_size = input_ids.size(0) + num_return_sequences = generation_config.num_return_sequences or 1 + generated_ids = generated_ids.view(batch_size, num_return_sequences, -1) + + return generated_ids + + def _decode_generation(self, generated_ids: "torch.Tensor"): + if len(generated_ids.shape) == 1: + return self.tokenizer.decode([generated_ids])[0] + elif len(generated_ids.shape) == 2: + return self.tokenizer.decode(generated_ids) + elif len(generated_ids.shape) == 3: + return [ + self.tokenizer.decode(generated_ids[i]) + for i in range(len(generated_ids)) + ] + else: + raise TypeError( + f"Generated outputs aren't 1D, 2D or 3D, but instead are {generated_ids.shape}" + ) + + +def transformers( + model_name: str, + device: Optional[str] = None, + model_kwargs: dict = {}, + tokenizer_kwargs: dict = {}, + model_class=None, + tokenizer_class=None, +): + """Instantiate a model from the `transformers` library and its tokenizer. + + Parameters + ---------- + model_name + The name of the model as listed on Hugging Face's model page. + device + The device(s) on which the model should be loaded. This overrides + the `device_map` entry in `model_kwargs` when provided. + model_kwargs + A dictionary that contains the keyword arguments to pass to the + `from_pretrained` method when loading the model. + tokenizer_kwargs + A dictionary that contains the keyword arguments to pass to the + `from_pretrained` method when loading the tokenizer. + + Returns + ------- + A `TransformersModel` model instance. + + """ + if model_class is None or tokenizer_class is None: + try: + from transformers import AutoModelForCausalLM, AutoTokenizer + except ImportError: + raise ImportError( + "The `transformers` library needs to be installed in order to use `transformers` models." + ) + if model_class is None: + model_class = AutoModelForCausalLM + if tokenizer_class is None: + tokenizer_class = AutoTokenizer + + if device is not None: + model_kwargs["device_map"] = device + + model = model_class.from_pretrained(model_name, **model_kwargs) + + tokenizer_kwargs.setdefault("padding_side", "left") + tokenizer = tokenizer_class.from_pretrained(model_name, **tokenizer_kwargs) + + return Transformers(model, tokenizer) + + +def mamba( + model_name: str, + device: Optional[str] = None, + model_kwargs: dict = {}, + tokenizer_kwargs: dict = {}, +): + try: + from transformers import MambaForCausalLM + + except ImportError: + raise ImportError( + "The `mamba_ssm`, `torch` and `transformer` libraries needs to be installed in order to use Mamba." 
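A usage sketch for the `transformers()` loader above; the checkpoint name and keyword arguments are placeholders forwarded to the respective `from_pretrained` calls:

```python
import torch

from outlines.models.transformers import transformers

model = transformers(
    "mistralai/Mistral-7B-Instruct-v0.2",           # placeholder checkpoint
    device="cuda",                                   # becomes device_map="cuda"
    model_kwargs={"torch_dtype": torch.bfloat16},
    tokenizer_kwargs={"use_fast": True},
)
```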
+ ) + + return transformers( + model_name=model_name, + device=device, + model_kwargs=model_kwargs, + tokenizer_kwargs=tokenizer_kwargs, + model_class=MambaForCausalLM, + ) diff --git a/vllm/lib/python3.10/site-packages/outlines/models/transformers_vision.py b/vllm/lib/python3.10/site-packages/outlines/models/transformers_vision.py new file mode 100644 index 0000000000000000000000000000000000000000..772645b80d36ee240b2a91078f10fffef0738128 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/models/transformers_vision.py @@ -0,0 +1,138 @@ +from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Union + +from outlines.generate.api import GenerationParameters, SamplingParameters +from outlines.models import Transformers + +if TYPE_CHECKING: + from outlines.processors import OutlinesLogitsProcessor + + +class TransformersVision(Transformers): + def __init__(self, model, tokenizer, processor): + super().__init__(model, tokenizer) + self.processor = processor + + def generate( # type: ignore + self, + prompts: Union[str, List[str]], + media: Union[List[Any], List[List[Any]]], + generation_parameters: GenerationParameters, + logits_processor: Optional["OutlinesLogitsProcessor"], + sampling_parameters: SamplingParameters, + ) -> Union[str, List[str], List[List[str]]]: + """Generate text using `transformers`. + + Arguments + --------- + prompts + A prompt or list of prompts. + media + A List[PIL.Image] or List[List[PIL.Image]] + generation_parameters + An instance of `GenerationParameters` that contains the prompt, + the maximum number of tokens, stop sequences and seed. All the + arguments to `SequenceGeneratorAdapter`'s `__cal__` method. + logits_processor + The logits processor to use when generating text. + sampling_parameters + An instance of `SamplingParameters`, a dataclass that contains + the name of the sampler to use and related parameters as available + in Outlines. + + Returns + ------- + The generated text + """ + inputs = self.processor( + text=prompts, images=media, padding=True, return_tensors="pt" + ).to(self.model.device) + + generation_kwargs = self._get_generation_kwargs( + prompts, + generation_parameters, + logits_processor, + sampling_parameters, + ) + generated_ids = self._generate_output_seq(prompts, inputs, **generation_kwargs) + + # if single str input and single sample per input, convert to a 1D output + if isinstance(prompts, str): + # Should always be true until NotImplementedError above is fixed + generated_ids = generated_ids.squeeze(0) + + return self._decode_generation(generated_ids) + + def stream( # type: ignore + self, + prompts: Union[str, List[str]], + media: Union[Any, List[Any]], # TODO: docstring + generation_parameters: GenerationParameters, + logits_processor: Optional["OutlinesLogitsProcessor"], + sampling_parameters: SamplingParameters, + ) -> Iterator[Union[str, List[str]]]: + raise NotImplementedError + + +def transformers_vision( + model_name: str, + model_class, + device: Optional[str] = None, + model_kwargs: dict = {}, + processor_kwargs: dict = {}, + tokenizer_class=None, + processor_class=None, +): + """Instantiate a model from the `transformers` library and its tokenizer. + + Parameters + ---------- + model_name + The name of the model as listed on Hugging Face's model page. + model_class + The `PreTrainedModel` class from transformers to use in initializing the vision model from `model_name`. 
+ https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel + device + The device(s) on which the model should be loaded. This overrides + the `device_map` entry in `model_kwargs` when provided. + model_kwargs + A dictionary that contains the keyword arguments to pass to the + `from_pretrained` method when loading the model. + processor_kwargs + A dictionary that contains the keyword arguments to pass to the + `from_pretrained` method when loading the processor. + + Returns + ------- + A `TransformersModel` model instance. + + """ + if processor_class is None or tokenizer_class is None: + try: + from transformers import AutoProcessor, AutoTokenizer + except ImportError: + raise ImportError( + "The `transformers` library needs to be installed in order to use `transformers` models." + ) + if processor_class is None: + processor_class = AutoProcessor + if tokenizer_class is None: + tokenizer_class = AutoTokenizer + + if device is not None: + model_kwargs["device_map"] = device + + model = model_class.from_pretrained(model_name, **model_kwargs) + + processor_kwargs.setdefault("padding_side", "left") + processor_kwargs.setdefault("pad_token", "[PAD]") + processor = processor_class.from_pretrained(model_name, **processor_kwargs) + + if tokenizer_class is None: + if getattr(processor, "tokenizer", None): + tokenizer = processor.tokenizer + else: + tokenizer = AutoTokenizer.from_pretrained(model_name, **processor_kwargs) + else: + tokenizer = tokenizer_class.from_pretrained(model_name, **processor_kwargs) + + return TransformersVision(model, tokenizer, processor) diff --git a/vllm/lib/python3.10/site-packages/outlines/models/vllm.py b/vllm/lib/python3.10/site-packages/outlines/models/vllm.py new file mode 100644 index 0000000000000000000000000000000000000000..778c27c6fcec35fd14832459b4daf22e413de85b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/models/vllm.py @@ -0,0 +1,227 @@ +import dataclasses +from typing import TYPE_CHECKING, List, Optional, Union + +from outlines.generate.api import GenerationParameters, SamplingParameters + +if TYPE_CHECKING: + from transformers import PreTrainedTokenizerBase + from vllm import LLM + from vllm.sampling_params import SamplingParams + + +class VLLM: + """Represents a vLLM model. + + We wrap models from model providing libraries in order to give all of + them the same interface in Outlines and allow users to easily switch + between providers. This class wraps the `vllm.LLM` class from the + `vllm` library. + + """ + + def __init__(self, model: "LLM"): + self.model = model + self.lora_request = None + + self.tokenizer = self._get_tokenizer() + + def _get_tokenizer(self): + if hasattr(self.model, "get_tokenizer"): + tokenizer = self.model.get_tokenizer() + elif hasattr(self.model, "tokenizer"): + if hasattr(self.model.tokenizer, "tokenizer"): + tokenizer = self.model.tokenizer.tokenizer + else: + tokenizer = self.model.tokenizer + else: + raise ValueError( + "The provided LLM instance neither has a " + "`tokenizer` attribute or a `get_tokenizer` method." + ) + return adapt_tokenizer(tokenizer=tokenizer) + + def generate( + self, + prompts: Union[str, List[str]], + generation_parameters: GenerationParameters, + logits_processor, + sampling_parameters: SamplingParameters, + *, + sampling_params: Optional["SamplingParams"] = None, + use_tqdm: bool = True, + ): + """Generate text using vLLM. + + Arguments + --------- + prompts + A prompt or list of prompts. 
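A sketch for the `transformers_vision()` loader above; the checkpoint and the `model_class` are illustrative assumptions, not values taken from this diff:

```python
from transformers import LlavaForConditionalGeneration

from outlines.models.transformers_vision import transformers_vision

model = transformers_vision(
    "llava-hf/llava-1.5-7b-hf",                 # placeholder vision-language checkpoint
    model_class=LlavaForConditionalGeneration,  # assumed PreTrainedModel subclass
    device="cuda",
)
# generate() then takes `media` (a list of PIL images, or a list of lists)
# alongside the usual prompt and parameter dataclasses.
```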
+ generation_parameters + An instance of `GenerationParameters` that contains the prompt, + the maximum number of tokens, stop sequences and seed. All the + arguments to `SequenceGeneratorAdapter`'s `__cal__` method. + logits_processor + The logits processor to use when generating text. + sampling_parameters + An instance of `SamplingParameters`, a dataclass that contains + the name of the sampler to use and related parameters as available + in Outlines. + sampling_params + An instance of `vllm.sampling_params.SamplingParams`. The values + passed via this dataclass supersede the values of the parameters + in `generation_parameters` and `sampling_parameters`. See the + vLLM documentation for more details: https://docs.vllm.ai/en/latest/dev/sampling_params.html. + use_tqdm + A boolean in order to display progress bar while inferencing + + Returns + ------- + The generated text, of shape `(n_batch, n_samples)`. If there are only + one batch and several samples, the list is of shape `(n_samples)`. If + this is a batch with several sequences but only one sample the list is + of shape `(n_batch)`. If there is only one sequence and one sample, a + string is returned. + + """ + from vllm.sampling_params import SamplingParams + + if sampling_params is None: + sampling_params = SamplingParams() + + max_tokens, stop_at, seed = dataclasses.astuple(generation_parameters) + + # We only update the values in `sampling_params` if they + # are specified by the user when calling the generator. + if max_tokens is not None: + sampling_params.max_tokens = max_tokens + if stop_at is not None: + if isinstance(stop_at, str): + stop_at = [stop_at] + sampling_params.stop = stop_at + if seed is not None: + sampling_params.seed = seed + + sampling_params.logits_processors = ( + [logits_processor] if logits_processor is not None else [] + ) + + sampler, num_samples, top_p, top_k, temperature = dataclasses.astuple( + sampling_parameters + ) + + # We only update the values in `sampling_params` that + # were not specified by the user. + if sampling_params.n == 1: + sampling_params.n = num_samples + sampling_params.best_of = num_samples + if top_p is not None and sampling_params.top_p == 1.0: + sampling_params.top_p = top_p + if top_k is not None and sampling_params.top_k == -1: + sampling_params.top_k = top_k + # TODO: remove this if statement once fixed + # https://github.com/vllm-project/vllm/issues/5404#issuecomment-2175972897 + if top_k == 1: + sampling_params.repetition_penalty = 0 + if temperature is not None and sampling_params.temperature == 1.0: + sampling_params.temperature = temperature + if sampler == "beam_search": + sampling_params.use_beam_search = True + + results = self.model.generate( + prompts, + sampling_params=sampling_params, + lora_request=self.lora_request, + use_tqdm=use_tqdm, + ) + results = [[sample.text for sample in batch.outputs] for batch in results] + + batch_size = len(results) + sample_size = len(results[0]) + + if batch_size == 1 and sample_size == 1: + return results[0][0] + elif batch_size == 1: + return results[0] + elif sample_size == 1: + return [batch[0] for batch in results] + + return results + + def stream(self, *args, **kwargs): + """Return a text generator. + + Streaming is not yet available for `vllm.LLM`. + + TODO: Implement the streaming functionality ourselves. + + """ + raise NotImplementedError( + "Streaming is not available for the vLLM integration." 
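The `generate()` method above only copies a value from the Outlines dataclasses into `SamplingParams` when the corresponding field is still at its vLLM default, so anything set explicitly on `SamplingParams` wins. A sketch with a placeholder checkpoint and prompt, using the `vllm()` loader defined just below:

```python
from outlines.generate.api import GenerationParameters, SamplingParameters
from outlines.models.vllm import vllm
from vllm.sampling_params import SamplingParams

model = vllm("facebook/opt-125m")  # placeholder checkpoint

gen_params = GenerationParameters(None, None, None)  # max_tokens, stop_at, seed
samp_params = SamplingParameters("multinomial", 1, None, None, 0.7)  # sampler, n, top_p, top_k, temperature

text = model.generate(
    "Summarise the theory of relativity in one sentence.",
    gen_params,
    None,                                            # no structured logits processor
    samp_params,
    sampling_params=SamplingParams(temperature=0.2, max_tokens=128),  # explicit values win
    use_tqdm=False,
)
```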
+ ) + + def load_lora(self, adapter_path: Optional[str]): + from vllm.lora.request import LoRARequest + + if adapter_path is None: + self.lora_request = None + else: + self.lora_request = LoRARequest(adapter_path, 1, adapter_path) + + +def vllm(model_name: str, **vllm_model_params): + """Load a vLLM model. + + Arguments + --------- + model_name + The name of the model to load from the HuggingFace hub. + vllm_model_params + vLLM-specific model parameters. See the vLLM code for the full list: + https://github.com/vllm-project/vllm/blob/main/vllm/entrypoints/llm.py + + """ + from vllm import LLM + + model = LLM(model_name, **vllm_model_params) + + return VLLM(model) + + +def adapt_tokenizer(tokenizer: "PreTrainedTokenizerBase") -> "PreTrainedTokenizerBase": + """Adapt a tokenizer to use to compile the FSM. + + The API of Outlines tokenizers is slightly different to that of `transformers`. In + addition we need to handle the missing spaces to Llama's tokenizer to be able to + compile FSMs for this model. + + Parameters + ---------- + tokenizer + The tokenizer of the model. + + Returns + ------- + PreTrainedTokenizerBase + The adapted tokenizer. + """ + from transformers import SPIECE_UNDERLINE + + tokenizer.vocabulary = tokenizer.get_vocab() + tokenizer.special_tokens = set(tokenizer.all_special_tokens) + + def convert_token_to_string(token: Union[str, bytes]) -> str: + string = tokenizer.convert_tokens_to_string([token]) + + # A hack to handle missing spaces to HF's Llama tokenizers + if ( + type(token) is str + and token.startswith(SPIECE_UNDERLINE) + or token == "<0x20>" + ): + return " " + string + + return string + + tokenizer.convert_token_to_string = convert_token_to_string + + return tokenizer diff --git a/vllm/lib/python3.10/site-packages/outlines/processors/__init__.py b/vllm/lib/python3.10/site-packages/outlines/processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0f0f829b54b34505115479ee5c1dc8e773d9e28 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/processors/__init__.py @@ -0,0 +1,7 @@ +from .structured import ( + CFGLogitsProcessor, + GuideLogitsProcessor, + JSONLogitsProcessor, + OutlinesLogitsProcessor, + RegexLogitsProcessor, +) diff --git a/vllm/lib/python3.10/site-packages/outlines/processors/__pycache__/base_logits_processor.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/processors/__pycache__/base_logits_processor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1245cf9806742b6867da824d238bd2d8b1857e17 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/processors/__pycache__/base_logits_processor.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/processors/__pycache__/structured.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/processors/__pycache__/structured.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd423e39d76e7fe99aff119cb4013679355bf0c8 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/processors/__pycache__/structured.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/outlines/processors/base_logits_processor.py b/vllm/lib/python3.10/site-packages/outlines/processors/base_logits_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..44b55af2e465adeea7c9ddf26211327c5a5c15fe --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/processors/base_logits_processor.py @@ -0,0 +1,159 @@ +from 
abc import abstractmethod +from typing import TYPE_CHECKING, List, Protocol, Type, Union + +import numpy as np +import torch +from numpy.typing import NDArray + +if TYPE_CHECKING: + import mlx.core as mx + + +Array = Union[NDArray, torch.Tensor, List, "mx.array"] + + +def is_mlx_array_type(array_type): + try: + import mlx.core as mx + except ImportError: + return False + return issubclass(array_type, mx.array) + + +def is_jax_array_type(array_type): + try: + import jaxlib + except ImportError: + return False + return issubclass(array_type, jaxlib.xla_extension.ArrayImpl) or isinstance( + array_type, jaxlib.xla_extension.ArrayImpl + ) + + +class OutlinesLogitsProcessor(Protocol): + """ + Base class for logits processors which normalizes types of logits: + - ndarray (used by llama-cpp-python), converted to torch.Tensor + - mlx.core.array (used by mlx-lm), converted to torch.Tensor + - torch.Tensor (used by everything else) + + Normalization of types and conversion to torch.Tensor + doesn't move memory, it just casts the type. + + Normalizing the types allows all logits processors inheriting from this class + to implement a single method for all the business logic: `process_logits()` + """ + + @abstractmethod + def process_logits( + self, input_ids: List[List[int]], logits: torch.Tensor + ) -> torch.Tensor: + """ + input_ids and logits are always 2D tensors for handling a batch of sequences. + + - input_ids -> List[List[tokens]] + - logits -> 2D_Tensor[logit floats] + + Important to keep in mind when designing universal logits processors + - logits processors are only used once and never re-applied for a new sequence generator + - Some models only pass output_ids, some models such as llamacpp and transformers prefix with input_ids + - Some sampling methods, such as beam search, result in unstable sequence ordering in models like vLLM + """ + pass + + @torch.no_grad() + def __call__( + self, + input_ids: Array, + logits: Array, + ) -> Array: + """ + Apply logits processor + + 1) Unify type + - convert input_ids: either ndarray, mlx array, List[int], or Tensor -> List[List[int]] + - convert logits: either ndarray, mlx array, or Tensor -> 2D float Tensor + 2) Unify shape, ensure logits and input_ids are 2D + 3) Call self.process_logits() to perform business logic + 4) Cast logits back to original array library type + """ + # ensure logits are torch Tensors + torch_logits = self._to_torch(logits) + input_ids = self._to_torch(input_ids) + + assert torch_logits.shape[:-1] == input_ids.shape[:-1] + + # Guarantee logits are passed as 2D tensors, then convert back to the original (1D or 2D) shape + if len(torch_logits.shape) == 2: + processed_logits = self.process_logits(input_ids, torch_logits) + elif len(torch_logits.shape) == 1: + processed_logits = self.process_logits( + input_ids.unsqueeze(0), torch_logits.unsqueeze(0) + ).squeeze(0) + + # return logits as passed array type + return self._from_torch(processed_logits, type(logits)) + + @staticmethod + def _to_torch(tensor_like: Array) -> torch.Tensor: + """Convert various types to torch.Tensor.""" + if isinstance(tensor_like, torch.Tensor): + return tensor_like + + elif isinstance(tensor_like, np.ndarray): + return torch.from_numpy(tensor_like) + + elif isinstance(tensor_like, (list, tuple)): + return torch.tensor(tensor_like) + + elif is_mlx_array_type(type(tensor_like)): + import mlx.core as mx + + # https://ml-explore.github.io/mlx/build/html/usage/numpy.html#pytorch + return torch.from_dlpack( + np.array(tensor_like.astype(mx.float32), copy=False) + ) + +
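+ # The JAX branch below also converts through DLPack, so the tensor memory is shared with torch where supported rather than copied.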
elif is_jax_array_type(type(tensor_like)): + import jax + + torch_tensor = torch.from_dlpack(jax.dlpack.to_dlpack(tensor_like)) + return torch_tensor + + else: + raise TypeError( + "LogitsProcessor must be called with either np.NDArray, " + "torch.Tensor, list, or mlx.core.array typed logits. " + f"Logits type: `{type(tensor_like)}`" + ) + + @staticmethod + def _from_torch(tensor: torch.Tensor, target_type: Type) -> Array: + """Convert torch.Tensor to the specified target type.""" + if target_type == torch.Tensor: + return tensor + + elif target_type == np.ndarray: + return tensor.detach().numpy() + + elif target_type == list: + return tensor.detach().tolist() + + elif target_type == tuple: + return tuple(tensor.detach().tolist()) + + elif is_mlx_array_type(target_type): + import mlx.core as mx + + # numpy doesn't support bfloat16, mlx doesn't support direct conversion from torch + return mx.array(tensor.float().numpy()) + + elif is_jax_array_type(target_type): + import jax + + return jax.dlpack.from_dlpack(tensor) + + else: + raise TypeError( + f"Failed to convert torch tensors to target_type `{target_type}`" + ) diff --git a/vllm/lib/python3.10/site-packages/outlines/processors/structured.py b/vllm/lib/python3.10/site-packages/outlines/processors/structured.py new file mode 100644 index 0000000000000000000000000000000000000000..64892b73f76f0e36855f2f67a44e11c3515edcd9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/processors/structured.py @@ -0,0 +1,247 @@ +""" + _______________________________ +/ Don't want to self-host? \ +\\ Try .json at http://dottxt.co / + ------------------------------- + \\ ^__^ + \\ (oo)\\_______ + (__)\\ )\\/\ + ||----w | + || || + +Copyright 2024- the Outlines developers + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import math +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union + +import torch +from outlines_core.fsm.json_schema import build_regex_from_schema +from pydantic import BaseModel + +from outlines.fsm.guide import CFGGuide, Guide, RegexGuide +from outlines.fsm.json_schema import convert_json_schema_to_str + +from .base_logits_processor import OutlinesLogitsProcessor + +if TYPE_CHECKING: + from outlines.models.tokenizer import Tokenizer + + +class GuideLogitsProcessor(OutlinesLogitsProcessor): + """Bias generation using a finite + + Attributes + ---------- + tokenizer + The tokenizer used to convert tokens to ids. + guide + The `outlines.fsm.Guide` which is used to bias the logits. + """ + + tokenizer: "Tokenizer" + guide: Guide + _guide_states: Dict[int, Any] + _seq_start_idx: Optional[int] + + def __init__(self, tokenizer: "Tokenizer", guide: Guide): + """A Guide-based logits processor. + + Parameters + ---------- + tokenizer + The tokenizer used to convert tokens to ids. + guide + The `outlines.fsm.Guide. which is used to bias the logits. 
+ """ + self.tokenizer = tokenizer + self.guide = guide + self._guide_states = {hash(tuple([])): self.guide.initial_state} + self._seq_start_idx = None + + def process_logits( + self, input_ids: torch.LongTensor, logits: torch.FloatTensor + ) -> torch.Tensor: + """Use the Guide to bias the logits before sampling the next token. + + Parameters + ---------- + input_ids + The input token ids. + logits + The logits. + + Returns + ------- + torch.Tensor + The biased logits. + """ + if self._seq_start_idx is None: + self._seq_start_idx = len(input_ids[0]) + + sequence_states: List[int] = [] # vector of states corresponding to `input_ids` + + for seq_ids in input_ids: + gen_ids = seq_ids[self._seq_start_idx :] + curr_state_key = hash(tuple(gen_ids.tolist())) + + if curr_state_key not in self._guide_states: + prev_state = self._guide_states[hash(tuple(gen_ids[:-1].tolist()))] + curr_state = self.guide.get_next_state(prev_state, gen_ids[-1].item()) + self._guide_states[curr_state_key] = curr_state + + sequence_states.append(self._guide_states[curr_state_key]) + + mask = torch.ones_like(logits, dtype=torch.bool) + + allowed_tokens_batch = [] + batch_indices = [] + for i, guide_state in enumerate(sequence_states): + allowed_tokens = self.guide.get_next_instruction(guide_state).tokens.to( + mask.device, non_blocking=True + ) + allowed_tokens_batch.append(allowed_tokens) + batch_indices.append( + torch.full_like(allowed_tokens, i) + ) # Store batch index for each allowed token + + allowed_tokens_concat = torch.cat(allowed_tokens_batch) + batch_indices_concat = torch.cat(batch_indices) + + mask[batch_indices_concat, allowed_tokens_concat] = False + logits.masked_fill_(mask, float("-inf")) + + return logits + + def copy(self) -> "GuideLogitsProcessor": + """Return a copy of the logits processor.""" + return GuideLogitsProcessor(tokenizer=self.tokenizer, guide=self.guide.copy()) + + +class RegexLogitsProcessor(GuideLogitsProcessor): + """Bias generation based on a regular expression. + + Attributes + ---------- + tokenizer + The tokenizer used to convert tokens to ids. + guide + The `outlines.fsm.RegexGuide. which is used to bias the logits. + """ + + def __init__(self, regex_string: str, tokenizer: "Tokenizer"): + """Compile the RegexGuide that drives the regex-guided generation. + + Parameters + ---------- + regex_string + A string that represents a regular expression + tokenizer + An Outlines tokenizer + """ + guide = RegexGuide.from_regex(regex_string, tokenizer) + super().__init__(tokenizer=tokenizer, guide=guide) + + +class JSONLogitsProcessor(RegexLogitsProcessor): + """Bias generation based on a JSON schema. + + Attributes + ---------- + tokenizer + The tokenizer used to convert tokens to ids. + guide + The `outlines.fsm.RegexGuide. which is used to bias the logits. + """ + + def __init__( + self, + schema: Union[dict, Type[BaseModel], str], + tokenizer: "Tokenizer", + whitespace_pattern: Optional[str] = None, + ): + """Compile the Guide that drives the JSON-guided generation. + + Parameters + ---------- + schema + A JSON schema that encodes the structure we want the model to generate. + tokenizer + The tokenizer used to convert tokens to ids. + whitespace_pattern + Pattern to use for JSON syntactic whitespace (doesn't impact string + literals). 
For example, to allow only a single space or newline with + `whitespace_pattern=r"[\n ]?"` + """ + schema_str = convert_json_schema_to_str(json_schema=schema) + regex_string = build_regex_from_schema(schema_str, whitespace_pattern) + super().__init__(regex_string=regex_string, tokenizer=tokenizer) + + +class CFGLogitsProcessor(GuideLogitsProcessor): + """Bias generation based on a context-free grammar. + + Attributes + ---------- + tokenizer + The tokenizer used to convert tokens to ids. + guide + The `outlines.fsm.CFGGuide. which is used to bias the logits. + """ + + guide: CFGGuide + + def __init__(self, cfg_str: str, tokenizer: "Tokenizer"): + """Compile the CFGGuide that drives the CFG-guided generation. + + Parameters + ---------- + cfg_str + A string that represents a grammar + tokenizer + The tokenizer used to convert tokens to ids. + """ + cfg_guide = CFGGuide(cfg_string=cfg_str, tokenizer=tokenizer) + super().__init__(tokenizer=tokenizer, guide=cfg_guide) + + def process_logits( + self, input_ids: torch.LongTensor, logits: torch.Tensor + ) -> torch.Tensor: + """Same behavior as GuideLogitsProcessor, but uses rejection sampling""" + if self._seq_start_idx is None: + self._seq_start_idx = len(input_ids[0]) + + sequence_states: List = [] # vector of states corresponding to `input_ids` + + for seq_ids in input_ids: + gen_ids = seq_ids[self._seq_start_idx :] + curr_state_key = hash(tuple(gen_ids.tolist())) + + if curr_state_key not in self._guide_states: + prev_state = self._guide_states[hash(tuple(gen_ids[:-1].tolist()))] + curr_state = self.guide.get_next_state(prev_state, gen_ids[-1].item()) + self._guide_states[curr_state_key] = curr_state + + sequence_states.append(self._guide_states[curr_state_key]) + + mask = torch.full_like(logits, -math.inf) + for i, guide_state in enumerate(sequence_states): + first_legal_token = next( + self.guide.iter_valid_token_ids( + guide_state, torch.argsort(logits[i], descending=True) + ) + ) + mask[i, [first_legal_token]] = logits[i, [first_legal_token]] + + return mask diff --git a/vllm/lib/python3.10/site-packages/outlines/samplers.py b/vllm/lib/python3.10/site-packages/outlines/samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..b1421971f6bfc12c5d05652a36d7bd364fb608fe --- /dev/null +++ b/vllm/lib/python3.10/site-packages/outlines/samplers.py @@ -0,0 +1,324 @@ +import math +from typing import TYPE_CHECKING, Callable, Optional, Protocol, Tuple + +if TYPE_CHECKING: + import torch + + +class Sampler(Protocol): + samples: int + + def __call__( + self, + next_token_logits: "torch.DoubleTensor", + sequence_weights: "torch.DoubleTensor", + rng: "torch.Generator", + ) -> "torch.DoubleTensor": + ... + + +class GreedySampler: + """Greedy Sampling algorithm. + + Greedy sampling consists in choosing the token with the largest + likelihood at every step. + + We don't allow more than one sample. We could attribute this a meaning, for + instance the k-th sample represents the k-th most likely token. In which + case it would be equivalent to beam search without the sequence weights. + + Attributes + ---------- + samples + The number of samples taken for each input sequence. + + """ + + def __init__(self): + self.samples = 1 + + def __call__( + self, + next_token_logits: "torch.DoubleTensor", + sequence_weights: "torch.DoubleTensor", + _, + ) -> "torch.DoubleTensor": + """Call the greedy sampler. 
+ + Parameters + ---------- + next_token_logits + A tensor of shape ``(n_seqs, vocab_size,)`` that represents the + probability distribution of the next token over the vocabulary. + sequence_weights + A tensor of shape ``(n_seqs,)`` that represents the cumulative + weight of each sequence. + rng + A random number generator. + + Returns + ------- + A tuple with an array that contains the ids of the sampled tokens of + shape ``(n_seqs, 1)``, an array that contains the ancestors of each + sampled id of shape ``(n_seqs,)`` and an array that contains the updated + cumulative weights of each sequence of shape ``(n_seqs,)``. + + """ + import torch + + logprobs = torch.nn.functional.log_softmax(next_token_logits, dim=-1) + next_token_ids = torch.argmax(logprobs, dim=-1, keepdim=True) + + ancestors = torch.arange( + next_token_logits.shape[0], device=next_token_logits.device + ) + weights = sequence_weights + torch.gather(logprobs, 1, next_token_ids).squeeze() + + return next_token_ids, ancestors, weights + + +greedy = GreedySampler + + +class MultinomialSampler: + """Multinomial sampling algorithm. + + Multinomial sampling consists in randomly sampling the next token assuming + its distribution is a Categorical distribution parametrized by the + next-token logits. + + + Attributes + ---------- + samples + The number of samples taken for each input sequence. + + """ + + def __init__( + self, + samples: int = 1, + *, + top_k: Optional[int] = None, + top_p: Optional[float] = None, + temperature: Optional[float] = None, + ): + self.samples = samples + self.top_k = top_k + self.top_p = top_p + self.temperature = temperature + + self.logits_processors = [] + if top_k is not None: + self.logits_processors.append(keep_top_k_logits(top_k)) + elif top_p is not None: + self.logits_processors.append(keep_top_p_logits(top_p)) + + if temperature is not None: + self.logits_processors.append(rescale_logits(temperature)) + + def __call__( + self, + next_token_logits: "torch.DoubleTensor", + sequence_weights: "torch.DoubleTensor", + rng: "torch.Generator", + ) -> Tuple["torch.DoubleTensor", "torch.DoubleTensor", "torch.DoubleTensor"]: + """Call the multinomial sampler. + + Parameters + ---------- + next_token_logits + A tensor of shape ``(n_seqs, vocab_size,)`` that represents the + probability distribution of the next token over the vocabulary. + sequence_weights + A tensor of shape ``(n_seqs,)`` that represents the cumulative + weight of each sequence. + rng + A random number generator. + + Returns + ------- + A tuple with an array that contains the ids of the sampled tokens of + shape ``(n_seqs, 1)``, an array that contains the ancestors of each + sampled id of shape ``(n_seqs,)`` and an array that contains the updated + cumulative weights of each sequence of shape ``(n_seqs,)``. 
+ + """ + import torch + + altered_next_token_logits = next_token_logits + for logit_processor in self.logits_processors: + altered_next_token_logits = logit_processor(next_token_logits) + + probs = torch.nn.functional.softmax(altered_next_token_logits, dim=-1) + next_token_ids = torch.multinomial(probs, num_samples=1, generator=rng) + + logprobs = torch.nn.functional.log_softmax(altered_next_token_logits, dim=-1) + ancestors = torch.arange( + altered_next_token_logits.shape[0], device=next_token_logits.device + ) + weights = sequence_weights + torch.gather(logprobs, 1, next_token_ids).squeeze() + + return next_token_ids, ancestors, weights + + +multinomial = MultinomialSampler + + +def keep_top_k_logits(k: int) -> Callable[["torch.Tensor"], "torch.Tensor"]: + """Build a function that masks logits values smaller than the top `k` ones. + + Parameters + ---------- + k + The ranking below which logit values are replaced by `-math.inf`. + + """ + import torch + + if not isinstance(k, int) or k < 1: + raise ValueError(f"`k` must be a strictly positive integers, got {k} instead.") + + def logits_processor(logits: torch.Tensor) -> torch.Tensor: + num_to_keep = min(k, logits.size(-1)) + mask_idx = logits < torch.topk(logits, num_to_keep)[0][..., -1, None] + return logits.masked_fill(mask_idx, -math.inf) + + return logits_processor + + +def keep_top_p_logits(p: float) -> Callable[["torch.Tensor"], "torch.Tensor"]: + """Build a function that masks the lowest probability tokens whose + cumulative probability is below a certain threshold. + + Parameters + ---------- + p + The value of the threshold. We keep the highest probability tokens whose + cumulative distribution is greater than or equal to `p` and mask the + others. Its value must be between 0 (excluded) and 1 (included). + + """ + import torch + + if p <= 0.0 or p > 1.0: + raise ValueError( + f"`p` must be a floating point number between 0 (excluded) and 1 (included), got {p} instead." + ) + + def logits_processor(logits: torch.Tensor) -> torch.Tensor: + sorted_logits, sorted_idx = torch.sort(logits, descending=False) + cumulative_probabilties = torch.nn.functional.softmax( + sorted_logits, dim=-1 + ).cumsum(dim=-1) + + sorted_masked_idx = cumulative_probabilties <= (1 - p) + mask_idx = torch.scatter(sorted_masked_idx, 1, sorted_idx, sorted_masked_idx) + return logits.masked_fill(mask_idx, -math.inf) + + return logits_processor + + +def rescale_logits(temperature: float) -> Callable[["torch.Tensor"], "torch.Tensor"]: + """Build a function that rescales the token probabilities exponentially. + + Parameters + ---------- + temperature + The value by which we rescale the logits. + + """ + + if not isinstance(temperature, float) or temperature < 0.0: + raise ValueError( + f"`temperature` must be a strictly positive floating point number, got {temperature} instead." + ) + elif temperature == 0.0: + raise ValueError( + "Please use the greedy sampler instead of setting the temperature to 0." + ) + + def logits_processor(logits: "torch.Tensor") -> "torch.Tensor": + return logits / temperature + + return logits_processor + + +class BeamSearchSampler: + """Beam Search sampling algorithm. + + Attributes + ---------- + samples + The number of samples taken for each input sequence. Equivalent to the + number of beams. 
+ """ + + def __init__(self, beams: int = 1): + self.samples = beams + + def __call__( + self, + next_token_logits: "torch.DoubleTensor", + sequence_weights: "torch.DoubleTensor", + _, + ) -> Tuple["torch.DoubleTensor", "torch.DoubleTensor", "torch.DoubleTensor"]: + """Call the beam search sampler. + + Parameters + ---------- + next_token_logits + A tensor of shape ``(n_seqs, vocab_size,)`` that represents the + probability distribution of the next token over the vocabulary. + sequence_weights + A tensor of shape ``(n_seqs,)`` that represents the cumulative + weight of each sequence. + rng + A random number generator. + + Returns + ------- + A tuple with an array that contains the ids of the sampled tokens of + shape ``(n_seqs, 1)``, an array that contains the ancestors of each + sampled id of shape ``(n_seqs,)`` and an array that contains the updated + cumulative weights of each sequence of shape ``(n_seqs,)``. + + """ + import torch + + logprobs = torch.nn.functional.log_softmax(next_token_logits, dim=-1) + weights = logprobs + sequence_weights.unsqueeze(1).expand_as(next_token_logits) + + # Flatten scores to (n_batch, n_samples * vocab_size) + # and find the top-k weights for each batch. + batch_size = next_token_logits.shape[0] // self.samples + vocab_size = next_token_logits.shape[-1] + weights = weights.view(batch_size, self.samples * vocab_size) + + # If the weights are all equal to 0 we are at the beginning of the search + # and thus only need to sample from one set of token logits for each + # batch. + if torch.all(sequence_weights == 0): + weights = weights[:, :vocab_size] + + weights, indices = torch.topk( + weights, self.samples, dim=1, largest=True, sorted=True + ) + + ancestors = torch.div(indices, vocab_size, rounding_mode="floor") + next_token_ids = indices % vocab_size + + # Re-shape the weights, next_token_ids and ancestors to (n_batch * n_samples, 1) + first_batch_idx = torch.arange( + 0, batch_size * self.samples, self.samples, device=next_token_logits.device + ).unsqueeze(1) + ancestors = ancestors + first_batch_idx + + ancestors = ancestors.view(self.samples * batch_size) + weights = weights.view(self.samples * batch_size) + next_token_ids = next_token_ids.view(self.samples * batch_size, 1) + + return next_token_ids, ancestors, weights + + +beam_search = BeamSearchSampler diff --git a/vllm/lib/python3.10/site-packages/outlines/serve/__init__.py b/vllm/lib/python3.10/site-packages/outlines/serve/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/outlines/serve/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/outlines/serve/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf76a97d61c44f8127f78336ecfb3db9531f26f7 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/outlines/serve/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/METADATA 
b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..01219366ba3d3783b4a8c2b9f7ec1dd367d9031d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/METADATA @@ -0,0 +1,191 @@ +Metadata-Version: 2.1 +Name: partial-json-parser +Version: 0.2.1.1.post4 +Summary: Parse partial JSON generated by LLM +Keywords: JSON,parser,LLM,nlp +Home-page: https://promplate.dev/partial-json-parser +Author-Email: Muspi Merol +License: MIT +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Project-URL: Repository, https://github.com/promplate/partial-json-parser +Project-URL: Homepage, https://promplate.dev/partial-json-parser +Requires-Python: >=3.6 +Provides-Extra: playground +Requires-Dist: rich; extra == "playground" +Description-Content-Type: text/markdown + +# Partial JSON Parser + +Sometimes we need **LLMs (Large Language Models)** to produce **structural information** instead of natural language. The easiest way is to use JSON. + +But before receiving the last token of the response, the JSON is broken, which means you can't use `JSON.parse` to decode it. But we still want to stream the data to the user. + +Here comes `partial-json-parser`, a lightweight and customizable library for parsing partial JSON strings. Here is a [demo](https://promplate.dev/partial-json-parser). + +(Note that there is [a JavaScript implementation](https://github.com/promplate/partial-json-parser-js) too) + +## Installation + +```sh +pip install partial-json-parser # or poetry / pdm / uv +``` + +`partial-json-parser` is implemented purely in Python, with good type hints. It is zero-dependency and works with Python 3.6+. + +You can also install its demo playground by installing `rich`, or: + +```sh +pip install partial-json-parser[playground] +``` + +Then run the `json-playground` command in your terminal to try the parser interactively. + +## Usage + +```py +from partial_json_parser import loads + +>>> loads('{"key": "v') # {'key': 'v'} +``` + +Alternatively, you can use `ensure_json` to get the completed JSON string: + +```py +from partial_json_parser import ensure_json + +>>> ensure_json('{"key": "v') # '{"key": "v"}' +``` + +### Detailed Usage + +You can import the `loads` function and the `Allow` object from the library like this: + +```py +from partial_json_parser import loads, Allow +``` + +The `Allow` object is just an Enum of options. It determines which types are allowed to be partial; types not included in `allow` only appear once their completion can be ensured. + +### Parsing complete / partial JSON strings + +The `loads` function works just like the built-in `json.loads` when parsing a complete JSON string: + +```py +result = loads('{"key":"value"}') +print(result) # Outputs: {'key': 'value'} +``` + +You can parse a partial JSON string by passing an additional parameter to the `loads` function. This parameter is a **bitwise OR** of the constants from the `Allow` flag: + +(Note that you can directly import the constants you need from `partial-json-parser`) + +```py +from partial_json_parser import loads, Allow, STR, OBJ + +result = loads('{"key": "v', STR | OBJ) +print(result) # Outputs: {'key': 'v'} +``` + +In this example, `Allow.STR` tells the parser that it's okay if a string is incomplete, and `Allow.OBJ` tells it the same for a dict. The parser then tries to return as much data as it can.
+ +If you don't allow partial strings, then it will not add `"key"` to the object because `"v` is not closed: + +```py +result = loads('{"key": "v', OBJ) +print(result) # Outputs: {} + +result = loads('{"key": "value"', OBJ) +print(result) # Outputs: {'key': 'value'} +``` + +Similarly, you can parse partial lists or even partial special values if you allow it: + +(Note that `allow` defaults to `Allow.ALL`) + +```py +result = loads('[ {"key1": "value1", "key2": [ "value2') +print(result) # Outputs: [{'key1': 'value1', 'key2': ['value2']}] + +result = loads("-Inf") +print(result) # Outputs: -inf +``` + +### Handling malformed JSON + +If the JSON string is malformed, `loads` will raise an error: + +```py +loads("wrong") # MalformedJSON: Malformed node or string on line 1 +``` + +## API Reference + +### loads(json_string, [allow_partial], [parser]) + +- `json_string` ``: The (incomplete) JSON string to parse. +- `allow_partial` ``: Specify what kind of partialness is allowed during JSON parsing (default: `Allow.ALL`). +- `parser` `(str) -> JSON`: An ordinary JSON parser. Default is `json.loads`. + +Completes the JSON string and parses it with the `parser` function. + +Returns the parsed Python value. + +Aliases: `decode`, `parse_json`. + +### ensure_json(json_string, [allow_partial]) + +- `json_string` ``: The (incomplete) JSON string to complete. +- `allow_partial` ``: Specify what kind of partialness is allowed during JSON parsing (default: `Allow.ALL`). + +Returns the completed JSON string. + +### fix(json_string, [allow_partial]) + +- `json_string` ``: The (incomplete) JSON string to complete. +- `allow_partial` ``: Specify what kind of partialness is allowed during JSON parsing (default: `Allow.ALL`). + +Returns a tuple of a slice of the input string and the completion. + +Note that this is a low-level API, only useful for debugging and demonstration. + +### Allow + +Enum class that specifies what kind of partialness is allowed during JSON parsing. It has the following members: + +- `STR`: Allow partial string. +- `NUM`: Allow partial number. +- `ARR`: Allow partial array. +- `OBJ`: Allow partial object. +- `NULL`: Allow partial null. +- `BOOL`: Allow partial boolean. +- `NAN`: Allow partial NaN. +- `INFINITY`: Allow partial Infinity. +- `_INFINITY`: Allow partial -Infinity. +- `INF`: Allow both partial Infinity and -Infinity. +- `SPECIAL`: Allow all special values. +- `ATOM`: Allow all atomic values. +- `COLLECTION`: Allow all collection values. +- `ALL`: Allow all values. + +## Testing + +To run the tests for this library, you should clone the repository and install the dependencies: + +```sh +git clone https://github.com/promplate/partial-json-parser.git +cd partial-json-parser +pdm install +``` + +Then, you can run the tests using [Hypothesis](https://hypothesis.works/) and [Pytest](https://pytest.org/): + +```sh +pdm test +``` + +Please note that while we strive to cover as many edge cases as possible, it's always possible that some cases might not be covered. + +## License + +This project is licensed under the MIT License.
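+
+## Streaming example
+
+The introduction above describes the typical use case: JSON arriving token by token from an LLM. The minimal sketch below simulates that by re-parsing a growing buffer after each chunk; the chunk boundaries are invented for illustration, and `allow` is left at its default (`Allow.ALL`).
+
+```py
+from partial_json_parser import loads
+
+# Simulated response fragments; a real stream would come from an LLM client.
+chunks = ['{"name": "Ada', '", "skills": ["pars', 'ing"], "score": 9', '9}']
+
+buffer = ""
+for chunk in chunks:
+    buffer += chunk
+    # Best-effort parse of everything received so far.
+    print(loads(buffer))
+```
+
+Each iteration prints the most complete value that can be recovered from the partial string so far, ending with the fully parsed object once the last chunk arrives.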
diff --git a/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/RECORD b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..e166aea7ce2f8acd56e0ccfd49c24873c878bb97 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/RECORD @@ -0,0 +1,23 @@ +../../../bin/json-playground,sha256=5EzxxTpyQ8XtxvNDpviGxpdc3Ym-4CmnzTcTdpfdywk,239 +partial_json_parser-0.2.1.1.post4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +partial_json_parser-0.2.1.1.post4.dist-info/METADATA,sha256=KtaHq6x6G2X9ToOiDOFUxXXS-Pyi72Lqff3FbRIKvCk,6182 +partial_json_parser-0.2.1.1.post4.dist-info/RECORD,, +partial_json_parser-0.2.1.1.post4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +partial_json_parser-0.2.1.1.post4.dist-info/WHEEL,sha256=SOP-4bEE0jbVaCHQGVvF08uWxk5rcSsfEybvoQVHlD8,90 +partial_json_parser-0.2.1.1.post4.dist-info/entry_points.txt,sha256=5kq4IGObtkNmUXXIgrhdd1AYGCkZYUZrMrwBMEgmyOc,73 +partial_json_parser/__init__.py,sha256=e1VyiU5gI_I9VF8h1Mb6hlfLWZWKFDQGFr7z-0I4ziU,212 +partial_json_parser/__pycache__/__init__.cpython-310.pyc,, +partial_json_parser/__pycache__/options.cpython-310.pyc,, +partial_json_parser/__pycache__/playground.cpython-310.pyc,, +partial_json_parser/core/__pycache__/api.cpython-310.pyc,, +partial_json_parser/core/__pycache__/complete.cpython-310.pyc,, +partial_json_parser/core/__pycache__/exceptions.cpython-310.pyc,, +partial_json_parser/core/__pycache__/myelin.cpython-310.pyc,, +partial_json_parser/core/__pycache__/options.cpython-310.pyc,, +partial_json_parser/core/api.py,sha256=PrnRel4aR9AeuPciaYdkurfDTeuo5nstDbOXYfJEboM,860 +partial_json_parser/core/complete.py,sha256=OomNIzJPm61msZaV7uJwIcZeqS4T-NmiLgP2IVrN39s,7033 +partial_json_parser/core/exceptions.py,sha256=XM0bbHCoDPf8wkx7h43DkrSUSMk0Nx48CAOng9mgJ6g,150 +partial_json_parser/core/myelin.py,sha256=ayy1HfZ25aweFKHKciC-ogKJ4iNl47C72IG5faX4iOQ,9295 +partial_json_parser/core/options.py,sha256=45Y0F6eOr86paYsyZR9eSPWWzv2POBXOlj3yTZ_mOgg,992 +partial_json_parser/options.py,sha256=RFRdoYX5u3eEPwDEnsCm0ZIx3oh9wtwvF8e9fH0OdCQ,66 +partial_json_parser/playground.py,sha256=f1TMOpDECH3oe44EKJ7ONtH_hv1wB-EKALPbI2KiBK4,895 diff --git a/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e3fca595850bcd5e36cff5547daf6491c785374e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: pdm-backend (2.3.1) +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/entry_points.txt b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..14f127646425df12469723dc48b7be1c48a363b9 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/partial_json_parser-0.2.1.1.post4.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +json-playground = partial_json_parser.playground:main +