diff --git a/.venv/lib/python3.11/site-packages/gguf/__init__.py b/.venv/lib/python3.11/site-packages/gguf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..243defc4c1ca42d3713017d8902592f54ac849cd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/gguf/__init__.py @@ -0,0 +1,9 @@ +from .constants import * +from .lazy import * +from .gguf_reader import * +from .gguf_writer import * +from .quants import * +from .tensor_mapping import * +from .vocab import * +from .utility import * +from .metadata import * diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96c52642761e58c839529bf8d17de706e929524b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/constants.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/constants.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6c8cb9b87ce17d76ca31b82c5e1023b6f89d122 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/constants.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/gguf.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/gguf.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5754693f0c6fe26ffa280b1e0eee75f1bf937c05 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/gguf.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/gguf_reader.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/gguf_reader.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f92ad1efa04d70685076d0c663b1cbd9f0d7e359 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/gguf_reader.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/gguf_writer.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/gguf_writer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b43928ae76e6aaf2f4622450e7d9020cb574754a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/gguf_writer.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/lazy.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/lazy.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67b27dc2e614bae1cc8f0e4ab6ac1de48dd0db18 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/lazy.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/metadata.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/metadata.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8dec410e545135532d789db8c4ffe0dabfcb469 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/metadata.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/quants.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/quants.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..bea142fa9d230131903bb65a51755bc56fd8b375 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/quants.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/tensor_mapping.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/tensor_mapping.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..332cebc1d47d50d64ee7c453c7b56c1cff4000d3 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/tensor_mapping.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/utility.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/utility.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..533ef35777ba065596cf02c392a4539db1084275 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/utility.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/__pycache__/vocab.cpython-311.pyc b/.venv/lib/python3.11/site-packages/gguf/__pycache__/vocab.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d032b627011eb750020c79eb977987def9adb790 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/gguf/__pycache__/vocab.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/gguf/constants.py b/.venv/lib/python3.11/site-packages/gguf/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..b55effa9907b100106cc1fe5a88e2abbb7dd505d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/gguf/constants.py @@ -0,0 +1,1398 @@ +from __future__ import annotations + +from enum import Enum, IntEnum, auto +from typing import Any + +# +# constants +# + +GGUF_MAGIC = 0x46554747 # "GGUF" +GGUF_VERSION = 3 +GGUF_DEFAULT_ALIGNMENT = 32 +GGML_QUANT_VERSION = 2 # GGML_QNT_VERSION from ggml.h + +# +# metadata keys +# + + +class Keys: + class General: + TYPE = "general.type" + ARCHITECTURE = "general.architecture" + QUANTIZATION_VERSION = "general.quantization_version" + ALIGNMENT = "general.alignment" + FILE_TYPE = "general.file_type" + + # Authorship Metadata + NAME = "general.name" + AUTHOR = "general.author" + VERSION = "general.version" + ORGANIZATION = "general.organization" + + FINETUNE = "general.finetune" + BASENAME = "general.basename" + + DESCRIPTION = "general.description" + QUANTIZED_BY = "general.quantized_by" + + SIZE_LABEL = "general.size_label" + + # Licensing details + LICENSE = "general.license" + LICENSE_NAME = "general.license.name" + LICENSE_LINK = "general.license.link" + + # Typically represents the converted GGUF repo (Unless native) + URL = "general.url" # Model Website/Paper + DOI = "general.doi" + UUID = "general.uuid" + REPO_URL = "general.repo_url" # Model Source Repository (git/svn/etc...) + + # Model Source during conversion + SOURCE_URL = "general.source.url" # Model Website/Paper + SOURCE_DOI = "general.source.doi" + SOURCE_UUID = "general.source.uuid" + SOURCE_REPO_URL = "general.source.repo_url" # Model Source Repository (git/svn/etc...) + + # Base Model Source. There can be more than one source if it's a merged + # model like with 'Mistral-7B-Merge-14-v0.1'. This will assist in + # tracing the lineage of models as they are fine-tuned or merged over time.
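For reference, the base_model keys that follow (like the {arch}-scoped keys in the LLM class further down) are string templates, expanded with str.format before being written — a minimal sketch, assuming only what this file defines:

    from gguf.constants import Keys

    # One block of base_model keys is written per source model of a merge.
    print(Keys.General.BASE_MODEL_NAME.format(id=0))     # general.base_model.0.name
    print(Keys.LLM.CONTEXT_LENGTH.format(arch="llama"))  # llama.context_length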
+ BASE_MODEL_COUNT = "general.base_model.count" + BASE_MODEL_NAME = "general.base_model.{id}.name" + BASE_MODEL_AUTHOR = "general.base_model.{id}.author" + BASE_MODEL_VERSION = "general.base_model.{id}.version" + BASE_MODEL_ORGANIZATION = "general.base_model.{id}.organization" + BASE_MODEL_URL = "general.base_model.{id}.url" # Model Website/Paper + BASE_MODEL_DOI = "general.base_model.{id}.doi" + BASE_MODEL_UUID = "general.base_model.{id}.uuid" + BASE_MODEL_REPO_URL = "general.base_model.{id}.repo_url" # Model Source Repository (git/svn/etc...) + + # Array based KV stores + TAGS = "general.tags" + LANGUAGES = "general.languages" + DATASETS = "general.datasets" + + class LLM: + VOCAB_SIZE = "{arch}.vocab_size" + CONTEXT_LENGTH = "{arch}.context_length" + EMBEDDING_LENGTH = "{arch}.embedding_length" + BLOCK_COUNT = "{arch}.block_count" + LEADING_DENSE_BLOCK_COUNT = "{arch}.leading_dense_block_count" + FEED_FORWARD_LENGTH = "{arch}.feed_forward_length" + EXPERT_FEED_FORWARD_LENGTH = "{arch}.expert_feed_forward_length" + EXPERT_SHARED_FEED_FORWARD_LENGTH = "{arch}.expert_shared_feed_forward_length" + USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual" + TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout" + EXPERT_COUNT = "{arch}.expert_count" + EXPERT_USED_COUNT = "{arch}.expert_used_count" + EXPERT_SHARED_COUNT = "{arch}.expert_shared_count" + EXPERT_WEIGHTS_SCALE = "{arch}.expert_weights_scale" + POOLING_TYPE = "{arch}.pooling_type" + LOGIT_SCALE = "{arch}.logit_scale" + DECODER_START_TOKEN_ID = "{arch}.decoder_start_token_id" + ATTN_LOGIT_SOFTCAPPING = "{arch}.attn_logit_softcapping" + FINAL_LOGIT_SOFTCAPPING = "{arch}.final_logit_softcapping" + + class Attention: + HEAD_COUNT = "{arch}.attention.head_count" + HEAD_COUNT_KV = "{arch}.attention.head_count_kv" + MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias" + CLAMP_KQV = "{arch}.attention.clamp_kqv" + KEY_LENGTH = "{arch}.attention.key_length" + VALUE_LENGTH = "{arch}.attention.value_length" + LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon" + LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon" + CAUSAL = "{arch}.attention.causal" + Q_LORA_RANK = "{arch}.attention.q_lora_rank" + KV_LORA_RANK = "{arch}.attention.kv_lora_rank" + REL_BUCKETS_COUNT = "{arch}.attention.relative_buckets_count" + SLIDING_WINDOW = "{arch}.attention.sliding_window" + + class Rope: + DIMENSION_COUNT = "{arch}.rope.dimension_count" + FREQ_BASE = "{arch}.rope.freq_base" + SCALING_TYPE = "{arch}.rope.scaling.type" + SCALING_FACTOR = "{arch}.rope.scaling.factor" + SCALING_ATTN_FACTOR = "{arch}.rope.scaling.attn_factor" + SCALING_ORIG_CTX_LEN = "{arch}.rope.scaling.original_context_length" + SCALING_FINETUNED = "{arch}.rope.scaling.finetuned" + SCALING_YARN_LOG_MUL = "{arch}.rope.scaling.yarn_log_multiplier" + + class Split: + LLM_KV_SPLIT_NO = "split.no" + LLM_KV_SPLIT_COUNT = "split.count" + LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count" + + class SSM: + CONV_KERNEL = "{arch}.ssm.conv_kernel" + INNER_SIZE = "{arch}.ssm.inner_size" + STATE_SIZE = "{arch}.ssm.state_size" + TIME_STEP_RANK = "{arch}.ssm.time_step_rank" + DT_B_C_RMS = "{arch}.ssm.dt_b_c_rms" + + class Tokenizer: + MODEL = "tokenizer.ggml.model" + PRE = "tokenizer.ggml.pre" + LIST = "tokenizer.ggml.tokens" + TOKEN_TYPE = "tokenizer.ggml.token_type" + TOKEN_TYPE_COUNT = "tokenizer.ggml.token_type_count" # for BERT-style token types + SCORES = "tokenizer.ggml.scores" + MERGES = "tokenizer.ggml.merges" + BOS_ID = "tokenizer.ggml.bos_token_id" + EOS_ID = "tokenizer.ggml.eos_token_id" + 
UNK_ID = "tokenizer.ggml.unknown_token_id" + SEP_ID = "tokenizer.ggml.seperator_token_id" + PAD_ID = "tokenizer.ggml.padding_token_id" + CLS_ID = "tokenizer.ggml.cls_token_id" + MASK_ID = "tokenizer.ggml.mask_token_id" + ADD_BOS = "tokenizer.ggml.add_bos_token" + ADD_EOS = "tokenizer.ggml.add_eos_token" + ADD_PREFIX = "tokenizer.ggml.add_space_prefix" + REMOVE_EXTRA_WS = "tokenizer.ggml.remove_extra_whitespaces" + PRECOMPILED_CHARSMAP = "tokenizer.ggml.precompiled_charsmap" + HF_JSON = "tokenizer.huggingface.json" + RWKV = "tokenizer.rwkv.world" + CHAT_TEMPLATE = "tokenizer.chat_template" + CHAT_TEMPLATE_N = "tokenizer.chat_template.{name}" + CHAT_TEMPLATES = "tokenizer.chat_templates" + # FIM/Infill special tokens constants + PREFIX_ID = "tokenizer.ggml.prefix_token_id" + SUFFIX_ID = "tokenizer.ggml.suffix_token_id" + MIDDLE_ID = "tokenizer.ggml.middle_token_id" + EOT_ID = "tokenizer.ggml.eot_token_id" + EOM_ID = "tokenizer.ggml.eom_token_id" + + class Adapter: + TYPE = "adapter.type" + LORA_ALPHA = "adapter.lora.alpha" + +# +# recommended mapping of model tensor names for storage in gguf +# + + +class GGUFType: + MODEL = "model" + ADAPTER = "adapter" + + +class MODEL_ARCH(IntEnum): + LLAMA = auto() + FALCON = auto() + BAICHUAN = auto() + GROK = auto() + GPT2 = auto() + GPTJ = auto() + GPTNEOX = auto() + MPT = auto() + STARCODER = auto() + REFACT = auto() + BERT = auto() + NOMIC_BERT = auto() + JINA_BERT_V2 = auto() + BLOOM = auto() + STABLELM = auto() + QWEN = auto() + QWEN2 = auto() + QWEN2MOE = auto() + PHI2 = auto() + PHI3 = auto() + PLAMO = auto() + CODESHELL = auto() + ORION = auto() + INTERNLM2 = auto() + MINICPM = auto() + GEMMA = auto() + GEMMA2 = auto() + STARCODER2 = auto() + MAMBA = auto() + XVERSE = auto() + COMMAND_R = auto() + DBRX = auto() + OLMO = auto() + OPENELM = auto() + ARCTIC = auto() + DEEPSEEK2 = auto() + CHATGLM = auto() + BITNET = auto() + T5 = auto() + T5ENCODER = auto() + JAIS = auto() + NEMOTRON = auto() + EXAONE = auto() + + +class MODEL_TENSOR(IntEnum): + TOKEN_EMBD = auto() + TOKEN_EMBD_NORM = auto() + TOKEN_TYPES = auto() + POS_EMBD = auto() + OUTPUT = auto() + OUTPUT_NORM = auto() + ROPE_FREQS = auto() + ROPE_FACTORS_LONG = auto() + ROPE_FACTORS_SHORT = auto() + ATTN_Q = auto() + ATTN_K = auto() + ATTN_V = auto() + ATTN_QKV = auto() + ATTN_OUT = auto() + ATTN_NORM = auto() + ATTN_NORM_2 = auto() + ATTN_OUT_NORM = auto() + ATTN_POST_NORM = auto() + ATTN_ROT_EMBD = auto() + FFN_GATE_INP = auto() + FFN_GATE_INP_SHEXP = auto() + FFN_NORM = auto() + FFN_PRE_NORM = auto() + FFN_POST_NORM = auto() + FFN_GATE = auto() + FFN_DOWN = auto() + FFN_UP = auto() + FFN_ACT = auto() + FFN_NORM_EXP = auto() + FFN_GATE_EXP = auto() + FFN_DOWN_EXP = auto() + FFN_UP_EXP = auto() + FFN_GATE_SHEXP = auto() + FFN_DOWN_SHEXP = auto() + FFN_UP_SHEXP = auto() + ATTN_Q_NORM = auto() + ATTN_K_NORM = auto() + LAYER_OUT_NORM = auto() + SSM_IN = auto() + SSM_CONV1D = auto() + SSM_X = auto() + SSM_DT = auto() + SSM_A = auto() + SSM_D = auto() + SSM_OUT = auto() + ATTN_Q_A = auto() + ATTN_Q_B = auto() + ATTN_KV_A_MQA = auto() + ATTN_KV_B = auto() + ATTN_Q_A_NORM = auto() + ATTN_KV_A_NORM = auto() + FFN_SUB_NORM = auto() + ATTN_SUB_NORM = auto() + DEC_ATTN_NORM = auto() + DEC_ATTN_Q = auto() + DEC_ATTN_K = auto() + DEC_ATTN_V = auto() + DEC_ATTN_OUT = auto() + DEC_ATTN_REL_B = auto() + DEC_CROSS_ATTN_NORM = auto() + DEC_CROSS_ATTN_Q = auto() + DEC_CROSS_ATTN_K = auto() + DEC_CROSS_ATTN_V = auto() + DEC_CROSS_ATTN_OUT = auto() + DEC_CROSS_ATTN_REL_B = auto() + DEC_FFN_NORM = auto() + 
DEC_FFN_GATE = auto() + DEC_FFN_DOWN = auto() + DEC_FFN_UP = auto() + DEC_OUTPUT_NORM = auto() + ENC_ATTN_NORM = auto() + ENC_ATTN_Q = auto() + ENC_ATTN_K = auto() + ENC_ATTN_V = auto() + ENC_ATTN_OUT = auto() + ENC_ATTN_REL_B = auto() + ENC_FFN_NORM = auto() + ENC_FFN_GATE = auto() + ENC_FFN_DOWN = auto() + ENC_FFN_UP = auto() + ENC_OUTPUT_NORM = auto() + + +MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { + MODEL_ARCH.LLAMA: "llama", + MODEL_ARCH.FALCON: "falcon", + MODEL_ARCH.BAICHUAN: "baichuan", + MODEL_ARCH.GROK: "grok", + MODEL_ARCH.GPT2: "gpt2", + MODEL_ARCH.GPTJ: "gptj", + MODEL_ARCH.GPTNEOX: "gptneox", + MODEL_ARCH.MPT: "mpt", + MODEL_ARCH.STARCODER: "starcoder", + MODEL_ARCH.REFACT: "refact", + MODEL_ARCH.BERT: "bert", + MODEL_ARCH.NOMIC_BERT: "nomic-bert", + MODEL_ARCH.JINA_BERT_V2: "jina-bert-v2", + MODEL_ARCH.BLOOM: "bloom", + MODEL_ARCH.STABLELM: "stablelm", + MODEL_ARCH.QWEN: "qwen", + MODEL_ARCH.QWEN2: "qwen2", + MODEL_ARCH.QWEN2MOE: "qwen2moe", + MODEL_ARCH.PHI2: "phi2", + MODEL_ARCH.PHI3: "phi3", + MODEL_ARCH.PLAMO: "plamo", + MODEL_ARCH.CODESHELL: "codeshell", + MODEL_ARCH.ORION: "orion", + MODEL_ARCH.INTERNLM2: "internlm2", + MODEL_ARCH.MINICPM: "minicpm", + MODEL_ARCH.GEMMA: "gemma", + MODEL_ARCH.GEMMA2: "gemma2", + MODEL_ARCH.STARCODER2: "starcoder2", + MODEL_ARCH.MAMBA: "mamba", + MODEL_ARCH.XVERSE: "xverse", + MODEL_ARCH.COMMAND_R: "command-r", + MODEL_ARCH.DBRX: "dbrx", + MODEL_ARCH.OLMO: "olmo", + MODEL_ARCH.OPENELM: "openelm", + MODEL_ARCH.ARCTIC: "arctic", + MODEL_ARCH.DEEPSEEK2: "deepseek2", + MODEL_ARCH.CHATGLM: "chatglm", + MODEL_ARCH.BITNET: "bitnet", + MODEL_ARCH.T5: "t5", + MODEL_ARCH.T5ENCODER: "t5encoder", + MODEL_ARCH.JAIS: "jais", + MODEL_ARCH.NEMOTRON: "nemotron", + MODEL_ARCH.EXAONE: "exaone", +} + +TENSOR_NAMES: dict[MODEL_TENSOR, str] = { + MODEL_TENSOR.TOKEN_EMBD: "token_embd", + MODEL_TENSOR.TOKEN_EMBD_NORM: "token_embd_norm", + MODEL_TENSOR.TOKEN_TYPES: "token_types", + MODEL_TENSOR.POS_EMBD: "position_embd", + MODEL_TENSOR.OUTPUT_NORM: "output_norm", + MODEL_TENSOR.OUTPUT: "output", + MODEL_TENSOR.ROPE_FREQS: "rope_freqs", + MODEL_TENSOR.ROPE_FACTORS_LONG: "rope_factors_long", + MODEL_TENSOR.ROPE_FACTORS_SHORT: "rope_factors_short", + MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm", + MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2", + MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv", + MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q", + MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k", + MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v", + MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output", + MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd", + MODEL_TENSOR.ATTN_Q_NORM: "blk.{bid}.attn_q_norm", + MODEL_TENSOR.ATTN_K_NORM: "blk.{bid}.attn_k_norm", + MODEL_TENSOR.ATTN_OUT_NORM: "blk.{bid}.attn_output_norm", + MODEL_TENSOR.ATTN_POST_NORM: "blk.{bid}.post_attention_norm", + MODEL_TENSOR.FFN_GATE_INP: "blk.{bid}.ffn_gate_inp", + MODEL_TENSOR.FFN_GATE_INP_SHEXP: "blk.{bid}.ffn_gate_inp_shexp", + MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm", + MODEL_TENSOR.FFN_PRE_NORM: "blk.{bid}.ffn_norm", + MODEL_TENSOR.FFN_POST_NORM: "blk.{bid}.post_ffw_norm", + MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate", + MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down", + MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up", + MODEL_TENSOR.FFN_GATE_SHEXP: "blk.{bid}.ffn_gate_shexp", + MODEL_TENSOR.FFN_DOWN_SHEXP: "blk.{bid}.ffn_down_shexp", + MODEL_TENSOR.FFN_UP_SHEXP: "blk.{bid}.ffn_up_shexp", + MODEL_TENSOR.FFN_ACT: "blk.{bid}.ffn", + MODEL_TENSOR.FFN_NORM_EXP: "blk.{bid}.ffn_norm_exps", + MODEL_TENSOR.FFN_GATE_EXP: 
"blk.{bid}.ffn_gate_exps", + MODEL_TENSOR.FFN_DOWN_EXP: "blk.{bid}.ffn_down_exps", + MODEL_TENSOR.FFN_UP_EXP: "blk.{bid}.ffn_up_exps", + MODEL_TENSOR.LAYER_OUT_NORM: "blk.{bid}.layer_output_norm", + MODEL_TENSOR.SSM_IN: "blk.{bid}.ssm_in", + MODEL_TENSOR.SSM_CONV1D: "blk.{bid}.ssm_conv1d", + MODEL_TENSOR.SSM_X: "blk.{bid}.ssm_x", + MODEL_TENSOR.SSM_DT: "blk.{bid}.ssm_dt", + MODEL_TENSOR.SSM_A: "blk.{bid}.ssm_a", + MODEL_TENSOR.SSM_D: "blk.{bid}.ssm_d", + MODEL_TENSOR.SSM_OUT: "blk.{bid}.ssm_out", + MODEL_TENSOR.ATTN_Q_A: "blk.{bid}.attn_q_a", + MODEL_TENSOR.ATTN_Q_B: "blk.{bid}.attn_q_b", + MODEL_TENSOR.ATTN_KV_A_MQA: "blk.{bid}.attn_kv_a_mqa", + MODEL_TENSOR.ATTN_KV_B: "blk.{bid}.attn_kv_b", + MODEL_TENSOR.ATTN_Q_A_NORM: "blk.{bid}.attn_q_a_norm", + MODEL_TENSOR.ATTN_KV_A_NORM: "blk.{bid}.attn_kv_a_norm", + MODEL_TENSOR.ATTN_SUB_NORM: "blk.{bid}.attn_sub_norm", + MODEL_TENSOR.FFN_SUB_NORM: "blk.{bid}.ffn_sub_norm", + MODEL_TENSOR.DEC_ATTN_NORM: "dec.blk.{bid}.attn_norm", + MODEL_TENSOR.DEC_ATTN_Q: "dec.blk.{bid}.attn_q", + MODEL_TENSOR.DEC_ATTN_K: "dec.blk.{bid}.attn_k", + MODEL_TENSOR.DEC_ATTN_V: "dec.blk.{bid}.attn_v", + MODEL_TENSOR.DEC_ATTN_OUT: "dec.blk.{bid}.attn_o", + MODEL_TENSOR.DEC_ATTN_REL_B: "dec.blk.{bid}.attn_rel_b", + MODEL_TENSOR.DEC_CROSS_ATTN_NORM: "dec.blk.{bid}.cross_attn_norm", + MODEL_TENSOR.DEC_CROSS_ATTN_Q: "dec.blk.{bid}.cross_attn_q", + MODEL_TENSOR.DEC_CROSS_ATTN_K: "dec.blk.{bid}.cross_attn_k", + MODEL_TENSOR.DEC_CROSS_ATTN_V: "dec.blk.{bid}.cross_attn_v", + MODEL_TENSOR.DEC_CROSS_ATTN_OUT: "dec.blk.{bid}.cross_attn_o", + MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: "dec.blk.{bid}.cross_attn_rel_b", + MODEL_TENSOR.DEC_FFN_NORM: "dec.blk.{bid}.ffn_norm", + MODEL_TENSOR.DEC_FFN_GATE: "dec.blk.{bid}.ffn_gate", + MODEL_TENSOR.DEC_FFN_DOWN: "dec.blk.{bid}.ffn_down", + MODEL_TENSOR.DEC_FFN_UP: "dec.blk.{bid}.ffn_up", + MODEL_TENSOR.DEC_OUTPUT_NORM: "dec.output_norm", + MODEL_TENSOR.ENC_ATTN_NORM: "enc.blk.{bid}.attn_norm", + MODEL_TENSOR.ENC_ATTN_Q: "enc.blk.{bid}.attn_q", + MODEL_TENSOR.ENC_ATTN_K: "enc.blk.{bid}.attn_k", + MODEL_TENSOR.ENC_ATTN_V: "enc.blk.{bid}.attn_v", + MODEL_TENSOR.ENC_ATTN_OUT: "enc.blk.{bid}.attn_o", + MODEL_TENSOR.ENC_ATTN_REL_B: "enc.blk.{bid}.attn_rel_b", + MODEL_TENSOR.ENC_FFN_NORM: "enc.blk.{bid}.ffn_norm", + MODEL_TENSOR.ENC_FFN_GATE: "enc.blk.{bid}.ffn_gate", + MODEL_TENSOR.ENC_FFN_DOWN: "enc.blk.{bid}.ffn_down", + MODEL_TENSOR.ENC_FFN_UP: "enc.blk.{bid}.ffn_up", + MODEL_TENSOR.ENC_OUTPUT_NORM: "enc.output_norm", +} + +MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { + MODEL_ARCH.LLAMA: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + ], + MODEL_ARCH.GROK: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.ATTN_OUT_NORM, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + 
MODEL_TENSOR.FFN_UP_EXP, + MODEL_TENSOR.LAYER_OUT_NORM, + ], + MODEL_ARCH.GPTNEOX: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.FALCON: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_NORM_2, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.BAICHUAN: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.STARCODER: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.POS_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.BERT: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.TOKEN_EMBD_NORM, + MODEL_TENSOR.TOKEN_TYPES, + MODEL_TENSOR.POS_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_OUT_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.LAYER_OUT_NORM, + ], + MODEL_ARCH.NOMIC_BERT: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.TOKEN_EMBD_NORM, + MODEL_TENSOR.TOKEN_TYPES, + MODEL_TENSOR.POS_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_OUT_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.LAYER_OUT_NORM, + ], + MODEL_ARCH.JINA_BERT_V2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.TOKEN_EMBD_NORM, + MODEL_TENSOR.TOKEN_TYPES, + MODEL_TENSOR.ATTN_NORM_2, + MODEL_TENSOR.ATTN_OUT_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.LAYER_OUT_NORM, + ], + MODEL_ARCH.MPT: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_ACT, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.POS_EMBD, + ], + MODEL_ARCH.GPTJ: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.REFACT: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.BLOOM: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.TOKEN_EMBD_NORM, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + 
MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.STABLELM: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K_NORM, + ], + MODEL_ARCH.QWEN: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.QWEN2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.QWEN2MOE: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + MODEL_TENSOR.FFN_GATE_INP_SHEXP, + MODEL_TENSOR.FFN_GATE_SHEXP, + MODEL_TENSOR.FFN_DOWN_SHEXP, + MODEL_TENSOR.FFN_UP_SHEXP, + ], + MODEL_ARCH.PLAMO: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.GPT2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.POS_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.PHI2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.PHI3: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.CODESHELL: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.POS_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.ORION: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + 
MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.INTERNLM2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.MINICPM: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + ], + MODEL_ARCH.GEMMA: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_NORM, + ], + MODEL_ARCH.GEMMA2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_POST_NORM, + MODEL_TENSOR.FFN_PRE_NORM, + MODEL_TENSOR.FFN_POST_NORM, + ], + MODEL_ARCH.STARCODER2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.MAMBA: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.SSM_IN, + MODEL_TENSOR.SSM_CONV1D, + MODEL_TENSOR.SSM_X, + MODEL_TENSOR.SSM_DT, + MODEL_TENSOR.SSM_A, + MODEL_TENSOR.SSM_D, + MODEL_TENSOR.SSM_OUT, + ], + MODEL_ARCH.XVERSE: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.COMMAND_R: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_Q_NORM, + ], + MODEL_ARCH.DBRX: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_OUT_NORM, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + ], + MODEL_ARCH.OLMO: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.OPENELM: 
[ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.ARCTIC: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_NORM_EXP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + ], + MODEL_ARCH.DEEPSEEK2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_Q_A, + MODEL_TENSOR.ATTN_Q_B, + MODEL_TENSOR.ATTN_KV_A_MQA, + MODEL_TENSOR.ATTN_KV_B, + MODEL_TENSOR.ATTN_Q_A_NORM, + MODEL_TENSOR.ATTN_KV_A_NORM, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + MODEL_TENSOR.FFN_GATE_SHEXP, + MODEL_TENSOR.FFN_DOWN_SHEXP, + MODEL_TENSOR.FFN_UP_SHEXP, + ], + MODEL_ARCH.CHATGLM : [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.BITNET: [ + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.ATTN_SUB_NORM, + MODEL_TENSOR.FFN_SUB_NORM, + ], + MODEL_ARCH.T5: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.DEC_ATTN_NORM, + MODEL_TENSOR.DEC_ATTN_Q, + MODEL_TENSOR.DEC_ATTN_K, + MODEL_TENSOR.DEC_ATTN_V, + MODEL_TENSOR.DEC_ATTN_OUT, + MODEL_TENSOR.DEC_ATTN_REL_B, + MODEL_TENSOR.DEC_CROSS_ATTN_NORM, + MODEL_TENSOR.DEC_CROSS_ATTN_Q, + MODEL_TENSOR.DEC_CROSS_ATTN_K, + MODEL_TENSOR.DEC_CROSS_ATTN_V, + MODEL_TENSOR.DEC_CROSS_ATTN_OUT, + MODEL_TENSOR.DEC_CROSS_ATTN_REL_B, + MODEL_TENSOR.DEC_FFN_NORM, + MODEL_TENSOR.DEC_FFN_GATE, + MODEL_TENSOR.DEC_FFN_DOWN, + MODEL_TENSOR.DEC_FFN_UP, + MODEL_TENSOR.DEC_OUTPUT_NORM, + MODEL_TENSOR.ENC_ATTN_NORM, + MODEL_TENSOR.ENC_ATTN_Q, + MODEL_TENSOR.ENC_ATTN_K, + MODEL_TENSOR.ENC_ATTN_V, + MODEL_TENSOR.ENC_ATTN_OUT, + MODEL_TENSOR.ENC_ATTN_REL_B, + MODEL_TENSOR.ENC_FFN_NORM, + MODEL_TENSOR.ENC_FFN_GATE, + MODEL_TENSOR.ENC_FFN_DOWN, + MODEL_TENSOR.ENC_FFN_UP, + MODEL_TENSOR.ENC_OUTPUT_NORM, + ], + MODEL_ARCH.T5ENCODER: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ENC_ATTN_NORM, + MODEL_TENSOR.ENC_ATTN_Q, + MODEL_TENSOR.ENC_ATTN_K, + MODEL_TENSOR.ENC_ATTN_V, + MODEL_TENSOR.ENC_ATTN_OUT, + MODEL_TENSOR.ENC_ATTN_REL_B, + MODEL_TENSOR.ENC_FFN_NORM, + MODEL_TENSOR.ENC_FFN_GATE, + MODEL_TENSOR.ENC_FFN_DOWN, + MODEL_TENSOR.ENC_FFN_UP, + MODEL_TENSOR.ENC_OUTPUT_NORM, + ], + MODEL_ARCH.JAIS: [ + MODEL_TENSOR.TOKEN_EMBD, + 
MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.NEMOTRON: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.EXAONE: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + # TODO +} + +# tensors that will not be serialized +MODEL_TENSOR_SKIP: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { + MODEL_ARCH.LLAMA: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.BAICHUAN: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.QWEN: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.CODESHELL: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.ORION: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.STARCODER2: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.XVERSE: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.DEEPSEEK2: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.CHATGLM: [ + MODEL_TENSOR.ROPE_FREQS, + ], + MODEL_ARCH.NEMOTRON: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], +} + +# +# types +# + + +class TokenType(IntEnum): + NORMAL = 1 + UNKNOWN = 2 + CONTROL = 3 + USER_DEFINED = 4 + UNUSED = 5 + BYTE = 6 + + +class RopeScalingType(Enum): + NONE = 'none' + LINEAR = 'linear' + YARN = 'yarn' + + +class PoolingType(IntEnum): + NONE = 0 + MEAN = 1 + CLS = 2 + + +class GGMLQuantizationType(IntEnum): + F32 = 0 + F16 = 1 + Q4_0 = 2 + Q4_1 = 3 + Q5_0 = 6 + Q5_1 = 7 + Q8_0 = 8 + Q8_1 = 9 + Q2_K = 10 + Q3_K = 11 + Q4_K = 12 + Q5_K = 13 + Q6_K = 14 + Q8_K = 15 + IQ2_XXS = 16 + IQ2_XS = 17 + IQ3_XXS = 18 + IQ1_S = 19 + IQ4_NL = 20 + IQ3_S = 21 + IQ2_S = 22 + IQ4_XS = 23 + I8 = 24 + I16 = 25 + I32 = 26 + I64 = 27 + F64 = 28 + IQ1_M = 29 + BF16 = 30 + Q4_0_4_4 = 31 + Q4_0_4_8 = 32 + Q4_0_8_8 = 33 + + +# TODO: add GGMLFileType from ggml_ftype in ggml.h + + +# from llama_ftype in llama.h +# ALL VALUES SHOULD BE THE SAME HERE AS THEY ARE OVER THERE. 
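(The LlamaFileType enum that the preceding comment introduces follows this aside.) To make the templating above concrete: each architecture's MODEL_TENSORS list resolves to on-disk tensor names via TENSOR_NAMES and a block id — a sketch, not part of the package:

    from gguf.constants import MODEL_ARCH, MODEL_TENSORS, TENSOR_NAMES

    # First tensors of the llama architecture, resolved for block 0:
    for t in MODEL_TENSORS[MODEL_ARCH.LLAMA][:6]:
        print(TENSOR_NAMES[t].format(bid=0))  # token_embd, ..., blk.0.attn_norm, blk.0.attn_q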
+class LlamaFileType(IntEnum): + ALL_F32 = 0 + MOSTLY_F16 = 1 # except 1d tensors + MOSTLY_Q4_0 = 2 # except 1d tensors + MOSTLY_Q4_1 = 3 # except 1d tensors + # MOSTLY_Q4_1_SOME_F16 = 4 # tok_embeddings.weight and output.weight are F16 + # MOSTLY_Q4_2 = 5 # support has been removed + # MOSTLY_Q4_3 = 6 # support has been removed + MOSTLY_Q8_0 = 7 # except 1d tensors + MOSTLY_Q5_0 = 8 # except 1d tensors + MOSTLY_Q5_1 = 9 # except 1d tensors + MOSTLY_Q2_K = 10 # except 1d tensors + MOSTLY_Q3_K_S = 11 # except 1d tensors + MOSTLY_Q3_K_M = 12 # except 1d tensors + MOSTLY_Q3_K_L = 13 # except 1d tensors + MOSTLY_Q4_K_S = 14 # except 1d tensors + MOSTLY_Q4_K_M = 15 # except 1d tensors + MOSTLY_Q5_K_S = 16 # except 1d tensors + MOSTLY_Q5_K_M = 17 # except 1d tensors + MOSTLY_Q6_K = 18 # except 1d tensors + MOSTLY_IQ2_XXS = 19 # except 1d tensors + MOSTLY_IQ2_XS = 20 # except 1d tensors + MOSTLY_Q2_K_S = 21 # except 1d tensors + MOSTLY_IQ3_XS = 22 # except 1d tensors + MOSTLY_IQ3_XXS = 23 # except 1d tensors + MOSTLY_IQ1_S = 24 # except 1d tensors + MOSTLY_IQ4_NL = 25 # except 1d tensors + MOSTLY_IQ3_S = 26 # except 1d tensors + MOSTLY_IQ3_M = 27 # except 1d tensors + MOSTLY_IQ2_S = 28 # except 1d tensors + MOSTLY_IQ2_M = 29 # except 1d tensors + MOSTLY_IQ4_XS = 30 # except 1d tensors + MOSTLY_IQ1_M = 31 # except 1d tensors + MOSTLY_BF16 = 32 # except 1d tensors + MOSTLY_Q4_0_4_4 = 33 # except 1d tensors + MOSTLY_Q4_0_4_8 = 34 # except 1d tensors + MOSTLY_Q4_0_8_8 = 35 # except 1d tensors + + GUESSED = 1024 # not specified in the model file + + +class GGUFEndian(IntEnum): + LITTLE = 0 + BIG = 1 + + +class GGUFValueType(IntEnum): + UINT8 = 0 + INT8 = 1 + UINT16 = 2 + INT16 = 3 + UINT32 = 4 + INT32 = 5 + FLOAT32 = 6 + BOOL = 7 + STRING = 8 + ARRAY = 9 + UINT64 = 10 + INT64 = 11 + FLOAT64 = 12 + + @staticmethod + def get_type(val: Any) -> GGUFValueType: + if isinstance(val, (str, bytes, bytearray)): + return GGUFValueType.STRING + elif isinstance(val, list): + return GGUFValueType.ARRAY + elif isinstance(val, float): + return GGUFValueType.FLOAT32 + elif isinstance(val, bool): + return GGUFValueType.BOOL + elif isinstance(val, int): + return GGUFValueType.INT32 + # TODO: need help with 64-bit types in Python + else: + raise ValueError(f"Unknown type: {type(val)}") + + +# Items here are (block size, type size) +QK_K = 256 +GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = { + GGMLQuantizationType.F32: (1, 4), + GGMLQuantizationType.F16: (1, 2), + GGMLQuantizationType.Q4_0: (32, 2 + 16), + GGMLQuantizationType.Q4_1: (32, 2 + 2 + 16), + GGMLQuantizationType.Q5_0: (32, 2 + 4 + 16), + GGMLQuantizationType.Q5_1: (32, 2 + 2 + 4 + 16), + GGMLQuantizationType.Q8_0: (32, 2 + 32), + GGMLQuantizationType.Q8_1: (32, 4 + 4 + 32), + GGMLQuantizationType.Q2_K: (256, 2 + 2 + QK_K // 16 + QK_K // 4), + GGMLQuantizationType.Q3_K: (256, 2 + QK_K // 4 + QK_K // 8 + 12), + GGMLQuantizationType.Q4_K: (256, 2 + 2 + QK_K // 2 + 12), + GGMLQuantizationType.Q5_K: (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12), + GGMLQuantizationType.Q6_K: (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16), + GGMLQuantizationType.Q8_K: (256, 4 + QK_K + QK_K // 8), + GGMLQuantizationType.IQ2_XXS: (256, 2 + QK_K // 4), + GGMLQuantizationType.IQ2_XS: (256, 2 + QK_K // 4 + QK_K // 32), + GGMLQuantizationType.IQ3_XXS: (256, 2 + QK_K // 4 + QK_K // 8), + GGMLQuantizationType.IQ1_S: (256, 2 + QK_K // 8 + QK_K // 16), + GGMLQuantizationType.IQ4_NL: (32, 2 + 16), + GGMLQuantizationType.IQ3_S: (256, 2 + QK_K // 4 + QK_K // 8 + QK_K // 32 + 4), + 
GGMLQuantizationType.IQ2_S: (256, 2 + QK_K // 4 + QK_K // 16), + GGMLQuantizationType.IQ4_XS: (256, 2 + 2 + QK_K // 2 + QK_K // 64), + GGMLQuantizationType.I8: (1, 1), + GGMLQuantizationType.I16: (1, 2), + GGMLQuantizationType.I32: (1, 4), + GGMLQuantizationType.I64: (1, 8), + GGMLQuantizationType.F64: (1, 8), + GGMLQuantizationType.IQ1_M: (256, QK_K // 8 + QK_K // 16 + QK_K // 32), + GGMLQuantizationType.BF16: (1, 2), + GGMLQuantizationType.Q4_0_4_4:(32, 2 + 16), + GGMLQuantizationType.Q4_0_4_8:(32, 2 + 16), + GGMLQuantizationType.Q4_0_8_8:(32, 2 + 16), +} + + +# Aliases for backward compatibility. + +# general +KEY_GENERAL_ARCHITECTURE = Keys.General.ARCHITECTURE +KEY_GENERAL_QUANTIZATION_VERSION = Keys.General.QUANTIZATION_VERSION +KEY_GENERAL_ALIGNMENT = Keys.General.ALIGNMENT +KEY_GENERAL_NAME = Keys.General.NAME +KEY_GENERAL_AUTHOR = Keys.General.AUTHOR +KEY_GENERAL_URL = Keys.General.URL +KEY_GENERAL_DESCRIPTION = Keys.General.DESCRIPTION +KEY_GENERAL_LICENSE = Keys.General.LICENSE +KEY_GENERAL_SOURCE_URL = Keys.General.SOURCE_URL +KEY_GENERAL_FILE_TYPE = Keys.General.FILE_TYPE + +# LLM +KEY_VOCAB_SIZE = Keys.LLM.VOCAB_SIZE +KEY_CONTEXT_LENGTH = Keys.LLM.CONTEXT_LENGTH +KEY_EMBEDDING_LENGTH = Keys.LLM.EMBEDDING_LENGTH +KEY_BLOCK_COUNT = Keys.LLM.BLOCK_COUNT +KEY_FEED_FORWARD_LENGTH = Keys.LLM.FEED_FORWARD_LENGTH +KEY_USE_PARALLEL_RESIDUAL = Keys.LLM.USE_PARALLEL_RESIDUAL +KEY_TENSOR_DATA_LAYOUT = Keys.LLM.TENSOR_DATA_LAYOUT + +# attention +KEY_ATTENTION_HEAD_COUNT = Keys.Attention.HEAD_COUNT +KEY_ATTENTION_HEAD_COUNT_KV = Keys.Attention.HEAD_COUNT_KV +KEY_ATTENTION_MAX_ALIBI_BIAS = Keys.Attention.MAX_ALIBI_BIAS +KEY_ATTENTION_CLAMP_KQV = Keys.Attention.CLAMP_KQV +KEY_ATTENTION_LAYERNORM_EPS = Keys.Attention.LAYERNORM_EPS +KEY_ATTENTION_LAYERNORM_RMS_EPS = Keys.Attention.LAYERNORM_RMS_EPS + +# RoPE +KEY_ROPE_DIMENSION_COUNT = Keys.Rope.DIMENSION_COUNT +KEY_ROPE_FREQ_BASE = Keys.Rope.FREQ_BASE +KEY_ROPE_SCALING_TYPE = Keys.Rope.SCALING_TYPE +KEY_ROPE_SCALING_FACTOR = Keys.Rope.SCALING_FACTOR +KEY_ROPE_SCALING_ORIG_CTX_LEN = Keys.Rope.SCALING_ORIG_CTX_LEN +KEY_ROPE_SCALING_FINETUNED = Keys.Rope.SCALING_FINETUNED + +# SSM +KEY_SSM_CONV_KERNEL = Keys.SSM.CONV_KERNEL +KEY_SSM_INNER_SIZE = Keys.SSM.INNER_SIZE +KEY_SSM_STATE_SIZE = Keys.SSM.STATE_SIZE +KEY_SSM_TIME_STEP_RANK = Keys.SSM.TIME_STEP_RANK +KEY_SSM_DT_B_C_RMS = Keys.SSM.DT_B_C_RMS + +# tokenization +KEY_TOKENIZER_MODEL = Keys.Tokenizer.MODEL +KEY_TOKENIZER_PRE = Keys.Tokenizer.PRE +KEY_TOKENIZER_LIST = Keys.Tokenizer.LIST +KEY_TOKENIZER_TOKEN_TYPE = Keys.Tokenizer.TOKEN_TYPE +KEY_TOKENIZER_SCORES = Keys.Tokenizer.SCORES +KEY_TOKENIZER_MERGES = Keys.Tokenizer.MERGES +KEY_TOKENIZER_BOS_ID = Keys.Tokenizer.BOS_ID +KEY_TOKENIZER_EOS_ID = Keys.Tokenizer.EOS_ID +KEY_TOKENIZER_UNK_ID = Keys.Tokenizer.UNK_ID +KEY_TOKENIZER_SEP_ID = Keys.Tokenizer.SEP_ID +KEY_TOKENIZER_PAD_ID = Keys.Tokenizer.PAD_ID +KEY_TOKENIZER_CLS_ID = Keys.Tokenizer.CLS_ID +KEY_TOKENIZER_MASK_ID = Keys.Tokenizer.MASK_ID +KEY_TOKENIZER_HF_JSON = Keys.Tokenizer.HF_JSON +KEY_TOKENIZER_RWKV = Keys.Tokenizer.RWKV +KEY_TOKENIZER_PRIFIX_ID = Keys.Tokenizer.PREFIX_ID +KEY_TOKENIZER_SUFFIX_ID = Keys.Tokenizer.SUFFIX_ID +KEY_TOKENIZER_MIDDLE_ID = Keys.Tokenizer.MIDDLE_ID +KEY_TOKENIZER_EOT_ID = Keys.Tokenizer.EOT_ID +KEY_TOKENIZER_EOM_ID = Keys.Tokenizer.EOM_ID diff --git a/.venv/lib/python3.11/site-packages/gguf/gguf.py b/.venv/lib/python3.11/site-packages/gguf/gguf.py new file mode 100644 index 
0000000000000000000000000000000000000000..651a81eb828248728f854c85c1a437b52892f275 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/gguf/gguf.py @@ -0,0 +1,15 @@ +# This file is left for compatibility. If you want to use the GGUF API from Python, +# then don't import gguf/gguf.py directly. If you're looking for examples, see the +# examples/ directory of gguf-py. + +import importlib +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +# Compatibility for people trying to import gguf/gguf.py directly instead of as a package. +importlib.invalidate_caches() +import gguf # noqa: E402 + +importlib.reload(gguf) diff --git a/.venv/lib/python3.11/site-packages/gguf/gguf_reader.py b/.venv/lib/python3.11/site-packages/gguf/gguf_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..e8e61abf86ae4a57a44b0e451fd14c9ee3619ae8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/gguf/gguf_reader.py @@ -0,0 +1,317 @@ +# +# GGUF file reading/modification support. For API usage information, +# please see the files in scripts/ for some fairly simple examples. +# +from __future__ import annotations + +import logging +import os +from collections import OrderedDict +from typing import Any, Literal, NamedTuple, TypeVar, Union + +import numpy as np +import numpy.typing as npt + +from .quants import quant_shape_to_byte_shape + +if __name__ == "__main__": + import sys + from pathlib import Path + + # Allow running file in package as a script. + sys.path.insert(0, str(Path(__file__).parent.parent)) + +from gguf.constants import ( + GGML_QUANT_SIZES, + GGUF_DEFAULT_ALIGNMENT, + GGUF_MAGIC, + GGUF_VERSION, + GGMLQuantizationType, + GGUFValueType, +) + +logger = logging.getLogger(__name__) + +READER_SUPPORTED_VERSIONS = [2, GGUF_VERSION] + + +class ReaderField(NamedTuple): + # Offset to start of this field. + offset: int + + # Name of the field (not necessarily from file data). + name: str + + # Data parts. Some types have multiple components, such as strings + # that consist of a length followed by the string data. + parts: list[npt.NDArray[Any]] = [] + + # Indexes into parts that we can call the actual data. For example + # an array of strings will be populated with indexes to the actual + # string data. + data: list[int] = [-1] + + types: list[GGUFValueType] = [] + + +class ReaderTensor(NamedTuple): + name: str + tensor_type: GGMLQuantizationType + shape: npt.NDArray[np.uint32] + n_elements: int + n_bytes: int + data_offset: int + data: npt.NDArray[Any] + field: ReaderField + + +class GGUFReader: + # I - same as host, S - swapped + byte_order: Literal['I', 'S'] = 'I' + alignment: int = GGUF_DEFAULT_ALIGNMENT + data_offset: int + + # Note: Internal helper, API may change.
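The gguf_scalar_to_np table that follows maps GGUF scalar value types to NumPy dtypes during decoding. For orientation, here is a minimal use of the reader this hunk defines; the file name is illustrative, and string fields keep their byte payload at the index stored in field.data[-1]:

    from gguf import GGUFReader

    reader = GGUFReader("model.gguf")  # any local GGUF file
    field = reader.get_field("general.architecture")
    if field is not None:
        # parts = [key_len, key_bytes, value_type, str_len, str_bytes]
        print(bytes(field.parts[field.data[-1]]).decode("utf-8"))
    for tensor in reader.tensors:
        print(tensor.name, tensor.tensor_type.name, tuple(tensor.shape))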
+ gguf_scalar_to_np: dict[GGUFValueType, type[np.generic]] = { + GGUFValueType.UINT8: np.uint8, + GGUFValueType.INT8: np.int8, + GGUFValueType.UINT16: np.uint16, + GGUFValueType.INT16: np.int16, + GGUFValueType.UINT32: np.uint32, + GGUFValueType.INT32: np.int32, + GGUFValueType.FLOAT32: np.float32, + GGUFValueType.UINT64: np.uint64, + GGUFValueType.INT64: np.int64, + GGUFValueType.FLOAT64: np.float64, + GGUFValueType.BOOL: np.bool_, + } + + def __init__(self, path: os.PathLike[str] | str, mode: Literal['r', 'r+', 'c'] = 'r'): + self.data = np.memmap(path, mode = mode) + offs = 0 + + # Check for GGUF magic + if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC: + raise ValueError('GGUF magic invalid') + offs += 4 + + # Check GGUF version + temp_version = self._get(offs, np.uint32) + if temp_version[0] & 65535 == 0: + # If we get 0 here that means it's (probably) a GGUF file created for + # the opposite byte order of the machine this script is running on. + self.byte_order = 'S' + temp_version = temp_version.newbyteorder(self.byte_order) + version = temp_version[0] + if version not in READER_SUPPORTED_VERSIONS: + raise ValueError(f'Sorry, file appears to be version {version} which we cannot handle') + self.fields: OrderedDict[str, ReaderField] = OrderedDict() + self.tensors: list[ReaderTensor] = [] + offs += self._push_field(ReaderField(offs, 'GGUF.version', [temp_version], [0], [GGUFValueType.UINT32])) + + # Check tensor count and kv count + temp_counts = self._get(offs, np.uint64, 2) + offs += self._push_field(ReaderField(offs, 'GGUF.tensor_count', [temp_counts[:1]], [0], [GGUFValueType.UINT64])) + offs += self._push_field(ReaderField(offs, 'GGUF.kv_count', [temp_counts[1:]], [0], [GGUFValueType.UINT64])) + tensor_count, kv_count = temp_counts + offs = self._build_fields(offs, kv_count) + + # Build Tensor Info Fields + offs, tensors_fields = self._build_tensor_info(offs, tensor_count) + new_align = self.fields.get('general.alignment') + if new_align is not None: + if new_align.types != [GGUFValueType.UINT32]: + raise ValueError('Bad type for general.alignment field') + self.alignment = new_align.parts[-1][0] + padding = offs % self.alignment + if padding != 0: + offs += self.alignment - padding + self.data_offset = offs + self._build_tensors(offs, tensors_fields) + + _DT = TypeVar('_DT', bound = npt.DTypeLike) + + # Fetch a key/value metadata field by key. + def get_field(self, key: str) -> Union[ReaderField, None]: + return self.fields.get(key, None) + + # Fetch a tensor from the list by index. 
+ def get_tensor(self, idx: int) -> ReaderTensor: + return self.tensors[idx] + + def _get( + self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I', 'S', '<'] = None, + ) -> npt.NDArray[Any]: + count = int(count) + itemsize = int(np.empty([], dtype = dtype).itemsize) + end_offs = offset + itemsize * count + return ( + self.data[offset:end_offs] + .view(dtype = dtype)[:count] + .newbyteorder(override_order or self.byte_order) + ) + + def _push_field(self, field: ReaderField, skip_sum: bool = False) -> int: + if field.name in self.fields: + # TODO: add option to generate error on duplicate keys + # raise KeyError(f'Duplicate {field.name} already in list at offset {field.offset}') + + logger.warning(f'Duplicate key {field.name} at offset {field.offset}') + self.fields[field.name + '_{}'.format(field.offset)] = field + else: + self.fields[field.name] = field + return 0 if skip_sum else sum(int(part.nbytes) for part in field.parts) + + def _get_str(self, offset: int) -> tuple[npt.NDArray[np.uint64], npt.NDArray[np.uint8]]: + slen = self._get(offset, np.uint64) + return slen, self._get(offset + 8, np.uint8, slen[0]) + + def _get_field_parts( + self, orig_offs: int, raw_type: int, + ) -> tuple[int, list[npt.NDArray[Any]], list[int], list[GGUFValueType]]: + offs = orig_offs + types: list[GGUFValueType] = [] + gtype = GGUFValueType(raw_type) + types.append(gtype) + # Handle strings. + if gtype == GGUFValueType.STRING: + sparts: list[npt.NDArray[Any]] = list(self._get_str(offs)) + size = sum(int(part.nbytes) for part in sparts) + return size, sparts, [1], types + # Check if it's a simple scalar type. + nptype = self.gguf_scalar_to_np.get(gtype) + if nptype is not None: + val = self._get(offs, nptype) + return int(val.nbytes), [val], [0], types + # Handle arrays. + if gtype == GGUFValueType.ARRAY: + raw_itype = self._get(offs, np.uint32) + offs += int(raw_itype.nbytes) + alen = self._get(offs, np.uint64) + offs += int(alen.nbytes) + aparts: list[npt.NDArray[Any]] = [raw_itype, alen] + data_idxs: list[int] = [] + for idx in range(alen[0]): + curr_size, curr_parts, curr_idxs, curr_types = self._get_field_parts(offs, raw_itype[0]) + if idx == 0: + types += curr_types + idxs_offs = len(aparts) + aparts += curr_parts + data_idxs += (idx + idxs_offs for idx in curr_idxs) + offs += curr_size + return offs - orig_offs, aparts, data_idxs, types + # We can't deal with this one. 
+ raise ValueError(f'Unknown/unhandled field type {gtype}') + + def _get_tensor_info_field(self, orig_offs: int) -> ReaderField: + offs = orig_offs + + # Get Tensor Name + name_len, name_data = self._get_str(offs) + offs += int(name_len.nbytes + name_data.nbytes) + + # Get Tensor Dimensions Count + n_dims = self._get(offs, np.uint32) + offs += int(n_dims.nbytes) + + # Get Tensor Dimension Array + dims = self._get(offs, np.uint64, n_dims[0]) + offs += int(dims.nbytes) + + # Get Tensor Encoding Scheme Type + raw_dtype = self._get(offs, np.uint32) + offs += int(raw_dtype.nbytes) + + # Get Tensor Offset + offset_tensor = self._get(offs, np.uint64) + offs += int(offset_tensor.nbytes) + + return ReaderField( + orig_offs, + str(bytes(name_data), encoding = 'utf-8'), + [name_len, name_data, n_dims, dims, raw_dtype, offset_tensor], + [1, 3, 4, 5], + ) + + def _build_fields(self, offs: int, count: int) -> int: + for _ in range(count): + orig_offs = offs + kv_klen, kv_kdata = self._get_str(offs) + offs += int(kv_klen.nbytes + kv_kdata.nbytes) + raw_kv_type = self._get(offs, np.uint32) + offs += int(raw_kv_type.nbytes) + parts: list[npt.NDArray[Any]] = [kv_klen, kv_kdata, raw_kv_type] + idxs_offs = len(parts) + field_size, field_parts, field_idxs, field_types = self._get_field_parts(offs, raw_kv_type[0]) + parts += field_parts + self._push_field(ReaderField( + orig_offs, + str(bytes(kv_kdata), encoding = 'utf-8'), + parts, + [idx + idxs_offs for idx in field_idxs], + field_types, + ), skip_sum = True) + offs += field_size + return offs + + def _build_tensor_info(self, offs: int, count: int) -> tuple[int, list[ReaderField]]: + tensor_fields = [] + for _ in range(count): + field = self._get_tensor_info_field(offs) + offs += sum(int(part.nbytes) for part in field.parts) + tensor_fields.append(field) + return offs, tensor_fields + + def _build_tensors(self, start_offs: int, fields: list[ReaderField]) -> None: + tensors = [] + tensor_names = set() # keep track of names to prevent duplicated tensors + for field in fields: + _name_len, name_data, _n_dims, dims, raw_dtype, offset_tensor = field.parts + # check if a tensor with the same name is already in the list + tensor_name = str(bytes(name_data), encoding = 'utf-8') + if tensor_name in tensor_names: + raise ValueError(f'Found duplicated tensor with name {tensor_name}') + tensor_names.add(tensor_name) + ggml_type = GGMLQuantizationType(raw_dtype[0]) + n_elems = int(np.prod(dims)) + np_dims = tuple(reversed(dims.tolist())) + block_size, type_size = GGML_QUANT_SIZES[ggml_type] + n_bytes = n_elems * type_size // block_size + data_offs = int(start_offs + offset_tensor[0]) + item_type: npt.DTypeLike + if ggml_type == GGMLQuantizationType.F16: + item_count = n_elems + item_type = np.float16 + elif ggml_type == GGMLQuantizationType.F32: + item_count = n_elems + item_type = np.float32 + elif ggml_type == GGMLQuantizationType.F64: + item_count = n_elems + item_type = np.float64 + elif ggml_type == GGMLQuantizationType.I8: + item_count = n_elems + item_type = np.int8 + elif ggml_type == GGMLQuantizationType.I16: + item_count = n_elems + item_type = np.int16 + elif ggml_type == GGMLQuantizationType.I32: + item_count = n_elems + item_type = np.int32 + elif ggml_type == GGMLQuantizationType.I64: + item_count = n_elems + item_type = np.int64 + else: + item_count = n_bytes + item_type = np.uint8 + np_dims = quant_shape_to_byte_shape(np_dims, ggml_type) + tensors.append(ReaderTensor( + name = tensor_name, + tensor_type = ggml_type, + shape = dims, + n_elements =
n_elems, + n_bytes = n_bytes, + data_offset = data_offs, + data = self._get(data_offs, item_type, item_count).reshape(np_dims), + field = field, + )) + self.tensors = tensors diff --git a/.venv/lib/python3.11/site-packages/gguf/gguf_writer.py b/.venv/lib/python3.11/site-packages/gguf/gguf_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..af3b98c679b0b66cd36d0a1ab5dafb8262936a0f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/gguf/gguf_writer.py @@ -0,0 +1,888 @@ +from __future__ import annotations + +import logging +import os +import shutil +import struct +import tempfile +from dataclasses import dataclass +from enum import Enum, auto +from math import prod +from pathlib import Path +from io import BufferedWriter +from typing import IO, Any, Sequence, Mapping +from string import ascii_letters, digits + +import numpy as np + +from .constants import ( + GGUF_DEFAULT_ALIGNMENT, + GGUF_MAGIC, + GGUF_VERSION, + GGMLQuantizationType, + GGUFEndian, + GGUFValueType, + Keys, + RopeScalingType, + PoolingType, + TokenType, +) + +from .quants import quant_shape_from_byte_shape + +logger = logging.getLogger(__name__) + + +SHARD_NAME_FORMAT = "{:s}-{:05d}-of-{:05d}.gguf" + + +@dataclass +class TensorInfo: + shape: Sequence[int] + dtype: GGMLQuantizationType + nbytes: int + tensor: np.ndarray[Any, Any] | None = None + + +@dataclass +class GGUFValue: + value: Any + type: GGUFValueType + + +class WriterState(Enum): + NO_FILE = auto() + EMPTY = auto() + HEADER = auto() + KV_DATA = auto() + TI_DATA = auto() + WEIGHTS = auto() + + +class GGUFWriter: + fout: list[BufferedWriter] | None + path: Path | None + temp_file: tempfile.SpooledTemporaryFile[bytes] | None + tensors: list[dict[str, TensorInfo]] + kv_data: list[dict[str, GGUFValue]] + state: WriterState + _simple_value_packing = { + GGUFValueType.UINT8: "B", + GGUFValueType.INT8: "b", + GGUFValueType.UINT16: "H", + GGUFValueType.INT16: "h", + GGUFValueType.UINT32: "I", + GGUFValueType.INT32: "i", + GGUFValueType.FLOAT32: "f", + GGUFValueType.UINT64: "Q", + GGUFValueType.INT64: "q", + GGUFValueType.FLOAT64: "d", + GGUFValueType.BOOL: "?", + } + + def __init__( + self, path: os.PathLike[str] | str | None, arch: str, use_temp_file: bool = False, endianess: GGUFEndian = GGUFEndian.LITTLE, + split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False + ): + self.fout = None + self.path = Path(path) if path else None + self.arch = arch + self.endianess = endianess + self.data_alignment = GGUF_DEFAULT_ALIGNMENT + self.use_temp_file = use_temp_file + self.temp_file = None + self.tensors = [{}] + self.kv_data = [{}] + self.split_max_tensors = split_max_tensors + self.split_max_size = split_max_size + self.dry_run = dry_run + self.small_first_shard = small_first_shard + logger.info("gguf: This GGUF file is for {0} Endian only".format( + "Big" if self.endianess == GGUFEndian.BIG else "Little", + )) + self.state = WriterState.NO_FILE + + if self.small_first_shard: + self.tensors.append({}) + + self.add_architecture() + + def get_total_parameter_count(self) -> tuple[int, int, int, int]: + total_params = 0 + shared_params = 0 + expert_params = 0 + + expert_sum = 0 + n_expert_tensors = 0 + + last_lora_a: tuple[str, TensorInfo] | None = None + + for tensors in self.tensors: + for name, info in tensors.items(): + + shape = info.shape + + if name.endswith(".lora_a"): + last_lora_a = (name, info) + continue + elif name.endswith(".lora_b"): + if last_lora_a is None or last_lora_a[0] != 
name[:-1] + "a": + # Bail when the LoRA pair can't be found trivially + logger.warning("can't measure LoRA size correctly, tensor order is unusual") + return 0, 0, 0, 0 + else: + shape = (*shape[:-1], last_lora_a[1].shape[-1]) + + size = prod(shape) + + if "_exps." in name: + expert_params += (size // shape[-3]) + expert_sum += shape[-3] + n_expert_tensors += 1 + else: + shared_params += size + + total_params += size + + # Hopefully this should work even for variable-expert-count models + expert_count = (expert_sum // n_expert_tensors) if n_expert_tensors > 0 else 0 + + # Negate the total to signal it's likely not exact + if last_lora_a is not None: + total_params = -total_params + + # NOTE: keep the output in the same order as accepted by 'size_label' in gguf-py/gguf/utility.py + return total_params, shared_params, expert_params, expert_count + + def format_shard_names(self, path: Path) -> list[Path]: + if len(self.tensors) == 1: + return [path] + return [path.with_name(SHARD_NAME_FORMAT.format(path.stem, i + 1, len(self.tensors))) for i in range(len(self.tensors))] + + def open_output_file(self, path: Path | None = None) -> None: + if self.state is WriterState.EMPTY and self.fout is not None and (path is None or path == self.path): + # allow calling this multiple times as long as the path is the same + return + + if self.state is not WriterState.NO_FILE: + raise ValueError(f'Expected output file to be not yet opened, got {self.state}') + + if path is not None: + self.path = path + + if self.path is not None: + filenames = self.print_plan() + self.fout = [open(filename, "wb") for filename in filenames] + self.state = WriterState.EMPTY + + def print_plan(self) -> list[Path]: + logger.info("Writing the following files:") + assert self.path is not None + filenames = self.format_shard_names(self.path) + assert len(filenames) == len(self.tensors) + for name, tensors in zip(filenames, self.tensors): + logger.info(f"{name}: n_tensors = {len(tensors)}, total_size = {GGUFWriter.format_n_bytes_to_str(sum(ti.nbytes for ti in tensors.values()))}") + + if self.dry_run: + logger.info("Dry run, not writing files") + for name in filenames: + print(name) # noqa: NP100 + exit() + + return filenames + + def add_shard_kv_data(self) -> None: + if len(self.tensors) == 1: + return + + total_tensors = sum(len(t) for t in self.tensors) + assert self.fout is not None + total_splits = len(self.fout) + self.kv_data.extend({} for _ in range(len(self.kv_data), total_splits)) + for i, kv_data in enumerate(self.kv_data): + kv_data[Keys.Split.LLM_KV_SPLIT_NO] = GGUFValue(i, GGUFValueType.UINT16) + kv_data[Keys.Split.LLM_KV_SPLIT_COUNT] = GGUFValue(total_splits, GGUFValueType.UINT16) + kv_data[Keys.Split.LLM_KV_SPLIT_TENSORS_COUNT] = GGUFValue(total_tensors, GGUFValueType.INT32) + + def write_header_to_file(self, path: Path | None = None) -> None: + if len(self.tensors) == 1 and (self.split_max_tensors != 0 or self.split_max_size != 0): + logger.warning("Model fails split requirements, not splitting") + + self.open_output_file(path) + + if self.state is not WriterState.EMPTY: + raise ValueError(f'Expected output file to be empty, got {self.state}') + + assert self.fout is not None + assert len(self.fout) == len(self.tensors) + assert len(self.kv_data) == 1 + + self.add_shard_kv_data() + + for fout, tensors, kv_data in zip(self.fout, self.tensors, self.kv_data): + fout.write(self._pack(" None: + if self.state is not WriterState.HEADER: + raise ValueError(f'Expected output file to contain the header, got {self.state}') + 
assert self.fout is not None + + for fout, kv_data in zip(self.fout, self.kv_data): + kv_bytes = bytearray() + + for key, val in kv_data.items(): + kv_bytes += self._pack_val(key, GGUFValueType.STRING, add_vtype=False) + kv_bytes += self._pack_val(val.value, val.type, add_vtype=True) + + fout.write(kv_bytes) + + self.flush() + self.state = WriterState.KV_DATA + + def write_ti_data_to_file(self) -> None: + if self.state is not WriterState.KV_DATA: + raise ValueError(f'Expected output file to contain KV data, got {self.state}') + assert self.fout is not None + + for fout, tensors in zip(self.fout, self.tensors): + ti_data = bytearray() + offset_tensor = 0 + + for name, ti in tensors.items(): + ti_data += self._pack_val(name, GGUFValueType.STRING, add_vtype=False) + n_dims = len(ti.shape) + ti_data += self._pack("I", n_dims) + for j in range(n_dims): + ti_data += self._pack("Q", ti.shape[n_dims - 1 - j]) + ti_data += self._pack("I", ti.dtype) + ti_data += self._pack("Q", offset_tensor) + offset_tensor += GGUFWriter.ggml_pad(ti.nbytes, self.data_alignment) + + fout.write(ti_data) + fout.flush() + self.state = WriterState.TI_DATA + + def add_key_value(self, key: str, val: Any, vtype: GGUFValueType) -> None: + if any(key in kv_data for kv_data in self.kv_data): + raise ValueError(f'Duplicated key name {key!r}') + + self.kv_data[0][key] = GGUFValue(value=val, type=vtype) + + def add_uint8(self, key: str, val: int) -> None: + self.add_key_value(key,val, GGUFValueType.UINT8) + + def add_int8(self, key: str, val: int) -> None: + self.add_key_value(key, val, GGUFValueType.INT8) + + def add_uint16(self, key: str, val: int) -> None: + self.add_key_value(key, val, GGUFValueType.UINT16) + + def add_int16(self, key: str, val: int) -> None: + self.add_key_value(key, val, GGUFValueType.INT16) + + def add_uint32(self, key: str, val: int) -> None: + self.add_key_value(key, val, GGUFValueType.UINT32) + + def add_int32(self, key: str, val: int) -> None: + self.add_key_value(key, val, GGUFValueType.INT32) + + def add_float32(self, key: str, val: float) -> None: + self.add_key_value(key, val, GGUFValueType.FLOAT32) + + def add_uint64(self, key: str, val: int) -> None: + self.add_key_value(key, val, GGUFValueType.UINT64) + + def add_int64(self, key: str, val: int) -> None: + self.add_key_value(key, val, GGUFValueType.INT64) + + def add_float64(self, key: str, val: float) -> None: + self.add_key_value(key, val, GGUFValueType.FLOAT64) + + def add_bool(self, key: str, val: bool) -> None: + self.add_key_value(key, val, GGUFValueType.BOOL) + + def add_string(self, key: str, val: str) -> None: + if not val: + return + self.add_key_value(key, val, GGUFValueType.STRING) + + def add_array(self, key: str, val: Sequence[Any]) -> None: + if len(val) == 0: + return + self.add_key_value(key, val, GGUFValueType.ARRAY) + + @staticmethod + def ggml_pad(x: int, n: int) -> int: + return ((x + n - 1) // n) * n + + def add_tensor_info( + self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype, + tensor_nbytes: int, raw_dtype: GGMLQuantizationType | None = None, + ) -> None: + if self.state is not WriterState.NO_FILE: + raise ValueError(f'Expected output file to be not yet opened, got {self.state}') + + if any(name in tensors for tensors in self.tensors): + raise ValueError(f'Duplicated tensor name {name!r}') + + if raw_dtype is None: + if tensor_dtype == np.float16: + dtype = GGMLQuantizationType.F16 + elif tensor_dtype == np.float32: + dtype = GGMLQuantizationType.F32 + elif tensor_dtype == np.float64: + dtype = 
GGMLQuantizationType.F64 + elif tensor_dtype == np.int8: + dtype = GGMLQuantizationType.I8 + elif tensor_dtype == np.int16: + dtype = GGMLQuantizationType.I16 + elif tensor_dtype == np.int32: + dtype = GGMLQuantizationType.I32 + elif tensor_dtype == np.int64: + dtype = GGMLQuantizationType.I64 + else: + raise ValueError("Only F16, F32, F64, I8, I16, I32, I64 tensors are supported for now") + else: + dtype = raw_dtype + if tensor_dtype == np.uint8: + tensor_shape = quant_shape_from_byte_shape(tensor_shape, raw_dtype) + + # make sure there is at least one tensor before splitting + if len(self.tensors[-1]) > 0: + if ( # split when over tensor limit + self.split_max_tensors != 0 + and len(self.tensors[-1]) >= self.split_max_tensors + ) or ( # split when over size limit + self.split_max_size != 0 + and sum(ti.nbytes for ti in self.tensors[-1].values()) + tensor_nbytes > self.split_max_size + ): + self.tensors.append({}) + + self.tensors[-1][name] = TensorInfo(shape=tensor_shape, dtype=dtype, nbytes=tensor_nbytes) + + def add_tensor( + self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None, + raw_dtype: GGMLQuantizationType | None = None, + ) -> None: + if self.endianess == GGUFEndian.BIG: + tensor.byteswap(inplace=True) + if self.use_temp_file and self.temp_file is None: + fp = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256 * 1024 * 1024) + fp.seek(0) + self.temp_file = fp + + shape: Sequence[int] = raw_shape if raw_shape is not None else tensor.shape + self.add_tensor_info(name, shape, tensor.dtype, tensor.nbytes, raw_dtype=raw_dtype) + + if self.temp_file is None: + self.tensors[-1][name].tensor = tensor + return + + tensor.tofile(self.temp_file) + self.write_padding(self.temp_file, tensor.nbytes) + + def write_padding(self, fp: IO[bytes], n: int, align: int | None = None) -> None: + pad = GGUFWriter.ggml_pad(n, align if align is not None else self.data_alignment) - n + if pad != 0: + fp.write(bytes([0] * pad)) + + def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None: + if self.state is not WriterState.TI_DATA and self.state is not WriterState.WEIGHTS: + raise ValueError(f'Expected output file to contain tensor info or weights, got {self.state}') + assert self.fout is not None + + if self.endianess == GGUFEndian.BIG: + tensor.byteswap(inplace=True) + + file_id = -1 + for i, tensors in enumerate(self.tensors): + if len(tensors) > 0: + file_id = i + break + + fout = self.fout[file_id] + + # pop the first tensor info + # TODO: cleaner way to get the first key + first_tensor_name = [name for name, _ in zip(self.tensors[file_id].keys(), range(1))][0] + ti = self.tensors[file_id].pop(first_tensor_name) + assert ti.nbytes == tensor.nbytes + + self.write_padding(fout, fout.tell()) + tensor.tofile(fout) + self.write_padding(fout, tensor.nbytes) + + self.state = WriterState.WEIGHTS + + def write_tensors_to_file(self, *, progress: bool = False) -> None: + self.write_ti_data_to_file() + + assert self.fout is not None + + for fout in self.fout: + self.write_padding(fout, fout.tell()) + + if self.temp_file is None: + shard_bar = None + bar = None + + if progress: + from tqdm import tqdm + + total_bytes = sum(ti.nbytes for t in self.tensors for ti in t.values()) + + if len(self.fout) > 1: + shard_bar = tqdm(desc=f"Shard (0/{len(self.fout)})", total=None, unit="byte", unit_scale=True) + bar = tqdm(desc="Writing", total=total_bytes, unit="byte", unit_scale=True) + + for i, (fout, tensors) in enumerate(zip(self.fout, self.tensors)): + if shard_bar is not 
None: + shard_bar.set_description(f"Shard ({i + 1}/{len(self.fout)})") + total = sum(ti.nbytes for ti in tensors.values()) + shard_bar.reset(total=(total if total > 0 else None)) + + # relying on the fact that Python dicts preserve insertion order (since 3.7) + for ti in tensors.values(): + assert ti.tensor is not None # can only iterate once over the tensors + assert ti.tensor.nbytes == ti.nbytes + ti.tensor.tofile(fout) + if shard_bar is not None: + shard_bar.update(ti.nbytes) + if bar is not None: + bar.update(ti.nbytes) + self.write_padding(fout, ti.nbytes) + ti.tensor = None + else: + self.temp_file.seek(0) + + shutil.copyfileobj(self.temp_file, self.fout[0 if not self.small_first_shard else 1]) + self.flush() + self.temp_file.close() + + self.state = WriterState.WEIGHTS + + def flush(self) -> None: + assert self.fout is not None + for fout in self.fout: + fout.flush() + + def close(self) -> None: + if self.fout is not None: + for fout in self.fout: + fout.close() + self.fout = None + + def add_type(self, type_name: str) -> None: + self.add_string(Keys.General.TYPE, type_name) + + def add_architecture(self) -> None: + self.add_string(Keys.General.ARCHITECTURE, self.arch) + + def add_quantization_version(self, quantization_version: int) -> None: + self.add_uint32(Keys.General.QUANTIZATION_VERSION, quantization_version) + + def add_custom_alignment(self, alignment: int) -> None: + self.data_alignment = alignment + self.add_uint32(Keys.General.ALIGNMENT, alignment) + + def add_file_type(self, ftype: int) -> None: + self.add_uint32(Keys.General.FILE_TYPE, ftype) + + def add_name(self, name: str) -> None: + self.add_string(Keys.General.NAME, name) + + def add_author(self, author: str) -> None: + self.add_string(Keys.General.AUTHOR, author) + + def add_version(self, version: str) -> None: + self.add_string(Keys.General.VERSION, version) + + def add_organization(self, organization: str) -> None: + self.add_string(Keys.General.ORGANIZATION, organization) + + def add_finetune(self, finetune: str) -> None: + self.add_string(Keys.General.FINETUNE, finetune) + + def add_basename(self, basename: str) -> None: + self.add_string(Keys.General.BASENAME, basename) + + def add_description(self, description: str) -> None: + self.add_string(Keys.General.DESCRIPTION, description) + + def add_quantized_by(self, quantized: str) -> None: + self.add_string(Keys.General.QUANTIZED_BY, quantized) + + def add_size_label(self, size_label: str) -> None: + self.add_string(Keys.General.SIZE_LABEL, size_label) + + def add_license(self, license: str) -> None: + self.add_string(Keys.General.LICENSE, license) + + def add_license_name(self, license: str) -> None: + self.add_string(Keys.General.LICENSE_NAME, license) + + def add_license_link(self, license: str) -> None: + self.add_string(Keys.General.LICENSE_LINK, license) + + def add_url(self, url: str) -> None: + self.add_string(Keys.General.URL, url) + + def add_doi(self, doi: str) -> None: + self.add_string(Keys.General.DOI, doi) + + def add_uuid(self, uuid: str) -> None: + self.add_string(Keys.General.UUID, uuid) + + def add_repo_url(self, repo_url: str) -> None: + self.add_string(Keys.General.REPO_URL, repo_url) + + def add_source_url(self, url: str) -> None: + self.add_string(Keys.General.SOURCE_URL, url) + + def add_source_doi(self, doi: str) -> None: + self.add_string(Keys.General.SOURCE_DOI, doi) + + def add_source_uuid(self, uuid: str) -> None: + self.add_string(Keys.General.SOURCE_UUID, uuid) + + def add_source_repo_url(self, repo_url: str) -> None: + 
self.add_string(Keys.General.SOURCE_REPO_URL, repo_url) + + def add_base_model_count(self, source_count: int) -> None: + self.add_uint32(Keys.General.BASE_MODEL_COUNT, source_count) + + def add_base_model_name(self, source_id: int, name: str) -> None: + self.add_string(Keys.General.BASE_MODEL_NAME.format(id=source_id), name) + + def add_base_model_author(self, source_id: int, author: str) -> None: + self.add_string(Keys.General.BASE_MODEL_AUTHOR.format(id=source_id), author) + + def add_base_model_version(self, source_id: int, version: str) -> None: + self.add_string(Keys.General.BASE_MODEL_VERSION.format(id=source_id), version) + + def add_base_model_organization(self, source_id: int, organization: str) -> None: + self.add_string(Keys.General.BASE_MODEL_ORGANIZATION.format(id=source_id), organization) + + def add_base_model_url(self, source_id: int, url: str) -> None: + self.add_string(Keys.General.BASE_MODEL_URL.format(id=source_id), url) + + def add_base_model_doi(self, source_id: int, doi: str) -> None: + self.add_string(Keys.General.BASE_MODEL_DOI.format(id=source_id), doi) + + def add_base_model_uuid(self, source_id: int, uuid: str) -> None: + self.add_string(Keys.General.BASE_MODEL_UUID.format(id=source_id), uuid) + + def add_base_model_repo_url(self, source_id: int, repo_url: str) -> None: + self.add_string(Keys.General.BASE_MODEL_REPO_URL.format(id=source_id), repo_url) + + def add_tags(self, tags: Sequence[str]) -> None: + self.add_array(Keys.General.TAGS, tags) + + def add_languages(self, languages: Sequence[str]) -> None: + self.add_array(Keys.General.LANGUAGES, languages) + + def add_datasets(self, datasets: Sequence[str]) -> None: + self.add_array(Keys.General.DATASETS, datasets) + + def add_tensor_data_layout(self, layout: str) -> None: + self.add_string(Keys.LLM.TENSOR_DATA_LAYOUT.format(arch=self.arch), layout) + + def add_vocab_size(self, size: int) -> None: + self.add_uint32(Keys.LLM.VOCAB_SIZE.format(arch=self.arch), size) + + def add_context_length(self, length: int) -> None: + self.add_uint32(Keys.LLM.CONTEXT_LENGTH.format(arch=self.arch), length) + + def add_embedding_length(self, length: int) -> None: + self.add_uint32(Keys.LLM.EMBEDDING_LENGTH.format(arch=self.arch), length) + + def add_block_count(self, length: int) -> None: + self.add_uint32(Keys.LLM.BLOCK_COUNT.format(arch=self.arch), length) + + def add_leading_dense_block_count(self, length: int) -> None: + self.add_uint32(Keys.LLM.LEADING_DENSE_BLOCK_COUNT.format(arch=self.arch), length) + + def add_feed_forward_length(self, length: int | Sequence[int]) -> None: + if isinstance(length, int): + self.add_uint32(Keys.LLM.FEED_FORWARD_LENGTH.format(arch=self.arch), length) + else: + self.add_array(Keys.LLM.FEED_FORWARD_LENGTH.format(arch=self.arch), length) + + def add_expert_feed_forward_length(self, length: int) -> None: + self.add_uint32(Keys.LLM.EXPERT_FEED_FORWARD_LENGTH.format(arch=self.arch), length) + + def add_expert_shared_feed_forward_length(self, length: int) -> None: + self.add_uint32(Keys.LLM.EXPERT_SHARED_FEED_FORWARD_LENGTH.format(arch=self.arch), length) + + def add_parallel_residual(self, use: bool) -> None: + self.add_bool(Keys.LLM.USE_PARALLEL_RESIDUAL.format(arch=self.arch), use) + + def add_decoder_start_token_id(self, id: int) -> None: + self.add_uint32(Keys.LLM.DECODER_START_TOKEN_ID.format(arch=self.arch), id) + + def add_head_count(self, count: int | Sequence[int]) -> None: + if isinstance(count, int): + self.add_uint32(Keys.Attention.HEAD_COUNT.format(arch=self.arch), count) + else: + 
self.add_array(Keys.Attention.HEAD_COUNT.format(arch=self.arch), count) + + def add_head_count_kv(self, count: int | Sequence[int]) -> None: + if isinstance(count, int): + self.add_uint32(Keys.Attention.HEAD_COUNT_KV.format(arch=self.arch), count) + else: + self.add_array(Keys.Attention.HEAD_COUNT_KV.format(arch=self.arch), count) + + def add_key_length(self, length: int) -> None: + self.add_uint32(Keys.Attention.KEY_LENGTH.format(arch=self.arch), length) + + def add_value_length(self, length: int) -> None: + self.add_uint32(Keys.Attention.VALUE_LENGTH.format(arch=self.arch), length) + + def add_max_alibi_bias(self, bias: float) -> None: + self.add_float32(Keys.Attention.MAX_ALIBI_BIAS.format(arch=self.arch), bias) + + def add_clamp_kqv(self, value: float) -> None: + self.add_float32(Keys.Attention.CLAMP_KQV.format(arch=self.arch), value) + + def add_logit_scale(self, value: float) -> None: + self.add_float32(Keys.LLM.LOGIT_SCALE.format(arch=self.arch), value) + + def add_attn_logit_softcapping(self, value: float) -> None: + self.add_float32(Keys.LLM.ATTN_LOGIT_SOFTCAPPING.format(arch=self.arch), value) + + def add_final_logit_softcapping(self, value: float) -> None: + self.add_float32(Keys.LLM.FINAL_LOGIT_SOFTCAPPING.format(arch=self.arch), value) + + def add_expert_count(self, count: int) -> None: + self.add_uint32(Keys.LLM.EXPERT_COUNT.format(arch=self.arch), count) + + def add_expert_used_count(self, count: int) -> None: + self.add_uint32(Keys.LLM.EXPERT_USED_COUNT.format(arch=self.arch), count) + + def add_expert_shared_count(self, count: int) -> None: + self.add_uint32(Keys.LLM.EXPERT_SHARED_COUNT.format(arch=self.arch), count) + + def add_expert_weights_scale(self, value: float) -> None: + self.add_float32(Keys.LLM.EXPERT_WEIGHTS_SCALE.format(arch=self.arch), value) + + def add_layer_norm_eps(self, value: float) -> None: + self.add_float32(Keys.Attention.LAYERNORM_EPS.format(arch=self.arch), value) + + def add_layer_norm_rms_eps(self, value: float) -> None: + self.add_float32(Keys.Attention.LAYERNORM_RMS_EPS.format(arch=self.arch), value) + + def add_causal_attention(self, value: bool) -> None: + self.add_bool(Keys.Attention.CAUSAL.format(arch=self.arch), value) + + def add_q_lora_rank(self, length: int) -> None: + self.add_uint32(Keys.Attention.Q_LORA_RANK.format(arch=self.arch), length) + + def add_kv_lora_rank(self, length: int) -> None: + self.add_uint32(Keys.Attention.KV_LORA_RANK.format(arch=self.arch), length) + + def add_relative_attn_buckets_count(self, value: int) -> None: + self.add_uint32(Keys.Attention.REL_BUCKETS_COUNT.format(arch=self.arch), value) + + def add_sliding_window(self, value: int) -> None: + self.add_uint32(Keys.Attention.SLIDING_WINDOW.format(arch=self.arch), value) + + def add_pooling_type(self, value: PoolingType) -> None: + self.add_uint32(Keys.LLM.POOLING_TYPE.format(arch=self.arch), value.value) + + def add_rope_dimension_count(self, count: int) -> None: + self.add_uint32(Keys.Rope.DIMENSION_COUNT.format(arch=self.arch), count) + + def add_rope_freq_base(self, value: float) -> None: + self.add_float32(Keys.Rope.FREQ_BASE.format(arch=self.arch), value) + + def add_rope_scaling_type(self, value: RopeScalingType) -> None: + self.add_string(Keys.Rope.SCALING_TYPE.format(arch=self.arch), value.value) + + def add_rope_scaling_factor(self, value: float) -> None: + self.add_float32(Keys.Rope.SCALING_FACTOR.format(arch=self.arch), value) + + def add_rope_scaling_attn_factors(self, value: float) -> None: + 
self.add_float32(Keys.Rope.SCALING_ATTN_FACTOR.format(arch=self.arch), value) + + def add_rope_scaling_orig_ctx_len(self, value: int) -> None: + self.add_uint32(Keys.Rope.SCALING_ORIG_CTX_LEN.format(arch=self.arch), value) + + def add_rope_scaling_finetuned(self, value: bool) -> None: + self.add_bool(Keys.Rope.SCALING_FINETUNED.format(arch=self.arch), value) + + def add_rope_scaling_yarn_log_mul(self, value: float) -> None: + self.add_float32(Keys.Rope.SCALING_YARN_LOG_MUL.format(arch=self.arch), value) + + def add_ssm_conv_kernel(self, value: int) -> None: + self.add_uint32(Keys.SSM.CONV_KERNEL.format(arch=self.arch), value) + + def add_ssm_inner_size(self, value: int) -> None: + self.add_uint32(Keys.SSM.INNER_SIZE.format(arch=self.arch), value) + + def add_ssm_state_size(self, value: int) -> None: + self.add_uint32(Keys.SSM.STATE_SIZE.format(arch=self.arch), value) + + def add_ssm_time_step_rank(self, value: int) -> None: + self.add_uint32(Keys.SSM.TIME_STEP_RANK.format(arch=self.arch), value) + + def add_ssm_dt_b_c_rms(self, value: bool) -> None: + self.add_bool(Keys.SSM.DT_B_C_RMS.format(arch=self.arch), value) + + def add_tokenizer_model(self, model: str) -> None: + self.add_string(Keys.Tokenizer.MODEL, model) + + def add_tokenizer_pre(self, pre: str) -> None: + self.add_string(Keys.Tokenizer.PRE, pre) + + def add_token_list(self, tokens: Sequence[str] | Sequence[bytes] | Sequence[bytearray]) -> None: + self.add_array(Keys.Tokenizer.LIST, tokens) + + def add_token_merges(self, merges: Sequence[str] | Sequence[bytes] | Sequence[bytearray]) -> None: + self.add_array(Keys.Tokenizer.MERGES, merges) + + def add_token_types(self, types: Sequence[TokenType] | Sequence[int]) -> None: + self.add_array(Keys.Tokenizer.TOKEN_TYPE, types) + + def add_token_type_count(self, value: int) -> None: + self.add_uint32(Keys.Tokenizer.TOKEN_TYPE_COUNT, value) + + def add_token_scores(self, scores: Sequence[float]) -> None: + self.add_array(Keys.Tokenizer.SCORES, scores) + + def add_bos_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.BOS_ID, id) + + def add_eos_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.EOS_ID, id) + + def add_unk_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.UNK_ID, id) + + def add_sep_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.SEP_ID, id) + + def add_pad_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.PAD_ID, id) + + def add_cls_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.CLS_ID, id) + + def add_mask_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.MASK_ID, id) + + def add_add_bos_token(self, value: bool) -> None: + self.add_bool(Keys.Tokenizer.ADD_BOS, value) + + def add_add_eos_token(self, value: bool) -> None: + self.add_bool(Keys.Tokenizer.ADD_EOS, value) + + def add_add_space_prefix(self, value: bool) -> None: + self.add_bool(Keys.Tokenizer.ADD_PREFIX, value) + + def add_remove_extra_whitespaces(self, value: bool) -> None: + self.add_bool(Keys.Tokenizer.REMOVE_EXTRA_WS, value) + + def add_precompiled_charsmap(self, charsmap: Sequence[bytes]) -> None: + self.add_array(Keys.Tokenizer.PRECOMPILED_CHARSMAP, charsmap) + + def add_chat_template(self, value: str | Sequence[Mapping[str, str]]) -> None: + if not isinstance(value, str): + template_default = None + template_names = set() + + for choice in value: + name = choice.get('name', '') + template = choice.get('template') + + # Allowing non-alphanumerical characters in template name is probably not 
a good idea, so filter it + name = ''.join((c if c in ascii_letters + digits else '_' for c in name)) + + if name and template is not None: + if name == 'default': + template_default = template + else: + template_names.add(name) + self.add_string(Keys.Tokenizer.CHAT_TEMPLATE_N.format(name=name), template) + + if template_names: + self.add_array(Keys.Tokenizer.CHAT_TEMPLATES, list(template_names)) + + if template_default is None: + return + + value = template_default + + self.add_string(Keys.Tokenizer.CHAT_TEMPLATE, value) + + def add_prefix_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.PREFIX_ID, id) + + def add_suffix_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.SUFFIX_ID, id) + + def add_middle_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.MIDDLE_ID, id) + + def add_eot_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.EOT_ID, id) + + def add_eom_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.EOM_ID, id) + + def _pack(self, fmt: str, value: Any, skip_pack_prefix: bool = False) -> bytes: + pack_prefix = '' + if not skip_pack_prefix: + pack_prefix = '<' if self.endianess == GGUFEndian.LITTLE else '>' + return struct.pack(f'{pack_prefix}{fmt}', value) + + def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool) -> bytes: + kv_data = bytearray() + + if add_vtype: + kv_data += self._pack("I", vtype) + + pack_fmt = self._simple_value_packing.get(vtype) + if pack_fmt is not None: + kv_data += self._pack(pack_fmt, val, skip_pack_prefix = vtype == GGUFValueType.BOOL) + elif vtype == GGUFValueType.STRING: + encoded_val = val.encode("utf-8") if isinstance(val, str) else val + kv_data += self._pack("Q", len(encoded_val)) + kv_data += encoded_val + elif vtype == GGUFValueType.ARRAY: + + if not isinstance(val, Sequence): + raise ValueError("Invalid GGUF metadata array, expecting sequence") + + if len(val) == 0: + raise ValueError("Invalid GGUF metadata array. 
Empty array") + + if isinstance(val, bytes): + ltype = GGUFValueType.UINT8 + else: + ltype = GGUFValueType.get_type(val[0]) + if not all(GGUFValueType.get_type(i) is ltype for i in val[1:]): + raise ValueError("All items in a GGUF array should be of the same type") + kv_data += self._pack("I", ltype) + kv_data += self._pack("Q", len(val)) + for item in val: + kv_data += self._pack_val(item, ltype, add_vtype=False) + else: + raise ValueError("Invalid GGUF metadata value type or value") + + return kv_data + + @staticmethod + def format_n_bytes_to_str(num: int) -> str: + if num == 0: + return "negligible - metadata only" + fnum = float(num) + for unit in ("", "K", "M", "G"): + if abs(fnum) < 1000.0: + return f"{fnum:3.1f}{unit}" + fnum /= 1000.0 + return f"{fnum:.1f}T - over 1TB, split recommended" diff --git a/.venv/lib/python3.11/site-packages/gguf/lazy.py b/.venv/lib/python3.11/site-packages/gguf/lazy.py new file mode 100644 index 0000000000000000000000000000000000000000..8d4fece2dca86983286a3c0de15ca86578ce4dfa --- /dev/null +++ b/.venv/lib/python3.11/site-packages/gguf/lazy.py @@ -0,0 +1,213 @@ +from __future__ import annotations +from abc import ABC, ABCMeta, abstractmethod + +import logging +from typing import Any, Callable + +import numpy as np +from numpy.typing import DTypeLike + + +logger = logging.getLogger(__name__) + + +class LazyMeta(ABCMeta): + + def __new__(cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs): + def __getattr__(self, name: str) -> Any: + meta_attr = getattr(self._meta, name) + if callable(meta_attr): + return type(self)._wrap_fn( + (lambda s, *args, **kwargs: getattr(s, name)(*args, **kwargs)), + use_self=self, + ) + elif isinstance(meta_attr, self._tensor_type): + # e.g. self.T with torch.Tensor should still be wrapped + return type(self)._wrap_fn(lambda s: getattr(s, name))(self) + else: + # no need to wrap non-tensor properties, + # and they likely don't depend on the actual contents of the tensor + return meta_attr + + namespace["__getattr__"] = __getattr__ + + # need to make a builder for the wrapped wrapper to copy the name, + # or else it fails with very cryptic error messages, + # because somehow the same string would end up in every closures + def mk_wrap(op_name: str, *, meta_noop: bool = False): + # need to wrap the wrapper to get self + def wrapped_special_op(self, *args, **kwargs): + return type(self)._wrap_fn( + getattr(type(self)._tensor_type, op_name), + meta_noop=meta_noop, + )(self, *args, **kwargs) + return wrapped_special_op + + # special methods bypass __getattr__, so they need to be added manually + # ref: https://docs.python.org/3/reference/datamodel.html#special-lookup + # NOTE: doing this from a metaclass is very convenient + # TODO: make this even more comprehensive + for binary_op in ( + "lt", "le", "eq", "ne", "ge", "gt", "not" + "abs", "add", "and", "floordiv", "invert", "lshift", "mod", "mul", "matmul", + "neg", "or", "pos", "pow", "rshift", "sub", "truediv", "xor", + "iadd", "iand", "ifloordiv", "ilshift", "imod", "imul", "ior", "irshift", "isub", "ixor", + "radd", "rand", "rfloordiv", "rmul", "ror", "rpow", "rsub", "rtruediv", "rxor", + ): + attr_name = f"__{binary_op}__" + # the result of these operators usually has the same shape and dtype as the input, + # so evaluation on the meta tensor can be skipped. 
+ namespace[attr_name] = mk_wrap(attr_name, meta_noop=True) + + for special_op in ( + "getitem", "setitem", "len", + ): + attr_name = f"__{special_op}__" + namespace[attr_name] = mk_wrap(attr_name, meta_noop=False) + + return super().__new__(cls, name, bases, namespace, **kwargs) + + +# Tree of lazy tensors +class LazyBase(ABC, metaclass=LazyMeta): + _tensor_type: type + _meta: Any + _data: Any | None + _args: tuple + _kwargs: dict[str, Any] + _func: Callable[[Any], Any] | None + + def __init__(self, *, meta: Any, data: Any | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, func: Callable[[Any], Any] | None = None): + super().__init__() + self._meta = meta + self._data = data + self._args = args + self._kwargs = kwargs if kwargs is not None else {} + self._func = func + assert self._func is not None or self._data is not None + + def __init_subclass__(cls) -> None: + if "_tensor_type" not in cls.__dict__: + raise TypeError(f"property '_tensor_type' must be defined for {cls!r}") + return super().__init_subclass__() + + @staticmethod + def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any: + # TODO: dict and set + if isinstance(o, (list, tuple)): + L = [] + for item in o: + L.append(LazyBase._recurse_apply(item, fn)) + if isinstance(o, tuple): + L = tuple(L) + return L + elif isinstance(o, LazyBase): + return fn(o) + else: + return o + + @classmethod + def _wrap_fn(cls, fn: Callable, *, use_self: LazyBase | None = None, meta_noop: bool | DTypeLike | tuple[DTypeLike, Callable[[tuple[int, ...]], tuple[int, ...]]] = False) -> Callable[[Any], Any]: + def wrapped_fn(*args, **kwargs): + if kwargs is None: + kwargs = {} + args = ((use_self,) if use_self is not None else ()) + args + + meta_args = LazyBase._recurse_apply(args, lambda t: t._meta) + # TODO: maybe handle tensors in kwargs too + + if isinstance(meta_noop, bool) and not meta_noop: + try: + res = fn(*meta_args, **kwargs) + except NotImplementedError: + # running some operations on PyTorch's Meta tensors can cause this exception + res = None + else: + # some operators don't need to actually run on the meta tensors + assert len(args) > 0 + res = args[0] + assert isinstance(res, cls) + res = res._meta + # allow operations to override the dtype and shape + if meta_noop is not True: + if isinstance(meta_noop, tuple): + dtype, shape = meta_noop + assert callable(shape) + res = cls.meta_with_dtype_and_shape(dtype, shape(res.shape)) + else: + res = cls.meta_with_dtype_and_shape(meta_noop, res.shape) + + if isinstance(res, cls._tensor_type): + return cls(meta=cls.eager_to_meta(res), args=args, kwargs=kwargs, func=fn) + else: + del res # not needed + # non-tensor return likely relies on the contents of the args + # (e.g. 
the result of torch.equal) + eager_args = cls.to_eager(args) + return fn(*eager_args, **kwargs) + return wrapped_fn + + @classmethod + def to_eager(cls, t: Any) -> Any: + def simple_to_eager(_t: LazyBase) -> Any: + if _t._data is not None: + return _t._data + + # NOTE: there's a recursion limit in Python (usually 1000) + + assert _t._func is not None + _t._args = cls._recurse_apply(_t._args, simple_to_eager) + _t._data = _t._func(*_t._args, **_t._kwargs) + # sanity check + assert _t._data is not None + assert _t._data.dtype == _t._meta.dtype + assert _t._data.shape == _t._meta.shape + + return _t._data + + # recurse into lists and/or tuples, keeping their structure + return cls._recurse_apply(t, simple_to_eager) + + @classmethod + def eager_to_meta(cls, t: Any) -> Any: + return cls.meta_with_dtype_and_shape(t.dtype, t.shape) + + # must be overridden, meta tensor init is backend-specific + @classmethod + @abstractmethod + def meta_with_dtype_and_shape(cls, dtype: Any, shape: Any) -> Any: pass + + @classmethod + def from_eager(cls, t: Any) -> Any: + if type(t) is cls: + # already lazy + return t + elif isinstance(t, cls._tensor_type): + return cls(meta=cls.eager_to_meta(t), data=t) + else: + raise TypeError(f"{type(t)!r} is not compatible with {cls._tensor_type!r}") + + +class LazyNumpyTensor(LazyBase): + _tensor_type = np.ndarray + + shape: tuple[int, ...] # Makes the type checker happy in quants.py + + @classmethod + def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: tuple[int, ...]) -> np.ndarray[Any, Any]: + # The initial idea was to use np.nan as the fill value, + # but non-float types like np.int16 can't use that. + # So zero it is. + cheat = np.zeros(1, dtype) + return np.lib.stride_tricks.as_strided(cheat, shape, (0 for _ in shape)) + + def astype(self, dtype, *args, **kwargs): + meta = type(self).meta_with_dtype_and_shape(dtype, self._meta.shape) + full_args = (self, dtype,) + args + return type(self)(meta=meta, args=full_args, kwargs=kwargs, func=(lambda a, *args, **kwargs: a.astype(*args, **kwargs))) + + def tofile(self, *args, **kwargs): + eager = LazyNumpyTensor.to_eager(self) + return eager.tofile(*args, **kwargs) + + # TODO: __array_function__ diff --git a/.venv/lib/python3.11/site-packages/gguf/metadata.py b/.venv/lib/python3.11/site-packages/gguf/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..db318542a279b606e95ff51c82fd77615fce30b8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/gguf/metadata.py @@ -0,0 +1,510 @@ +from __future__ import annotations + +import re +import json +import yaml +import logging +from pathlib import Path +from typing import Any, Literal, Optional +from dataclasses import dataclass + +from .constants import Keys + +import gguf + +logger = logging.getLogger("metadata") + + +@dataclass +class Metadata: + # Authorship Metadata to be written to GGUF KV Store + name: Optional[str] = None + author: Optional[str] = None + version: Optional[str] = None + organization: Optional[str] = None + finetune: Optional[str] = None + basename: Optional[str] = None + description: Optional[str] = None + quantized_by: Optional[str] = None + size_label: Optional[str] = None + url: Optional[str] = None + doi: Optional[str] = None + uuid: Optional[str] = None + repo_url: Optional[str] = None + source_url: Optional[str] = None + source_doi: Optional[str] = None + source_uuid: Optional[str] = None + source_repo_url: Optional[str] = None + license: Optional[str] = None + license_name: Optional[str] = None + license_link: 
Optional[str] = None + base_models: Optional[list[dict]] = None + tags: Optional[list[str]] = None + languages: Optional[list[str]] = None + datasets: Optional[list[str]] = None + + @staticmethod + def load(metadata_override_path: Optional[Path] = None, model_path: Optional[Path] = None, model_name: Optional[str] = None, total_params: int = 0) -> Metadata: + # This grabs as many contextual authorship metadata as possible from the model repository + # making any conversion as required to match the gguf kv store metadata format + # as well as giving users the ability to override any authorship metadata that may be incorrect + + # Create a new Metadata instance + metadata = Metadata() + + model_card = Metadata.load_model_card(model_path) + hf_params = Metadata.load_hf_parameters(model_path) + # TODO: load adapter_config.json when possible, it usually contains the base model of the LoRA adapter + + # heuristics + metadata = Metadata.apply_metadata_heuristic(metadata, model_card, hf_params, model_path, total_params) + + # Metadata Override File Provided + # This is based on LLM_KV_NAMES mapping in llama.cpp + metadata_override = Metadata.load_metadata_override(metadata_override_path) + + metadata.name = metadata_override.get(Keys.General.NAME, metadata.name) + metadata.author = metadata_override.get(Keys.General.AUTHOR, metadata.author) + metadata.version = metadata_override.get(Keys.General.VERSION, metadata.version) + metadata.organization = metadata_override.get(Keys.General.ORGANIZATION, metadata.organization) + + metadata.finetune = metadata_override.get(Keys.General.FINETUNE, metadata.finetune) + metadata.basename = metadata_override.get(Keys.General.BASENAME, metadata.basename) + + metadata.description = metadata_override.get(Keys.General.DESCRIPTION, metadata.description) + metadata.quantized_by = metadata_override.get(Keys.General.QUANTIZED_BY, metadata.quantized_by) + + metadata.size_label = metadata_override.get(Keys.General.SIZE_LABEL, metadata.size_label) + metadata.license_name = metadata_override.get(Keys.General.LICENSE_NAME, metadata.license_name) + metadata.license_link = metadata_override.get(Keys.General.LICENSE_LINK, metadata.license_link) + + metadata.url = metadata_override.get(Keys.General.URL, metadata.url) + metadata.doi = metadata_override.get(Keys.General.DOI, metadata.doi) + metadata.uuid = metadata_override.get(Keys.General.UUID, metadata.uuid) + metadata.repo_url = metadata_override.get(Keys.General.REPO_URL, metadata.repo_url) + + metadata.source_url = metadata_override.get(Keys.General.SOURCE_URL, metadata.source_url) + metadata.source_doi = metadata_override.get(Keys.General.SOURCE_DOI, metadata.source_doi) + metadata.source_uuid = metadata_override.get(Keys.General.SOURCE_UUID, metadata.source_uuid) + metadata.source_repo_url = metadata_override.get(Keys.General.SOURCE_REPO_URL, metadata.source_repo_url) + + # Base Models is received here as an array of models + metadata.base_models = metadata_override.get("general.base_models", metadata.base_models) + + metadata.tags = metadata_override.get(Keys.General.TAGS, metadata.tags) + metadata.languages = metadata_override.get(Keys.General.LANGUAGES, metadata.languages) + metadata.datasets = metadata_override.get(Keys.General.DATASETS, metadata.datasets) + + # Direct Metadata Override (via direct cli argument) + if model_name is not None: + metadata.name = model_name + + return metadata + + @staticmethod + def load_metadata_override(metadata_override_path: Optional[Path] = None) -> dict[str, Any]: + if 
metadata_override_path is None or not metadata_override_path.is_file(): + return {} + + with open(metadata_override_path, "r", encoding="utf-8") as f: + return json.load(f) + + @staticmethod + def load_model_card(model_path: Optional[Path] = None) -> dict[str, Any]: + if model_path is None or not model_path.is_dir(): + return {} + + model_card_path = model_path / "README.md" + + if not model_card_path.is_file(): + return {} + + # The model card metadata is assumed to always be in YAML + # ref: https://github.com/huggingface/transformers/blob/a5c642fe7a1f25d3bdcd76991443ba6ff7ee34b2/src/transformers/modelcard.py#L468-L473 + with open(model_card_path, "r", encoding="utf-8") as f: + if f.readline() == "---\n": + raw = f.read().partition("---\n")[0] + data = yaml.safe_load(raw) + if isinstance(data, dict): + return data + else: + logger.error(f"while reading YAML model card frontmatter, data is {type(data)} instead of dict") + return {} + else: + return {} + + @staticmethod + def load_hf_parameters(model_path: Optional[Path] = None) -> dict[str, Any]: + if model_path is None or not model_path.is_dir(): + return {} + + config_path = model_path / "config.json" + + if not config_path.is_file(): + return {} + + with open(config_path, "r", encoding="utf-8") as f: + return json.load(f) + + @staticmethod + def id_to_title(string): + # Convert capitalization into title form unless acronym or version number + return ' '.join([w.title() if w.islower() and not re.match(r'^(v\d+(?:\.\d+)*|\d.*)$', w) else w for w in string.strip().replace('-', ' ').split()]) + + @staticmethod + def get_model_id_components(model_id: Optional[str] = None, total_params: int = 0) -> tuple[str | None, str | None, str | None, str | None, str | None, str | None]: + # Huggingface often stores model id as '<org>/<model name>' + # so let's parse it and apply some heuristics if possible for model name components + + if model_id is None: + # model ID missing + return None, None, None, None, None, None + + if ' ' in model_id: + # model ID is actually a normal human sentence + # which means it's most likely a normal model name only + # not part of the hugging face naming standard, but whatever + return model_id, None, None, None, None, None + + if '/' in model_id: + # model ID (huggingface style) + org_component, model_full_name_component = model_id.split('/', 1) + else: + # model ID but missing org components + org_component, model_full_name_component = None, model_id + + # Check if we erroneously matched against './' or '../' etc... 
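+ # (e.g. a local path like './Mistral-7B' would otherwise leave '.' as the organization component)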
+ if org_component is not None and len(org_component) > 0 and org_component[0] == '.': + org_component = None + + name_parts: list[str] = model_full_name_component.split('-') + + # Remove empty parts + for i in reversed(range(len(name_parts))): + if len(name_parts[i]) == 0: + del name_parts[i] + + name_types: list[ + set[Literal["basename", "size_label", "finetune", "version", "type"]] + ] = [set() for _ in name_parts] + + # Annotate the name + for i, part in enumerate(name_parts): + # Version + if re.fullmatch(r'(v|iter)?\d+([.]\d+)*', part, re.IGNORECASE): + name_types[i].add("version") + # Quant type (should not be there for base models, but still annotated) + elif re.fullmatch(r'i?q\d(_\w)*|b?fp?(16|32)', part, re.IGNORECASE): + name_types[i].add("type") + name_parts[i] = part.upper() + # Model size + elif i > 0 and re.fullmatch(r'(([A]|\d+[x])?\d+([._]\d+)?[KMBT][\d]?|small|mini|medium|large|x?xl)', part, re.IGNORECASE): + part = part.replace("_", ".") + # Handle weird bloom-7b1 notation + if part[-1].isdecimal(): + part = part[:-2] + "." + part[-1] + part[-2] + # Normalize the size suffixes + if len(part) > 1 and part[-2].isdecimal(): + if part[-1] in "kmbt": + part = part[:-1] + part[-1].upper() + if total_params != 0: + try: + label_params = float(part[:-1]) * pow(1000, " KMBT".find(part[-1])) + # Only use it as a size label if it's close or bigger than the model size + # Note that LoRA adapters don't necessarily include all layers, + # so this is why bigger label sizes are accepted. + # Do not use the size label when it's smaller than 1/8 of the model size + if (total_params < 0 and label_params < abs(total_params) // 8) or ( + # Check both directions when the current model isn't a LoRA adapter + total_params > 0 and abs(label_params - total_params) > 7 * total_params // 8 + ): + # Likely a context length + name_types[i].add("finetune") + # Lowercase the size when it's a context length + part = part[:-1] + part[-1].lower() + except ValueError: + # Failed to convert the size label to float, use it anyway + pass + if len(name_types[i]) == 0: + name_types[i].add("size_label") + name_parts[i] = part + # Some easy to recognize finetune names + elif i > 0 and re.fullmatch(r'chat|instruct|vision|lora', part, re.IGNORECASE): + if total_params < 0 and part.lower() == "lora": + # ignore redundant "lora" in the finetune part when the output is a lora adapter + name_types[i].add("type") + else: + name_types[i].add("finetune") + + # Ignore word-based size labels when there is at least a number-based one present + # TODO: should word-based size labels always be removed instead? 
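+ # (e.g. in a hypothetical 'Foo-1.1B-mini', both '1.1B' and 'mini' are annotated as
+ # size labels; the purely alphabetic 'mini' is then dropped in favor of '1.1B')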
+ if any(c.isdecimal() for n, t in zip(name_parts, name_types) if "size_label" in t for c in n): + for n, t in zip(name_parts, name_types): + if "size_label" in t: + if all(c.isalpha() for c in n): + t.remove("size_label") + + at_start = True + # Find the basename through the annotated name + for part, t in zip(name_parts, name_types): + if at_start and ((len(t) == 0 and part[0].isalpha()) or "version" in t): + t.add("basename") + else: + if at_start: + at_start = False + if len(t) == 0: + t.add("finetune") + + # Remove the basename annotation from trailing version + for part, t in zip(reversed(name_parts), reversed(name_types)): + if "basename" in t and len(t) > 1: + t.remove("basename") + else: + break + + basename = "-".join(n for n, t in zip(name_parts, name_types) if "basename" in t) or None + # Deduplicate size labels using order-preserving 'dict' ('set' seems to sort the keys) + size_label = "-".join(dict.fromkeys(s for s, t in zip(name_parts, name_types) if "size_label" in t).keys()) or None + finetune = "-".join(f for f, t in zip(name_parts, name_types) if "finetune" in t) or None + # TODO: should the basename version always be excluded? + # NOTE: multiple finetune versions are joined together + version = "-".join(v for v, t, in zip(name_parts, name_types) if "version" in t and "basename" not in t) or None + + if size_label is None and finetune is None and version is None: + # Too ambiguous, output nothing + basename = None + + return model_full_name_component, org_component, basename, finetune, version, size_label + + @staticmethod + def apply_metadata_heuristic(metadata: Metadata, model_card: Optional[dict] = None, hf_params: Optional[dict] = None, model_path: Optional[Path] = None, total_params: int = 0) -> Metadata: + # Reference Model Card Metadata: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 + + # Model Card Heuristics + ######################## + if model_card is not None: + + def use_model_card_metadata(metadata_key: str, model_card_key: str): + if model_card_key in model_card and getattr(metadata, metadata_key, None) is None: + setattr(metadata, metadata_key, model_card.get(model_card_key)) + + def use_array_model_card_metadata(metadata_key: str, model_card_key: str): + # Note: Will append rather than replace if already exist + tags_value = model_card.get(model_card_key, None) + if tags_value is None: + return + + current_value = getattr(metadata, metadata_key, None) + if current_value is None: + current_value = [] + + if isinstance(tags_value, str): + current_value.append(tags_value) + elif isinstance(tags_value, list): + current_value.extend(tags_value) + + setattr(metadata, metadata_key, current_value) + + # LLAMA.cpp's direct internal convention + # (Definitely not part of hugging face formal/informal standard) + ######################################### + use_model_card_metadata("name", "name") + use_model_card_metadata("author", "author") + use_model_card_metadata("version", "version") + use_model_card_metadata("organization", "organization") + use_model_card_metadata("description", "description") + use_model_card_metadata("finetune", "finetune") + use_model_card_metadata("basename", "basename") + use_model_card_metadata("size_label", "size_label") + use_model_card_metadata("source_url", "url") + use_model_card_metadata("source_doi", "doi") + use_model_card_metadata("source_uuid", "uuid") + use_model_card_metadata("source_repo_url", "repo_url") + + # LLAMA.cpp's huggingface style convention + # (Definitely not part of hugging face 
formal/informal standard... but with model_ appended to match their style) + ########################################### + use_model_card_metadata("name", "model_name") + use_model_card_metadata("author", "model_author") + use_model_card_metadata("version", "model_version") + use_model_card_metadata("organization", "model_organization") + use_model_card_metadata("description", "model_description") + use_model_card_metadata("finetune", "model_finetune") + use_model_card_metadata("basename", "model_basename") + use_model_card_metadata("size_label", "model_size_label") + use_model_card_metadata("source_url", "model_url") + use_model_card_metadata("source_doi", "model_doi") + use_model_card_metadata("source_uuid", "model_uuid") + use_model_card_metadata("source_repo_url", "model_repo_url") + + # Hugging Face Direct Convention + ################################# + + # Not part of huggingface model card standard but notice some model creator using it + # such as TheBloke in 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF' + use_model_card_metadata("name", "model_name") + use_model_card_metadata("author", "model_creator") + use_model_card_metadata("basename", "model_type") + + if "base_model" in model_card: + # This represents the parent models that this is based on + # Example: stabilityai/stable-diffusion-xl-base-1.0. Can also be a list (for merges) + # Example of merges: https://huggingface.co/EmbeddedLLM/Mistral-7B-Merge-14-v0.1/blob/main/README.md + metadata_base_models = [] + base_model_value = model_card.get("base_model", None) + + if base_model_value is not None: + if isinstance(base_model_value, str): + metadata_base_models.append(base_model_value) + elif isinstance(base_model_value, list): + metadata_base_models.extend(base_model_value) + + if metadata.base_models is None: + metadata.base_models = [] + + for model_id in metadata_base_models: + # NOTE: model size of base model is assumed to be similar to the size of the current model + model_full_name_component, org_component, basename, finetune, version, size_label = Metadata.get_model_id_components(model_id, total_params) + base_model = {} + if model_full_name_component is not None: + base_model["name"] = Metadata.id_to_title(model_full_name_component) + if org_component is not None: + base_model["organization"] = Metadata.id_to_title(org_component) + if version is not None: + base_model["version"] = version + if org_component is not None and model_full_name_component is not None: + base_model["repo_url"] = f"https://huggingface.co/{org_component}/{model_full_name_component}" + metadata.base_models.append(base_model) + + use_model_card_metadata("license", "license") + use_model_card_metadata("license_name", "license_name") + use_model_card_metadata("license_link", "license_link") + + use_array_model_card_metadata("tags", "tags") + use_array_model_card_metadata("tags", "pipeline_tag") + + use_array_model_card_metadata("languages", "languages") + use_array_model_card_metadata("languages", "language") + + use_array_model_card_metadata("datasets", "datasets") + use_array_model_card_metadata("datasets", "dataset") + + # Hugging Face Parameter Heuristics + #################################### + + if hf_params is not None: + + hf_name_or_path = hf_params.get("_name_or_path") + if hf_name_or_path is not None and hf_name_or_path.count('/') <= 1: + # Use _name_or_path only if its actually a model name and not some computer path + # e.g. 
'meta-llama/Llama-2-7b-hf' + model_id = hf_name_or_path + model_full_name_component, org_component, basename, finetune, version, size_label = Metadata.get_model_id_components(model_id, total_params) + if metadata.name is None and model_full_name_component is not None: + metadata.name = Metadata.id_to_title(model_full_name_component) + if metadata.organization is None and org_component is not None: + metadata.organization = Metadata.id_to_title(org_component) + if metadata.basename is None and basename is not None: + metadata.basename = basename + if metadata.finetune is None and finetune is not None: + metadata.finetune = finetune + if metadata.version is None and version is not None: + metadata.version = version + if metadata.size_label is None and size_label is not None: + metadata.size_label = size_label + + # Directory Folder Name Fallback Heuristics + ############################################ + if model_path is not None: + model_id = model_path.name + model_full_name_component, org_component, basename, finetune, version, size_label = Metadata.get_model_id_components(model_id, total_params) + if metadata.name is None and model_full_name_component is not None: + metadata.name = Metadata.id_to_title(model_full_name_component) + if metadata.organization is None and org_component is not None: + metadata.organization = Metadata.id_to_title(org_component) + if metadata.basename is None and basename is not None: + metadata.basename = basename + if metadata.finetune is None and finetune is not None: + metadata.finetune = finetune + if metadata.version is None and version is not None: + metadata.version = version + if metadata.size_label is None and size_label is not None: + metadata.size_label = size_label + + return metadata + + def set_gguf_meta_model(self, gguf_writer: gguf.GGUFWriter): + assert self.name is not None + gguf_writer.add_name(self.name) + + if self.author is not None: + gguf_writer.add_author(self.author) + if self.version is not None: + gguf_writer.add_version(self.version) + if self.organization is not None: + gguf_writer.add_organization(self.organization) + + if self.finetune is not None: + gguf_writer.add_finetune(self.finetune) + if self.basename is not None: + gguf_writer.add_basename(self.basename) + + if self.description is not None: + gguf_writer.add_description(self.description) + if self.quantized_by is not None: + gguf_writer.add_quantized_by(self.quantized_by) + + if self.size_label is not None: + gguf_writer.add_size_label(self.size_label) + + if self.license is not None: + gguf_writer.add_license(self.license) + if self.license_name is not None: + gguf_writer.add_license_name(self.license_name) + if self.license_link is not None: + gguf_writer.add_license_link(self.license_link) + + if self.url is not None: + gguf_writer.add_url(self.url) + if self.doi is not None: + gguf_writer.add_doi(self.doi) + if self.uuid is not None: + gguf_writer.add_uuid(self.uuid) + if self.repo_url is not None: + gguf_writer.add_repo_url(self.repo_url) + + if self.source_url is not None: + gguf_writer.add_source_url(self.source_url) + if self.source_doi is not None: + gguf_writer.add_source_doi(self.source_doi) + if self.source_uuid is not None: + gguf_writer.add_source_uuid(self.source_uuid) + if self.source_repo_url is not None: + gguf_writer.add_source_repo_url(self.source_repo_url) + + if self.base_models is not None: + gguf_writer.add_base_model_count(len(self.base_models)) + for key, base_model_entry in enumerate(self.base_models): + if "name" in base_model_entry: + 
gguf_writer.add_base_model_name(key, base_model_entry["name"]) + if "author" in base_model_entry: + gguf_writer.add_base_model_author(key, base_model_entry["author"]) + if "version" in base_model_entry: + gguf_writer.add_base_model_version(key, base_model_entry["version"]) + if "organization" in base_model_entry: + gguf_writer.add_base_model_organization(key, base_model_entry["organization"]) + if "url" in base_model_entry: + gguf_writer.add_base_model_url(key, base_model_entry["url"]) + if "doi" in base_model_entry: + gguf_writer.add_base_model_doi(key, base_model_entry["doi"]) + if "uuid" in base_model_entry: + gguf_writer.add_base_model_uuid(key, base_model_entry["uuid"]) + if "repo_url" in base_model_entry: + gguf_writer.add_base_model_repo_url(key, base_model_entry["repo_url"]) + + if self.tags is not None: + gguf_writer.add_tags(self.tags) + if self.languages is not None: + gguf_writer.add_languages(self.languages) + if self.datasets is not None: + gguf_writer.add_datasets(self.datasets) diff --git a/.venv/lib/python3.11/site-packages/gguf/py.typed b/.venv/lib/python3.11/site-packages/gguf/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.venv/lib/python3.11/site-packages/gguf/quants.py b/.venv/lib/python3.11/site-packages/gguf/quants.py new file mode 100644 index 0000000000000000000000000000000000000000..ff589b85245e5f4a3dfda72aae6402b523459dea --- /dev/null +++ b/.venv/lib/python3.11/site-packages/gguf/quants.py @@ -0,0 +1,1188 @@ +from __future__ import annotations +from abc import ABC, abstractmethod +from typing import Any, Callable, Sequence +from math import log2, ceil + +from numpy.typing import DTypeLike + +from .constants import GGML_QUANT_SIZES, GGMLQuantizationType, QK_K +from .lazy import LazyNumpyTensor + +import numpy as np + + +def quant_shape_to_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType) -> tuple[int, ...]: + block_size, type_size = GGML_QUANT_SIZES[quant_type] + if shape[-1] % block_size != 0: + raise ValueError(f"Quantized tensor row size ({shape[-1]}) is not a multiple of {quant_type.name} block size ({block_size})") + return (*shape[:-1], shape[-1] // block_size * type_size) + + +def quant_shape_from_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType) -> tuple[int, ...]: + block_size, type_size = GGML_QUANT_SIZES[quant_type] + if shape[-1] % type_size != 0: + raise ValueError(f"Quantized tensor bytes per row ({shape[-1]}) is not a multiple of {quant_type.name} type size ({type_size})") + return (*shape[:-1], shape[-1] // type_size * block_size) + + +# This is faster than np.vectorize and np.apply_along_axis because it works on more than one row at a time +def _apply_over_grouped_rows(func: Callable[[np.ndarray], np.ndarray], arr: np.ndarray, otype: DTypeLike, oshape: tuple[int, ...]) -> np.ndarray: + rows = arr.reshape((-1, arr.shape[-1])) + osize = 1 + for dim in oshape: + osize *= dim + out = np.empty(shape=osize, dtype=otype) + # compute over groups of 16 rows (arbitrary, but seems good for performance) + n_groups = (rows.shape[0] // 16) or 1 + np.concatenate([func(group).ravel() for group in np.array_split(rows, n_groups)], axis=0, out=out) + return out.reshape(oshape) + + +# round away from zero +# ref: https://stackoverflow.com/a/59143326/22827863 +def np_roundf(n: np.ndarray) -> np.ndarray: + a = abs(n) + floored = np.floor(a) + b = floored + np.floor(2 * (a - floored)) + return np.sign(n) * b + + +class QuantError(Exception): ... 
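+ # NOTE: illustrative sketch, not from the upstream package; a minimal round trip
+ # through the module-level quantize()/dequantize() helpers defined below, assuming
+ # Q4_0 (its block size of 32 must divide the input's last dimension):
+ def _example_q4_0_round_trip() -> np.ndarray:
+     data = np.arange(64, dtype=np.float32).reshape((2, 32)) / 8.0
+     # Q4_0 packs each 32-element block into 18 bytes: a float16 scale plus 16 bytes of nibble pairs
+     packed = quantize(data, GGMLQuantizationType.Q4_0)
+     # dequantize expands back to float32; the round trip is lossy by design
+     return dequantize(packed, GGMLQuantizationType.Q4_0)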
+ + +_type_traits: dict[GGMLQuantizationType, type[__Quant]] = {} + + +def quantize(data: np.ndarray, qtype: GGMLQuantizationType) -> np.ndarray: + if qtype == GGMLQuantizationType.F32: + return data.astype(np.float32, copy=False) + elif qtype == GGMLQuantizationType.F16: + return data.astype(np.float16, copy=False) + elif (q := _type_traits.get(qtype)) is not None: + return q.quantize(data) + else: + raise NotImplementedError(f"Quantization for {qtype.name} is not yet implemented") + + +def dequantize(data: np.ndarray, qtype: GGMLQuantizationType) -> np.ndarray: + if qtype == GGMLQuantizationType.F32: + return data.view(np.float32) + elif qtype == GGMLQuantizationType.F16: + return data.view(np.float16).astype(np.float32) + elif (q := _type_traits.get(qtype)) is not None: + return q.dequantize(data) + else: + raise NotImplementedError(f"Dequantization for {qtype.name} is not yet implemented") + + +class __Quant(ABC): + qtype: GGMLQuantizationType + block_size: int + type_size: int + + grid: np.ndarray[Any, np.dtype[np.float32]] | None = None + grid_shape: tuple[int, int] = (0, 0) + grid_map: tuple[int | float, ...] = () + grid_hex: bytes | None = None + + def __init__(self): + raise TypeError("Quant conversion classes can't have instances") + + def __init_subclass__(cls, qtype: GGMLQuantizationType) -> None: + cls.qtype = qtype + cls.block_size, cls.type_size = GGML_QUANT_SIZES[qtype] + cls.__quantize_lazy = LazyNumpyTensor._wrap_fn( + cls.__quantize_array, + meta_noop=(np.uint8, cls.__shape_to_bytes) + ) + cls.__dequantize_lazy = LazyNumpyTensor._wrap_fn( + cls.__dequantize_array, + meta_noop=(np.float32, cls.__shape_from_bytes) + ) + assert qtype not in _type_traits + _type_traits[qtype] = cls + + @classmethod + def init_grid(cls): + if cls.grid is not None or cls.grid_hex is None: + return + + bits_per_elem = ceil(log2(len(cls.grid_map))) + assert bits_per_elem != 0, cls.qtype.name + elems_per_byte = 8 // bits_per_elem + + grid = np.frombuffer(cls.grid_hex, dtype=np.uint8) + # decode hexadecimal chars from grid + grid = grid.reshape((-1, 2)) + grid = (np.where(grid > 0x40, grid + 9, grid) & 0x0F) << np.array([4, 0], dtype=np.uint8).reshape((1, 2)) + grid = grid[..., 0] | grid[..., 1] + # unpack the grid values + grid = grid.reshape((-1, 1)) >> np.array([i for i in range(0, 8, 8 // elems_per_byte)], dtype=np.uint8).reshape((1, elems_per_byte)) + grid = (grid & ((1 << bits_per_elem) - 1)).reshape((-1, 1)) + grid_map = np.array(cls.grid_map, dtype=np.float32).reshape((1, -1)) + grid = np.take_along_axis(grid_map, grid, axis=-1) + cls.grid = grid.reshape((1, 1, *cls.grid_shape)) + + @classmethod + @abstractmethod + def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + raise NotImplementedError + + @classmethod + @abstractmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + raise NotImplementedError + + @classmethod + def quantize_rows(cls, rows: np.ndarray) -> np.ndarray: + rows = rows.astype(np.float32, copy=False) + shape = rows.shape + n_blocks = rows.size // cls.block_size + blocks = rows.reshape((n_blocks, cls.block_size)) + blocks = cls.quantize_blocks(blocks) + assert blocks.dtype == np.uint8 + assert blocks.shape[-1] == cls.type_size + return blocks.reshape(cls.__shape_to_bytes(shape)) + + @classmethod + def dequantize_rows(cls, rows: np.ndarray) -> np.ndarray: + rows = rows.view(np.uint8) + shape = rows.shape + n_blocks = rows.size // cls.type_size + blocks = rows.reshape((n_blocks, cls.type_size)) + blocks = cls.dequantize_blocks(blocks) + assert
blocks.dtype == np.float32 + assert blocks.shape[-1] == cls.block_size + return blocks.reshape(cls.__shape_from_bytes(shape)) + + @classmethod + def __shape_to_bytes(cls, shape: Sequence[int]): + return quant_shape_to_byte_shape(shape, cls.qtype) + + @classmethod + def __shape_from_bytes(cls, shape: Sequence[int]): + return quant_shape_from_byte_shape(shape, cls.qtype) + + @classmethod + def __quantize_array(cls, array: np.ndarray) -> np.ndarray: + return _apply_over_grouped_rows(cls.quantize_rows, arr=array, otype=np.uint8, oshape=cls.__shape_to_bytes(array.shape)) + + @classmethod + def __dequantize_array(cls, array: np.ndarray) -> np.ndarray: + cls.init_grid() + return _apply_over_grouped_rows(cls.dequantize_rows, arr=array, otype=np.float32, oshape=cls.__shape_from_bytes(array.shape)) + + @classmethod + def __quantize_lazy(cls, lazy_tensor: LazyNumpyTensor, /) -> Any: + pass + + @classmethod + def __dequantize_lazy(cls, lazy_tensor: LazyNumpyTensor, /) -> Any: + pass + + @classmethod + def can_quantize(cls, tensor: np.ndarray | LazyNumpyTensor) -> bool: + return tensor.shape[-1] % cls.block_size == 0 + + @classmethod + def quantize(cls, tensor: np.ndarray | LazyNumpyTensor) -> np.ndarray: + if not cls.can_quantize(tensor): + raise QuantError(f"Can't quantize tensor with shape {tensor.shape} to {cls.qtype.name}") + if isinstance(tensor, LazyNumpyTensor): + return cls.__quantize_lazy(tensor) + else: + return cls.__quantize_array(tensor) + + @classmethod + def dequantize(cls, tensor: np.ndarray | LazyNumpyTensor) -> np.ndarray: + if isinstance(tensor, LazyNumpyTensor): + return cls.__dequantize_lazy(tensor) + else: + return cls.__dequantize_array(tensor) + + +class BF16(__Quant, qtype=GGMLQuantizationType.BF16): + @classmethod + # same as ggml_compute_fp32_to_bf16 in ggml-impl.h + def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n = blocks.view(np.uint32) + # force nan to quiet + n = np.where((n & 0x7fffffff) > 0x7f800000, (n & np.uint32(0xffff0000)) | np.uint32(64 << 16), n) + # round to nearest even + n = (np.uint64(n) + (0x7fff + ((n >> 16) & 1))) >> 16 + return n.astype(np.uint16).view(np.uint8) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + return (blocks.view(np.int16).astype(np.int32) << 16).view(np.float32) + + +class Q4_0(__Quant, qtype=GGMLQuantizationType.Q4_0): + @classmethod + def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + imax = abs(blocks).argmax(axis=-1, keepdims=True) + max = np.take_along_axis(blocks, imax, axis=-1) + + d = max / -8 + with np.errstate(divide="ignore"): + id = np.where(d == 0, 0, 1 / d) + # FIXME: Q4_0's reference rounding is cursed and depends on FMA + qs = np.trunc((np.float64(blocks) * np.float64(id)) + np.float64(8.5), dtype=np.float32).astype(np.uint8).clip(0, 15) + + qs = qs.reshape((n_blocks, 2, cls.block_size // 2)) + qs = qs[..., 0, :] | (qs[..., 1, :] << np.uint8(4)) + + d = d.astype(np.float16).view(np.uint8) + + return np.concatenate([d, qs], axis=-1) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, qs = np.hsplit(blocks, [2]) + + d = d.view(np.float16).astype(np.float32) + + qs = qs.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1)) + qs = (qs & np.uint8(0x0F)).reshape((n_blocks, -1)).astype(np.int8) - np.int8(8) + + return (d * qs.astype(np.float32)) + + +class Q4_1(__Quant, qtype=GGMLQuantizationType.Q4_1): + @classmethod + 
def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + max = blocks.max(axis=-1, keepdims=True) + min = blocks.min(axis=-1, keepdims=True) + + d = (max - min) / 15 + with np.errstate(divide="ignore"): + id = np.where(d == 0, 0, 1 / d) + qs = np.trunc((blocks - min) * id + np.float32(0.5), dtype=np.float32).astype(np.uint8).clip(0, 15) + + qs = qs.reshape((n_blocks, 2, cls.block_size // 2)) + qs = qs[..., 0, :] | (qs[..., 1, :] << np.uint8(4)) + + d = d.astype(np.float16).view(np.uint8) + m = min.astype(np.float16).view(np.uint8) + + return np.concatenate([d, m, qs], axis=-1) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + m, qs = np.hsplit(rest, [2]) + + d = d.view(np.float16).astype(np.float32) + m = m.view(np.float16).astype(np.float32) + + qs = qs.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1)) + qs = (qs & np.uint8(0x0F)).reshape((n_blocks, -1)).astype(np.float32) + + return (d * qs) + m + + +class Q5_0(__Quant, qtype=GGMLQuantizationType.Q5_0): + @classmethod + def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + imax = abs(blocks).argmax(axis=-1, keepdims=True) + max = np.take_along_axis(blocks, imax, axis=-1) + + d = max / -16 + with np.errstate(divide="ignore"): + id = np.where(d == 0, 0, 1 / d) + # FIXME: Q5_0's reference rounding is cursed and depends on FMA + q = np.trunc((np.float64(blocks) * np.float64(id)) + np.float64(16.5), dtype=np.float32).astype(np.uint8).clip(0, 31) + + qs = q.reshape((n_blocks, 2, cls.block_size // 2)) + qs = (qs[..., 0, :] & np.uint8(0x0F)) | (qs[..., 1, :] << np.uint8(4)) + + qh = np.packbits(q.reshape((n_blocks, 1, 32)) >> np.uint8(4), axis=-1, bitorder="little").reshape(n_blocks, 4) + + d = d.astype(np.float16).view(np.uint8) + + return np.concatenate([d, qh, qs], axis=-1) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + qh, qs = np.hsplit(rest, [4]) + + d = d.view(np.float16).astype(np.float32) + qh = qh.view(np.uint32) + + qh = qh.reshape((n_blocks, 1)) >> np.array([i for i in range(32)], dtype=np.uint32).reshape((1, 32)) + ql = qs.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1)) + qh = (qh & np.uint32(0x01)).astype(np.uint8) + ql = (ql & np.uint8(0x0F)).reshape((n_blocks, -1)) + + qs = (ql | (qh << np.uint8(4))).astype(np.int8) - np.int8(16) + + return (d * qs.astype(np.float32)) + + +class Q5_1(__Quant, qtype=GGMLQuantizationType.Q5_1): + @classmethod + def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + max = blocks.max(axis=-1, keepdims=True) + min = blocks.min(axis=-1, keepdims=True) + + d = (max - min) / 31 + with np.errstate(divide="ignore"): + id = np.where(d == 0, 0, 1 / d) + q = np.trunc((blocks - min) * id + np.float32(0.5), dtype=np.float32).astype(np.uint8).clip(0, 31) + + qs = q.reshape((n_blocks, 2, cls.block_size // 2)) + qs = (qs[..., 0, :] & np.uint8(0x0F)) | (qs[..., 1, :] << np.uint8(4)) + + qh = np.packbits(q.reshape((n_blocks, 1, 32)) >> np.uint8(4), axis=-1, bitorder="little").reshape(n_blocks, 4) + + d = d.astype(np.float16).view(np.uint8) + m = min.astype(np.float16).view(np.uint8) + + return np.concatenate([d, m, qh, qs], axis=-1) + + @classmethod + def dequantize_blocks(cls, blocks: 
np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + m, rest = np.hsplit(rest, [2]) + qh, qs = np.hsplit(rest, [4]) + + d = d.view(np.float16).astype(np.float32) + m = m.view(np.float16).astype(np.float32) + qh = qh.view(np.uint32) + + qh = qh.reshape((n_blocks, 1)) >> np.array([i for i in range(32)], dtype=np.uint32).reshape((1, 32)) + ql = qs.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1)) + qh = (qh & np.uint32(0x01)).astype(np.uint8) + ql = (ql & np.uint8(0x0F)).reshape((n_blocks, -1)) + + qs = (ql | (qh << np.uint8(4))).astype(np.float32) + + return (d * qs) + m + + +class Q8_0(__Quant, qtype=GGMLQuantizationType.Q8_0): + @classmethod + # Implementation of Q8_0 with bit-exact same results as reference implementation in ggml-quants.c + def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + + d = abs(blocks).max(axis=1, keepdims=True) / 127 + with np.errstate(divide="ignore"): + id = np.where(d == 0, 0, 1 / d) + qs = np_roundf(blocks * id) + + # (n_blocks, 2) + d = d.astype(np.float16).view(np.uint8) + # (n_blocks, block_size) + qs = qs.astype(np.int8).view(np.uint8) + + return np.concatenate([d, qs], axis=1) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + d, x = np.split(blocks, [2], axis=1) + d = d.view(np.float16).astype(np.float32) + x = x.view(np.int8).astype(np.float32) + + return (x * d) + + +class Q2_K(__Quant, qtype=GGMLQuantizationType.Q2_K): + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + scales, rest = np.hsplit(blocks, [QK_K // 16]) + qs, rest = np.hsplit(rest, [QK_K // 4]) + d, dmin = np.hsplit(rest, [2]) + + d = d.view(np.float16).astype(np.float32) + dmin = dmin.view(np.float16).astype(np.float32) + + # (n_blocks, 16, 1) + dl = (d * (scales & 0xF).astype(np.float32)).reshape((n_blocks, QK_K // 16, 1)) + ml = (dmin * (scales >> 4).astype(np.float32)).reshape((n_blocks, QK_K // 16, 1)) + + shift = np.array([0, 2, 4, 6], dtype=np.uint8).reshape((1, 1, 4, 1)) + + qs = (qs.reshape((n_blocks, -1, 1, 32)) >> shift) & np.uint8(3) + + qs = qs.reshape((n_blocks, QK_K // 16, 16)).astype(np.float32) + + qs = dl * qs - ml + + return qs.reshape((n_blocks, -1)) + + +class Q3_K(__Quant, qtype=GGMLQuantizationType.Q3_K): + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + hmask, rest = np.hsplit(blocks, [QK_K // 8]) + qs, rest = np.hsplit(rest, [QK_K // 4]) + scales, d = np.hsplit(rest, [12]) + + d = d.view(np.float16).astype(np.float32) + + # The scales are packed at 6-bit each in this pattern: + # 0: IIIIAAAA + # 1: JJJJBBBB + # 2: KKKKCCCC + # 3: LLLLDDDD + # 4: MMMMEEEE + # 5: NNNNFFFF + # 6: OOOOGGGG + # 7: PPPPHHHH + # 8: MMIIEEAA + # 9: NNJJFFBB + # 10: OOKKGGCC + # 11: PPLLHHDD + lscales, hscales = np.hsplit(scales, [8]) + lscales = lscales.reshape((n_blocks, 1, 8)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 2, 1)) + lscales = lscales.reshape((n_blocks, 16)) + hscales = hscales.reshape((n_blocks, 1, 4)) >> np.array([0, 2, 4, 6], dtype=np.uint8).reshape((1, 4, 1)) + hscales = hscales.reshape((n_blocks, 16)) + scales = (lscales & np.uint8(0x0F)) | ((hscales & np.uint8(0x03)) << np.uint8(4)) + scales = (scales.astype(np.int8) - np.int8(32)).astype(np.float32) + + dl = (d * scales).reshape((n_blocks, 16, 1)) + + ql = qs.reshape((n_blocks, -1, 1, 32)) >> np.array([0, 2, 4, 6], dtype=np.uint8).reshape((1, 1, 4, 1)) + qh 
= hmask.reshape(n_blocks, -1, 1, 32) >> np.array([i for i in range(8)], dtype=np.uint8).reshape((1, 1, 8, 1)) + ql = ql.reshape((n_blocks, 16, QK_K // 16)) & np.uint8(3) + qh = (qh.reshape((n_blocks, 16, QK_K // 16)) & np.uint8(1)) + qh = qh ^ np.uint8(1) # strangely, the offset is zero when the bitmask is 1 + q = (ql.astype(np.int8) - (qh << np.uint8(2)).astype(np.int8)).astype(np.float32) + + return (dl * q).reshape((n_blocks, QK_K)) + + +class Q4_K(__Quant, qtype=GGMLQuantizationType.Q4_K): + K_SCALE_SIZE = 12 + + @staticmethod + def get_scale_min(scales: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + n_blocks = scales.shape[0] + scales = scales.view(np.uint8) + ### Unpacking the following: ### + # 0 EEAAAAAA + # 1 FFBBBBBB + # 2 GGCCCCCC + # 3 HHDDDDDD + # 4 eeaaaaaa + # 5 ffbbbbbb + # 6 ggcccccc + # 7 hhdddddd + # 8 eeeeEEEE + # 9 ffffFFFF + # 10 ggggGGGG + # 11 hhhhHHHH + scales = scales.reshape((n_blocks, 3, 4)) + d, m, m_d = np.split(scales, 3, axis=-2) + + sc = np.concatenate([d & 0x3F, (m_d & 0x0F) | ((d >> 2) & 0x30)], axis=-1) + min = np.concatenate([m & 0x3F, (m_d >> 4) | ((m >> 2) & 0x30)], axis=-1) + + return (sc.reshape((n_blocks, 8)), min.reshape((n_blocks, 8))) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + dmin, rest = np.hsplit(rest, [2]) + scales, qs = np.hsplit(rest, [cls.K_SCALE_SIZE]) + + d = d.view(np.float16).astype(np.float32) + dmin = dmin.view(np.float16).astype(np.float32) + + sc, m = Q4_K.get_scale_min(scales) + + d = (d * sc.astype(np.float32)).reshape((n_blocks, -1, 1)) + dm = (dmin * m.astype(np.float32)).reshape((n_blocks, -1, 1)) + + qs = qs.reshape((n_blocks, -1, 1, 32)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1)) + qs = (qs & np.uint8(0x0F)).reshape((n_blocks, -1, 32)).astype(np.float32) + + return (d * qs - dm).reshape((n_blocks, QK_K)) + + +class Q5_K(__Quant, qtype=GGMLQuantizationType.Q5_K): + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + dmin, rest = np.hsplit(rest, [2]) + scales, rest = np.hsplit(rest, [Q4_K.K_SCALE_SIZE]) + qh, qs = np.hsplit(rest, [QK_K // 8]) + + d = d.view(np.float16).astype(np.float32) + dmin = dmin.view(np.float16).astype(np.float32) + + sc, m = Q4_K.get_scale_min(scales) + + d = (d * sc.astype(np.float32)).reshape((n_blocks, -1, 1)) + dm = (dmin * m.astype(np.float32)).reshape((n_blocks, -1, 1)) + + ql = qs.reshape((n_blocks, -1, 1, 32)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1)) + qh = qh.reshape((n_blocks, -1, 1, 32)) >> np.array([i for i in range(8)], dtype=np.uint8).reshape((1, 1, 8, 1)) + ql = (ql & np.uint8(0x0F)).reshape((n_blocks, -1, 32)) + qh = (qh & np.uint8(0x01)).reshape((n_blocks, -1, 32)) + q = (ql | (qh << np.uint8(4))).astype(np.float32) + + return (d * q - dm).reshape((n_blocks, QK_K)) + + +class Q6_K(__Quant, qtype=GGMLQuantizationType.Q6_K): + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + ql, rest = np.hsplit(blocks, [QK_K // 2]) + qh, rest = np.hsplit(rest, [QK_K // 4]) + scales, d = np.hsplit(rest, [QK_K // 16]) + + scales = scales.view(np.int8).astype(np.float32) + d = d.view(np.float16).astype(np.float32) + d = (d * scales).reshape((n_blocks, QK_K // 16, 1)) + + ql = ql.reshape((n_blocks, -1, 1, 64)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1)) + ql = (ql & np.uint8(0x0F)).reshape((n_blocks, -1, 
32)) + qh = qh.reshape((n_blocks, -1, 1, 32)) >> np.array([0, 2, 4, 6], dtype=np.uint8).reshape((1, 1, 4, 1)) + qh = (qh & np.uint8(0x03)).reshape((n_blocks, -1, 32)) + q = (ql | (qh << np.uint8(4))).astype(np.int8) - np.int8(32) + q = q.reshape((n_blocks, QK_K // 16, -1)).astype(np.float32) + + return (d * q).reshape((n_blocks, QK_K)) + + +class IQ2_XXS(__Quant, qtype=GGMLQuantizationType.IQ2_XXS): + ksigns: bytes = ( + b"\x00\x81\x82\x03\x84\x05\x06\x87\x88\x09\x0a\x8b\x0c\x8d\x8e\x0f" + b"\x90\x11\x12\x93\x14\x95\x96\x17\x18\x99\x9a\x1b\x9c\x1d\x1e\x9f" + b"\xa0\x21\x22\xa3\x24\xa5\xa6\x27\x28\xa9\xaa\x2b\xac\x2d\x2e\xaf" + b"\x30\xb1\xb2\x33\xb4\x35\x36\xb7\xb8\x39\x3a\xbb\x3c\xbd\xbe\x3f" + b"\xc0\x41\x42\xc3\x44\xc5\xc6\x47\x48\xc9\xca\x4b\xcc\x4d\x4e\xcf" + b"\x50\xd1\xd2\x53\xd4\x55\x56\xd7\xd8\x59\x5a\xdb\x5c\xdd\xde\x5f" + b"\x60\xe1\xe2\x63\xe4\x65\x66\xe7\xe8\x69\x6a\xeb\x6c\xed\xee\x6f" + b"\xf0\x71\x72\xf3\x74\xf5\xf6\x77\x78\xf9\xfa\x7b\xfc\x7d\x7e\xff" + ) + + # iq2xxs_grid, but with each byte of the original packed in 2 bits, + # by mapping 0x08 to 0, 0x19 to 1, and 0x2b to 2. + grid_shape = (256, 8) + grid_map = (0x08, 0x19, 0x2b) + grid_hex = ( + b"00000200050008000a00110014002000220028002a0041004400500058006100" + b"6400800082008a00a20001010401100115014001840198010002020222028202" + b"010404041004210424044004420448046004810484049004a404000502050805" + b"200546056905800591050906100640068406a406000805080808140828084108" + b"440850085208880804094009020a140a01100410101021104010601084109010" + b"951000110811201150115a118011241245120014081420142514491480141815" + b"6215001616160118041810184018811800190519a019511a002002200a204420" + b"6120802082202921482100220222012404241024402456240025412564259026" + b"082820289428442a014004401040184021402440404048405640604081408440" + b"9040004120416141804185410142104248425642684200440844204480449944" + b"124524450046014804481048404845480049584961498249454a904a00500850" + b"1150195020508050885004514251a4519152905492540a550156545600581158" + b"195864584059085a046010604060686000615561186260620064056410651265" + b"84654268008002800a8041808280048118814081118201840484108415844084" + b"608400854685948509864086608602880489118a0490109024904090a1901691" + b"8091459200942294449451958198209902a050a085a009a100a218a450a804a9" + ) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, qs = np.hsplit(blocks, [2]) + + d = d.view(np.float16).astype(np.float32) + + qs = qs.view(np.uint32).reshape(n_blocks, -1, 2) + + db = d * (np.float32(0.5) + (qs[..., 1] >> 28).astype(np.float32)) * np.float32(0.25) + db = db.reshape((n_blocks, -1, 1, 1)) + + # get the sign indices and unpack the bits + signs = qs[..., 1].reshape((n_blocks, -1, 1)) >> np.array([0, 7, 14, 21], dtype=np.uint32).reshape((1, 1, 4)) + ksigns = np.frombuffer(cls.ksigns, dtype=np.uint8).reshape((1, 1, 1, 128)) + signs = (signs & np.uint32(0x7F)).reshape((n_blocks, -1, 4, 1)) + signs = np.take_along_axis(ksigns, signs, axis=-1) + signs = signs.reshape((n_blocks, -1, 4, 1)) >> np.array([i for i in range(8)], dtype=np.uint8).reshape((1, 1, 1, 8)) + signs = signs & np.uint8(0x01) + signs = np.where(signs == 0, np.float32(1), np.float32(-1)) + signs = signs.reshape((n_blocks, -1, 4, 8)) + + assert cls.grid is not None + grid = np.take_along_axis(cls.grid, qs[..., 0].copy().view(np.uint8).reshape((n_blocks, -1, 1, 1)), axis=-2) + grid = grid.reshape((n_blocks, -1, 4, 8)) + + return (db * grid * signs).reshape((n_blocks, -1)) + + +class 
IQ2_XS(__Quant, qtype=GGMLQuantizationType.IQ2_XS): + # iq2xs_grid, but with each byte of the original packed in 2 bits, + # by mapping 0x08 to 0, 0x19 to 1, and 0x2b to 2. + grid_shape = (512, 8) + grid_map = (0x08, 0x19, 0x2b) + grid_hex = ( + b"00000200050008000a0011001400160019002000220025002800410044004600" + b"49005000520055005800610064008000820085008800910094009900a0000101" + b"04010601090110011201150118011a0121012401400142014501480151015401" + b"6001680181018401900100020202050208021102140220024102440250025502" + b"80028a0201040404060409041004120415041804210424044004420445044804" + b"5104540456046004810484049004000502050505080511051405200541054405" + b"500561058005010604061006260640064206840600080208050808080a081108" + b"14082008250841084408500858088008a008aa08010904091009400981098909" + b"000a200a280a960aa00a01100410061009101010121015101810211024104010" + b"4210451048105110541060106a10811084109010001102110511081111111411" + b"2011411144115011801194119611011204120612101240126012001402140514" + b"0814111414142014411444144914501464148014011504151015401500161416" + b"49160118041810181218401854188618001905196619511aa91a002002200520" + b"08200a201120142020204120442050208020a020012104211021402148216521" + b"002222228022a82201240424102429244024002541255225992501261a26a626" + b"002808280a28202855288828a22868299029082a202a822a882a8a2a01400440" + b"0640094010401240154018402140244040404240454048404a40514054406040" + b"6540814084409040004102410541084111411441204141414441504180418541" + b"a241014204421042124229424042004402440544084411441444194420444144" + b"4444504480449444014504451045244540459a4500460a464446504601480448" + b"1048404845485448624800491149444950496949044a00500250055008501150" + b"145020502850415044505050805001510451105115514051425100524452aa52" + b"0154045410542154405460548154a154005508558055885521566856a1560058" + b"14584158505899581a5940594259855a0160046010604060546062608660a960" + b"006124624a62926200641664106540654565a46501686a682569066a546a626a" + b"00800280058008801180148020802a8041804480508080808280a880aa800181" + b"0481068110814081518159810082208280828282a082a8820184048410841284" + b"158440846084898400854485a58518866a860088088825885a8880888288a888" + b"0689228a808a888a968aa88a0190049010904090569084900091229164915692" + b"89920094059444945094589429959095929541965198a6984999159a609a00a0" + b"02a008a00aa020a02aa0a0a051a159a1a6a100a202a208a22aa280a2a0a240a4" + b"95a465a698a60aa820a822a828a8a0a8a8a804a984a986a928aa2aaa91aaaaaa" + ) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + qs, scales = np.hsplit(rest, [2 * QK_K // 8]) + + d = d.view(np.float16).astype(np.float32) + qs = qs.view(np.uint16) + + scales = scales.reshape((n_blocks, -1, 1)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2)) + scales = (scales & 0x0F).reshape((n_blocks, -1)) + db = d * (np.float32(0.5) + scales) * np.float32(0.25) + db = db.reshape((n_blocks, -1, 1, 1)) + + # get the sign indices and unpack the bits + signs = np.frombuffer(IQ2_XXS.ksigns, dtype=np.uint8).reshape(1, 1, 128) + signs = np.take_along_axis(signs, (qs >> 9).reshape((n_blocks, -1, 1)), axis=-1) + signs = signs.reshape((n_blocks, -1, 1)) >> np.array([i for i in range(8)], dtype=np.uint8).reshape((1, 1, 8)) + signs = signs & np.uint8(0x01) + signs = np.where(signs == 0, np.float32(1), np.float32(-1)) + signs = signs.reshape((n_blocks, -1, 2, 8)) + + assert cls.grid is not None + grid = np.take_along_axis(cls.grid, (qs & 
np.uint16(511)).reshape((n_blocks, -1, 1, 1)), axis=-2) + grid = grid.reshape((n_blocks, -1, 2, 8)) + + return (db * grid * signs).reshape((n_blocks, -1)) + + +class IQ2_S(__Quant, qtype=GGMLQuantizationType.IQ2_S): + # iq2s_grid, but with each byte of the original packed in 2 bits, + # by mapping 0x08 to 0, 0x19 to 1, and 0x2b to 2. + grid_shape = (1024, 8) + grid_map = (0x08, 0x19, 0x2b) + grid_hex = ( + b"00000200050008000a0011001400160019002000220025002800410044004600" + b"490050005200550058006100640066006900800082008500880091009400a000" + b"a500aa0001010401060109011001120115011801210124014001420145014801" + b"510154015601590160016501680181018401900192019501a101a40100020202" + b"050208021102140220022a02410244024602490250025502800285028a029402" + b"a202010404040604090410041204150418042104240426042904400442044504" + b"48044a0451045404560459046004620465048104840486048904900495049804" + b"a104a40400050205050508050a05110514051605190520052505280541054405" + b"46054905500552055505580561056405800582058505880591059405a0050106" + b"0406060609061006150640064506480651065406600681068406900600080208" + b"050808081108140816081908200825082a084108440846084908500852085508" + b"580861086408800885089408aa08010904091009120915091809210940094509" + b"480951095409600981099009000a110a140a220a280a2a0a500a990a01100410" + b"0610091010101210151018102110241026104010421045104810511054105610" + b"59106010621065106810811084108610901095109810a110a410001102110511" + b"08110a1111111411161119112011221125112811411144114611491150115211" + b"5511581161116411801182118511881191119411011204120912101215122112" + b"2412401245125112541281128412901200140214051408141114141416141914" + b"2014251428144114441446144914501452145514581461146414801482148514" + b"881491149414a014011504150615091510151215151518152115241540154215" + b"4515481551155415601581158415901500160516081611161416201641164416" + b"50168016aa160118041806180918101815181818211840184218451848185118" + b"541860188118841800190219051908191119141920194119441950196919a219" + b"041a101a401a561a00200220052008201120142016201920202025202a204120" + b"4420502052205520642080208a209420aa200121042110211221152121214021" + b"4221452151215421602181218421902100220a22222228222a22442250228822" + b"8a22a82201240424062409241024152418242124242440244224452448245124" + b"5424602481248424902400250525082511251425202541254425502566258025" + b"0126042610264026592600280528112814284128442850288a28aa2801290429" + b"102995290a2a222a642a882a8a2a014004400640094010401240154018401a40" + b"21402440264040404240454048404a4051405440564059406040624065408140" + b"8440904095409840a140a4400041024105410841114114411641194120412241" + b"2541414144414641494150415241554158416141644180418241854188419141" + b"9441a04101420442104212421542184224424042454248425142544260428142" + b"844200440244054408440a441144144416441944204422442544284441444444" + b"46444944504452445544584461446444804482448544884491449444a0440145" + b"0445064509451045124515451845214524454045424545454845514554456045" + b"6a4581458445904500460246054608461146144620464146444650468046a546" + b"0148044809481048124815481848214824484048424845484848514854486048" + b"84489048004902490549084911491449204941494449504980499649014a044a" + b"104a404a00500250055008501150145016501950205022502550285041504450" + b"4650495050505250555058506150645080508250855088509150945001510451" + b"0651095110511251155118512151245140514251455148515151545160518151" + b"8451905100520552085211521452205241524452505269528052015404540654" + 
b"0954105412541554185421542454405442544554485451545454605481548454" + b"9054005502550555085511551455205541554455505580550156045610562656" + b"405600580258055808581158145820584158445850585a588058015904591059" + b"4059005a195a855aa85a01600460066010601260156018602160246040604560" + b"4860516054606060846090600061026105610861116114612061416144615061" + b"806199610462106240625662a162006405640864116414642064416444645064" + b"806401650465106540654a656865926500669466016804681068656898680069" + b"2a69426aa16a0080028005800880118014801980208025804180448050805280" + b"5580588061808080858091809480018104810981108112811581188121812481" + b"408142814581488151815481818184819081a981008205820a82118214824182" + b"4482508201840484068409841084128415841884218440844284458448845184" + b"5484608481848484908400850285058508851185148520854185448550858085" + b"8a85018604861086298640860088058811881488418844885088a28801890489" + b"40896589228a588a5a8a828aa28a019004900990109012901590189024904090" + b"4290459048905190549060908190849090900091059111911491419144915091" + b"5a910192049210924092a6920094029405940894119414942094419444945094" + b"8094969401950495109540959895a19500964696649601980498109826984098" + b"a998009949995299909a00a005a00aa014a022a02aa041a044a050a0a2a0aaa0" + b"40a165a102a20aa222a228a22aa282a288a28aa2a8a201a404a410a440a489a4" + b"a4a400a519a551a60aa828a8a2a854a986a908aa0aaa20aa22aa28aa88aaaaaa" + ) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + qs, rest = np.hsplit(rest, [QK_K // 8]) + signs, rest = np.hsplit(rest, [QK_K // 8]) + qh, scales = np.hsplit(rest, [QK_K // 32]) + + d = d.view(np.float16).astype(np.float32) + + scales = scales.reshape((n_blocks, -1, 1)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2)) + scales = (scales & 0x0F).reshape((n_blocks, -1)) + db = d * (np.float32(0.5) + scales) * np.float32(0.25) + db = db.reshape((n_blocks, -1, 1, 1)) + + # unpack the sign bits + signs = signs.reshape((n_blocks, -1, 1)) >> np.array([i for i in range(8)], dtype=np.uint8).reshape((1, 1, 8)) + signs = signs & np.uint8(0x01) + signs = np.where(signs == 0, np.float32(1), np.float32(-1)) + signs = signs.reshape((n_blocks, -1, 2, 8)) + + qh = qh.reshape((n_blocks, -1, 1)) >> np.array([0, 2, 4, 6], dtype=np.uint8).reshape((1, 1, 4)) + qs = qs.astype(np.uint16) | ((qh & 0x03).astype(np.uint16) << 8).reshape((n_blocks, -1)) + + assert cls.grid is not None + grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2) + grid = grid.reshape((n_blocks, -1, 2, 8)) + + return (db * grid * signs).reshape((n_blocks, -1)) + + +class IQ3_XXS(__Quant, qtype=GGMLQuantizationType.IQ3_XXS): + grid_shape = (256, 4) + grid_map = (0x04, 0x0c, 0x14, 0x1c, 0x24, 0x2c, 0x34, 0x3e) + grid_hex = ( + b"0000020004001100130017002000220031004200730075000101030110011201" + b"2101250130013201410154017001000202020402110220022202310233023702" + b"5102570275020103070310031203250370031304370444045704730475040105" + b"0705320552053506640610071407160743076107011003101010121021102310" + b"3010321034104710501000110211111120112211011203121012121221123012" + b"7212001302132013311346136613011405145014201524154615711505162217" + b"4017002002201120132020202220262031204220012103210521102112212121" + b"3021632167217021002202221122172220222222372240225522012310231423" + b"7023742335245324032527254125742501270327162745270130103012302130" + b"2330503065307230003102312031313144314631013203321032253252327232" + 
b"1133333330344734723400350635223555351436363663363337603704401740" + b"3540374053405740744120423742404260426642074345430444514464442545" + b"4345704505471047124730471250415070500051065126515551145232527252" + b"0253535310542354275472540255315550562457425724604460466064602161" + b"6161176264623063366344640565526533660367216703700570077010703270" + b"5270267140711272457252720073157333736073217441740075027524753076" + ) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + qs, scales = np.hsplit(rest, [QK_K // 4]) + + d = d.view(np.float16).astype(np.float32) + scales = scales.view(np.uint32) + + db = d * (np.float32(0.5) + (scales >> 28).astype(np.float32)) * np.float32(0.5) + db = db.reshape((n_blocks, -1, 1, 1)) + + # get the sign indices and unpack the bits + signs = scales.reshape((n_blocks, -1, 1)) >> np.array([0, 7, 14, 21], dtype=np.uint32).reshape((1, 1, 4)) + ksigns = np.frombuffer(IQ2_XXS.ksigns, dtype=np.uint8).reshape((1, 1, 1, 128)) + signs = (signs & np.uint32(0x7F)).reshape((n_blocks, -1, 4, 1)) + signs = np.take_along_axis(ksigns, signs, axis=-1) + signs = signs.reshape((n_blocks, -1, 4, 1)) >> np.array([i for i in range(8)], dtype=np.uint8).reshape((1, 1, 1, 8)) + signs = signs & np.uint8(0x01) + signs = np.where(signs == 0, np.float32(1), np.float32(-1)) + signs = signs.reshape((n_blocks, -1, 4, 8)) + + assert cls.grid is not None + grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2) + grid = grid.reshape((n_blocks, -1, 4, 8)) + + return (db * grid * signs).reshape((n_blocks, -1)) + + +class IQ3_S(__Quant, qtype=GGMLQuantizationType.IQ3_S): + grid_shape = (512, 4) + grid_map = (0x01, 0x03, 0x05, 0x07, 0x09, 0x0b, 0x0d, 0x0f) + grid_hex = ( + b"0000010002000500070010001100120014001600200021002500330040004200" + b"4500470051005300600062007100740077000001010102010401100111011501" + b"2001230127013101350144016101650172010002010205020702100213021602" + b"2102250230023402420245024702510253027002730203031103150320032203" + b"3103330336034403500352036703710375030004130417042104240432044004" + b"4304510470040205040520052205260533054105450547056605730506061106" + b"1306310652067106000702070407200722072607330750075407001001100210" + b"0410101011101310151017102010221031103410361054105610611072100011" + b"0111031106111011141121113011331141115011521170117611001212121512" + b"1712201224123212401243125512601272120113041307131013131321132713" + b"3013341341136213701303140514121414143114331442144614501454140115" + b"1015131521153015321551152016241627164416461601170317101712172117" + b"3517411762177017002001200320052007201020122014201620212023202720" + b"3020322041204320452050205220672070207320752000210221102113211721" + b"2221252131213421422151210122042207222122232230223722412253225722" + b"7122742200230223052311232223242331233323422350236623012407242024" + b"2324322435244124722475240425112522253725402553257025002602260726" + b"2126552661260527112726273027432750270230113013301530173022303130" + b"3330353042304430473051306330713001310331053114312131233140316031" + b"7231763100321232203232323432503201331033143321332333273330334133" + b"4333473355337333033411341634223431345234603464340135103512352535" + b"3235443556357335163641360137033720372237353700400440124020402440" + b"2740324041405040704002410741114113412241304135414341514155410142" + b"0342104215422142334240425742624270420443114313432043224331433543" + 
b"0044024424443744404471440545074521456245134634466046104715473047" + b"4347514702501050145022504050445047505250665074500151035105511251" + b"2151325172510052115223523052365253520253075310532753445351536553" + b"7353015404542054325446541255265551555355425602570457225711601360" + b"1560316033606060006120612761646112623462426255626262706200631463" + b"2163406325644364626400650365346560650566406611671367007004700770" + b"2070227036704070547062700271117124714371457101720472107216722172" + b"3072517202733273357353730174057413742074507422754275027631760077" + ) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + qs, rest = np.hsplit(rest, [QK_K // 4]) + qh, rest = np.hsplit(rest, [QK_K // 32]) + signs, scales = np.hsplit(rest, [QK_K // 8]) + + d = d.view(np.float16).astype(np.float32) + + scales = scales.reshape((n_blocks, -1, 1)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2)) + scales = (scales & 0x0F).reshape((n_blocks, -1)) + db = d * (1 + 2 * scales) + db = db.reshape((n_blocks, -1, 1, 1)) + + # unpack the sign bits + signs = signs.reshape((n_blocks, -1, 1)) >> np.array([i for i in range(8)], dtype=np.uint8).reshape((1, 1, 8)) + signs = signs & np.uint8(0x01) + signs = np.where(signs == 0, np.float32(1), np.float32(-1)) + signs = signs.reshape((n_blocks, -1, 4, 8)) + + qh = qh.reshape((n_blocks, -1, 1)) >> np.array([i for i in range(8)], dtype=np.uint8) + qh = (qh & 0x01).astype(np.uint16).reshape((n_blocks, -1)) + qs = qs.astype(np.uint16) | (qh << 8) + + assert cls.grid is not None + grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2) + grid = grid.reshape((n_blocks, -1, 4, 8)) + + return (db * grid * signs).reshape((n_blocks, -1)) + + +class IQ1_S(__Quant, qtype=GGMLQuantizationType.IQ1_S): + # iq1s_grid, with each byte packed into 2 bits + # -1, 0, 1 <=> 0, 1, 2 + grid_shape = (2048, 8) + grid_map = (-1, 0, 1) + grid_hex = ( + b"00000200050008000a00110015002000220028002a0045005100540056006500" + b"8000820088008a009500a000a200a800aa000401050111011401160119011a01" + b"2501410146014901520155015a0161016401660168018501910194019601a501" + b"0002020208020a0215022002220228022a024502510259026402690280028202" + b"88028a02910295029902a002a202a802aa021104140416042504410449045504" + b"5a046404650491049904a5040105040505050605150518051a05290540054505" + b"4a0550055105540555055605590560056205650568056a058105910595059805" + b"9a05a105a405a505a605a9051406190641064406500652065506580660066106" + b"6606690685069106940699060008020808080a0815082008220828082a084508" + b"5108560865088008820888088a089508a008a208a808aa080509110914091909" + b"2409250941095009510955096109640969099109940996099909a509000a020a" + b"080a0a0a150a200a220a280a2a0a450a510a590a610a650a800a820a850a880a" + b"8a0a950aa00aa20aa80aaa0a1010111014101910241025104110441050105510" + b"58106110641065106910911094109610a110a510011104110611091110111211" + b"1511181121112411291145114a11501151115211541155115611591160116511" + b"841192119511a111a41111121412161225124012461249125212551258125a12" + b"641266128512911294129612a512011406140914141415141814191421142614" + b"41144514461448144a1451145414551456145914621465146814841489149014" + b"94149514981499149a14a114a414a514a914021505150a151115141515151615" + b"191520152215251528152a154115441545154615511552155415551556155915" + b"5a1561156415651566156915801582158415851588158a159015911594159515" + b"961599159a15a015a215a51501160416051606161516161618161a1621162616" + 
b"401642164416451648164a165116551656165816591661166416651668166916" + b"6a1686168a1692169516a416a916111816182518411844184618491850185518" + b"58185a1860186118641866186918851891189418a5181019121915191a192119" + b"25194219441945194819511954195519561959195a19601965196a1989199119" + b"921995199819a119a619a919091a161a241a261a441a461a491a501a521a551a" + b"581a611a661a691a851a911a961a9a1a0020022008200a201520202022202520" + b"28202a20452051205920612065208020822088208a209520a020a220a520a820" + b"aa2005211121142119212521422144214921552158215a216121642165216621" + b"8521902196219921a521012208220a22112215222022222228222a2245225122" + b"562259226522812288228a2291229522a022a222a822aa220524142416241924" + b"252444244524462449245224552458245a2466248524912494249924a124a524" + b"0925152521252925402545254825512554255525592562256525682589259025" + b"9425952598259a25a125a425a625a92505261026122619262526412649265526" + b"6026612669268426862690269a260028022808280a2815282028222828282a28" + b"45285128542865288028822888288a28a028a228a828aa280929112914291929" + b"2529462949295229552961296429662969298529902996299929a429a529002a" + b"022a082a0a2a202a222a282a2a2a452a512a562a592a652a802a822a882a8a2a" + b"952aa02aa22aa82aaa2a054011401640254049405240554058405a4061406440" + b"664094409940a140a6400041014104410641094112411541164118411a412141" + b"26412941454148414a41514154415541564159415a41654168416a4181418441" + b"8641904192419541a041a141a241054211421442164225424142524255425a42" + b"6442694289429442a5420144154419442944454448444a445144544455445644" + b"61446244654468446a44814486448944904492449544a044a144a94401450245" + b"05450a4511451445154516451945204525452a45414544454545464549455045" + b"5145544555455645584559456145644565456645694582458445854588459145" + b"94459545964599459a45a545a845aa450146054609461446154618461a462146" + b"2446294640464246454648465046514652465546564659466246654668468146" + b"85468a4694469546a146a446a6460548114815481a4825484248494850485548" + b"5848614864486648694885489148944896489948a5480149054906490a491049" + b"144915491849214924492649404945494a495149524954495549564959496049" + b"6249654966496a49864989499249954996499849a149a449a649a949164a444a" + b"464a494a554a584a5a4a644a694a944aa54a0150045005500650095012501550" + b"1a50215024502950405045504850515054505550565059506550685086508950" + b"95509850a050a150a650a9500551085109510a51115114511551165118511951" + b"20512551265128512a5141514451455146514951505151515251545155515651" + b"585159515a51615164516551665169518251855191519451955196519951a051" + b"a551aa5101520652125215521a5221522452425245524a525152545255525652" + b"595262526552855290529252955299529a52a452045405541154145415541654" + b"185419542154255428542a54415444544554465449544a545054515454545554" + b"5654585459545a54615462546454655466546954805488548a54915494549554" + b"96549954a154a454a554aa540155025504550555065509551055115512551455" + b"1555165519551a55215524552555265529554055415542554455455546554855" + b"4955505551555255545555555655585559555a55605561556455655566556855" + b"69556a5581558455855589558a559055915594559555965598559955a155a455" + b"a555a655a9550056015602560456065608560956115614561556185619562056" + b"2156225624562556265628562956415645564656485649564a56505651565256" + b"545655565656585659565a566156645665566956825685568656885689568a56" + b"915695569a56a256a556a656a856a95604580558065809581058155818582158" + b"2a58455848584a58515854585558565858585958605862586458655882588958" + b"9058925895589858a158a9580159025905590a59115914591559165919592559" + 
b"41594459455946594959505951595259545955595659585959595a5961596459" + b"655966596959815985598959915994599559965998599959a559045a085a155a" + b"1a5a205a255a265a295a455a485a495a515a555a565a585a595a625a655a685a" + b"6a5a815a8a5a925a955a965a985a9a5aa15a0560146016601960256044605060" + b"5560566058605a60616064606660696081609660a56001610461066109611261" + b"15612161226126612961456149615161556156615961656166616a6184618a61" + b"92619561a161a661a96111621662196240624162466255625662586260628562" + b"91629662a56211641264156416641a6421642664296440644264456448644a64" + b"516454645564566459645a646064626465648464856489649064926494649564" + b"966498649a64a164a464a964056508650a651165156516651965446545654665" + b"496550655165546555655665596561656465656566656965866589658a659165" + b"9565966599659a65a265a565a665a86502660966156620662666286629664066" + b"456648664a66516654665566566658665a666066656668668066826685668a66" + b"9466966698669966a066a466a666aa661668196825684168526855685a686168" + b"6968856891689868a66801690469106915692169246926692969406941694569" + b"4669486951695469556956695969606965696a69826984698a699569a169a469" + b"a569a969116a166a186a416a446a496a506a556a586a5a6a646a656a696a866a" + b"946a986a9a6aa66a0080028008800a802080228028802a804580508051805480" + b"5680598065808080828088808a809580a080a280a880aa800581118114811681" + b"1981258141814481498150815281558156815881598164816681698185818981" + b"948196819981a5810082028208820a8215822082228228822a82518254825982" + b"65828082828288828a829582a082a282a882aa82148419844184448451845584" + b"5a846184648469849484998401850985128515851a8526852985408541854585" + b"4885518554855585568559855a856585668568856a8581858485868589859085" + b"928595859885a68511861686198625864186448649864a865086558659865a86" + b"618666866a86858691869a86a4860088028808880a8815882088228828882a88" + b"41884588518854885988658869888088828888888a889588a088a288a888aa88" + b"05890689118914891689258941894489468949895089528955895a8961896489" + b"858996899989a589008a028a088a0a8a158a208a228a288a2a8a458a518a548a" + b"568a808a828a888a8a8a958aa08aa28aa88aaa8a059011901690189019902590" + b"419046904990559058905a9069906a9085909190949096909990a59001910491" + b"069109911091159118911a912191249126912991409145915091519154915591" + b"569159916291659184918691929195919891a191a491a691a991059211921492" + b"19922592449246924992509252925592589266926992859294929692a9920194" + b"04940694109415941894269440944a9451945494559456945894599460946194" + b"62946594849486949294949495949894a194a9940095059508950a9510951195" + b"14951595169519952195259529952a9541954495459546954995509551955295" + b"549555955695589559955a956195649565956695699581958595889591959295" + b"94959595969599959a95a095a295a595a895aa95019604961096159619962096" + b"2696299645964896499651965296559656965996659668968296849689968a96" + b"929694969596a496a696a9960598169819982598419846985098529855985698" + b"5a98649865988598919896989998a59804990699099910991299159918991a99" + b"209921992499269940994299459948994a995199549955995699599962996599" + b"66996a99819984999099929995999a99a199a699059a159a259a449a469a499a" + b"509a559a589a619a859a919a949a959a969a00a002a008a00aa015a020a022a0" + b"28a02aa045a051a054a056a059a080a082a088a08aa095a0a0a0a2a0a8a0aaa0" + b"05a109a111a114a116a119a11aa146a149a151a155a158a15aa161a164a185a1" + b"90a192a196a199a102a208a20aa210a219a222a228a22aa245a251a256a259a2" + b"65a280a282a288a28aa295a2a0a2a2a2a8a2aaa219a425a441a444a450a454a4" + b"55a458a45aa461a465a466a468a469a485a406a509a510a512a515a518a526a5" + 
b"29a542a545a551a554a555a556a559a565a56aa581a584a585a586a589a592a5" + b"95a598a505a611a616a61aa621a625a644a646a64aa652a655a656a658a660a6" + b"62a686a690a695a696a699a6a1a6a4a6a6a600a802a808a80aa820a822a828a8" + b"2aa851a854a856a859a880a882a888a88aa895a8a0a8a2a8a8a8aaa805a914a9" + b"19a921a925a941a950a955a95aa961a966a969a990a996a900aa02aa08aa0aaa" + b"20aa22aa28aa2aaa51aa54aa56aa80aa82aa88aa8aaa95aaa0aaa2aaa8aaaaaa" + ) + + delta = np.float32(0.125) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + qs, qh = np.hsplit(rest, [QK_K // 8]) + + d = d.view(np.float16).astype(np.float32) + qh = qh.view(np.uint16) + + dl = d * (2 * ((qh >> 12) & 7) + 1) + dl = dl.reshape((n_blocks, -1, 1, 1)) + delta = np.where((qh & np.uint16(0x8000)) == 0, cls.delta, -cls.delta) + delta = delta.reshape((n_blocks, -1, 1, 1)) + + qh = qh.reshape((n_blocks, -1, 1)) >> np.array([0, 3, 6, 9], dtype=np.uint16).reshape((1, 1, 4)) + qs = qs.astype(np.uint16) | ((qh & 7) << 8).reshape((n_blocks, -1)) + + assert cls.grid is not None + grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2) + grid = grid.reshape((n_blocks, -1, 4, 8)) + + return (dl * (grid + delta)).reshape((n_blocks, -1)) + + +class IQ1_M(__Quant, qtype=GGMLQuantizationType.IQ1_M): + grid_shape = IQ1_S.grid_shape + grid_map = IQ1_S.grid_map + grid_hex = IQ1_S.grid_hex + + delta = IQ1_S.delta + + # Okay *this* type is weird. It's the only one which stores the f16 scales in multiple parts. + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + qs, rest = np.hsplit(blocks, [QK_K // 8]) + qh, scales = np.hsplit(rest, [QK_K // 16]) + + # The f16 scale is packed across multiple bytes + scales = scales.view(np.uint16) + d = (scales.reshape((n_blocks, 4)) & np.uint16(0xF000)) >> np.array([12, 8, 4, 0], dtype=np.uint16).reshape((1, 4)) + d = d[..., 0] | d[..., 1] | d[..., 2] | d[..., 3] + d = d.view(np.float16).astype(np.float32).reshape((n_blocks, 1)) + + scales = scales.reshape(n_blocks, -1, 1) >> np.array([0, 3, 6, 9], dtype=np.uint16).reshape((1, 1, 4)) + scales = (scales & 0x07).reshape((n_blocks, -1)) + dl = d * (2 * scales + 1) + dl = dl.reshape((n_blocks, -1, 2, 1, 1)) + + qh = qh.reshape((n_blocks, -1, 1)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2)) + qs = qs.astype(np.uint16) | ((qh & 0x07).astype(np.uint16) << 8).reshape((n_blocks, -1)) + + delta = np.where(qh & 0x08 == 0, cls.delta, -cls.delta) + delta = delta.reshape((n_blocks, -1, 2, 2, 1)) + + assert cls.grid is not None + grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2) + grid = grid.reshape((n_blocks, -1, 2, 2, 8)) + + return (dl * (grid + delta)).reshape((n_blocks, -1)) + + +class IQ4_NL(__Quant, qtype=GGMLQuantizationType.IQ4_NL): + kvalues = (-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, qs = np.hsplit(blocks, [2]) + + d = d.view(np.float16).astype(np.float32) + + qs = qs.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1)) + + qs = (qs & np.uint8(0x0F)).reshape((n_blocks, -1, 1)) + + kvalues = np.array(cls.kvalues, dtype=np.int8).reshape(1, 1, 16) + qs = np.take_along_axis(kvalues, qs, axis=-1).astype(np.float32).reshape((n_blocks, -1)) + + return (d * qs) + + +class IQ4_XS(__Quant, 
qtype=GGMLQuantizationType.IQ4_XS): + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_blocks = blocks.shape[0] + + d, rest = np.hsplit(blocks, [2]) + scales_h, rest = np.hsplit(rest, [2]) + scales_l, qs = np.hsplit(rest, [QK_K // 64]) + + d = d.view(np.float16).astype(np.float32) + scales_h = scales_h.view(np.uint16) + + scales_l = scales_l.reshape((n_blocks, -1, 1)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2)) + scales_h = scales_h.reshape((n_blocks, 1, -1)) >> np.array([2 * i for i in range(QK_K // 32)], dtype=np.uint16).reshape((1, -1, 1)) + scales_l = scales_l.reshape((n_blocks, -1)) & np.uint8(0x0F) + scales_h = scales_h.reshape((n_blocks, -1)).astype(np.uint8) & np.uint8(0x03) + + scales = (scales_l | (scales_h << np.uint8(4))).astype(np.int8) - np.int8(32) + dl = (d * scales.astype(np.float32)).reshape((n_blocks, -1, 1)) + + qs = qs.reshape((n_blocks, -1, 1, 16)) >> np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1)) + qs = qs.reshape((n_blocks, -1, 32, 1)) & np.uint8(0x0F) + + kvalues = np.array(IQ4_NL.kvalues, dtype=np.int8).reshape((1, 1, 1, -1)) + qs = np.take_along_axis(kvalues, qs, axis=-1).astype(np.float32).reshape((n_blocks, -1, 32)) + + return (dl * qs).reshape((n_blocks, -1)) diff --git a/.venv/lib/python3.11/site-packages/gguf/tensor_mapping.py b/.venv/lib/python3.11/site-packages/gguf/tensor_mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f185c0658a34b14abd6893c16e5bf8216258af --- /dev/null +++ b/.venv/lib/python3.11/site-packages/gguf/tensor_mapping.py @@ -0,0 +1,657 @@ +from __future__ import annotations + +from typing import Sequence + +from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES + + +class TensorNameMap: + mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = { + # Token embeddings + MODEL_TENSOR.TOKEN_EMBD: ( + "gpt_neox.embed_in", # gptneox + "transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais exaone + "transformer.word_embeddings", # falcon + "word_embeddings", # bloom + "model.embed_tokens", # llama-hf nemotron + "tok_embeddings", # llama-pth + "embeddings.word_embeddings", # bert nomic-bert + "language_model.embedding.word_embeddings", # persimmon + "wte", # gpt2 + "transformer.embd.wte", # phi2 + "model.tok_embeddings", # internlm2 + "model.embedding", # mamba-qbert + "backbone.embedding", # mamba + "backbone.embeddings", # mamba-hf + "transformer.in_out_embed", # Grok + "embedding.word_embeddings", # chatglm + "transformer.token_embeddings", # openelm + "shared", # t5 + ), + + # Token type embeddings + MODEL_TENSOR.TOKEN_TYPES: ( + "embeddings.token_type_embeddings", # bert nomic-bert + ), + + # Normalization of token embeddings + MODEL_TENSOR.TOKEN_EMBD_NORM: ( + "word_embeddings_layernorm", # bloom + "embeddings.LayerNorm", # bert + "emb_ln", # nomic-bert + "transformer.norm", # openelm + ), + + # Position embeddings + MODEL_TENSOR.POS_EMBD: ( + "transformer.wpe", # gpt2 + "embeddings.position_embeddings", # bert + "wpe", # gpt2 + ), + + # Output + MODEL_TENSOR.OUTPUT: ( + "embed_out", # gptneox + "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone + "output", # llama-pth bloom internlm2 + "word_embeddings_for_head", # persimmon + "lm_head.linear", # phi2 + "output_layer", # chatglm + ), + + # Output norm + MODEL_TENSOR.OUTPUT_NORM: ( + "gpt_neox.final_layer_norm", # gptneox + "transformer.ln_f", # gpt2 gpt-j falcon jais exaone + "model.norm", # llama-hf baichuan internlm2 + "norm", # llama-pth + 
"transformer.norm_f", # mpt dbrx + "ln_f", # refact bloom qwen gpt2 + "language_model.encoder.final_layernorm", # persimmon + "model.final_layernorm", # persimmon + "lm_head.ln", # phi2 + "model.norm_f", # mamba-qbert + "backbone.norm_f", # mamba + "transformer.rms_norm", # Grok + "encoder.final_layernorm", # chatglm + "transformer.norm", # openelm + "model.norm", # nemotron + ), + + # Rope frequencies + MODEL_TENSOR.ROPE_FREQS: ( + "rope.freqs", # llama-pth + "rotary_pos_emb.inv_freq", # chatglm + ), + } + + block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = { + # Attention norm + MODEL_TENSOR.ATTN_NORM: ( + "gpt_neox.layers.{bid}.input_layernorm", # gptneox + "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact qwen jais exaone + "transformer.blocks.{bid}.norm_1", # mpt + "transformer.h.{bid}.input_layernorm", # falcon7b + "h.{bid}.input_layernorm", # bloom + "transformer.h.{bid}.ln_mlp", # falcon40b + "model.layers.{bid}.input_layernorm", # llama-hf nemotron + "layers.{bid}.attention_norm", # llama-pth + "language_model.encoder.layers.{bid}.input_layernorm", # persimmon + "model.layers.{bid}.ln1", # yi + "h.{bid}.ln_1", # gpt2 + "transformer.h.{bid}.ln", # phi2 + "model.layers.layers.{bid}.norm", # plamo + "model.layers.{bid}.attention_norm", # internlm2 + "model.layers.{bid}.norm", # mamba-qbert + "backbone.layers.{bid}.norm", # mamba + "transformer.decoder_layer.{bid}.rms_norm", # Grok + "transformer.blocks.{bid}.norm_attn_norm.norm_1", # dbrx + "encoder.layers.{bid}.input_layernorm", # chatglm + "transformer.layers.{bid}.attn_norm", # openelm + ), + + # Attention norm 2 + MODEL_TENSOR.ATTN_NORM_2: ( + "transformer.h.{bid}.ln_attn", # falcon40b + "encoder.layer.{bid}.layer_norm_1", # jina-v2-code + ), + + # Attention query-key-value + MODEL_TENSOR.ATTN_QKV: ( + "gpt_neox.layers.{bid}.attention.query_key_value", # gptneox + "transformer.h.{bid}.attn.c_attn", # gpt2 qwen jais + "transformer.blocks.{bid}.attn.Wqkv", # mpt + "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv", # dbrx + "transformer.h.{bid}.self_attention.query_key_value", # falcon + "h.{bid}.self_attention.query_key_value", # bloom + "language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon + "model.layers.{bid}.self_attn.query_key_value", # persimmon + "h.{bid}.attn.c_attn", # gpt2 + "transformer.h.{bid}.mixer.Wqkv", # phi2 + "encoder.layers.{bid}.attn.Wqkv", # nomic-bert + "model.layers.{bid}.self_attn.qkv_proj", # phi3 + "encoder.layers.{bid}.self_attention.query_key_value", # chatglm + "transformer.layers.{bid}.attn.qkv_proj", # openelm + ), + + # Attention query + MODEL_TENSOR.ATTN_Q: ( + "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron + "layers.{bid}.attention.wq", # llama-pth + "encoder.layer.{bid}.attention.self.query", # bert + "transformer.h.{bid}.attn.q_proj", # gpt-j + "model.layers.layers.{bid}.self_attn.q_proj", # plamo + "model.layers.{bid}.attention.wq", # internlm2 + "transformer.decoder_layer.{bid}.multi_head_attention.query",# Grok + "transformer.h.{bid}.attn.attention.q_proj", # exaone + ), + + # Attention key + MODEL_TENSOR.ATTN_K: ( + "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron + "layers.{bid}.attention.wk", # llama-pth + "encoder.layer.{bid}.attention.self.key", # bert + "transformer.h.{bid}.attn.k_proj", # gpt-j + "transformer.h.{bid}.attn.k", # refact + "model.layers.layers.{bid}.self_attn.k_proj", # plamo + "model.layers.{bid}.attention.wk", # internlm2 + "transformer.decoder_layer.{bid}.multi_head_attention.key",# Grok + 
"transformer.h.{bid}.attn.attention.k_proj", # exaone + ), + + # Attention value + MODEL_TENSOR.ATTN_V: ( + "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron + "layers.{bid}.attention.wv", # llama-pth + "encoder.layer.{bid}.attention.self.value", # bert + "transformer.h.{bid}.attn.v_proj", # gpt-j + "transformer.h.{bid}.attn.v", # refact + "model.layers.layers.{bid}.self_attn.v_proj", # plamo + "model.layers.{bid}.attention.wv", # internlm2 + "transformer.decoder_layer.{bid}.multi_head_attention.value",# Grok + "transformer.h.{bid}.attn.attention.v_proj", # exaone + ), + + # Attention output + MODEL_TENSOR.ATTN_OUT: ( + "gpt_neox.layers.{bid}.attention.dense", # gptneox + "transformer.h.{bid}.attn.c_proj", # gpt2 refact qwen jais + "transformer.blocks.{bid}.attn.out_proj", # mpt + "transformer.h.{bid}.self_attention.dense", # falcon + "h.{bid}.self_attention.dense", # bloom + "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron + "layers.{bid}.attention.wo", # llama-pth + "encoder.layer.{bid}.attention.output.dense", # bert + "transformer.h.{bid}.attn.out_proj", # gpt-j + "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon + "model.layers.{bid}.self_attn.dense", # persimmon + "h.{bid}.attn.c_proj", # gpt2 + "transformer.h.{bid}.mixer.out_proj", # phi2 + "model.layers.layers.{bid}.self_attn.o_proj", # plamo + "model.layers.{bid}.attention.wo", # internlm2 + "encoder.layers.{bid}.attn.out_proj", # nomic-bert + "transformer.decoder_layer.{bid}.multi_head_attention.linear", # Grok + "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj", # dbrx + "encoder.layers.{bid}.self_attention.dense", # chatglm + "transformer.layers.{bid}.attn.out_proj", # openelm + "transformer.h.{bid}.attn.attention.out_proj", # exaone + ), + + # Attention output norm + MODEL_TENSOR.ATTN_OUT_NORM: ( + "encoder.layer.{bid}.attention.output.LayerNorm", # bert + "encoder.layers.{bid}.norm1", # nomic-bert + "transformer.decoder_layer.{bid}.rms_norm_1", # Grok + "transformer.blocks.{bid}.norm_attn_norm.norm_2", # dbrx + ), + + MODEL_TENSOR.ATTN_POST_NORM: ( + "model.layers.{bid}.post_attention_layernorm", # gemma2 + ), + + # Rotary embeddings + MODEL_TENSOR.ATTN_ROT_EMBD: ( + "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf + "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth + "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq", # plamo + "transformer.h.{bid}.attn.rotary_emb.inv_freq", # codeshell + ), + + # Feed-forward norm + MODEL_TENSOR.FFN_NORM: ( + "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox + "transformer.h.{bid}.ln_2", # gpt2 refact qwen jais exaone + "h.{bid}.post_attention_layernorm", # bloom + "transformer.blocks.{bid}.norm_2", # mpt + "model.layers.{bid}.post_attention_layernorm", # llama-hf nemotron + "layers.{bid}.ffn_norm", # llama-pth + "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon + "model.layers.{bid}.ln2", # yi + "h.{bid}.ln_2", # gpt2 + "model.layers.{bid}.ffn_norm", # internlm2 + "transformer.decoder_layer.{bid}.rms_norm_2", # Grok + "encoder.layers.{bid}.post_attention_layernorm", # chatglm + "transformer.layers.{bid}.ffn_norm", # openelm + ), + + # Post feed-forward norm + MODEL_TENSOR.FFN_PRE_NORM: ( + "model.layers.{bid}.pre_feedforward_layernorm", # gemma2 + ), + + # Post feed-forward norm + MODEL_TENSOR.FFN_POST_NORM: ( + "model.layers.{bid}.post_feedforward_layernorm", # gemma2 + ), + + MODEL_TENSOR.FFN_GATE_INP: ( + "layers.{bid}.feed_forward.gate", # mixtral + 
"model.layers.{bid}.block_sparse_moe.gate", # mixtral + "model.layers.{bid}.mlp.gate", # qwen2moe + "transformer.decoder_layer.{bid}.router", # Grok + "transformer.blocks.{bid}.ffn.router.layer", # dbrx + ), + + MODEL_TENSOR.FFN_GATE_INP_SHEXP: ( + "model.layers.{bid}.mlp.shared_expert_gate", # qwen2moe + ), + + # Feed-forward up + MODEL_TENSOR.FFN_UP: ( + "gpt_neox.layers.{bid}.mlp.dense_h_to_4h", # gptneox + "transformer.h.{bid}.mlp.c_fc", # gpt2 jais + "transformer.blocks.{bid}.ffn.up_proj", # mpt + "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon + "h.{bid}.mlp.dense_h_to_4h", # bloom + "model.layers.{bid}.mlp.up_proj", # llama-hf refact nemotron + "layers.{bid}.feed_forward.w3", # llama-pth + "encoder.layer.{bid}.intermediate.dense", # bert + "transformer.h.{bid}.mlp.fc_in", # gpt-j + "transformer.h.{bid}.mlp.linear_3", # refact + "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon + "model.layers.{bid}.mlp.dense_h_to_4h", # persimmon + "transformer.h.{bid}.mlp.w1", # qwen + "h.{bid}.mlp.c_fc", # gpt2 + "transformer.h.{bid}.mlp.fc1", # phi2 + "model.layers.{bid}.mlp.fc1", # phi2 + "model.layers.{bid}.mlp.gate_up_proj", # phi3 + "model.layers.layers.{bid}.mlp.up_proj", # plamo + "model.layers.{bid}.feed_forward.w3", # internlm2 + "encoder.layers.{bid}.mlp.fc11", # nomic-bert + "model.layers.{bid}.mlp.c_fc", # starcoder2 + "encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2 + "model.layers.{bid}.residual_mlp.w3", # arctic + "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm + "transformer.h.{bid}.mlp.c_fc_1", # exaone + ), + + MODEL_TENSOR.FFN_UP_EXP: ( + "layers.{bid}.feed_forward.experts.w3", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx + "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe (merged) + ), + + MODEL_TENSOR.FFN_UP_SHEXP: ( + "model.layers.{bid}.mlp.shared_expert.up_proj", # qwen2moe + "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek2 + ), + + # AWQ-activation gate + MODEL_TENSOR.FFN_ACT: ( + "transformer.blocks.{bid}.ffn.act", # mpt + ), + + # Feed-forward gate + MODEL_TENSOR.FFN_GATE: ( + "model.layers.{bid}.mlp.gate_proj", # llama-hf refact + "layers.{bid}.feed_forward.w1", # llama-pth + "transformer.h.{bid}.mlp.w2", # qwen + "transformer.h.{bid}.mlp.c_fc2", # jais + "model.layers.layers.{bid}.mlp.gate_proj", # plamo + "model.layers.{bid}.feed_forward.w1", # internlm2 + "encoder.layers.{bid}.mlp.fc12", # nomic-bert + "encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2 + "transformer.h.{bid}.mlp.linear_1", # refact + "model.layers.{bid}.residual_mlp.w1", # arctic + "transformer.h.{bid}.mlp.c_fc_0", # exaone + ), + + MODEL_TENSOR.FFN_GATE_EXP: ( + "layers.{bid}.feed_forward.experts.w1", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx + "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe (merged) + ), + + MODEL_TENSOR.FFN_GATE_SHEXP: ( + "model.layers.{bid}.mlp.shared_expert.gate_proj", # qwen2moe + "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek2 + ), + + # Feed-forward down + MODEL_TENSOR.FFN_DOWN: ( + "gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox + "transformer.h.{bid}.mlp.c_proj", # gpt2 refact qwen jais + "transformer.blocks.{bid}.ffn.down_proj", # mpt + "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon + "h.{bid}.mlp.dense_4h_to_h", # bloom + "model.layers.{bid}.mlp.down_proj", # llama-hf nemotron + 
"layers.{bid}.feed_forward.w2", # llama-pth + "encoder.layer.{bid}.output.dense", # bert + "transformer.h.{bid}.mlp.fc_out", # gpt-j + "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon + "model.layers.{bid}.mlp.dense_4h_to_h", # persimmon + "h.{bid}.mlp.c_proj", # gpt2 + "transformer.h.{bid}.mlp.fc2", # phi2 + "model.layers.{bid}.mlp.fc2", # phi2 + "model.layers.layers.{bid}.mlp.down_proj", # plamo + "model.layers.{bid}.feed_forward.w2", # internlm2 + "encoder.layers.{bid}.mlp.fc2", # nomic-bert + "model.layers.{bid}.mlp.c_proj", # starcoder2 + "encoder.layer.{bid}.mlp.wo", # jina-bert-v2 + "transformer.layers.{bid}.ffn.proj_2", # openelm + "model.layers.{bid}.residual_mlp.w2", # arctic + "encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2 + "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm + "model.layers.h.{bid}.mlp.c_proj", # exaone + ), + + MODEL_TENSOR.FFN_DOWN_EXP: ( + "layers.{bid}.feed_forward.experts.w2", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear_1", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.w2", # dbrx + "model.layers.{bid}.mlp.experts.down_proj", # qwen2moe (merged) + ), + + MODEL_TENSOR.FFN_DOWN_SHEXP: ( + "model.layers.{bid}.mlp.shared_expert.down_proj", # qwen2moe + "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek2 + ), + + MODEL_TENSOR.ATTN_Q_NORM: ( + "language_model.encoder.layers.{bid}.self_attention.q_layernorm", + "model.layers.{bid}.self_attn.q_layernorm", # persimmon + "model.layers.{bid}.self_attn.q_norm", # cohere + "transformer.blocks.{bid}.attn.q_ln", # sea-lion + "encoder.layer.{bid}.attention.self.layer_norm_q", # jina-bert-v2 + "transformer.layers.{bid}.attn.q_norm", # openelm + ), + + MODEL_TENSOR.ATTN_K_NORM: ( + "language_model.encoder.layers.{bid}.self_attention.k_layernorm", + "model.layers.{bid}.self_attn.k_layernorm", # persimmon + "model.layers.{bid}.self_attn.k_norm", # cohere + "transformer.blocks.{bid}.attn.k_ln", # sea-lion + "encoder.layer.{bid}.attention.self.layer_norm_k", # jina-bert-v2 + "transformer.layers.{bid}.attn.k_norm", # openelm + ), + + MODEL_TENSOR.ROPE_FREQS: ( + "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq", # persimmon + ), + + MODEL_TENSOR.LAYER_OUT_NORM: ( + "encoder.layer.{bid}.output.LayerNorm", # bert + "encoder.layers.{bid}.norm2", # nomic-bert + "transformer.decoder_layer.{bid}.rms_norm_3", # Grok + "encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2 + "encoder.layer.{bid}.layer_norm_2" # jina-v2-code + ), + + MODEL_TENSOR.SSM_IN: ( + "model.layers.{bid}.in_proj", + "backbone.layers.{bid}.mixer.in_proj", + ), + + MODEL_TENSOR.SSM_CONV1D: ( + "model.layers.{bid}.conv1d", + "backbone.layers.{bid}.mixer.conv1d", + ), + + MODEL_TENSOR.SSM_X: ( + "model.layers.{bid}.x_proj", + "backbone.layers.{bid}.mixer.x_proj", + ), + + MODEL_TENSOR.SSM_DT: ( + "model.layers.{bid}.dt_proj", + "backbone.layers.{bid}.mixer.dt_proj", + ), + + MODEL_TENSOR.SSM_A: ( + "model.layers.{bid}.A_log", + "backbone.layers.{bid}.mixer.A_log", + ), + + MODEL_TENSOR.SSM_D: ( + "model.layers.{bid}.D", + "backbone.layers.{bid}.mixer.D", + ), + + MODEL_TENSOR.SSM_OUT: ( + "model.layers.{bid}.out_proj", + "backbone.layers.{bid}.mixer.out_proj", + ), + + MODEL_TENSOR.ATTN_Q_A: ( + "model.layers.{bid}.self_attn.q_a_proj", # deepseek2 + ), + + MODEL_TENSOR.ATTN_Q_B: ( + "model.layers.{bid}.self_attn.q_b_proj", # deepseek2 + ), + + MODEL_TENSOR.ATTN_KV_A_MQA: ( + "model.layers.{bid}.self_attn.kv_a_proj_with_mqa", # deepseek2 + ), + + MODEL_TENSOR.ATTN_KV_B: ( + 
"model.layers.{bid}.self_attn.kv_b_proj", # deepseek2 + ), + + MODEL_TENSOR.ATTN_Q_A_NORM: ( + "model.layers.{bid}.self_attn.q_a_layernorm", # deepseek2 + ), + + MODEL_TENSOR.ATTN_KV_A_NORM: ( + "model.layers.{bid}.self_attn.kv_a_layernorm", # deepseek2 + ), + + MODEL_TENSOR.ATTN_SUB_NORM: ( + "model.layers.{bid}.self_attn.inner_attn_ln", # bitnet + ), + + MODEL_TENSOR.FFN_SUB_NORM: ( + "model.layers.{bid}.mlp.ffn_layernorm", # bitnet + ), + + MODEL_TENSOR.DEC_ATTN_NORM: ( + "decoder.block.{bid}.layer.0.layer_norm", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_Q: ( + "decoder.block.{bid}.layer.0.SelfAttention.q", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_K: ( + "decoder.block.{bid}.layer.0.SelfAttention.k", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_V: ( + "decoder.block.{bid}.layer.0.SelfAttention.v", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_OUT: ( + "decoder.block.{bid}.layer.0.SelfAttention.o", # t5 + ), + + MODEL_TENSOR.DEC_ATTN_REL_B: ( + "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_NORM: ( + "decoder.block.{bid}.layer.1.layer_norm", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_Q: ( + "decoder.block.{bid}.layer.1.EncDecAttention.q", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_K: ( + "decoder.block.{bid}.layer.1.EncDecAttention.k", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_V: ( + "decoder.block.{bid}.layer.1.EncDecAttention.v", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_OUT: ( + "decoder.block.{bid}.layer.1.EncDecAttention.o", # t5 + ), + + MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: ( + "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias", # t5 + ), + + MODEL_TENSOR.DEC_FFN_NORM: ( + "decoder.block.{bid}.layer.2.layer_norm", # t5 + ), + + MODEL_TENSOR.DEC_FFN_GATE: ( + "decoder.block.{bid}.layer.2.DenseReluDense.wi_0", # flan-t5 + ), + + MODEL_TENSOR.DEC_FFN_UP: ( + "decoder.block.{bid}.layer.2.DenseReluDense.wi", # t5 + "decoder.block.{bid}.layer.2.DenseReluDense.wi_1", # flan-t5 + ), + + MODEL_TENSOR.DEC_FFN_DOWN: ( + "decoder.block.{bid}.layer.2.DenseReluDense.wo", # t5 + ), + + MODEL_TENSOR.DEC_OUTPUT_NORM: ( + "decoder.final_layer_norm", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_NORM: ( + "encoder.block.{bid}.layer.0.layer_norm", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_Q: ( + "encoder.block.{bid}.layer.0.SelfAttention.q", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_K: ( + "encoder.block.{bid}.layer.0.SelfAttention.k", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_V: ( + "encoder.block.{bid}.layer.0.SelfAttention.v", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_OUT: ( + "encoder.block.{bid}.layer.0.SelfAttention.o", # t5 + ), + + MODEL_TENSOR.ENC_ATTN_REL_B: ( + "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5 + ), + + MODEL_TENSOR.ENC_FFN_NORM: ( + "encoder.block.{bid}.layer.1.layer_norm", # t5 + ), + + MODEL_TENSOR.ENC_FFN_GATE: ( + "encoder.block.{bid}.layer.1.DenseReluDense.wi_0", # flan-t5 + ), + + MODEL_TENSOR.ENC_FFN_UP: ( + "encoder.block.{bid}.layer.1.DenseReluDense.wi", # t5 + "encoder.block.{bid}.layer.1.DenseReluDense.wi_1", # flan-t5 + ), + + MODEL_TENSOR.ENC_FFN_DOWN: ( + "encoder.block.{bid}.layer.1.DenseReluDense.wo", # t5 + ), + + MODEL_TENSOR.ENC_OUTPUT_NORM: ( + "encoder.final_layer_norm", # t5 + ), + } + + # architecture-specific block mappings + arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = { + MODEL_ARCH.ARCTIC: { + MODEL_TENSOR.FFN_NORM: ( + "model.layers.{bid}.residual_layernorm", + ), + MODEL_TENSOR.FFN_NORM_EXP: ( + "model.layers.{bid}.post_attention_layernorm", + ), + }, + } + + mapping: 
dict[str, tuple[MODEL_TENSOR, str]]
+
+ def __init__(self, arch: MODEL_ARCH, n_blocks: int):
+ self.mapping = {}
+ for tensor, keys in self.mappings_cfg.items():
+ if tensor not in MODEL_TENSORS[arch]:
+ continue
+ tensor_name = TENSOR_NAMES[tensor]
+ self.mapping[tensor_name] = (tensor, tensor_name)
+ for key in keys:
+ self.mapping[key] = (tensor, tensor_name)
+ if arch in self.arch_block_mappings_cfg:
+ self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch])
+ for bid in range(n_blocks):
+ for tensor, keys in self.block_mappings_cfg.items():
+ if tensor not in MODEL_TENSORS[arch]:
+ continue
+
+ tensor_name = TENSOR_NAMES[tensor].format(bid = bid)
+ self.mapping[tensor_name] = (tensor, tensor_name)
+ for key in keys:
+ key = key.format(bid = bid)
+ self.mapping[key] = (tensor, tensor_name)
+
+ def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
+ result = self.mapping.get(key)
+ if result is not None:
+ return result
+ for suffix in try_suffixes:
+ if key.endswith(suffix):
+ result = self.mapping.get(key[:-len(suffix)])
+ if result is not None:
+ return result[0], result[1] + suffix
+ return None
+
+ def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
+ result = self.get_type_and_name(key, try_suffixes = try_suffixes)
+ if result is None:
+ return None
+ return result[1]
+
+ def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None:
+ result = self.get_type_and_name(key, try_suffixes = try_suffixes)
+ if result is None:
+ return None
+ return result[0]
+
+ def __getitem__(self, key: str) -> str:
+ try:
+ return self.mapping[key][1]
+ except KeyError:
+ raise KeyError(key)
+
+ def __contains__(self, key: str) -> bool:
+ return key in self.mapping
+
+ def __repr__(self) -> str:
+ return repr(self.mapping)
+
+
+def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
+ return TensorNameMap(arch, n_blocks)
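+
+# Usage sketch (illustrative, not part of the upstream module): resolving a
+# checkpoint tensor name to its canonical GGUF name. The architecture, block
+# count and tensor names below are example values only.
+#
+#     tmap = get_tensor_name_map(MODEL_ARCH.LLAMA, n_blocks=32)
+#     tmap.get_name("model.layers.0.self_attn.q_proj.weight",
+#                   try_suffixes=(".weight", ".bias"))  # -> "blk.0.attn_q.weight"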
diff --git a/.venv/lib/python3.11/site-packages/gguf/utility.py b/.venv/lib/python3.11/site-packages/gguf/utility.py
new file mode 100644
index 0000000000000000000000000000000000000000..40d59b75ee04ec6b46d219ea3be0b3a8fb8b3f35
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/gguf/utility.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from typing import Literal
+
+
+def fill_templated_filename(filename: str, output_type: str | None) -> str:
+ # Given a file name fill in any type templates e.g. 'some-model-name.{ftype}.gguf'
+ ftype_lowercase: str = output_type.lower() if output_type is not None else ""
+ ftype_uppercase: str = output_type.upper() if output_type is not None else ""
+ return filename.format(ftype_lowercase,
+ outtype=ftype_lowercase, ftype=ftype_lowercase,
+ OUTTYPE=ftype_uppercase, FTYPE=ftype_uppercase)
+
+
+def model_weight_count_rounded_notation(model_params_count: int, min_digits: int = 2) -> str:
+ if model_params_count > 1e12 :
+ # Trillions Of Parameters
+ scaled_model_params = model_params_count * 1e-12
+ scale_suffix = "T"
+ elif model_params_count > 1e9 :
+ # Billions Of Parameters
+ scaled_model_params = model_params_count * 1e-9
+ scale_suffix = "B"
+ elif model_params_count > 1e6 :
+ # Millions Of Parameters
+ scaled_model_params = model_params_count * 1e-6
+ scale_suffix = "M"
+ else:
+ # Thousands Of Parameters
+ scaled_model_params = model_params_count * 1e-3
+ scale_suffix = "K"
+
+ fix = max(min_digits - len(str(round(scaled_model_params)).lstrip('0')), 0)
+
+ return f"{scaled_model_params:.{fix}f}{scale_suffix}"
+
+
+def size_label(total_params: int, shared_params: int, expert_params: int, expert_count: int) -> str:
+
+ if expert_count > 0:
+ pretty_size = model_weight_count_rounded_notation(abs(shared_params) + abs(expert_params), min_digits=2)
+ size_class = f"{expert_count}x{pretty_size}"
+ else:
+ size_class = model_weight_count_rounded_notation(abs(total_params), min_digits=2)
+
+ return size_class
+
+
+def naming_convention(model_name: str | None, base_name: str | None, finetune_string: str | None, version_string: str | None, size_label: str | None, output_type: str | None, model_type: Literal['vocab', 'LoRA'] | None = None) -> str:
+ # Reference: https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#gguf-naming-convention
+
+ if base_name is not None:
+ name = base_name.strip().replace(' ', '-').replace('/', '-')
+ elif model_name is not None:
+ name = model_name.strip().replace(' ', '-').replace('/', '-')
+ else:
+ name = "ggml-model"
+
+ parameters = f"-{size_label}" if size_label is not None else ""
+
+ finetune = f"-{finetune_string.strip().replace(' ', '-')}" if finetune_string is not None else ""
+
+ version = f"-{version_string.strip().replace(' ', '-')}" if version_string is not None else ""
+
+ encoding = f"-{output_type.strip().replace(' ', '-').upper()}" if output_type is not None else ""
+
+ kind = f"-{model_type.strip().replace(' ', '-')}" if model_type is not None else ""
+
+ return f"{name}{parameters}{finetune}{version}{encoding}{kind}"
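+
+# Sketch with illustrative values (not upstream code): composing the helpers
+# above into a GGUF file name.
+#
+#     label = size_label(total_params=49_000_000_000, shared_params=1_000_000_000,
+#                        expert_params=6_000_000_000, expert_count=8)  # '8x7.0B'
+#     naming_convention("mixtral", "Mixtral", "Instruct", "v0.1", label, "F16")
+#     # -> 'Mixtral-8x7.0B-Instruct-v0.1-F16'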
diff --git a/.venv/lib/python3.11/site-packages/gguf/vocab.py b/.venv/lib/python3.11/site-packages/gguf/vocab.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc574991381a8a9558611cb729c4fb763dd4c444
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/gguf/vocab.py
@@ -0,0 +1,465 @@
+from __future__ import annotations
+
+import re
+import logging
+import json
+import os
+from pathlib import Path
+from typing import Any, Callable, Sequence, Mapping, Iterable, Protocol, ClassVar, runtime_checkable
+
+from sentencepiece import SentencePieceProcessor
+
+import gguf
+
+from .gguf_writer import GGUFWriter
+
+logger = logging.getLogger(__name__)
+
+
+class SpecialVocab:
+ merges: list[str]
+ add_special_token: dict[str, bool]
+ special_token_ids: dict[str, int]
+ chat_template: str | Sequence[Mapping[str, str]] | None
+
+ def __init__(
+ self, path: str | os.PathLike[str], load_merges: bool = False,
+ special_token_types: Iterable[str] | None = None,
+ n_vocab: int | None = None,
+ ):
+ self.special_token_ids = {}
+ self.add_special_token = {}
+ self.n_vocab = n_vocab
+ self.load_merges = load_merges
+ self.merges = []
+ self.chat_template = None
+ if special_token_types is not None:
+ self.special_token_types = special_token_types
+ else:
+ self.special_token_types = ('bos', 'eos', 'unk', 'sep', 'pad', 'cls', 'mask')
+ self._load(Path(path))
+
+ def __repr__(self) -> str:
+ return '<SpecialVocab with {} merges, special tokens {}, add special tokens {}>'.format(
+ len(self.merges), self.special_token_ids or "unset", self.add_special_token or "unset",
+ )
+
+ def add_to_gguf(self, gw: GGUFWriter, quiet: bool = False) -> None:
+ if self.merges:
+ if not quiet:
+ logger.info(f'Adding {len(self.merges)} merge(s).')
+ gw.add_token_merges(self.merges)
+ elif self.load_merges:
+ logger.warning('Adding merges requested but no merges found, output may be non-functional.')
+ for typ, tokid in self.special_token_ids.items():
+ id_handler: Callable[[int], None] | None = getattr(gw, f'add_{typ}_token_id', None)
+ if id_handler is None:
+ logger.warning(f'No handler for special token type {typ} with id {tokid} - skipping')
+ continue
+ if not quiet:
+ logger.info(f'Setting special token type {typ} to {tokid}')
+ id_handler(tokid)
+ for typ, value in self.add_special_token.items():
+ add_handler: Callable[[bool], None] | None = getattr(gw, f'add_add_{typ}_token', None)
+ if add_handler is None:
+ logger.warning(f'No handler for add_{typ}_token with value {value} - skipping')
+ continue
+ if not quiet:
+ logger.info(f'Setting add_{typ}_token to {value}')
+ add_handler(value)
+ if self.chat_template is not None:
+ if not quiet:
+ logger.info(f'Setting chat_template to {self.chat_template}')
+ gw.add_chat_template(self.chat_template)
+
+ def _load(self, path: Path) -> None:
+ self._try_load_from_tokenizer_json(path)
+ self._try_load_from_config_json(path)
+ if self.load_merges and not self.merges:
+ self._try_load_merges_txt(path)
+
+ def _try_load_merges_txt(self, path: Path) -> bool:
+ merges_file = path / 'merges.txt'
+ if not merges_file.is_file():
+ return False
+ with open(merges_file, 'r', encoding = 'utf-8') as fp:
+ first_line = next(fp, '').strip()
+ if not first_line.startswith('#'):
+ fp.seek(0)
+ line_num = 0
+ else:
+ line_num = 1
+ merges = []
+ for line in fp:
+ line_num += 1
+ line = line.strip()
+ if not line:
+ continue
+ parts = line.split(None, 3)
+ if len(parts) != 2:
+ logger.warning(f'{merges_file.name}: Line {line_num}: Entry malformed, ignoring')
+ continue
+ merges.append(f'{parts[0]} {parts[1]}')
+ self.merges = merges
+ return True
+
+ def _set_special_token(self, typ: str, tid: Any) -> None:
+ if not isinstance(tid, int):
+ return
+ if tid < 0:
+ raise ValueError(f'invalid value for special token type {typ}: {tid}')
+ if self.n_vocab is None or tid < self.n_vocab:
+ if typ in self.special_token_ids:
+ return
+ self.special_token_ids[typ] = tid
+ return
+ logger.warning(f'Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping')
+
+ def _try_load_from_tokenizer_json(self, path: Path) -> bool:
+ tokenizer_file = path / 'tokenizer.json'
+ if tokenizer_file.is_file():
+ with open(tokenizer_file, encoding = 'utf-8') as f:
+ tokenizer = json.load(f)
+ if self.load_merges:
+ merges = tokenizer.get('model', {}).get('merges')
+ if isinstance(merges, list) and merges and isinstance(merges[0], str):
+ self.merges = merges
+ added_tokens = tokenizer.get('added_tokens', {})
+ else:
+ added_tokens = {}
+ tokenizer_config_file = path / 'tokenizer_config.json'
+ if not tokenizer_config_file.is_file():
+ return True
+ with open(tokenizer_config_file, encoding = 'utf-8') as f:
+ tokenizer_config = json.load(f)
+ chat_template = tokenizer_config.get('chat_template')
+ if chat_template is None or isinstance(chat_template, (str, list)):
+ self.chat_template = chat_template
+ else:
+ logger.warning(f'Bad type for chat_template field in {tokenizer_config_file!r} - ignoring')
+ for typ in self.special_token_types:
+ add_entry = tokenizer_config.get(f'add_{typ}_token')
+ if isinstance(add_entry, bool):
+ self.add_special_token[typ] = add_entry
+ entry = tokenizer_config.get(f'{typ}_token')
+ if isinstance(entry, str):
+ tc_content = entry
+ elif isinstance(entry, dict):
+ entry_content = entry.get('content')
+ if not isinstance(entry_content, str):
+ continue
+ tc_content = entry_content
+ else:
+ continue
+ # We only need the first match here.
+ maybe_token_id = next(
+ (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content),
+ None,
+ )
+ self._set_special_token(typ, maybe_token_id)
+ return True
+
+ def _try_load_from_config_json(self, path: Path) -> bool:
+ config_file = path / 'config.json'
+ if not config_file.is_file():
+ return False
+ with open(config_file, encoding = 'utf-8') as f:
+ config = json.load(f)
+ for typ in self.special_token_types:
+ self._set_special_token(typ, config.get(f'{typ}_token_id'))
+ return True
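+
+# Usage sketch (illustrative, not part of the upstream module): a conversion
+# script typically pairs SpecialVocab with a GGUFWriter. 'writer' stands in
+# for a previously constructed gguf.GGUFWriter; the numbers are example values.
+#
+#     special_vocab = SpecialVocab(model_dir, load_merges=True, n_vocab=32000)
+#     special_vocab.add_to_gguf(writer)  # merges, special token ids, chat template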
+
+
+@runtime_checkable
+class BaseVocab(Protocol):
+ tokenizer_model: ClassVar[str]
+ name: ClassVar[str]
+
+
+@runtime_checkable
+class Vocab(BaseVocab, Protocol):
+ vocab_size: int
+ added_tokens_dict: dict[str, int]
+ added_tokens_list: list[str]
+ fname_tokenizer: Path
+
+ def __init__(self, base_path: Path): ...
+ def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: ...
+
+
+class NoVocab(BaseVocab):
+ tokenizer_model = "no_vocab"
+ name = "no_vocab"
+
+ def __repr__(self) -> str:
+ return "<NoVocab>"
+
+
+class BpeVocab(Vocab):
+ tokenizer_model = "gpt2"
+ name = "bpe"
+
+ def __init__(self, base_path: Path):
+ added_tokens: dict[str, int] = {}
+
+ if (fname_tokenizer := base_path / 'vocab.json').exists():
+ # "slow" tokenizer
+ with open(fname_tokenizer, encoding="utf-8") as f:
+ self.vocab = json.load(f)
+
+ try:
+ # FIXME: Verify that added tokens here _cannot_ overlap with the main vocab.
+ with open(base_path / 'added_tokens.json', encoding="utf-8") as f:
+ added_tokens = json.load(f)
+ except FileNotFoundError:
+ pass
+ else:
+ # "fast" tokenizer
+ fname_tokenizer = base_path / 'tokenizer.json'
+
+ # if this fails, FileNotFoundError propagates to caller
+ with open(fname_tokenizer, encoding="utf-8") as f:
+ tokenizer_json = json.load(f)
+
+ tokenizer_model: dict[str, Any] = tokenizer_json['model']
+ if (
+ tokenizer_model['type'] != 'BPE' or tokenizer_model.get('byte_fallback', False)
+ or tokenizer_json['decoder']['type'] != 'ByteLevel'
+ ):
+ raise FileNotFoundError('Cannot find GPT-2 BPE tokenizer')
+
+ self.vocab = tokenizer_model["vocab"]
+
+ if (added := tokenizer_json.get('added_tokens')) is not None:
+ # Added tokens here can be duplicates of the main vocabulary.
+ added_tokens = {item['content']: item['id']
+ for item in added
+ if item['content'] not in self.vocab}
+
+ vocab_size = len(self.vocab)
+ expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
+ actual_ids = sorted(added_tokens.values())
+ if expected_ids != actual_ids:
+ expected_end_id = vocab_size + len(actual_ids) - 1
+ raise ValueError(f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range "
+ f"{vocab_size} - {expected_end_id}; got {actual_ids}")
+
+ items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
+ self.added_tokens_dict = added_tokens
+ self.added_tokens_list = [text for (text, idx) in items]
+ self.vocab_size_base = vocab_size
+ self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
+ self.fname_tokenizer = fname_tokenizer
+
+ def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ reverse_vocab = {id: encoded_tok for encoded_tok, id in self.vocab.items()}
+
+ for i, _ in enumerate(self.vocab):
+ yield reverse_vocab[i], 0.0, gguf.TokenType.NORMAL
+
+ def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ for text in self.added_tokens_list:
+ score = -1000.0
+ yield text.encode("utf-8"), score, gguf.TokenType.CONTROL
+
+ def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ yield from self.bpe_tokens()
+ yield from self.added_tokens()
+
+ def __repr__(self) -> str:
+ return f"<BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
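+
+# Sketch (illustrative): every concrete Vocab exposes the same all_tokens()
+# protocol, so downstream conversion code can stay generic over the tokenizer
+# family; model_dir is a stand-in path.
+#
+#     vocab = BpeVocab(Path(model_dir))  # FileNotFoundError if no BPE files exist
+#     for text, score, toktype in vocab.all_tokens():
+#         ...  # (token text, score, gguf.TokenType) triples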
+
+
+class SentencePieceVocab(Vocab):
+ tokenizer_model = "llama"
+ name = "spm"
+
+ def __init__(self, base_path: Path):
+ added_tokens: dict[str, int] = {}
+ if (fname_tokenizer := base_path / 'tokenizer.model').exists():
+ # normal location
+ try:
+ with open(base_path / 'added_tokens.json', encoding="utf-8") as f:
+ added_tokens = json.load(f)
+ except FileNotFoundError:
+ pass
+ elif not (fname_tokenizer := base_path.parent / 'tokenizer.model').exists():
+ # not found in alternate location either
+ raise FileNotFoundError('Cannot find tokenizer.model')
+
+ self.sentencepiece_tokenizer = SentencePieceProcessor()
+ self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer))
+ vocab_size = self.sentencepiece_tokenizer.vocab_size()
+
+ new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
+ expected_new_ids = list(range(vocab_size, vocab_size + len(new_tokens)))
+ actual_new_ids = sorted(new_tokens.keys())
+
+ if expected_new_ids != actual_new_ids:
+ raise ValueError(f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}")
+
+ # Token pieces that were added to the base vocabulary.
+ self.added_tokens_dict = added_tokens
+ self.added_tokens_list = [new_tokens[id] for id in actual_new_ids]
+ self.vocab_size_base = vocab_size
+ self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
+ self.fname_tokenizer = fname_tokenizer
+
+ def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ tokenizer = self.sentencepiece_tokenizer
+ for i in range(tokenizer.vocab_size()):
+ piece = tokenizer.IdToPiece(i)
+ text = piece.encode("utf-8")
+ score: float = tokenizer.GetScore(i)
+
+ toktype = gguf.TokenType.NORMAL
+ if tokenizer.IsUnknown(i):
+ toktype = gguf.TokenType.UNKNOWN
+ if tokenizer.IsControl(i):
+ toktype = gguf.TokenType.CONTROL
+
+ # NOTE: I think added_tokens are user defined.
+ # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
+ # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED
+
+ if tokenizer.IsUnused(i):
+ toktype = gguf.TokenType.UNUSED
+ if tokenizer.IsByte(i):
+ toktype = gguf.TokenType.BYTE
+
+ yield text, score, toktype
+
+ def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ for text in self.added_tokens_list:
+ score = -1000.0
+ yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED
+
+ def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ yield from self.sentencepiece_tokens()
+ yield from self.added_tokens()
+
+ def __repr__(self) -> str:
+ return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
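+
+# Sketch (illustrative): unlike BpeVocab, which fabricates scores, this class
+# reports the piece scores and token types stored in tokenizer.model itself;
+# model_dir is a stand-in path.
+#
+#     vocab = SentencePieceVocab(Path(model_dir))
+#     byte_tokens = [t for t, s, tt in vocab.all_tokens()
+#                    if tt == gguf.TokenType.BYTE]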
+
+
+class LlamaHfVocab(Vocab):
+ tokenizer_model = "llama"
+ name = "hfft"
+
+ def __init__(self, base_path: Path):
+ fname_tokenizer = base_path / 'tokenizer.json'
+ # if this fails, FileNotFoundError propagates to caller
+ with open(fname_tokenizer, encoding='utf-8') as f:
+ tokenizer_json = json.load(f)
+
+ # pre-check so we know if we need transformers
+ tokenizer_model: dict[str, Any] = tokenizer_json['model']
+ is_llama3 = (
+ tokenizer_model['type'] == 'BPE' and tokenizer_model.get('ignore_merges', False)
+ and not tokenizer_model.get('byte_fallback', True)
+ )
+ if is_llama3:
+ raise TypeError('Llama 3 must be converted with BpeVocab')
+
+ if not is_llama3 and (
+ tokenizer_model['type'] != 'BPE' or not tokenizer_model.get('byte_fallback', False)
+ or tokenizer_json['decoder']['type'] != 'Sequence'
+ ):
+ raise FileNotFoundError('Cannot find Llama BPE tokenizer')
+
+ try:
+ from transformers import AutoTokenizer
+ except ImportError as e:
+ raise ImportError(
+ "To use LlamaHfVocab, please install the `transformers` package. "
+ "You can install it with `pip install transformers`."
+ ) from e
+
+ # Allow the tokenizer to default to slow or fast versions.
+ # Explicitly set tokenizer to use local paths.
+ self.tokenizer = AutoTokenizer.from_pretrained(
+ base_path,
+ cache_dir=base_path,
+ local_files_only=True,
+ )
+ assert self.tokenizer.is_fast # assume tokenizer.json is used
+
+ # Initialize lists and dictionaries for added tokens
+ self.added_tokens_list = []
+ self.added_tokens_dict = dict()
+ self.added_tokens_ids = set()
+
+ # Process added tokens
+ for tok, tokidx in sorted(
+ self.tokenizer.get_added_vocab().items(), key=lambda x: x[1]
+ ):
+ # Only consider added tokens that are not in the base vocabulary
+ if tokidx >= self.tokenizer.vocab_size:
+ self.added_tokens_list.append(tok)
+ self.added_tokens_dict[tok] = tokidx
+ self.added_tokens_ids.add(tokidx)
+
+ # Store special tokens and their IDs
+ self.specials = {
+ tok: self.tokenizer.get_vocab()[tok]
+ for tok in self.tokenizer.all_special_tokens
+ }
+ self.special_ids = set(self.tokenizer.all_special_ids)
+
+ # Set vocabulary sizes
+ self.vocab_size_base = self.tokenizer.vocab_size
+ self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
+
+ self.fname_tokenizer = fname_tokenizer
+
+ def hf_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ reverse_vocab = {
+ id: encoded_tok for encoded_tok, id in self.tokenizer.get_vocab().items()
+ }
+
+ for token_id in range(self.vocab_size_base):
+ # Skip processing added tokens here
+ if token_id in self.added_tokens_ids:
+ continue
+
+ # Convert token text to bytes
+ token_text = reverse_vocab[token_id].encode("utf-8")
+
+ # Yield token text, score, and type
+ yield token_text, self.get_token_score(token_id), self.get_token_type(
+ token_id, token_text, self.special_ids # Reuse already stored special IDs
+ )
+
+ def get_token_type(self, token_id: int, token_text: bytes, special_ids: set[int]) -> gguf.TokenType:
+ # Special case for byte tokens
+ if re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
+ return gguf.TokenType.BYTE
+
+ # Determine token type based on whether it's a special token
+ return gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL
+
+ def get_token_score(self, token_id: int) -> float:
+ # Placeholder for actual logic to determine the token's score
+ # This needs to be implemented based on specific requirements
+ return -1000.0 # Default score
+
+ def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ for text in self.added_tokens_list:
+ if text in self.specials:
+ toktype = self.get_token_type(self.specials[text], b'', self.special_ids)
+ score = self.get_token_score(self.specials[text])
+ else:
+ toktype = gguf.TokenType.USER_DEFINED
+ score = -1000.0
+
+ yield text.encode("utf-8"), score, toktype
+
+ def has_newline_token(self):
+ return "<0x0A>" in self.tokenizer.vocab or "\n" in self.tokenizer.vocab
+
+ def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ yield from self.hf_tokens()
+ yield from self.added_tokens()
+
+ def __repr__(self) -> str:
+ return f"<LlamaHfVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
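+
+# Sketch (illustrative, simplified): converters commonly probe these loaders
+# in order and let each constructor's FileNotFoundError drive the fallback:
+#
+#     for cls in (LlamaHfVocab, SentencePieceVocab, BpeVocab):
+#         try:
+#             vocab = cls(base_path)
+#             break
+#         except FileNotFoundError:
+#             continue
+#     else:
+#         raise FileNotFoundError('no supported tokenizer found')  # example handling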
diff --git a/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/License.txt b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/License.txt
new file mode 100644
index
0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. + + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. + + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +This package includes over 100+ CUDA examples that demonstrate +various CUDA programming principles, and efficient CUDA +implementation of algorithms in specific application domains. + + +Default Install Location of CUDA Samples + +Windows platform: + +%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.# + +Linux platform: + +/usr/local/cuda-#.#/samples + +and + +$HOME/NVIDIA_CUDA-#.#_Samples + +Mac platform: + +/Developer/NVIDIA/CUDA-#.#/samples + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Release Date: July 26, 2018 +--------------------------- + + +Important NoticeRead before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +("Agreement”) is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. 
+ +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. 
+ +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. 
You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. 
+ +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. 
+ +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. 
Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, 
npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Component + +NVIDIA JPEG Library + +Linux + +libnvjpeg.so, libnvjpeg_static.a + +Component + +Internal common library required for statically linking to +cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP + +Mac OSX + +libculibos.a + +Linux + +libculibos.a + +Component + +NVIDIA Runtime Compilation Library and Header + +All + +nvrtc.h + +Windows + +nvrtc.dll, nvrtc-builtins.dll + +Mac OSX + +libnvrtc.dylib, libnvrtc-builtins.dylib + +Linux + +libnvrtc.so, libnvrtc-builtins.so + +Component + +NVIDIA Optimizing Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-fatbinaryloader.so, +libnvidia-ptxjitcompiler.so + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. 
GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. + http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. 
+ + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. 
+ + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. 
+ + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. 
Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. 
Portions of the Nsight Eclipse Edition is subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines uses code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . + +----------------- diff --git a/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/RECORD b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..e2c2d1871fd8cd39bcc291c16cccc34064bcd620 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/RECORD @@ -0,0 +1,20 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-311.pyc,, +nvidia/cufft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cufft/__pycache__/__init__.cpython-311.pyc,, +nvidia/cufft/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cufft/include/__pycache__/__init__.cpython-311.pyc,, +nvidia/cufft/include/cudalibxt.h,sha256=9GDuRiOzJuO61zRDhIpWpF7XHp8FXSOIlHJNoIMwOZQ,4105 +nvidia/cufft/include/cufft.h,sha256=OPTrbN3YvHR2HZTy4Kr_azbFUz8ZGXAkmT_1ero1y3I,13109 +nvidia/cufft/include/cufftXt.h,sha256=bTMo9ixYPn-FnrCw2VYZ2XVwDYT7N8WrRdXp4CmBilY,11148 +nvidia/cufft/include/cufftw.h,sha256=Uzfj1IVMlLQU_G50u84hXYX1K95HLXIwOcjQoAg5pGE,20051 +nvidia/cufft/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cufft/lib/__pycache__/__init__.cpython-311.pyc,, +nvidia/cufft/lib/libcufft.so.11,sha256=85IcQTOSUkJFnr_b95AdtOv65rvP_53FzlOx_xP7Qv8,292889192 +nvidia/cufft/lib/libcufftw.so.11,sha256=IwelrPzMm0D5iThAOCGM_q1WTNQ2M3AdMMiTBH50T0Q,974888 +nvidia_cufft_cu12-11.2.1.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cufft_cu12-11.2.1.3.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 +nvidia_cufft_cu12-11.2.1.3.dist-info/METADATA,sha256=e3c8JR1hTBAIlY96lfSibInmFGkkwNcYO6CExGuXQ6w,1502 +nvidia_cufft_cu12-11.2.1.3.dist-info/RECORD,, +nvidia_cufft_cu12-11.2.1.3.dist-info/WHEEL,sha256=XDTs3wIbcE-BcRO08VJlZpA6z9OaC1mOKPCGGGwuM2g,109 +nvidia_cufft_cu12-11.2.1.3.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e6c30e957cfb045017a9fef3430bb8ee87c4a074 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-manylinux2014_x86_64 + diff --git a/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/nvidia_cufft_cu12-11.2.1.3.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git 
a/.venv/lib/python3.11/site-packages/openai-1.61.1.dist-info/METADATA b/.venv/lib/python3.11/site-packages/openai-1.61.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..6f4fe344ee33602c5e623b798082a7ff7c4fb20c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai-1.61.1.dist-info/METADATA @@ -0,0 +1,851 @@ +Metadata-Version: 2.4 +Name: openai +Version: 1.61.1 +Summary: The official Python library for the openai API +Project-URL: Homepage, https://github.com/openai/openai-python +Project-URL: Repository, https://github.com/openai/openai-python +Author-email: OpenAI +License-Expression: Apache-2.0 +License-File: LICENSE +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: MacOS +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: OS Independent +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: Linux +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Typing :: Typed +Requires-Python: >=3.8 +Requires-Dist: anyio<5,>=3.5.0 +Requires-Dist: distro<2,>=1.7.0 +Requires-Dist: httpx<1,>=0.23.0 +Requires-Dist: jiter<1,>=0.4.0 +Requires-Dist: pydantic<3,>=1.9.0 +Requires-Dist: sniffio +Requires-Dist: tqdm>4 +Requires-Dist: typing-extensions<5,>=4.11 +Provides-Extra: datalib +Requires-Dist: numpy>=1; extra == 'datalib' +Requires-Dist: pandas-stubs>=1.1.0.11; extra == 'datalib' +Requires-Dist: pandas>=1.2.3; extra == 'datalib' +Provides-Extra: realtime +Requires-Dist: websockets<15,>=13; extra == 'realtime' +Description-Content-Type: text/markdown + +# OpenAI Python API library + +[![PyPI version](https://img.shields.io/pypi/v/openai.svg)](https://pypi.org/project/openai/) + +The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.8+ +application. The library includes type definitions for all request params and response fields, +and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). + +It is generated from our [OpenAPI specification](https://github.com/openai/openai-openapi) with [Stainless](https://stainlessapi.com/). + +## Documentation + +The REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs). The full API of this library can be found in [api.md](https://github.com/openai/openai-python/tree/main/api.md). + +## Installation + +> [!IMPORTANT] +> The SDK was rewritten in v1, which was released November 6th 2023. See the [v1 migration guide](https://github.com/openai/openai-python/discussions/742), which includes scripts to automatically update your code. + +```sh +# install from PyPI +pip install openai +``` + +## Usage + +The full API of this library can be found in [api.md](https://github.com/openai/openai-python/tree/main/api.md). 

```python
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
)

chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Say this is a test",
        }
    ],
    model="gpt-4o",
)
```

While you can provide an `api_key` keyword argument,
we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
to add `OPENAI_API_KEY="My API Key"` to your `.env` file
so that your API Key is not stored in source control.

### Vision

With a hosted image:

```python
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {
                    "type": "image_url",
                    "image_url": {"url": f"{img_url}"},
                },
            ],
        }
    ],
)
```

With the image as a base64 encoded string:

```python
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:{img_type};base64,{img_b64_str}"},
                },
            ],
        }
    ],
)
```

### Polling Helpers

When interacting with the API, some actions, such as starting a Run and adding files to vector stores, are asynchronous and take time to complete. The SDK includes helper functions which will poll the status until it reaches a terminal state and then return the resulting object. If an API method results in an action that could benefit from polling, there will be a corresponding version of the method ending in `_and_poll`.

For instance, to create a Run and poll until it reaches a terminal state you can run:

```python
run = client.beta.threads.runs.create_and_poll(
    thread_id=thread.id,
    assistant_id=assistant.id,
)
```

More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle).

### Bulk Upload Helpers

When creating and interacting with vector stores, you can use polling helpers to monitor the status of operations. For convenience, we also provide a bulk upload helper that allows you to upload several files at once.

```python
sample_files = [Path("sample-paper.pdf"), ...]

batch = await client.vector_stores.file_batches.upload_and_poll(
    store.id,
    files=sample_files,
)
```

### Streaming Helpers

The SDK also includes helpers to process streams and handle incoming events.

```python
with client.beta.threads.runs.stream(
    thread_id=thread.id,
    assistant_id=assistant.id,
    instructions="Please address the user as Jane Doe. The user has a premium account.",
) as stream:
    for event in stream:
        # Print the text from text delta events
        if event.type == "thread.message.delta" and event.data.delta.content:
            print(event.data.delta.content[0].text)
```

More information on streaming helpers can be found in the dedicated documentation: [helpers.md](https://github.com/openai/openai-python/tree/main/helpers.md)

## Async usage

Simply import `AsyncOpenAI` instead of `OpenAI` and use `await` with each API call:

```python
import os
import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
)


async def main() -> None:
    chat_completion = await client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": "Say this is a test",
            }
        ],
        model="gpt-4o",
    )


asyncio.run(main())
```

Functionality between the synchronous and asynchronous clients is otherwise identical.

## Streaming responses

We provide support for streaming responses using Server-Sent Events (SSE).

```python
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Say this is a test",
        }
    ],
    model="gpt-4o",
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
```

The async client uses the exact same interface.

```python
import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main():
    stream = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Say this is a test"}],
        stream=True,
    )
    async for chunk in stream:
        print(chunk.choices[0].delta.content or "", end="")


asyncio.run(main())
```

## Module-level client

> [!IMPORTANT]
> We highly recommend instantiating client instances instead of relying on the global client.

We also expose a global client instance that is accessible in a similar fashion to versions prior to v1.

```py
import openai

# optional; defaults to `os.environ['OPENAI_API_KEY']`
openai.api_key = '...'

# all client options can be configured just like the `OpenAI` instantiation counterpart
openai.base_url = "https://..."
openai.default_headers = {"x-foo": "true"}

completion = openai.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": "How do I output all files in a directory using Python?",
        },
    ],
)
print(completion.choices[0].message.content)
```

The API is the exact same as the standard client instance-based API.

This is intended to be used within REPLs or notebooks for faster iteration, **not** in application code.

We recommend that you always instantiate a client (e.g., with `client = OpenAI()`) in application code because:

- It can be difficult to reason about where client options are configured
- It's not possible to change certain client options without potentially causing race conditions
- It's harder to mock for testing purposes
- It's not possible to control cleanup of network connections

## Realtime API beta

The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a WebSocket connection.

Under the hood the SDK uses the [`websockets`](https://websockets.readthedocs.io/en/stable/) library to manage connections.

The Realtime API works through a combination of client-sent events and server-sent events. Clients can send events to do things like update session configuration or send text and audio inputs. Server events confirm when audio responses have completed, or when a text response from the model has been received. A full event reference can be found [here](https://platform.openai.com/docs/api-reference/realtime-client-events) and a guide can be found [here](https://platform.openai.com/docs/guides/realtime).

A basic text-based example:

```py
import asyncio
from openai import AsyncOpenAI

async def main():
    client = AsyncOpenAI()

    async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
        await connection.session.update(session={'modalities': ['text']})

        await connection.conversation.item.create(
            item={
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "Say hello!"}],
            }
        )
        await connection.response.create()

        async for event in connection:
            if event.type == 'response.text.delta':
                print(event.delta, flush=True, end="")

            elif event.type == 'response.text.done':
                print()

            elif event.type == "response.done":
                break

asyncio.run(main())
```

However, the real magic of the Realtime API is handling audio inputs and outputs; see this [TUI script](https://github.com/openai/openai-python/blob/main/examples/realtime/push_to_talk_app.py) for a fully fledged example.

### Realtime error handling

Whenever an error occurs, the Realtime API will send an [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling) and the connection will stay open and remain usable. This means you need to handle it yourself, as *no errors are raised directly* by the SDK when an `error` event comes in.

```py
client = AsyncOpenAI()

async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
    ...
    async for event in connection:
        if event.type == 'error':
            print(event.error.type)
            print(event.error.code)
            print(event.error.event_id)
            print(event.error.message)
```

## Using types

Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like:

- Serializing back into JSON, `model.to_json()`
- Converting to a dictionary, `model.to_dict()`

Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.

## Pagination

List methods in the OpenAI API are paginated.

This library provides auto-paginating iterators with each list response, so you do not have to request successive pages manually:

```python
from openai import OpenAI

client = OpenAI()

all_jobs = []
# Automatically fetches more pages as needed.
for job in client.fine_tuning.jobs.list(
    limit=20,
):
    # Do something with job here
    all_jobs.append(job)
print(all_jobs)
```

Or, asynchronously:

```python
import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    all_jobs = []
    # Iterate through items across all pages, issuing requests as needed.
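    # Note: `limit` sets the per-page size; the iterator keeps fetching pages.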
    async for job in client.fine_tuning.jobs.list(
        limit=20,
    ):
        all_jobs.append(job)
    print(all_jobs)


asyncio.run(main())
```

Alternatively, you can use the `.has_next_page()`, `.next_page_info()`, or `.get_next_page()` methods for more granular control when working with pages:

```python
first_page = await client.fine_tuning.jobs.list(
    limit=20,
)
if first_page.has_next_page():
    print(f"will fetch next page using these details: {first_page.next_page_info()}")
    next_page = await first_page.get_next_page()
    print(f"number of items we just fetched: {len(next_page.data)}")

# Remove `await` for non-async usage.
```

Or just work directly with the returned data:

```python
first_page = await client.fine_tuning.jobs.list(
    limit=20,
)

print(f"next page cursor: {first_page.after}")  # => "next page cursor: ..."
for job in first_page.data:
    print(job.id)

# Remove `await` for non-async usage.
```

## Nested params

Nested parameters are dictionaries, typed using `TypedDict`, for example:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Can you generate an example json object describing a fruit?",
        }
    ],
    model="gpt-4o",
    response_format={"type": "json_object"},
)
```

## File uploads

Request parameters that correspond to file uploads can be passed as `bytes`, a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, or a tuple of `(filename, contents, media type)`.

```python
from pathlib import Path
from openai import OpenAI

client = OpenAI()

client.files.create(
    file=Path("input.jsonl"),
    purpose="fine-tune",
)
```

The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.

## Handling errors

When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `openai.APIConnectionError` is raised.

When the API returns a non-success status code (that is, a 4xx or 5xx response), a subclass of `openai.APIStatusError` is raised, containing `status_code` and `response` properties.

All errors inherit from `openai.APIError`.

```python
import openai
from openai import OpenAI

client = OpenAI()

try:
    client.fine_tuning.jobs.create(
        model="gpt-4o",
        training_file="file-abc123",
    )
except openai.APIConnectionError as e:
    print("The server could not be reached")
    print(e.__cause__)  # an underlying Exception, likely raised within httpx.
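# `RateLimitError` is a subclass of `APIStatusError`, so catch specific errors first.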
except openai.RateLimitError as e:
    print("A 429 status code was received; we should back off a bit.")
except openai.APIStatusError as e:
    print("Another non-200-range status code was received")
    print(e.status_code)
    print(e.response)
```

Error codes are as follows:

| Status Code | Error Type                 |
| ----------- | -------------------------- |
| 400         | `BadRequestError`          |
| 401         | `AuthenticationError`      |
| 403         | `PermissionDeniedError`    |
| 404         | `NotFoundError`            |
| 422         | `UnprocessableEntityError` |
| 429         | `RateLimitError`           |
| >=500       | `InternalServerError`      |
| N/A         | `APIConnectionError`       |

## Request IDs

> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests)

All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI.

```python
completion = await client.chat.completions.create(
    messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4"
)
print(completion._request_id)  # req_123
```

Note that unlike other properties that use an `_` prefix, the `_request_id` property
*is* public. Unless documented otherwise, *all* other `_` prefix properties,
methods and modules are *private*.

> [!IMPORTANT]
> If you need to access request IDs for failed requests, you must catch the `APIStatusError` exception

```python
import openai

try:
    completion = await client.chat.completions.create(
        messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4"
    )
except openai.APIStatusError as exc:
    print(exc.request_id)  # req_123
    raise exc
```


### Retries

Certain errors are automatically retried 2 times by default, with a short exponential backoff.
Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict,
429 Rate Limit, and >=500 Internal errors are all retried by default.

You can use the `max_retries` option to configure or disable retry settings:

```python
from openai import OpenAI

# Configure the default for all requests:
client = OpenAI(
    # default is 2
    max_retries=0,
)

# Or, configure per-request:
client.with_options(max_retries=5).chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "How can I get the name of the current day in JavaScript?",
        }
    ],
    model="gpt-4o",
)
```

### Timeouts

By default, requests time out after 10 minutes. You can configure this with a `timeout` option,
which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:

```python
import httpx
from openai import OpenAI

# Configure the default for all requests:
client = OpenAI(
    # 20 seconds (default is 10 minutes)
    timeout=20.0,
)

# More granular control:
client = OpenAI(
    timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
)

# Override per-request:
client.with_options(timeout=5.0).chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "How can I list all files in a directory using Python?",
        }
    ],
    model="gpt-4o",
)
```

On timeout, an `APITimeoutError` is raised.

Note that requests that time out are [retried twice by default](https://github.com/openai/openai-python/tree/main/#retries).

## Advanced

### Logging

We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module.
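
Because logging goes through the standard module, you can also configure it directly in code. A minimal sketch (the logger name `openai` is an assumption based on the package name, not something documented here):

```python
import logging

# Route all logs to stderr with the default format.
logging.basicConfig(level=logging.WARNING)

# Turn on verbose output for the SDK's logger only (assumed to be named "openai").
logging.getLogger("openai").setLevel(logging.DEBUG)
```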

Alternatively, you can enable logging by setting the environment variable `OPENAI_LOG` to `info`.

```shell
$ export OPENAI_LOG=info
```

Or to `debug` for more verbose logging.

### How to tell whether `None` means `null` or missing

In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. You can differentiate the two cases with `.model_fields_set`:

```py
if response.my_field is None:
    if 'my_field' not in response.model_fields_set:
        print('Got json like {}, without a "my_field" key present at all.')
    else:
        print('Got json like {"my_field": null}.')
```

### Accessing raw response data (e.g. headers)

The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,

```py
from openai import OpenAI

client = OpenAI()
response = client.chat.completions.with_raw_response.create(
    messages=[{
        "role": "user",
        "content": "Say this is a test",
    }],
    model="gpt-4o",
)
print(response.headers.get('X-My-Header'))

completion = response.parse()  # get the object that `chat.completions.create()` would have returned
print(completion)
```

These methods return a [`LegacyAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_legacy_response.py) object. This is a legacy class, as we're changing it slightly in the next major version.

For the sync client this will mostly be the same, with the exception that `content` and `text` will be methods instead of properties. In the async client, all methods will be async.

A migration script will be provided, and the migration in general should be smooth.

#### `.with_streaming_response`

The above interface eagerly reads the full response body when you make the request, which may not always be what you want.

To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.

As such, `.with_streaming_response` methods return a different [`APIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_response.py) object, and the async client returns an [`AsyncAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_response.py) object.

```python
with client.chat.completions.with_streaming_response.create(
    messages=[
        {
            "role": "user",
            "content": "Say this is a test",
        }
    ],
    model="gpt-4o",
) as response:
    print(response.headers.get("X-My-Header"))

    for line in response.iter_lines():
        print(line)
```

The context manager is required so that the response will reliably be closed.

### Making custom/undocumented requests

This library is typed for convenient access to the documented API.

If you need to access undocumented endpoints, params, or response properties, the library can still be used.

#### Undocumented endpoints

To make requests to undocumented endpoints, you can make requests using `client.get`, `client.post`, and other HTTP verbs. Options on the client (such as retries) will be respected when making this request.
+ +```py +import httpx + +response = client.post( + "/foo", + cast_to=httpx.Response, + body={"my_param": True}, +) + +print(response.headers.get("x-foo")) +``` + +#### Undocumented request params + +If you want to explicitly send an extra param, you can do so with the `extra_query`, `extra_body`, and `extra_headers` request +options. + +#### Undocumented response properties + +To access undocumented response properties, you can access the extra fields like `response.unknown_prop`. You +can also get all the extra fields on the Pydantic model as a dict with +[`response.model_extra`](https://docs.pydantic.dev/latest/api/base_model/#pydantic.BaseModel.model_extra). + +### Configuring the HTTP client + +You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including: + +- Support for [proxies](https://www.python-httpx.org/advanced/proxies/) +- Custom [transports](https://www.python-httpx.org/advanced/transports/) +- Additional [advanced](https://www.python-httpx.org/advanced/clients/) functionality + +```python +import httpx +from openai import OpenAI, DefaultHttpxClient + +client = OpenAI( + # Or use the `OPENAI_BASE_URL` env var + base_url="http://my.test.server.example.com:8083/v1", + http_client=DefaultHttpxClient( + proxy="http://my.test.proxy.example.com", + transport=httpx.HTTPTransport(local_address="0.0.0.0"), + ), +) +``` + +You can also customize the client on a per-request basis by using `with_options()`: + +```python +client.with_options(http_client=DefaultHttpxClient(...)) +``` + +### Managing HTTP resources + +By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. + +```py +from openai import OpenAI + +with OpenAI() as client: + # make requests here + ... + +# HTTP client is now closed +``` + +## Microsoft Azure OpenAI + +To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` +class instead of the `OpenAI` class. + +> [!IMPORTANT] +> The Azure API shape differs from the core API shape which means that the static types for responses / params +> won't always be correct. + +```py +from openai import AzureOpenAI + +# gets the API Key from environment variable AZURE_OPENAI_API_KEY +client = AzureOpenAI( + # https://learn.microsoft.com/azure/ai-services/openai/reference#rest-api-versioning + api_version="2023-07-01-preview", + # https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource + azure_endpoint="https://example-endpoint.openai.azure.com", +) + +completion = client.chat.completions.create( + model="deployment-name", # e.g. 
#### Undocumented response properties

To access undocumented response properties, you can access the extra fields like `response.unknown_prop`. You
can also get all the extra fields on the Pydantic model as a dict with
[`response.model_extra`](https://docs.pydantic.dev/latest/api/base_model/#pydantic.BaseModel.model_extra).

### Configuring the HTTP client

You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including:

- Support for [proxies](https://www.python-httpx.org/advanced/proxies/)
- Custom [transports](https://www.python-httpx.org/advanced/transports/)
- Additional [advanced](https://www.python-httpx.org/advanced/clients/) functionality

```python
import httpx
from openai import OpenAI, DefaultHttpxClient

client = OpenAI(
    # Or use the `OPENAI_BASE_URL` env var
    base_url="http://my.test.server.example.com:8083/v1",
    http_client=DefaultHttpxClient(
        proxy="http://my.test.proxy.example.com",
        transport=httpx.HTTPTransport(local_address="0.0.0.0"),
    ),
)
```

You can also customize the client on a per-request basis by using `with_options()`:

```python
client.with_options(http_client=DefaultHttpxClient(...))
```

### Managing HTTP resources

By default, the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.

```py
from openai import OpenAI

with OpenAI() as client:
  # make requests here
  ...

# HTTP client is now closed
```

## Microsoft Azure OpenAI

To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI`
class instead of the `OpenAI` class.

> [!IMPORTANT]
> The Azure API shape differs from the core API shape, which means that the static types for responses / params
> won't always be correct.

```py
from openai import AzureOpenAI

# gets the API Key from environment variable AZURE_OPENAI_API_KEY
client = AzureOpenAI(
    # https://learn.microsoft.com/azure/ai-services/openai/reference#rest-api-versioning
    api_version="2023-07-01-preview",
    # https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
    azure_endpoint="https://example-endpoint.openai.azure.com",
)

completion = client.chat.completions.create(
    model="deployment-name",  # e.g. gpt-35-instant
    messages=[
        {
            "role": "user",
            "content": "How do I output all files in a directory using Python?",
        },
    ],
)
print(completion.to_json())
```

In addition to the options provided in the base `OpenAI` client, the following options are available:

- `azure_endpoint` (or the `AZURE_OPENAI_ENDPOINT` environment variable)
- `azure_deployment`
- `api_version` (or the `OPENAI_API_VERSION` environment variable)
- `azure_ad_token` (or the `AZURE_OPENAI_AD_TOKEN` environment variable)
- `azure_ad_token_provider`

An example of using the client with Microsoft Entra ID (formerly known as Azure Active Directory) can be found [here](https://github.com/openai/openai-python/blob/main/examples/azure_ad.py).

## Versioning

This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:

1. Changes that only affect static types, without breaking runtime behavior.
2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals.)_
3. Changes that we do not expect to impact the vast majority of users in practice.

We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.

We are keen for your feedback; please open an [issue](https://www.github.com/openai/openai-python/issues) with questions, bugs, or suggestions.

### Determining the installed version

If you've upgraded to the latest version but aren't seeing any new features you were expecting, then your Python environment is likely still using an older version.

You can determine the version that is being used at runtime with:

```py
import openai
print(openai.__version__)
```

## Requirements

Python 3.8 or higher.

## Contributing

See [the contributing documentation](https://github.com/openai/openai-python/tree/main/./CONTRIBUTING.md).
diff --git a/.venv/lib/python3.11/site-packages/openai-1.61.1.dist-info/licenses/LICENSE b/.venv/lib/python3.11/site-packages/openai-1.61.1.dist-info/licenses/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f011417af6f72ab5e00e7f48931028fa202ca7cb
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai-1.61.1.dist-info/licenses/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2025 OpenAI
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/.venv/lib/python3.11/site-packages/smart_open/__init__.py b/.venv/lib/python3.11/site-packages/smart_open/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75ec8e8ec57268eb55f6c870add25de54d78e983 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/__init__.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +""" +Utilities for streaming to/from several file-like data storages: S3 / HDFS / local +filesystem / compressed files, and many more, using a simple, Pythonic API. + +The streaming makes heavy use of generators and pipes, to avoid loading +full file contents into memory, allowing work with arbitrarily large files. + +The main functions are: + +* `open()`, which opens the given file for reading/writing +* `parse_uri()` +* `s3_iter_bucket()`, which goes over all keys in an S3 bucket in parallel +* `register_compressor()`, which registers callbacks for transparent compressor handling + +""" + +import logging + +# +# Prevent regression of #474 and #475 +# +logger = logging.getLogger(__name__) +logger.addHandler(logging.NullHandler()) + +from smart_open import version # noqa: E402 +from .smart_open_lib import open, parse_uri, smart_open, register_compressor # noqa: E402 + +_WARNING = """smart_open.s3_iter_bucket is deprecated and will stop functioning +in a future version. Please import iter_bucket from the smart_open.s3 module instead: + + from smart_open.s3 import iter_bucket as s3_iter_bucket + +""" +_WARNED = False + + +def s3_iter_bucket( + bucket_name, + prefix='', + accept_key=None, + key_limit=None, + workers=16, + retries=3, + **session_kwargs +): + """Deprecated. 
Use smart_open.s3.iter_bucket instead.""" + global _WARNED + from .s3 import iter_bucket + if not _WARNED: + logger.warning(_WARNING) + _WARNED = True + return iter_bucket( + bucket_name=bucket_name, + prefix=prefix, + accept_key=accept_key, + key_limit=key_limit, + workers=workers, + retries=retries, + session_kwargs=session_kwargs + ) + + +__all__ = [ + 'open', + 'parse_uri', + 'register_compressor', + 's3_iter_bucket', + 'smart_open', +] + +__version__ = version.__version__ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78dfdf80d29ed3109b6bad981eee61314314e992 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/azure.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/azure.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d871e3108bb9a84190a635951f3ccc6f159a83e7 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/azure.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/bytebuffer.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/bytebuffer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53ff59e805df5b5b4bfa53d881563ca77b0265f5 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/bytebuffer.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/compression.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/compression.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e071a903d00d42e130e8ea8650819cb725e5ced Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/compression.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/concurrency.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/concurrency.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fba51c8d89d57cd56e90afd93d679caa0d65414 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/concurrency.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/constants.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/constants.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06696445ba9d8032eeb4a100a664419a8d06e47d Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/constants.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/doctools.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/doctools.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d55a74b479ac5cd7e5b7729c82c514987e4d20e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/doctools.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/ftp.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/ftp.cpython-311.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..f0a2326b1dafadebb2dd7555b8e10545ce69b2d4 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/ftp.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/gcs.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/gcs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3bc38144e5ef03433194eafd543eb4890d453c9 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/gcs.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/hdfs.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/hdfs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8a16ccda99f87220ddb83f9eb8ee32502c8e1b5 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/hdfs.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/http.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/http.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58257ed4428d46e64bdea9cf2deea127688f9664 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/http.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/local_file.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/local_file.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97cdcc292c967ae0fc7694281cc72e1ebfbd0110 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/local_file.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/s3.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/s3.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8702de8faa9967b56dabd9f2e9bbdee56f3d75c Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/s3.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/smart_open_lib.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/smart_open_lib.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f840984521d31cdc936c83145b5098309d1cb4cd Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/smart_open_lib.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/ssh.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/ssh.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc7a942d5abf5fcbe71c265185886e3295fe6a9b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/ssh.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/transport.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/transport.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2529dc73a183a1d33b80b7b0a598b37ed1ca7e4c Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/transport.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/utils.cpython-311.pyc 
b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ad83c0f54a3880c3ceaad96849d005f4c04a4a4 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/utils.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/version.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/version.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8197163aae25d7ff092516cde1d5b65513115bbb Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/version.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/__pycache__/webhdfs.cpython-311.pyc b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/webhdfs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfdf269688fddfe037f1e7f5f785b358d2f8a77e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/smart_open/__pycache__/webhdfs.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/smart_open/azure.py b/.venv/lib/python3.11/site-packages/smart_open/azure.py new file mode 100644 index 0000000000000000000000000000000000000000..9f8c95a09882fb7c55df5a1bf1cfda3f9fabe761 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/azure.py @@ -0,0 +1,552 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2020 Radim Rehurek +# Copyright (C) 2020 Nicolas Mitchell +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# +"""Implements file-like objects for reading and writing to/from Azure Blob Storage.""" + +import base64 +import io +import logging + +import smart_open.bytebuffer +import smart_open.constants + +try: + import azure.storage.blob + import azure.core.exceptions +except ImportError: + MISSING_DEPS = True + +logger = logging.getLogger(__name__) + +_BINARY_TYPES = (bytes, bytearray, memoryview) +"""Allowed binary buffer types for writing to the underlying Azure Blob Storage stream""" + +SCHEME = "azure" +"""Supported scheme for Azure Blob Storage in smart_open endpoint URL""" + +_DEFAULT_MIN_PART_SIZE = 64 * 1024**2 +"""Default minimum part size for Azure Cloud Storage multipart uploads is 64MB""" + +DEFAULT_BUFFER_SIZE = 4 * 1024**2 +"""Default buffer size for working with Azure Blob Storage is 256MB +https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs +""" + +DEFAULT_MAX_CONCURRENCY = 1 +"""Default number of parallel connections with which to download.""" + + +def parse_uri(uri_as_string): + sr = smart_open.utils.safe_urlsplit(uri_as_string) + assert sr.scheme == SCHEME + first = sr.netloc + second = sr.path.lstrip('/') + + # https://docs.microsoft.com/en-us/rest/api/storageservices/working-with-the-root-container + if not second: + container_id = '$root' + blob_id = first + else: + container_id = first + blob_id = second + + return dict(scheme=SCHEME, container_id=container_id, blob_id=blob_id) + + +def open_uri(uri, mode, transport_params): + parsed_uri = parse_uri(uri) + kwargs = smart_open.utils.check_kwargs(open, transport_params) + return open(parsed_uri['container_id'], parsed_uri['blob_id'], mode, **kwargs) + + +def open( + container_id, + blob_id, + mode, + client=None, # type: Union[azure.storage.blob.BlobServiceClient, azure.storage.blob.ContainerClient, azure.storage.blob.BlobClient] # noqa + 
blob_kwargs=None, + buffer_size=DEFAULT_BUFFER_SIZE, + min_part_size=_DEFAULT_MIN_PART_SIZE, + max_concurrency=DEFAULT_MAX_CONCURRENCY, + ): + """Open an Azure Blob Storage blob for reading or writing. + + Parameters + ---------- + container_id: str + The name of the container this object resides in. + blob_id: str + The name of the blob within the bucket. + mode: str + The mode for opening the object. Must be either "rb" or "wb". + client: azure.storage.blob.BlobServiceClient, ContainerClient, or BlobClient + The Azure Blob Storage client to use when working with azure-storage-blob. + blob_kwargs: dict, optional + Additional parameters to pass to `BlobClient.commit_block_list`. + For writing only. + buffer_size: int, optional + The buffer size to use when performing I/O. For reading only. + min_part_size: int, optional + The minimum part size for multipart uploads. For writing only. + max_concurrency: int, optional + The number of parallel connections with which to download. For reading only. + + """ + if not client: + raise ValueError('you must specify the client to connect to Azure') + + if mode == smart_open.constants.READ_BINARY: + return Reader( + container_id, + blob_id, + client, + buffer_size=buffer_size, + line_terminator=smart_open.constants.BINARY_NEWLINE, + max_concurrency=max_concurrency, + ) + elif mode == smart_open.constants.WRITE_BINARY: + return Writer( + container_id, + blob_id, + client, + blob_kwargs=blob_kwargs, + min_part_size=min_part_size + ) + else: + raise NotImplementedError('Azure Blob Storage support for mode %r not implemented' % mode) + + +def _get_blob_client(client, container, blob): + # type: (Union[azure.storage.blob.BlobServiceClient, azure.storage.blob.ContainerClient, azure.storage.blob.BlobClient], str, str) -> azure.storage.blob.BlobClient # noqa + """ + Return an Azure BlobClient starting with any of BlobServiceClient, + ContainerClient, or BlobClient plus container name and blob name. + """ + if hasattr(client, "get_container_client"): + client = client.get_container_client(container) + + if hasattr(client, "container_name") and client.container_name != container: + raise ValueError( + "Client for %r doesn't match " + "container %r" % (client.container_name, container) + ) + + if hasattr(client, "get_blob_client"): + client = client.get_blob_client(blob) + + return client + + +class _RawReader(object): + """Read an Azure Blob Storage file.""" + + def __init__(self, blob, size, concurrency): + # type: (azure.storage.blob.BlobClient, int, int) -> None + self._blob = blob + self._size = size + self._position = 0 + self._concurrency = concurrency + + def seek(self, position): + """Seek to the specified position (byte offset) in the Azure Blob Storage blob. + + :param int position: The byte offset from the beginning of the blob. + + Returns the position after seeking. + """ + self._position = position + return self._position + + def read(self, size=-1): + if self._position >= self._size: + return b'' + binary = self._download_blob_chunk(size) + self._position += len(binary) + return binary + + def _download_blob_chunk(self, size): + if self._size == self._position: + # + # When reading, we can't seek to the first byte of an empty file. + # Similarly, we can't seek past the last byte. Do nothing here. 
+ # + return b'' + elif size == -1: + stream = self._blob.download_blob(offset=self._position, max_concurrency=self._concurrency) + else: + stream = self._blob.download_blob( + offset=self._position, max_concurrency=self._concurrency, length=size) + logging.debug('reading with a max concurrency of %d', self._concurrency) + if isinstance(stream, azure.storage.blob.StorageStreamDownloader): + binary = stream.readall() + else: + binary = stream.read() + return binary + + +class Reader(io.BufferedIOBase): + """Reads bytes from Azure Blob Storage. + + Implements the io.BufferedIOBase interface of the standard library. + + :raises azure.core.exceptions.ResourceNotFoundError: Raised when the blob to read from does not exist. + """ + _blob = None # so `closed` property works in case __init__ fails and __del__ is called + + def __init__( + self, + container, + blob, + client, # type: Union[azure.storage.blob.BlobServiceClient, azure.storage.blob.ContainerClient, azure.storage.blob.BlobClient] # noqa + buffer_size=DEFAULT_BUFFER_SIZE, + line_terminator=smart_open.constants.BINARY_NEWLINE, + max_concurrency=DEFAULT_MAX_CONCURRENCY, + ): + self._container_name = container + self._blob_name = blob + + # type: azure.storage.blob.BlobClient + self._blob = _get_blob_client(client, container, blob) + + if self._blob is None: + raise azure.core.exceptions.ResourceNotFoundError( + 'blob %s not found in %s' % (blob, container) + ) + try: + self._size = self._blob.get_blob_properties()['size'] + except KeyError: + self._size = 0 + + self._raw_reader = _RawReader(self._blob, self._size, max_concurrency) + self._position = 0 + self._current_part = smart_open.bytebuffer.ByteBuffer(buffer_size) + self._line_terminator = line_terminator + + # + # This member is part of the io.BufferedIOBase interface. + # + self.raw = None + + # + # Override some methods from io.IOBase. + # + def close(self): + """Flush and close this stream.""" + logger.debug("close: called") + if not self.closed: + self._blob = None + self._raw_reader = None + + @property + def closed(self): + return self._blob is None + + def readable(self): + """Return True if the stream can be read from.""" + return True + + def seekable(self): + """If False, seek(), tell() and truncate() will raise IOError. + + We offer only seek support, and no truncate support.""" + return True + + # + # io.BufferedIOBase methods. + # + def detach(self): + """Unsupported.""" + raise io.UnsupportedOperation + + def seek(self, offset, whence=smart_open.constants.WHENCE_START): + """Seek to the specified position. + + :param int offset: The offset in bytes. + :param int whence: Where the offset is from. 
+ + Returns the position after seeking.""" + logger.debug('seeking to offset: %r whence: %r', offset, whence) + if whence not in smart_open.constants.WHENCE_CHOICES: + raise ValueError('invalid whence %i, expected one of %r' % (whence, + smart_open.constants.WHENCE_CHOICES)) + + if whence == smart_open.constants.WHENCE_START: + new_position = offset + elif whence == smart_open.constants.WHENCE_CURRENT: + new_position = self._position + offset + else: + new_position = self._size + offset + self._position = new_position + self._raw_reader.seek(new_position) + logger.debug('current_pos: %r', self._position) + + self._current_part.empty() + return self._position + + def tell(self): + """Return the current position within the file.""" + return self._position + + def truncate(self, size=None): + """Unsupported.""" + raise io.UnsupportedOperation + + def read(self, size=-1): + """Read up to size bytes from the object and return them.""" + if size == 0: + return b'' + elif size < 0: + self._position = self._size + return self._read_from_buffer() + self._raw_reader.read() + + # + # Return unused data first + # + if len(self._current_part) >= size: + return self._read_from_buffer(size) + + if self._position == self._size: + return self._read_from_buffer() + + self._fill_buffer(size) + return self._read_from_buffer(size) + + def read1(self, size=-1): + """This is the same as read().""" + return self.read(size=size) + + def readinto(self, b): + """Read up to len(b) bytes into b, and return the number of bytes read.""" + data = self.read(len(b)) + if not data: + return 0 + b[:len(data)] = data + return len(data) + + def readline(self, limit=-1): + """Read up to and including the next newline. Returns the bytes read.""" + if limit != -1: + raise NotImplementedError('limits other than -1 not implemented yet') + + # + # A single line may span multiple buffers. + # + line = io.BytesIO() + while not (self._position == self._size and len(self._current_part) == 0): + line_part = self._current_part.readline(self._line_terminator) + line.write(line_part) + self._position += len(line_part) + + if line_part.endswith(self._line_terminator): + break + else: + self._fill_buffer() + + return line.getvalue() + + # + # Internal methods. + # + def _read_from_buffer(self, size=-1): + """Remove at most size bytes from our buffer and return them.""" + # logger.debug('reading %r bytes from %r byte-long buffer', size, len(self._current_part)) + size = size if size >= 0 else len(self._current_part) + part = self._current_part.read(size) + self._position += len(part) + # logger.debug('part: %r', part) + return part + + def _fill_buffer(self, size=-1): + size = max(size, self._current_part._chunk_size) + while len(self._current_part) < size and not self._position == self._size: + bytes_read = self._current_part.fill(self._raw_reader) + if bytes_read == 0: + logger.debug('reached EOF while filling buffer') + return True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def __str__(self): + return "(%s, %r, %r)" % ( + self.__class__.__name__, + self._container_name, + self._blob_name + ) + + def __repr__(self): + return "%s(container=%r, blob=%r)" % ( + self.__class__.__name__, + self._container_name, + self._blob_name, + ) + + +class Writer(io.BufferedIOBase): + """Writes bytes to Azure Blob Storage. + + Implements the io.BufferedIOBase interface of the standard library. 
+ """ + _blob = None # so `closed` property works in case __init__ fails and __del__ is called + + def __init__( + self, + container, + blob, + client, # type: Union[azure.storage.blob.BlobServiceClient, azure.storage.blob.ContainerClient, azure.storage.blob.BlobClient] # noqa + blob_kwargs=None, + min_part_size=_DEFAULT_MIN_PART_SIZE, + ): + self._container_name = container + self._blob_name = blob + self._blob_kwargs = blob_kwargs or {} + self._min_part_size = min_part_size + self._total_size = 0 + self._total_parts = 0 + self._bytes_uploaded = 0 + self._current_part = io.BytesIO() + self._block_list = [] + + # type: azure.storage.blob.BlobClient + self._blob = _get_blob_client(client, container, blob) + + # + # This member is part of the io.BufferedIOBase interface. + # + self.raw = None + + def flush(self): + pass + + def terminate(self): + """Do not commit block list on abort. + + Uploaded (uncommitted) blocks will be garbage collected after 7 days. + + See also https://stackoverflow.com/a/69673084/5511061.""" + logger.debug('%s: terminating multipart upload', self) + if not self.closed: + self._block_list = [] + self._blob = None + logger.debug('%s: terminated multipart upload', self) + + # + # Override some methods from io.IOBase. + # + def close(self): + logger.debug("close: called") + if not self.closed: + logger.debug('%s: completing multipart upload', self) + if self._current_part.tell() > 0: + self._upload_part() + self._blob.commit_block_list(self._block_list, **self._blob_kwargs) + self._block_list = [] + self._blob = None + logger.debug('%s: completed multipart upload', self) + + @property + def closed(self): + return self._blob is None + + def writable(self): + """Return True if the stream supports writing.""" + return True + + def seekable(self): + """If False, seek(), tell() and truncate() will raise IOError. + + We offer only tell support, and no seek or truncate support.""" + return True + + def seek(self, offset, whence=smart_open.constants.WHENCE_START): + """Unsupported.""" + raise io.UnsupportedOperation + + def truncate(self, size=None): + """Unsupported.""" + raise io.UnsupportedOperation + + def tell(self): + """Return the current stream position.""" + return self._total_size + + # + # io.BufferedIOBase methods. + # + def detach(self): + raise io.UnsupportedOperation("detach() not supported") + + def write(self, b): + """Write the given bytes (binary string) to the Azure Blob Storage file. + + There's buffering happening under the covers, so this may not actually + do any HTTP transfer right away.""" + + if not isinstance(b, _BINARY_TYPES): + raise TypeError("input must be one of %r, got: %r" % (_BINARY_TYPES, type(b))) + + self._current_part.write(b) + self._total_size += len(b) + + if self._current_part.tell() >= self._min_part_size: + self._upload_part() + + return len(b) + + def _upload_part(self): + part_num = self._total_parts + 1 + content_length = self._current_part.tell() + range_stop = self._bytes_uploaded + content_length - 1 + + """ # noqa: E501 + block_id's must be base64 encoded, all the same length, and less than or equal to 64 bytes in size prior + to encoding. 
+ https://docs.microsoft.com/en-us/python/api/azure-storage-blob/azure.storage.blob.blobclient?view=azure-python#stage-block-block-id--data--length-none----kwargs- + """ + zero_padded_part_num = str(part_num).zfill(64 // 2) + block_id = base64.b64encode(zero_padded_part_num.encode()) + self._current_part.seek(0) + self._blob.stage_block(block_id, self._current_part.read(content_length)) + self._block_list.append(azure.storage.blob.BlobBlock(block_id=block_id)) + + logger.info( + "uploading part #%i, %i bytes (total %.3fGB)", + part_num, content_length, range_stop / 1024.0 ** 3, + ) + + self._total_parts += 1 + self._bytes_uploaded += content_length + self._current_part = io.BytesIO(self._current_part.read()) + self._current_part.seek(0, io.SEEK_END) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + self.terminate() + else: + self.close() + + def __str__(self): + return "(%s, %r, %r)" % ( + self.__class__.__name__, + self._container_name, + self._blob_name + ) + + def __repr__(self): + return "%s(container=%r, blob=%r, min_part_size=%r)" % ( + self.__class__.__name__, + self._container_name, + self._blob_name, + self._min_part_size + ) diff --git a/.venv/lib/python3.11/site-packages/smart_open/bytebuffer.py b/.venv/lib/python3.11/site-packages/smart_open/bytebuffer.py new file mode 100644 index 0000000000000000000000000000000000000000..6aaa25155ac3c9a122fe9a7c215583060bb62097 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/bytebuffer.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# +"""Implements ByteBuffer class for amortizing network transfer overhead.""" + +import io + + +class ByteBuffer(object): + """Implements a byte buffer that allows callers to read data with minimal + copying, and has a fast __len__ method. The buffer is parametrized by its + chunk_size, which is the number of bytes that it will read in from the + supplied reader or iterable when the buffer is being filled. As primary use + case for this buffer is to amortize the overhead costs of transferring data + over the network (rather than capping memory consumption), it leads to more + predictable performance to always read the same amount of bytes each time + the buffer is filled, hence the chunk_size parameter instead of some fixed + capacity. + + The bytes are stored in a bytestring, and previously-read bytes are freed + when the buffer is next filled (by slicing the bytestring into a smaller + copy). + + Example + ------- + + Note that while this example works in both Python 2 and 3, the doctest only + passes in Python 3 due to the bytestring literals in the expected values. + + >>> buf = ByteBuffer(chunk_size = 8) + >>> message_bytes = iter([b'Hello, W', b'orld!']) + >>> buf.fill(message_bytes) + 8 + >>> len(buf) # only chunk_size bytes are filled + 8 + >>> buf.peek() + b'Hello, W' + >>> len(buf) # peek() does not change read position + 8 + >>> buf.read(6) + b'Hello,' + >>> len(buf) # read() does change read position + 2 + >>> buf.fill(message_bytes) + 5 + >>> buf.read() + b' World!' + >>> len(buf) + 0 + """ + + def __init__(self, chunk_size=io.DEFAULT_BUFFER_SIZE): + """Create a ByteBuffer instance that reads chunk_size bytes when filled. + Note that the buffer has no maximum size. 
+
+        Parameters
+        -----------
+        chunk_size: int, optional
+            The number of bytes that will be read from the supplied reader
+            or iterable when filling the buffer.
+        """
+        self._chunk_size = chunk_size
+        self.empty()
+
+    def __len__(self):
+        """Return the number of unread bytes in the buffer as an int"""
+        return len(self._bytes) - self._pos
+
+    def read(self, size=-1):
+        """Read bytes from the buffer and advance the read position. Returns
+        the bytes in a bytestring.
+
+        Parameters
+        ----------
+        size: int, optional
+            Maximum number of bytes to read. If negative or not supplied, read
+            all unread bytes in the buffer.
+
+        Returns
+        -------
+        bytes
+        """
+        part = self.peek(size)
+        self._pos += len(part)
+        return part
+
+    def peek(self, size=-1):
+        """Get bytes from the buffer without advancing the read position.
+        Returns the bytes in a bytestring.
+
+        Parameters
+        ----------
+        size: int, optional
+            Maximum number of bytes to return. If negative or not supplied,
+            return all unread bytes in the buffer.
+
+        Returns
+        -------
+        bytes
+        """
+        if size < 0 or size > len(self):
+            size = len(self)
+
+        part = bytes(self._bytes[self._pos:self._pos+size])
+        return part
+
+    def empty(self):
+        """Remove all bytes from the buffer"""
+        self._bytes = bytearray()
+        self._pos = 0
+
+    def fill(self, source, size=-1):
+        """Fill the buffer with bytes from source until one of these
+        conditions is met:
+            * size bytes have been read from source (if size >= 0);
+            * chunk_size bytes have been read from source;
+            * no more bytes can be read from source;
+        Returns the number of new bytes added to the buffer.
+        Note: all previously-read bytes in the buffer are removed.
+
+        Parameters
+        ----------
+        source: a file-like object, or iterable/list that contains bytes
+            The source of bytes to fill the buffer with. If this argument has
+            the `read` attribute, it's assumed to be a file-like object and
+            `read` is called to get the bytes; otherwise it's assumed to be an
+            iterable or list that contains bytes, and a for loop is used to get
+            the bytes.
+        size: int, optional
+            The number of bytes to try to read from source. If not supplied,
+            negative, or larger than the buffer's chunk_size, then chunk_size
+            bytes are read. Note that if source is an iterable or list, then
+            it's possible that more than size bytes will be read if iterating
+            over source produces more than one byte at a time.
+
+        Returns
+        -------
+        int, the number of new bytes added to the buffer.
+        """
+        size = size if size >= 0 else self._chunk_size
+        size = min(size, self._chunk_size)
+
+        if self._pos != 0:
+            self._bytes = self._bytes[self._pos:]
+            self._pos = 0
+
+        if hasattr(source, 'read'):
+            new_bytes = source.read(size)
+        else:
+            new_bytes = bytearray()
+            for more_bytes in source:
+                new_bytes += more_bytes
+                if len(new_bytes) >= size:
+                    break
+
+        self._bytes += new_bytes
+        return len(new_bytes)
+
+    def readline(self, terminator):
+        """Read a line from this buffer efficiently.
+
+        A line is a contiguous sequence of bytes that ends with either:
+
+        1. The ``terminator`` character
+        2. The end of the buffer itself
+
+        :param byte terminator: The line terminator character.
+        :rtype: bytes
+
+        """
+        index = self._bytes.find(terminator, self._pos)
+        if index == -1:
+            size = len(self)
+        else:
+            size = index - self._pos + 1
+        return self.read(size)
diff --git a/.venv/lib/python3.11/site-packages/smart_open/compression.py b/.venv/lib/python3.11/site-packages/smart_open/compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7c6d93c9fa998e10cea3ff517065b5831481375
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/smart_open/compression.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2020 Radim Rehurek
+#
+# This code is distributed under the terms and conditions
+# from the MIT License (MIT).
+#
+"""Implements the compression layer of the ``smart_open`` library."""
+import io
+import logging
+import os.path
+
+logger = logging.getLogger(__name__)
+
+_COMPRESSOR_REGISTRY = {}
+
+NO_COMPRESSION = 'disable'
+"""Use no compression. Read/write the data as-is."""
+INFER_FROM_EXTENSION = 'infer_from_extension'
+"""Determine the compression to use from the file extension.
+
+See get_supported_extensions().
+"""
+
+
+def get_supported_compression_types():
+    """Return the list of supported compression types available to open.
+
+    See the compression parameter to smart_open.open().
+    """
+    return [NO_COMPRESSION, INFER_FROM_EXTENSION] + get_supported_extensions()
+
+
+def get_supported_extensions():
+    """Return the list of file extensions for which we have registered compressors."""
+    return sorted(_COMPRESSOR_REGISTRY.keys())
+
+
+def register_compressor(ext, callback):
+    """Register a callback for transparently decompressing files with a specific extension.
+
+    Parameters
+    ----------
+    ext: str
+        The extension. Must include the leading period, e.g. ``.gz``.
+    callback: callable
+        The callback. It must accept two positional arguments, file_obj and mode.
+        This function will be called when ``smart_open`` is opening a file with
+        the specified extension.
+
+    Examples
+    --------
+
+    Instruct smart_open to use the `lzma` module whenever opening a file
+    with a .xz extension (see README.rst for the complete example showing I/O):
+
+    >>> def _handle_xz(file_obj, mode):
+    ...     import lzma
+    ...     return lzma.LZMAFile(filename=file_obj, mode=mode, format=lzma.FORMAT_XZ)
+    >>>
+    >>> register_compressor('.xz', _handle_xz)
+
+    """
+    if not (ext and ext[0] == '.'):
+        raise ValueError('ext must be a string starting with ., not %r' % ext)
+    ext = ext.lower()
+    if ext in _COMPRESSOR_REGISTRY:
+        logger.warning('overriding existing compression handler for %r', ext)
+    _COMPRESSOR_REGISTRY[ext] = callback
+
+
+def tweak_close(outer, inner):
+    """Ensure that closing the `outer` stream closes the `inner` stream as well.
+
+    Deprecated: smart_open.open().__exit__ now always calls __exit__ on the
+    underlying filestream.
+
+    Use this when your compression library's `close` method does not
+    automatically close the underlying filestream. See
+    https://github.com/RaRe-Technologies/smart_open/issues/630 for an
+    explanation why that is a problem for smart_open.
+ """ + outer_close = outer.close + + def close_both(*args): + nonlocal inner + try: + outer_close() + finally: + if inner: + inner, fp = None, inner + fp.close() + + outer.close = close_both + + +def _handle_bz2(file_obj, mode): + from bz2 import BZ2File + result = BZ2File(file_obj, mode) + return result + + +def _handle_gzip(file_obj, mode): + import gzip + result = gzip.GzipFile(fileobj=file_obj, mode=mode) + return result + + +def _handle_zstd(file_obj, mode): + import zstandard # type: ignore + result = zstandard.open(filename=file_obj, mode=mode) + # zstandard.open returns an io.TextIOWrapper in text mode, but otherwise + # returns a raw stream reader/writer, and we need the `io` wrapper + # to make FileLikeProxy work correctly. + # + # See: + # + # https://github.com/indygreg/python-zstandard/blob/d7d81e79dbe74feb22fb73405ebfb3e20f4c4653/zstandard/__init__.py#L169-L174 + if "b" in mode and "w" in mode: + result = io.BufferedWriter(result) + elif "b" in mode and "r" in mode: + result = io.BufferedReader(result) + return result + + +def compression_wrapper(file_obj, mode, compression=INFER_FROM_EXTENSION, filename=None): + """ + Wrap `file_obj` with an appropriate [de]compression mechanism based on its file extension. + + If the filename extension isn't recognized, simply return the original `file_obj` unchanged. + + `file_obj` must either be a filehandle object, or a class which behaves like one. + + If `filename` is specified, it will be used to extract the extension. + If not, the `file_obj.name` attribute is used as the filename. + + """ + if compression == NO_COMPRESSION: + return file_obj + elif compression == INFER_FROM_EXTENSION: + try: + filename = (filename or file_obj.name).lower() + except (AttributeError, TypeError): + logger.warning( + 'unable to transparently decompress %r because it ' + 'seems to lack a string-like .name', file_obj + ) + return file_obj + _, compression = os.path.splitext(filename) + + if compression in _COMPRESSOR_REGISTRY and mode.endswith('+'): + raise ValueError('transparent (de)compression unsupported for mode %r' % mode) + + try: + callback = _COMPRESSOR_REGISTRY[compression] + except KeyError: + return file_obj + else: + return callback(file_obj, mode) + + +# +# NB. avoid using lambda here to make stack traces more readable. +# +register_compressor('.bz2', _handle_bz2) +register_compressor('.gz', _handle_gzip) +register_compressor('.zst', _handle_zstd) diff --git a/.venv/lib/python3.11/site-packages/smart_open/concurrency.py b/.venv/lib/python3.11/site-packages/smart_open/concurrency.py new file mode 100644 index 0000000000000000000000000000000000000000..4e72aec7581499d9558c473219d3d26d1dbf6d78 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/concurrency.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2020 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +"""Common functionality for concurrent processing. + +The main entry point is :func:`create_pool`. +""" + +import contextlib +import logging +import warnings + +logger = logging.getLogger(__name__) + +# AWS Lambda environments do not support multiprocessing.Queue or multiprocessing.Pool. +# However they do support Threads and therefore concurrent.futures's ThreadPoolExecutor. +# We use this flag to allow python 2 backward compatibility, where concurrent.futures doesn't exist. 
+_CONCURRENT_FUTURES = False +try: + import concurrent.futures + _CONCURRENT_FUTURES = True +except ImportError: + warnings.warn("concurrent.futures could not be imported and won't be used") + +# Multiprocessing is unavailable in App Engine (and possibly other sandboxes). +# The only method currently relying on it is iter_bucket, which is instructed +# whether to use it by the MULTIPROCESSING flag. +_MULTIPROCESSING = False +try: + import multiprocessing.pool + _MULTIPROCESSING = True +except ImportError: + warnings.warn("multiprocessing could not be imported and won't be used") + + +class DummyPool(object): + """A class that mimics multiprocessing.pool.Pool for our purposes.""" + def imap_unordered(self, function, items): + return map(function, items) + + def terminate(self): + pass + + +class ConcurrentFuturesPool(object): + """A class that mimics multiprocessing.pool.Pool but uses concurrent futures instead of processes.""" + def __init__(self, max_workers): + self.executor = concurrent.futures.ThreadPoolExecutor(max_workers) + + def imap_unordered(self, function, items): + futures = [self.executor.submit(function, item) for item in items] + for future in concurrent.futures.as_completed(futures): + yield future.result() + + def terminate(self): + self.executor.shutdown(wait=True) + + +@contextlib.contextmanager +def create_pool(processes=1): + if _MULTIPROCESSING and processes: + logger.info("creating multiprocessing pool with %i workers", processes) + pool = multiprocessing.pool.Pool(processes=processes) + elif _CONCURRENT_FUTURES and processes: + logger.info("creating concurrent futures pool with %i workers", processes) + pool = ConcurrentFuturesPool(max_workers=processes) + else: + logger.info("creating dummy pool") + pool = DummyPool() + yield pool + pool.terminate() diff --git a/.venv/lib/python3.11/site-packages/smart_open/constants.py b/.venv/lib/python3.11/site-packages/smart_open/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..1ffa14e3f99e44c784a435732ea222d4689e816c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/constants.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2020 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +"""Some universal constants that are common to I/O operations.""" + + +READ_BINARY = 'rb' + +WRITE_BINARY = 'wb' + +BINARY_MODES = (READ_BINARY, WRITE_BINARY) + +BINARY_NEWLINE = b'\n' + +WHENCE_START = 0 + +WHENCE_CURRENT = 1 + +WHENCE_END = 2 + +WHENCE_CHOICES = (WHENCE_START, WHENCE_CURRENT, WHENCE_END) diff --git a/.venv/lib/python3.11/site-packages/smart_open/doctools.py b/.venv/lib/python3.11/site-packages/smart_open/doctools.py new file mode 100644 index 0000000000000000000000000000000000000000..daa2bc01c4b7aa92d3e34470941f5015f43a53eb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/doctools.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +"""Common functions for working with docstrings. + +For internal use only. +""" + +import contextlib +import inspect +import io +import os.path +import re + +from . import compression +from . import transport + +PLACEHOLDER = ' smart_open/doctools.py magic goes here' + + +def extract_kwargs(docstring): + """Extract keyword argument documentation from a function's docstring. 
+ + Parameters + ---------- + docstring: str + The docstring to extract keyword arguments from. + + Returns + ------- + list of (str, str, list str) + + str + The name of the keyword argument. + str + Its type. + str + Its documentation as a list of lines. + + Notes + ----- + The implementation is rather fragile. It expects the following: + + 1. The parameters are under an underlined Parameters section + 2. Keyword parameters have the literal ", optional" after the type + 3. Names and types are not indented + 4. Descriptions are indented with 4 spaces + 5. The Parameters section ends with an empty line. + + Examples + -------- + + >>> docstring = '''The foo function. + ... Parameters + ... ---------- + ... bar: str, optional + ... This parameter is the bar. + ... baz: int, optional + ... This parameter is the baz. + ... + ... ''' + >>> kwargs = extract_kwargs(docstring) + >>> kwargs[0] + ('bar', 'str, optional', ['This parameter is the bar.']) + + """ + if not docstring: + return [] + + lines = inspect.cleandoc(docstring).split('\n') + retval = [] + + # + # 1. Find the underlined 'Parameters' section + # 2. Once there, continue parsing parameters until we hit an empty line + # + while lines and lines[0] != 'Parameters': + lines.pop(0) + + if not lines: + return [] + + lines.pop(0) + lines.pop(0) + + while lines and lines[0]: + name, type_ = lines.pop(0).split(':', 1) + description = [] + while lines and lines[0].startswith(' '): + description.append(lines.pop(0).strip()) + if 'optional' in type_: + retval.append((name.strip(), type_.strip(), description)) + + return retval + + +def to_docstring(kwargs, lpad=''): + """Reconstruct a docstring from keyword argument info. + + Basically reverses :func:`extract_kwargs`. + + Parameters + ---------- + kwargs: list + Output from the extract_kwargs function + lpad: str, optional + Padding string (from the left). + + Returns + ------- + str + The docstring snippet documenting the keyword arguments. + + Examples + -------- + + >>> kwargs = [ + ... ('bar', 'str, optional', ['This parameter is the bar.']), + ... ('baz', 'int, optional', ['This parameter is the baz.']), + ... ] + >>> print(to_docstring(kwargs), end='') + bar: str, optional + This parameter is the bar. + baz: int, optional + This parameter is the baz. + + """ + buf = io.StringIO() + for name, type_, description in kwargs: + buf.write('%s%s: %s\n' % (lpad, name, type_)) + for line in description: + buf.write('%s %s\n' % (lpad, line)) + return buf.getvalue() + + +def extract_examples_from_readme_rst(indent=' '): + """Extract examples from this project's README.rst file. + + Parameters + ---------- + indent: str + Prepend each line with this string. Should contain some number of spaces. + + Returns + ------- + str + The examples. + + Notes + ----- + Quite fragile, depends on named labels inside the README.rst file. + """ + curr_dir = os.path.dirname(os.path.abspath(__file__)) + readme_path = os.path.join(curr_dir, '..', 'README.rst') + try: + with open(readme_path) as fin: + lines = list(fin) + start = lines.index('.. _doctools_before_examples:\n') + end = lines.index(".. 
_doctools_after_examples:\n") + lines = lines[start+4:end-2] + return ''.join([indent + re.sub('^ ', '', line) for line in lines]) + except Exception: + return indent + 'See README.rst' + + +def tweak_open_docstring(f): + buf = io.StringIO() + seen = set() + + root_path = os.path.dirname(os.path.dirname(__file__)) + + with contextlib.redirect_stdout(buf): + print(' smart_open supports the following transport mechanisms:') + print() + for scheme, submodule in sorted(transport._REGISTRY.items()): + if scheme == transport.NO_SCHEME or submodule in seen: + continue + seen.add(submodule) + + relpath = os.path.relpath(submodule.__file__, start=root_path) + heading = '%s (%s)' % (scheme, relpath) + print(' %s' % heading) + print(' %s' % ('~' * len(heading))) + print(' %s' % submodule.__doc__.split('\n')[0]) + print() + + kwargs = extract_kwargs(submodule.open.__doc__) + if kwargs: + print(to_docstring(kwargs, lpad=u' ')) + + print(' Examples') + print(' --------') + print() + print(extract_examples_from_readme_rst()) + + print(' This function also supports transparent compression and decompression ') + print(' using the following codecs:') + print() + for extension in compression.get_supported_extensions(): + print(' * %s' % extension) + print() + print(' The function depends on the file extension to determine the appropriate codec.') + + # + # The docstring can be None if -OO was passed to the interpreter. + # + if f.__doc__: + f.__doc__ = f.__doc__.replace(PLACEHOLDER, buf.getvalue()) + + +def tweak_parse_uri_docstring(f): + buf = io.StringIO() + seen = set() + schemes = [] + examples = [] + + for scheme, submodule in sorted(transport._REGISTRY.items()): + if scheme == transport.NO_SCHEME or submodule in seen: + continue + schemes.append(scheme) + seen.add(submodule) + + try: + examples.extend(submodule.URI_EXAMPLES) + except AttributeError: + pass + + with contextlib.redirect_stdout(buf): + print(' Supported URI schemes are:') + print() + for scheme in schemes: + print(' * %s' % scheme) + print() + print(' Valid URI examples::') + print() + for example in examples: + print(' * %s' % example) + + if f.__doc__: + f.__doc__ = f.__doc__.replace(PLACEHOLDER, buf.getvalue()) diff --git a/.venv/lib/python3.11/site-packages/smart_open/ftp.py b/.venv/lib/python3.11/site-packages/smart_open/ftp.py new file mode 100644 index 0000000000000000000000000000000000000000..a7212ecd99ebf2825303d132e1e5dafc2015c5fe --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/ftp.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +"""Implements I/O streams over FTP. 
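+
+The main entry point is the :func:`~smart_open.ftp.open` function.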
+""" + +import logging +import ssl +import urllib.parse +import smart_open.utils +from ftplib import FTP, FTP_TLS, error_reply +import types + +logger = logging.getLogger(__name__) + +SCHEMES = ("ftp", "ftps") + +"""Supported URL schemes.""" + +DEFAULT_PORT = 21 + +URI_EXAMPLES = ( + "ftp://username@host/path/file", + "ftp://username:password@host/path/file", + "ftp://username:password@host:port/path/file", + "ftps://username@host/path/file", + "ftps://username:password@host/path/file", + "ftps://username:password@host:port/path/file", +) + + +def _unquote(text): + return text and urllib.parse.unquote(text) + + +def parse_uri(uri_as_string): + split_uri = urllib.parse.urlsplit(uri_as_string) + assert split_uri.scheme in SCHEMES + return dict( + scheme=split_uri.scheme, + uri_path=_unquote(split_uri.path), + user=_unquote(split_uri.username), + host=split_uri.hostname, + port=int(split_uri.port or DEFAULT_PORT), + password=_unquote(split_uri.password), + ) + + +def open_uri(uri, mode, transport_params): + smart_open.utils.check_kwargs(open, transport_params) + parsed_uri = parse_uri(uri) + uri_path = parsed_uri.pop("uri_path") + scheme = parsed_uri.pop("scheme") + secure_conn = True if scheme == "ftps" else False + return open( + uri_path, + mode, + secure_connection=secure_conn, + transport_params=transport_params, + **parsed_uri, + ) + + +def convert_transport_params_to_args(transport_params): + supported_keywords = [ + "timeout", + "source_address", + "encoding", + ] + unsupported_keywords = [k for k in transport_params if k not in supported_keywords] + kwargs = {k: v for (k, v) in transport_params.items() if k in supported_keywords} + + if unsupported_keywords: + logger.warning( + "ignoring unsupported ftp keyword arguments: %r", unsupported_keywords + ) + + return kwargs + + +def _connect(hostname, username, port, password, secure_connection, transport_params): + kwargs = convert_transport_params_to_args(transport_params) + if secure_connection: + ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH) + ftp = FTP_TLS(context=ssl_context, **kwargs) + else: + ftp = FTP(**kwargs) + try: + ftp.connect(hostname, port) + except Exception as e: + logger.error("Unable to connect to FTP server: try checking the host and port!") + raise e + try: + ftp.login(username, password) + except error_reply as e: + logger.error( + "Unable to login to FTP server: try checking the username and password!" + ) + raise e + if secure_connection: + ftp.prot_p() + return ftp + + +def open( + path, + mode="rb", + host=None, + user=None, + password=None, + port=DEFAULT_PORT, + secure_connection=False, + transport_params=None, +): + """Open a file for reading or writing via FTP/FTPS. + + Parameters + ---------- + path: str + The path on the remote server + mode: str + Must be "rb" or "wb" + host: str + The host to connect to + user: str + The username to use for the connection + password: str + The password for the specified username + port: int + The port to connect to + secure_connection: bool + True for FTPS, False for FTP + transport_params: dict + Additional parameters for the FTP connection. + Currently supported parameters: timeout, source_address, encoding. 
+ """ + if not host: + raise ValueError("you must specify the host to connect to") + if not user: + raise ValueError("you must specify the user") + if not transport_params: + transport_params = {} + conn = _connect(host, user, port, password, secure_connection, transport_params) + mode_to_ftp_cmds = { + "rb": ("RETR", "rb"), + "wb": ("STOR", "wb"), + "ab": ("APPE", "wb"), + } + try: + ftp_mode, file_obj_mode = mode_to_ftp_cmds[mode] + except KeyError: + raise ValueError(f"unsupported mode: {mode!r}") + ftp_mode, file_obj_mode = mode_to_ftp_cmds[mode] + conn.voidcmd("TYPE I") + socket = conn.transfercmd(f"{ftp_mode} {path}") + fobj = socket.makefile(file_obj_mode) + + def full_close(self): + self.orig_close() + self.socket.close() + self.conn.close() + + fobj.orig_close = fobj.close + fobj.socket = socket + fobj.conn = conn + fobj.close = types.MethodType(full_close, fobj) + return fobj diff --git a/.venv/lib/python3.11/site-packages/smart_open/gcs.py b/.venv/lib/python3.11/site-packages/smart_open/gcs.py new file mode 100644 index 0000000000000000000000000000000000000000..0ae3430721bc42d5443dcd92794f93c13b29c0e1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/gcs.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# +"""Implements file-like objects for reading and writing to/from GCS.""" + +import logging +import warnings + +try: + import google.cloud.exceptions + import google.cloud.storage + import google.auth.transport.requests +except ImportError: + MISSING_DEPS = True + +import smart_open.bytebuffer +import smart_open.utils + +from smart_open import constants + +logger = logging.getLogger(__name__) + +SCHEME = "gs" +"""Supported scheme for GCS""" + +_DEFAULT_MIN_PART_SIZE = 50 * 1024**2 +"""Default minimum part size for GCS multipart uploads""" + +_DEFAULT_WRITE_OPEN_KWARGS = {'ignore_flush': True} + + +def parse_uri(uri_as_string): + sr = smart_open.utils.safe_urlsplit(uri_as_string) + assert sr.scheme == SCHEME + bucket_id = sr.netloc + blob_id = sr.path.lstrip('/') + return dict(scheme=SCHEME, bucket_id=bucket_id, blob_id=blob_id) + + +def open_uri(uri, mode, transport_params): + parsed_uri = parse_uri(uri) + kwargs = smart_open.utils.check_kwargs(open, transport_params) + return open(parsed_uri['bucket_id'], parsed_uri['blob_id'], mode, **kwargs) + + +def warn_deprecated(parameter_name): + message = f"Parameter {parameter_name} is deprecated, this parameter no-longer has any effect" + warnings.warn(message, UserWarning) + + +def open( + bucket_id, + blob_id, + mode, + buffer_size=None, + min_part_size=_DEFAULT_MIN_PART_SIZE, + client=None, # type: google.cloud.storage.Client + get_blob_kwargs=None, + blob_properties=None, + blob_open_kwargs=None, +): + """Open an GCS blob for reading or writing. + + Parameters + ---------- + bucket_id: str + The name of the bucket this object resides in. + blob_id: str + The name of the blob within the bucket. + mode: str + The mode for opening the object. Must be either "rb" or "wb". + buffer_size: + deprecated + min_part_size: int, optional + The minimum part size for multipart uploads. For writing only. + client: google.cloud.storage.Client, optional + The GCS client to use when working with google-cloud-storage. + get_blob_kwargs: dict, optional + Additional keyword arguments to propagate to the bucket.get_blob + method of the google-cloud-storage library. For reading only. 
+ blob_properties: dict, optional + Set properties on blob before writing. For writing only. + blob_open_kwargs: dict, optional + Additional keyword arguments to propagate to the blob.open method + of the google-cloud-storage library. + + """ + if blob_open_kwargs is None: + blob_open_kwargs = {} + + if buffer_size is not None: + warn_deprecated('buffer_size') + + if mode in (constants.READ_BINARY, 'r', 'rt'): + _blob = Reader(bucket=bucket_id, + key=blob_id, + client=client, + get_blob_kwargs=get_blob_kwargs, + blob_open_kwargs=blob_open_kwargs) + + elif mode in (constants.WRITE_BINARY, 'w', 'wt'): + _blob = Writer(bucket=bucket_id, + blob=blob_id, + min_part_size=min_part_size, + client=client, + blob_properties=blob_properties, + blob_open_kwargs=blob_open_kwargs) + + else: + raise NotImplementedError(f'GCS support for mode {mode} not implemented') + + return _blob + + +def Reader(bucket, + key, + buffer_size=None, + line_terminator=None, + client=None, + get_blob_kwargs=None, + blob_open_kwargs=None): + + if get_blob_kwargs is None: + get_blob_kwargs = {} + if blob_open_kwargs is None: + blob_open_kwargs = {} + if client is None: + client = google.cloud.storage.Client() + if buffer_size is not None: + warn_deprecated('buffer_size') + if line_terminator is not None: + warn_deprecated('line_terminator') + + bkt = client.bucket(bucket) + blob = bkt.get_blob(key, **get_blob_kwargs) + + if blob is None: + raise google.cloud.exceptions.NotFound(f'blob {key} not found in {bucket}') + + return blob.open('rb', **blob_open_kwargs) + + +def Writer(bucket, + blob, + min_part_size=None, + client=None, + blob_properties=None, + blob_open_kwargs=None): + + if blob_open_kwargs is None: + blob_open_kwargs = {} + if blob_properties is None: + blob_properties = {} + if client is None: + client = google.cloud.storage.Client() + + blob_open_kwargs = {**_DEFAULT_WRITE_OPEN_KWARGS, **blob_open_kwargs} + + g_blob = client.bucket(bucket).blob( + blob, + chunk_size=min_part_size, + ) + + for k, v in blob_properties.items(): + setattr(g_blob, k, v) + + _blob = g_blob.open('wb', **blob_open_kwargs) + + # backwards-compatiblity, was deprecated upstream https://cloud.google.com/storage/docs/resumable-uploads + _blob.terminate = lambda: None + + return _blob diff --git a/.venv/lib/python3.11/site-packages/smart_open/hdfs.py b/.venv/lib/python3.11/site-packages/smart_open/hdfs.py new file mode 100644 index 0000000000000000000000000000000000000000..1fc97e94756da03aaba35bcfd80868ba7bcd16bc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/hdfs.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +"""Implements reading and writing to/from HDFS. + +The main entry point is the :func:`~smart_open.hdfs.open` function. + +Uses the command-line hdfs utility under the covers. 
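+
+Example (a sketch; the path below is a placeholder, and assumes the ``hdfs``
+binary is on the PATH and can reach the cluster):
+
+>>> with open('hdfs:///user/alice/corpus.txt', 'rb') as fin:
+...     first_kb = fin.read(1024)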
+ +""" + +import io +import logging +import subprocess +import urllib.parse + +from smart_open import utils + +logger = logging.getLogger(__name__) + +SCHEMES = ('hdfs', 'viewfs') + +URI_EXAMPLES = ( + 'hdfs:///path/file', + 'hdfs://path/file', + 'viewfs:///path/file', + 'viewfs://path/file', +) + + +def parse_uri(uri_as_string): + split_uri = urllib.parse.urlsplit(uri_as_string) + assert split_uri.scheme in SCHEMES + + uri_path = split_uri.netloc + split_uri.path + uri_path = "/" + uri_path.lstrip("/") + if not uri_path: + raise RuntimeError("invalid HDFS URI: %r" % uri_as_string) + + return dict(scheme=split_uri.scheme, uri_path=uri_path) + + +def open_uri(uri, mode, transport_params): + utils.check_kwargs(open, transport_params) + + parsed_uri = parse_uri(uri) + fobj = open(parsed_uri['uri_path'], mode) + fobj.name = parsed_uri['uri_path'].split('/')[-1] + return fobj + + +def open(uri, mode): + if mode == 'rb': + return CliRawInputBase(uri) + elif mode == 'wb': + return CliRawOutputBase(uri) + else: + raise NotImplementedError('hdfs support for mode %r not implemented' % mode) + + +class CliRawInputBase(io.RawIOBase): + """Reads bytes from HDFS via the "hdfs dfs" command-line interface. + + Implements the io.RawIOBase interface of the standard library. + """ + _sub = None # so `closed` property works in case __init__ fails and __del__ is called + + def __init__(self, uri): + self._uri = uri + self._sub = subprocess.Popen(["hdfs", "dfs", '-cat', self._uri], stdout=subprocess.PIPE) + + # + # This member is part of the io.BufferedIOBase interface. + # + self.raw = None + + # + # Override some methods from io.IOBase. + # + def close(self): + """Flush and close this stream.""" + logger.debug("close: called") + if not self.closed: + self._sub.terminate() + self._sub = None + + @property + def closed(self): + return self._sub is None + + def readable(self): + """Return True if the stream can be read from.""" + return self._sub is not None + + def seekable(self): + """If False, seek(), tell() and truncate() will raise IOError.""" + return False + + # + # io.RawIOBase methods. + # + def detach(self): + """Unsupported.""" + raise io.UnsupportedOperation + + def read(self, size=-1): + """Read up to size bytes from the object and return them.""" + return self._sub.stdout.read(size) + + def read1(self, size=-1): + """This is the same as read().""" + return self.read(size=size) + + def readinto(self, b): + """Read up to len(b) bytes into b, and return the number of bytes + read.""" + data = self.read(len(b)) + if not data: + return 0 + b[:len(data)] = data + return len(data) + + +class CliRawOutputBase(io.RawIOBase): + """Writes bytes to HDFS via the "hdfs dfs" command-line interface. + + Implements the io.RawIOBase interface of the standard library. + """ + _sub = None # so `closed` property works in case __init__ fails and __del__ is called + + def __init__(self, uri): + self._uri = uri + self._sub = subprocess.Popen(["hdfs", "dfs", '-put', '-f', '-', self._uri], + stdin=subprocess.PIPE) + + # + # This member is part of the io.RawIOBase interface. 
+ # + self.raw = None + + def close(self): + logger.debug("close: called") + if not self.closed: + self.flush() + self._sub.stdin.close() + self._sub.wait() + self._sub = None + + @property + def closed(self): + return self._sub is None + + def flush(self): + self._sub.stdin.flush() + + def writeable(self): + """Return True if this object is writeable.""" + return self._sub is not None + + def seekable(self): + """If False, seek(), tell() and truncate() will raise IOError.""" + return False + + def write(self, b): + self._sub.stdin.write(b) + + # + # io.IOBase methods. + # + def detach(self): + raise io.UnsupportedOperation("detach() not supported") diff --git a/.venv/lib/python3.11/site-packages/smart_open/http.py b/.venv/lib/python3.11/site-packages/smart_open/http.py new file mode 100644 index 0000000000000000000000000000000000000000..9dfa1d5f383ed20eeeee98fbc1c6347e7e50ced5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/http.py @@ -0,0 +1,358 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# +"""Implements file-like objects for reading from http.""" + +import io +import logging +import os.path +import urllib.parse + +try: + import requests +except ImportError: + MISSING_DEPS = True + +from smart_open import bytebuffer, constants +import smart_open.utils + +DEFAULT_BUFFER_SIZE = 128 * 1024 +SCHEMES = ('http', 'https') + +logger = logging.getLogger(__name__) + + +_HEADERS = {'Accept-Encoding': 'identity'} +"""The headers we send to the server with every HTTP request. + +For now, we ask the server to send us the files as they are. +Sometimes, servers compress the file for more efficient transfer, in which case +the client (us) has to decompress them with the appropriate algorithm. +""" + + +def parse_uri(uri_as_string): + split_uri = urllib.parse.urlsplit(uri_as_string) + assert split_uri.scheme in SCHEMES + + uri_path = split_uri.netloc + split_uri.path + uri_path = "/" + uri_path.lstrip("/") + return dict(scheme=split_uri.scheme, uri_path=uri_path) + + +def open_uri(uri, mode, transport_params): + kwargs = smart_open.utils.check_kwargs(open, transport_params) + return open(uri, mode, **kwargs) + + +def open(uri, mode, kerberos=False, user=None, password=None, cert=None, + headers=None, timeout=None, session=None, buffer_size=DEFAULT_BUFFER_SIZE): + """Implement streamed reader from a web site. + + Supports Kerberos and Basic HTTP authentication. + + Parameters + ---------- + url: str + The URL to open. + mode: str + The mode to open using. + kerberos: boolean, optional + If True, will attempt to use the local Kerberos credentials + user: str, optional + The username for authenticating over HTTP + password: str, optional + The password for authenticating over HTTP + cert: str/tuple, optional + if String, path to ssl client cert file (.pem). If Tuple, (‘cert’, ‘key’) + headers: dict, optional + Any headers to send in the request. If ``None``, the default headers are sent: + ``{'Accept-Encoding': 'identity'}``. To use no headers at all, + set this variable to an empty dict, ``{}``. + session: object, optional + The requests Session object to use with http get requests. + Can be used for OAuth2 clients. + buffer_size: int, optional + The buffer size to use when performing I/O. + + Note + ---- + If neither kerberos or (user, password) are set, will connect + unauthenticated, unless set separately in headers. 
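+
+    Examples
+    --------
+    A minimal sketch (the URL is a placeholder):
+
+    >>> with open('https://example.com/data.bin', 'rb') as fin:
+    ...     header = fin.read(16)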
+ + """ + if mode == constants.READ_BINARY: + fobj = SeekableBufferedInputBase( + uri, mode, buffer_size=buffer_size, kerberos=kerberos, + user=user, password=password, cert=cert, + headers=headers, session=session, timeout=timeout, + ) + fobj.name = os.path.basename(urllib.parse.urlparse(uri).path) + return fobj + else: + raise NotImplementedError('http support for mode %r not implemented' % mode) + + +class BufferedInputBase(io.BufferedIOBase): + response = None # so `closed` property works in case __init__ fails and __del__ is called + + def __init__(self, url, mode='r', buffer_size=DEFAULT_BUFFER_SIZE, + kerberos=False, user=None, password=None, cert=None, + headers=None, session=None, timeout=None): + + self.session = session or requests + + if kerberos: + import requests_kerberos + auth = requests_kerberos.HTTPKerberosAuth() + elif user is not None and password is not None: + auth = (user, password) + else: + auth = None + + self.buffer_size = buffer_size + self.mode = mode + + if headers is None: + self.headers = _HEADERS.copy() + else: + self.headers = headers + + self.timeout = timeout + + self.response = self.session.get( + url, + auth=auth, + cert=cert, + stream=True, + headers=self.headers, + timeout=self.timeout, + ) + + if not self.response.ok: + self.response.raise_for_status() + + self._read_iter = self.response.iter_content(self.buffer_size) + self._read_buffer = bytebuffer.ByteBuffer(buffer_size) + self._current_pos = 0 + + # + # This member is part of the io.BufferedIOBase interface. + # + self.raw = None + + # + # Override some methods from io.IOBase. + # + def close(self): + """Flush and close this stream.""" + logger.debug("close: called") + if not self.closed: + self.response = None + self._read_iter = None + + @property + def closed(self): + return self.response is None + + def readable(self): + """Return True if the stream can be read from.""" + return True + + def seekable(self): + return False + + # + # io.BufferedIOBase methods. + # + def detach(self): + """Unsupported.""" + raise io.UnsupportedOperation + + def read(self, size=-1): + """ + Mimics the read call to a filehandle object. + """ + logger.debug("reading with size: %d", size) + if self.response is None: + return b'' + + if size == 0: + return b'' + elif size < 0 and len(self._read_buffer) == 0: + retval = self.response.raw.read() + elif size < 0: + retval = self._read_buffer.read() + self.response.raw.read() + else: + while len(self._read_buffer) < size: + logger.debug( + "http reading more content at current_pos: %d with size: %d", + self._current_pos, size, + ) + bytes_read = self._read_buffer.fill(self._read_iter) + if bytes_read == 0: + # Oops, ran out of data early. + retval = self._read_buffer.read() + self._current_pos += len(retval) + + return retval + + # If we got here, it means we have enough data in the buffer + # to return to the caller. + retval = self._read_buffer.read(size) + + self._current_pos += len(retval) + return retval + + def read1(self, size=-1): + """This is the same as read().""" + return self.read(size=size) + + def readinto(self, b): + """Read up to len(b) bytes into b, and return the number of bytes + read.""" + data = self.read(len(b)) + if not data: + return 0 + b[:len(data)] = data + return len(data) + + +class SeekableBufferedInputBase(BufferedInputBase): + """ + Implement seekable streamed reader from a web site. + Supports Kerberos, client certificate and Basic HTTP authentication. 
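+
+    Seeking to a new position is typically implemented by issuing a fresh GET
+    request with an HTTP ``Range`` header (see ``_partial_request``), so every
+    effective seek costs one extra round trip to the server.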
+ """ + + def __init__(self, url, mode='r', buffer_size=DEFAULT_BUFFER_SIZE, + kerberos=False, user=None, password=None, cert=None, + headers=None, session=None, timeout=None): + """ + If Kerberos is True, will attempt to use the local Kerberos credentials. + If cert is set, will try to use a client certificate + Otherwise, will try to use "basic" HTTP authentication via username/password. + + If none of those are set, will connect unauthenticated. + """ + self.url = url + + self.session = session or requests + + if kerberos: + import requests_kerberos + self.auth = requests_kerberos.HTTPKerberosAuth() + elif user is not None and password is not None: + self.auth = (user, password) + else: + self.auth = None + + if headers is None: + self.headers = _HEADERS.copy() + else: + self.headers = headers + + self.cert = cert + self.timeout = timeout + + self.buffer_size = buffer_size + self.mode = mode + self.response = self._partial_request() + + if not self.response.ok: + self.response.raise_for_status() + + logger.debug('self.response: %r, raw: %r', self.response, self.response.raw) + + self.content_length = int(self.response.headers.get("Content-Length", -1)) + # + # We assume the HTTP stream is seekable unless the server explicitly + # tells us it isn't. It's better to err on the side of "seekable" + # because we don't want to prevent users from seeking a stream that + # does not appear to be seekable but really is. + # + self._seekable = self.response.headers.get("Accept-Ranges", "").lower() != "none" + + self._read_iter = self.response.iter_content(self.buffer_size) + self._read_buffer = bytebuffer.ByteBuffer(buffer_size) + self._current_pos = 0 + + # + # This member is part of the io.BufferedIOBase interface. + # + self.raw = None + + def seek(self, offset, whence=0): + """Seek to the specified position. + + :param int offset: The offset in bytes. + :param int whence: Where the offset is from. 
+ + Returns the position after seeking.""" + logger.debug('seeking to offset: %r whence: %r', offset, whence) + if whence not in constants.WHENCE_CHOICES: + raise ValueError('invalid whence, expected one of %r' % constants.WHENCE_CHOICES) + + if not self.seekable(): + raise OSError('stream is not seekable') + + if whence == constants.WHENCE_START: + new_pos = offset + elif whence == constants.WHENCE_CURRENT: + new_pos = self._current_pos + offset + elif whence == constants.WHENCE_END: + new_pos = self.content_length + offset + + if self.content_length == -1: + new_pos = smart_open.utils.clamp(new_pos, maxval=None) + else: + new_pos = smart_open.utils.clamp(new_pos, maxval=self.content_length) + + if self._current_pos == new_pos: + return self._current_pos + + logger.debug("http seeking from current_pos: %d to new_pos: %d", self._current_pos, new_pos) + + self._current_pos = new_pos + + if new_pos == self.content_length: + self.response = None + self._read_iter = None + self._read_buffer.empty() + else: + response = self._partial_request(new_pos) + if response.ok: + self.response = response + self._read_iter = self.response.iter_content(self.buffer_size) + self._read_buffer.empty() + else: + self.response = None + + return self._current_pos + + def tell(self): + return self._current_pos + + def seekable(self, *args, **kwargs): + return self._seekable + + def truncate(self, size=None): + """Unsupported.""" + raise io.UnsupportedOperation + + def _partial_request(self, start_pos=None): + if start_pos is not None: + self.headers.update({"range": smart_open.utils.make_range_string(start_pos)}) + + response = self.session.get( + self.url, + auth=self.auth, + stream=True, + cert=self.cert, + headers=self.headers, + timeout=self.timeout, + ) + return response diff --git a/.venv/lib/python3.11/site-packages/smart_open/local_file.py b/.venv/lib/python3.11/site-packages/smart_open/local_file.py new file mode 100644 index 0000000000000000000000000000000000000000..e5f5c5aadef7845ce5cbb0dd6d134e9ebb02b272 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/local_file.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2020 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). 
+# +"""Implements the transport for the file:// schema.""" +import io +import os.path + +SCHEME = 'file' + +URI_EXAMPLES = ( + './local/path/file', + '~/local/path/file', + 'local/path/file', + './local/path/file.gz', + 'file:///home/user/file', + 'file:///home/user/file.bz2', +) + + +open = io.open + + +def parse_uri(uri_as_string): + local_path = extract_local_path(uri_as_string) + return dict(scheme=SCHEME, uri_path=local_path) + + +def open_uri(uri_as_string, mode, transport_params): + parsed_uri = parse_uri(uri_as_string) + fobj = io.open(parsed_uri['uri_path'], mode) + return fobj + + +def extract_local_path(uri_as_string): + if uri_as_string.startswith('file://'): + local_path = uri_as_string.replace('file://', '', 1) + else: + local_path = uri_as_string + return os.path.expanduser(local_path) diff --git a/.venv/lib/python3.11/site-packages/smart_open/s3.py b/.venv/lib/python3.11/site-packages/smart_open/s3.py new file mode 100644 index 0000000000000000000000000000000000000000..22796a4a323a8ef7298efbaa9f06b3f53cb0e6f6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/s3.py @@ -0,0 +1,1424 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# +"""Implements file-like objects for reading and writing from/to AWS S3.""" +from __future__ import annotations + +import http +import io +import functools +import logging +import time +import warnings + +from typing import ( + Callable, + List, + TYPE_CHECKING, +) + +try: + import boto3 + import botocore.client + import botocore.exceptions + import urllib3.exceptions +except ImportError: + MISSING_DEPS = True + +import smart_open.bytebuffer +import smart_open.concurrency +import smart_open.utils + +from smart_open import constants + + +if TYPE_CHECKING: + from mypy_boto3_s3.client import S3Client + from typing_extensions import Buffer + +logger = logging.getLogger(__name__) + +# +# AWS puts restrictions on the part size for multipart uploads. +# Each part must be more than 5MB, and less than 5GB. +# +# On top of that, our MultipartWriter has a min_part_size option. +# In retrospect, it's an unfortunate name, because it conflicts with the +# minimum allowable part size (5MB), but it's too late to change it, because +# people are using that parameter (unlike the MIN, DEFAULT, MAX constants). +# It really just means "part size": as soon as you have this many bytes, +# write a part to S3 (see the MultipartWriter.write method). +# + +MIN_PART_SIZE = 5 * 1024 ** 2 +"""The absolute minimum permitted by Amazon.""" + +DEFAULT_PART_SIZE = 50 * 1024**2 +"""The default part size for S3 multipart uploads, chosen carefully by smart_open""" + +MAX_PART_SIZE = 5 * 1024 ** 3 +"""The absolute maximum permitted by Amazon.""" + +SCHEMES = ("s3", "s3n", 's3u', "s3a") +DEFAULT_PORT = 443 +DEFAULT_HOST = 's3.amazonaws.com' + +DEFAULT_BUFFER_SIZE = 128 * 1024 + +URI_EXAMPLES = ( + 's3://my_bucket/my_key', + 's3://my_key:my_secret@my_bucket/my_key', + 's3://my_key:my_secret@my_server:my_port@my_bucket/my_key', +) + +# Returned by AWS when we try to seek beyond EOF. 
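+# _SeekableRawReader._open_body compares this against the error code in the
+# ClientError response and treats a match as "position past end of file"
+# rather than as a hard failure.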
+_OUT_OF_RANGE = 'InvalidRange' + + +class Retry: + def __init__(self): + self.attempts: int = 6 + self.sleep_seconds: int = 10 + self.exceptions: List[Exception] = [botocore.exceptions.EndpointConnectionError] + self.client_error_codes: List[str] = ['NoSuchUpload'] + + def _do(self, fn: Callable): + for attempt in range(self.attempts): + try: + return fn() + except tuple(self.exceptions) as err: + logger.critical( + 'Caught non-fatal %s, retrying %d more times', + err, + self.attempts - attempt - 1, + ) + logger.exception(err) + time.sleep(self.sleep_seconds) + except botocore.exceptions.ClientError as err: + error_code = err.response['Error'].get('Code') + if error_code not in self.client_error_codes: + raise + logger.critical( + 'Caught non-fatal ClientError (%s), retrying %d more times', + error_code, + self.attempts - attempt - 1, + ) + logger.exception(err) + time.sleep(self.sleep_seconds) + else: + logger.critical('encountered too many non-fatal errors, giving up') + raise IOError('%s failed after %d attempts', fn.func, self.attempts) + + +# +# The retry mechanism for this submodule. Client code may modify it, e.g. by +# updating RETRY.sleep_seconds and friends. +# +if 'MISSING_DEPS' not in locals(): + RETRY = Retry() + + +class _ClientWrapper: + """Wraps a client to inject the appropriate keyword args into each method call. + + The keyword args are a dictionary keyed by the fully qualified method name. + For example, S3.Client.create_multipart_upload. + + See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#client + + This wrapper behaves identically to the client otherwise. + """ + def __init__(self, client, kwargs): + self.client = client + self.kwargs = kwargs + + def __getattr__(self, method_name): + method = getattr(self.client, method_name) + kwargs = self.kwargs.get('S3.Client.%s' % method_name, {}) + return functools.partial(method, **kwargs) + + +def parse_uri(uri_as_string): + # + # Restrictions on bucket names and labels: + # + # - Bucket names must be at least 3 and no more than 63 characters long. + # - Bucket names must be a series of one or more labels. + # - Adjacent labels are separated by a single period (.). + # - Bucket names can contain lowercase letters, numbers, and hyphens. + # - Each label must start and end with a lowercase letter or a number. + # + # We use the above as a guide only, and do not perform any validation. We + # let boto3 take care of that for us. + # + split_uri = smart_open.utils.safe_urlsplit(uri_as_string) + assert split_uri.scheme in SCHEMES + + port = DEFAULT_PORT + host = DEFAULT_HOST + ordinary_calling_format = False + # + # These defaults tell boto3 to look for credentials elsewhere + # + access_id, access_secret = None, None + + # + # Common URI template [secret:key@][host[:port]@]bucket/object + # + # The urlparse function doesn't handle the above schema, so we have to do + # it ourselves. + # + uri = split_uri.netloc + split_uri.path + + # + # Attempt to extract edge-case authentication details from the URL. + # + # See: + # 1. https://summitroute.com/blog/2018/06/20/aws_security_credential_formats/ + # 2. 
test_s3_uri_with_credentials* in test_smart_open.py for example edge cases + # + if '@' in uri: + maybe_auth, rest = uri.split('@', 1) + if ':' in maybe_auth: + maybe_id, maybe_secret = maybe_auth.split(':', 1) + if '/' not in maybe_id: + access_id, access_secret = maybe_id, maybe_secret + uri = rest + + head, key_id = uri.split('/', 1) + if '@' in head and ':' in head: + ordinary_calling_format = True + host_port, bucket_id = head.split('@') + host, port = host_port.split(':', 1) + port = int(port) + elif '@' in head: + ordinary_calling_format = True + host, bucket_id = head.split('@') + else: + bucket_id = head + + return dict( + scheme=split_uri.scheme, + bucket_id=bucket_id, + key_id=key_id, + port=port, + host=host, + ordinary_calling_format=ordinary_calling_format, + access_id=access_id, + access_secret=access_secret, + ) + + +def _consolidate_params(uri, transport_params): + """Consolidates the parsed Uri with the additional parameters. + + This is necessary because the user can pass some of the parameters can in + two different ways: + + 1) Via the URI itself + 2) Via the transport parameters + + These are not mutually exclusive, but we have to pick one over the other + in a sensible way in order to proceed. + + """ + transport_params = dict(transport_params) + + def inject(**kwargs): + try: + client_kwargs = transport_params['client_kwargs'] + except KeyError: + client_kwargs = transport_params['client_kwargs'] = {} + + try: + init_kwargs = client_kwargs['S3.Client'] + except KeyError: + init_kwargs = client_kwargs['S3.Client'] = {} + + init_kwargs.update(**kwargs) + + client = transport_params.get('client') + if client is not None and (uri['access_id'] or uri['access_secret']): + logger.warning( + 'ignoring credentials parsed from URL because they conflict with ' + 'transport_params["client"]. Set transport_params["client"] to None ' + 'to suppress this warning.' + ) + uri.update(access_id=None, access_secret=None) + elif (uri['access_id'] and uri['access_secret']): + inject( + aws_access_key_id=uri['access_id'], + aws_secret_access_key=uri['access_secret'], + ) + uri.update(access_id=None, access_secret=None) + + if client is not None and uri['host'] != DEFAULT_HOST: + logger.warning( + 'ignoring endpoint_url parsed from URL because they conflict with ' + 'transport_params["client"]. Set transport_params["client"] to None ' + 'to suppress this warning.' + ) + uri.update(host=None) + elif uri['host'] != DEFAULT_HOST: + if uri['scheme'] == 's3u': + scheme = 'http' + else: + scheme = 'https' + inject(endpoint_url=scheme + '://%(host)s:%(port)d' % uri) + uri.update(host=None) + + return uri, transport_params + + +def open_uri(uri, mode, transport_params): + deprecated = ( + 'multipart_upload_kwargs', + 'object_kwargs', + 'resource', + 'resource_kwargs', + 'session', + 'singlepart_upload_kwargs', + ) + detected = [k for k in deprecated if k in transport_params] + if detected: + doc_url = ( + 'https://github.com/RaRe-Technologies/smart_open/blob/develop/' + 'MIGRATING_FROM_OLDER_VERSIONS.rst' + ) + # + # We use warnings.warn /w UserWarning instead of logger.warn here because + # + # 1) Not everyone has logging enabled; and + # 2) check_kwargs (below) already uses logger.warn with a similar message + # + # https://github.com/RaRe-Technologies/smart_open/issues/614 + # + message = ( + 'ignoring the following deprecated transport parameters: %r. 
' + 'See <%s> for details' % (detected, doc_url) + ) + warnings.warn(message, UserWarning) + parsed_uri = parse_uri(uri) + parsed_uri, transport_params = _consolidate_params(parsed_uri, transport_params) + kwargs = smart_open.utils.check_kwargs(open, transport_params) + return open(parsed_uri['bucket_id'], parsed_uri['key_id'], mode, **kwargs) + + +def open( + bucket_id, + key_id, + mode, + version_id=None, + buffer_size=DEFAULT_BUFFER_SIZE, + min_part_size=DEFAULT_PART_SIZE, + multipart_upload=True, + defer_seek=False, + client=None, + client_kwargs=None, + writebuffer=None, +): + """Open an S3 object for reading or writing. + + Parameters + ---------- + bucket_id: str + The name of the bucket this object resides in. + key_id: str + The name of the key within the bucket. + mode: str + The mode for opening the object. Must be either "rb" or "wb". + buffer_size: int, optional + The buffer size to use when performing I/O. + min_part_size: int, optional + The minimum part size for multipart uploads, in bytes. + + When the writebuffer contains this many bytes, smart_open will upload + the bytes to S3 as a single part of a multi-part upload, freeing the + buffer either partially or entirely. When you close the writer, it + will assemble the parts together. + + The value determines the upper limit for the writebuffer. If buffer + space is short (e.g. you are buffering to memory), then use a smaller + value for min_part_size, or consider buffering to disk instead (see + the writebuffer option). + + The value must be between 5MB and 5GB. If you specify a value outside + of this range, smart_open will adjust it for you, because otherwise the + upload _will_ fail. + + For writing only. Does not apply if you set multipart_upload=False. + multipart_upload: bool, optional + Default: `True` + If set to `True`, will use multipart upload for writing to S3. If set + to `False`, S3 upload will use the S3 Single-Part Upload API, which + is more ideal for small file sizes. + + For writing only. + version_id: str, optional + Version of the object, used when reading object. + If None, will fetch the most recent version. + defer_seek: boolean, optional + Default: `False` + If set to `True` on a file opened for reading, GetObject will not be + called until the first seek() or read(). + Avoids redundant API queries when seeking before reading. + client: object, optional + The S3 client to use when working with boto3. + If you don't specify this, then smart_open will create a new client for you. + client_kwargs: dict, optional + Additional parameters to pass to the relevant functions of the client. + The keys are fully qualified method names, e.g. `S3.Client.create_multipart_upload`. + The values are kwargs to pass to that method each time it is called. + writebuffer: IO[bytes], optional + By default, this module will buffer data in memory using io.BytesIO + when writing. Pass another binary IO instance here to use it instead. + For example, you may pass a file object to buffer to local disk instead + of in RAM. Use this to keep RAM usage low at the expense of additional + disk IO. If you pass in an open file, then you are responsible for + cleaning it up after writing completes. 
+ """ + logger.debug('%r', locals()) + if mode not in constants.BINARY_MODES: + raise NotImplementedError('bad mode: %r expected one of %r' % (mode, constants.BINARY_MODES)) + + if (mode == constants.WRITE_BINARY) and (version_id is not None): + raise ValueError("version_id must be None when writing") + + if mode == constants.READ_BINARY: + fileobj = Reader( + bucket_id, + key_id, + version_id=version_id, + buffer_size=buffer_size, + defer_seek=defer_seek, + client=client, + client_kwargs=client_kwargs, + ) + elif mode == constants.WRITE_BINARY: + if multipart_upload: + fileobj = MultipartWriter( + bucket_id, + key_id, + client=client, + client_kwargs=client_kwargs, + writebuffer=writebuffer, + part_size=min_part_size, + ) + else: + fileobj = SinglepartWriter( + bucket_id, + key_id, + client=client, + client_kwargs=client_kwargs, + writebuffer=writebuffer, + ) + else: + assert False, 'unexpected mode: %r' % mode + + fileobj.name = key_id + return fileobj + + +def _get(client, bucket, key, version, range_string): + try: + params = dict(Bucket=bucket, Key=key) + if version: + params["VersionId"] = version + if range_string: + params["Range"] = range_string + + return client.get_object(**params) + except botocore.client.ClientError as error: + wrapped_error = IOError( + 'unable to access bucket: %r key: %r version: %r error: %s' % ( + bucket, key, version, error + ) + ) + wrapped_error.backend_error = error + raise wrapped_error from error + + +def _unwrap_ioerror(ioe): + """Given an IOError from _get, return the 'Error' dictionary from boto.""" + try: + return ioe.backend_error.response['Error'] + except (AttributeError, KeyError): + return None + + +class _SeekableRawReader(object): + """Read an S3 object. + + This class is internal to the S3 submodule. + """ + + def __init__( + self, + client, + bucket, + key, + version_id=None, + ): + self._client = client + self._bucket = bucket + self._key = key + self._version_id = version_id + + self._content_length = None + self._position = 0 + self._body = None + + def seek(self, offset, whence=constants.WHENCE_START): + """Seek to the specified position. + + :param int offset: The offset in bytes. + :param int whence: Where the offset is from. + + :returns: the position after seeking. + :rtype: int + """ + if whence not in constants.WHENCE_CHOICES: + raise ValueError('invalid whence, expected one of %r' % constants.WHENCE_CHOICES) + + # + # Close old body explicitly. + # When first seek() after __init__(), self._body is not exist. + # + if self._body is not None: + self._body.close() + self._body = None + + start = None + stop = None + if whence == constants.WHENCE_START: + start = max(0, offset) + elif whence == constants.WHENCE_CURRENT: + start = max(0, offset + self._position) + else: + stop = max(0, -offset) + + # + # If we can figure out that we've read past the EOF, then we can save + # an extra API call. + # + if self._content_length is None: + reached_eof = False + elif start is not None and start >= self._content_length: + reached_eof = True + elif stop == 0: + reached_eof = True + else: + reached_eof = False + + if reached_eof: + self._body = io.BytesIO() + self._position = self._content_length + else: + self._open_body(start, stop) + + return self._position + + def _open_body(self, start=None, stop=None): + """Open a connection to download the specified range of bytes. Store + the open file handle in self._body. + + If no range is specified, start defaults to self._position. 
+ start and stop follow the semantics of the http range header, + so a stop without a start will read bytes beginning at stop. + + As a side effect, set self._content_length. Set self._position + to self._content_length if start is past end of file. + """ + if start is None and stop is None: + start = self._position + range_string = smart_open.utils.make_range_string(start, stop) + + try: + # Optimistically try to fetch the requested content range. + response = _get( + self._client, + self._bucket, + self._key, + self._version_id, + range_string, + ) + except IOError as ioe: + # Handle requested content range exceeding content size. + error_response = _unwrap_ioerror(ioe) + if error_response is None or error_response.get('Code') != _OUT_OF_RANGE: + raise + try: + self._position = self._content_length = int(error_response['ActualObjectSize']) + self._body = io.BytesIO() + except KeyError: + response = _get( + self._client, + self._bucket, + self._key, + self._version_id, + None, + ) + self._position = self._content_length = response["ContentLength"] + self._body = response["Body"] + else: + # + # Keep track of how many times boto3's built-in retry mechanism + # activated. + # + # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#checking-retry-attempts-in-an-aws-service-response + # + logger.debug( + '%s: RetryAttempts: %d', + self, + response['ResponseMetadata']['RetryAttempts'], + ) + # + # range request may not always return partial content, see: + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Range_requests#partial_request_responses + # + status_code = response['ResponseMetadata']['HTTPStatusCode'] + if status_code == http.HTTPStatus.PARTIAL_CONTENT: + _, start, stop, length = smart_open.utils.parse_content_range(response['ContentRange']) + self._position = start + elif status_code == http.HTTPStatus.OK: + length = response["ContentLength"] + self._content_length = length + self._body = response['Body'] + + def read(self, size=-1): + """Read from the continuous connection with the remote peer.""" + if self._body is None: + # This is necessary for the very first read() after __init__(). + self._open_body() + if self._position >= self._content_length: + return b'' + + # + # Boto3 has built-in error handling and retry mechanisms: + # + # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html + # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html + # + # Unfortunately, it isn't always enough. There is still a non-zero + # possibility that an exception will slip past these mechanisms and + # terminate the read prematurely. Luckily, at this stage, it's very + # simple to recover from the problem: wait a little bit, reopen the + # HTTP connection and try again. Usually, a single retry attempt is + # enough to recover, but we try multiple times "just in case". 
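+        #
+        # The schedule below sleeps 1, 2, 4, 8 and 16 seconds after each
+        # failed attempt, i.e. an exponential backoff totalling ~31 seconds.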
+ # + for attempt, seconds in enumerate([1, 2, 4, 8, 16], 1): + try: + if size == -1: + binary = self._body.read() + else: + binary = self._body.read(size) + except ( + ConnectionResetError, + botocore.exceptions.BotoCoreError, + urllib3.exceptions.HTTPError, + ) as err: + logger.warning( + '%s: caught %r while reading %d bytes, sleeping %ds before retry', + self, + err, + size, + seconds, + ) + time.sleep(seconds) + self._open_body() + else: + self._position += len(binary) + return binary + + raise IOError('%s: failed to read %d bytes after %d attempts' % (self, size, attempt)) + + def __str__(self): + return 'smart_open.s3._SeekableReader(%r, %r)' % (self._bucket, self._key) + + +def _initialize_boto3(rw, client, client_kwargs, bucket, key): + """Created the required objects for accessing S3. Ideally, they have + been already created for us and we can just reuse them.""" + if client_kwargs is None: + client_kwargs = {} + + if client is None: + init_kwargs = client_kwargs.get('S3.Client', {}) + client = boto3.client('s3', **init_kwargs) + assert client + + rw._client = _ClientWrapper(client, client_kwargs) + rw._bucket = bucket + rw._key = key + + +class Reader(io.BufferedIOBase): + """Reads bytes from S3. + + Implements the io.BufferedIOBase interface of the standard library.""" + + def __init__( + self, + bucket, + key, + version_id=None, + buffer_size=DEFAULT_BUFFER_SIZE, + line_terminator=constants.BINARY_NEWLINE, + defer_seek=False, + client=None, + client_kwargs=None, + ): + self._version_id = version_id + self._buffer_size = buffer_size + + _initialize_boto3(self, client, client_kwargs, bucket, key) + + self._raw_reader = _SeekableRawReader( + self._client, + bucket, + key, + self._version_id, + ) + self._current_pos = 0 + self._buffer = smart_open.bytebuffer.ByteBuffer(buffer_size) + self._eof = False + self._line_terminator = line_terminator + self._seek_initialized = False + + # + # This member is part of the io.BufferedIOBase interface. + # + self.raw = None + + if not defer_seek: + self.seek(0) + + # + # io.BufferedIOBase methods. + # + + def close(self): + """Flush and close this stream.""" + logger.debug("close: called") + pass + + def readable(self): + """Return True if the stream can be read from.""" + return True + + def read(self, size=-1): + """Read up to size bytes from the object and return them.""" + if size == 0: + return b'' + elif size < 0: + # call read() before setting _current_pos to make sure _content_length is set + out = self._read_from_buffer() + self._raw_reader.read() + self._current_pos = self._raw_reader._content_length + return out + + # + # Return unused data first + # + if len(self._buffer) >= size: + return self._read_from_buffer(size) + + # + # If the stream is finished, return what we have. + # + if self._eof: + return self._read_from_buffer() + + self._fill_buffer(size) + return self._read_from_buffer(size) + + def read1(self, size=-1): + """This is the same as read().""" + return self.read(size=size) + + def readinto(self, b): + """Read up to len(b) bytes into b, and return the number of bytes + read.""" + data = self.read(len(b)) + if not data: + return 0 + b[:len(data)] = data + return len(data) + + def readline(self, limit=-1): + """Read up to and including the next newline. Returns the bytes read.""" + if limit != -1: + raise NotImplementedError('limits other than -1 not implemented yet') + + # + # A single line may span multiple buffers. 
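+        # We accumulate the pieces in a BytesIO and keep refilling the
+        # buffer until we see the terminator or exhaust the stream.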
+ # + line = io.BytesIO() + while not (self._eof and len(self._buffer) == 0): + line_part = self._buffer.readline(self._line_terminator) + line.write(line_part) + self._current_pos += len(line_part) + + if line_part.endswith(self._line_terminator): + break + else: + self._fill_buffer() + + return line.getvalue() + + def seekable(self): + """If False, seek(), tell() and truncate() will raise IOError. + + We offer only seek support, and no truncate support.""" + return True + + def seek(self, offset, whence=constants.WHENCE_START): + """Seek to the specified position. + + :param int offset: The offset in bytes. + :param int whence: Where the offset is from. + + Returns the position after seeking.""" + # Convert relative offset to absolute, since self._raw_reader + # doesn't know our current position. + if whence == constants.WHENCE_CURRENT: + whence = constants.WHENCE_START + offset += self._current_pos + + if not self._seek_initialized or not ( + whence == constants.WHENCE_START and offset == self._current_pos + ): + self._current_pos = self._raw_reader.seek(offset, whence) + + self._buffer.empty() + + self._eof = self._current_pos == self._raw_reader._content_length + + self._seek_initialized = True + return self._current_pos + + def tell(self): + """Return the current position within the file.""" + return self._current_pos + + def truncate(self, size=None): + """Unsupported.""" + raise io.UnsupportedOperation + + def detach(self): + """Unsupported.""" + raise io.UnsupportedOperation + + def terminate(self): + """Do nothing.""" + pass + + def to_boto3(self, resource): + """Create an **independent** `boto3.s3.Object` instance that points to + the same S3 object as this instance. + Changes to the returned object will not affect the current instance. + """ + assert resource, 'resource must be a boto3.resource instance' + obj = resource.Object(self._bucket, self._key) + if self._version_id is not None: + return obj.Version(self._version_id) + else: + return obj + + # + # Internal methods. + # + def _read_from_buffer(self, size=-1): + """Remove at most size bytes from our buffer and return them.""" + size = size if size >= 0 else len(self._buffer) + part = self._buffer.read(size) + self._current_pos += len(part) + return part + + def _fill_buffer(self, size=-1): + size = max(size, self._buffer._chunk_size) + while len(self._buffer) < size and not self._eof: + bytes_read = self._buffer.fill(self._raw_reader) + if bytes_read == 0: + logger.debug('%s: reached EOF while filling buffer', self) + self._eof = True + + def __str__(self): + return "smart_open.s3.Reader(%r, %r)" % (self._bucket, self._key) + + def __repr__(self): + return ( + "smart_open.s3.Reader(" + "bucket=%r, " + "key=%r, " + "version_id=%r, " + "buffer_size=%r, " + "line_terminator=%r)" + ) % ( + self._bucket, + self._key, + self._version_id, + self._buffer_size, + self._line_terminator, + ) + + +class MultipartWriter(io.BufferedIOBase): + """Writes bytes to S3 using the multi part API. 
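+
+    Incoming bytes are buffered locally and uploaded as a separate part
+    whenever the buffer reaches ``part_size`` bytes; the parts are combined
+    into the final object when the writer is closed.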
+ + Implements the io.BufferedIOBase interface of the standard library.""" + _upload_id = None # so `closed` property works in case __init__ fails and __del__ is called + + def __init__( + self, + bucket, + key, + part_size=DEFAULT_PART_SIZE, + client=None, + client_kwargs=None, + writebuffer: io.BytesIO | None = None, + ): + adjusted_ps = smart_open.utils.clamp(part_size, MIN_PART_SIZE, MAX_PART_SIZE) + if part_size != adjusted_ps: + logger.warning(f"adjusting part_size from {part_size} to {adjusted_ps}") + part_size = adjusted_ps + self._part_size = part_size + + _initialize_boto3(self, client, client_kwargs, bucket, key) + self._client: S3Client + self._bucket: str + self._key: str + + try: + partial = functools.partial( + self._client.create_multipart_upload, + Bucket=bucket, + Key=key, + ) + self._upload_id = RETRY._do(partial)['UploadId'] + except botocore.client.ClientError as error: + raise ValueError( + 'the bucket %r does not exist, or is forbidden for access (%r)' % ( + bucket, error + ) + ) from error + + if writebuffer is None: + self._buf = io.BytesIO() + else: + self._buf = writebuffer + + self._total_bytes = 0 + self._total_parts = 0 + self._parts: list[dict[str, object]] = [] + + # + # This member is part of the io.BufferedIOBase interface. + # + self.raw = None # type: ignore[assignment] + + def flush(self): + pass + + # + # Override some methods from io.IOBase. + # + def close(self): + logger.debug("close: called") + if self.closed: + return + + if self._buf.tell(): + self._upload_next_part() + + logger.debug('%s: completing multipart upload', self) + if self._total_bytes and self._upload_id: + partial = functools.partial( + self._client.complete_multipart_upload, + Bucket=self._bucket, + Key=self._key, + UploadId=self._upload_id, + MultipartUpload={'Parts': self._parts}, + ) + RETRY._do(partial) + logger.debug('%s: completed multipart upload', self) + elif self._upload_id: + # + # AWS complains with "The XML you provided was not well-formed or + # did not validate against our published schema" when the input is + # completely empty => abort the upload, no file created. + # + # We work around this by creating an empty file explicitly. + # + self._client.abort_multipart_upload( + Bucket=self._bucket, + Key=self._key, + UploadId=self._upload_id, + ) + self._client.put_object( + Bucket=self._bucket, + Key=self._key, + Body=b'', + ) + logger.debug('%s: wrote 0 bytes to imitate multipart upload', self) + self._upload_id = None + + @property + def closed(self): + return self._upload_id is None + + def writable(self): + """Return True if the stream supports writing.""" + return True + + def seekable(self): + """If False, seek(), tell() and truncate() will raise IOError. + + We offer only tell support, and no seek or truncate support.""" + return True + + def seek(self, offset, whence=constants.WHENCE_START): + """Unsupported.""" + raise io.UnsupportedOperation + + def truncate(self, size=None): + """Unsupported.""" + raise io.UnsupportedOperation + + def tell(self): + """Return the current stream position.""" + return self._total_bytes + + # + # io.BufferedIOBase methods. + # + def detach(self): + raise io.UnsupportedOperation("detach() not supported") + + def write(self, b: Buffer) -> int: + """Write the given buffer (bytes, bytearray, memoryview or any buffer + interface implementation) to the S3 file. 
+ + For more information about buffers, see https://docs.python.org/3/c-api/buffer.html + + There's buffering happening under the covers, so this may not actually + do any HTTP transfer right away.""" + offset = 0 + mv = memoryview(b) + self._total_bytes += len(mv) + + # + # botocore does not accept memoryview, otherwise we could've gotten + # away with not needing to write a copy to the buffer aside from cases + # where b is smaller than part_size + # + while offset < len(mv): + start = offset + end = offset + self._part_size - self._buf.tell() + self._buf.write(mv[start:end]) + if self._buf.tell() < self._part_size: + # + # Not enough data to write a new part just yet. The assert + # ensures that we've consumed all of the input buffer. + # + assert end >= len(mv) + return len(mv) + + self._upload_next_part() + offset = end + return len(mv) + + def terminate(self): + """Cancel the underlying multipart upload.""" + if self.closed: + return + logger.debug('%s: terminating multipart upload', self) + self._client.abort_multipart_upload( + Bucket=self._bucket, + Key=self._key, + UploadId=self._upload_id, + ) + self._upload_id = None + logger.debug('%s: terminated multipart upload', self) + + def to_boto3(self, resource): + """Create an **independent** `boto3.s3.Object` instance that points to + the same S3 object as this instance. + Changes to the returned object will not affect the current instance. + """ + assert resource, 'resource must be a boto3.resource instance' + return resource.Object(self._bucket, self._key) + + # + # Internal methods. + # + def _upload_next_part(self) -> None: + part_num = self._total_parts + 1 + logger.info( + "%s: uploading part_num: %i, %i bytes (total %.3fGB)", + self, + part_num, + self._buf.tell(), + self._total_bytes / 1024.0 ** 3, + ) + self._buf.seek(0) + + # + # Network problems in the middle of an upload are particularly + # troublesome. We don't want to abort the entire upload just because + # of a temporary connection problem, so this part needs to be + # especially robust. + # + upload = RETRY._do( + functools.partial( + self._client.upload_part, + Bucket=self._bucket, + Key=self._key, + UploadId=self._upload_id, + PartNumber=part_num, + Body=self._buf, + ) + ) + + self._parts.append({'ETag': upload['ETag'], 'PartNumber': part_num}) + logger.debug("%s: upload of part_num #%i finished", self, part_num) + + self._total_parts += 1 + + self._buf.seek(0) + self._buf.truncate(0) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + self.terminate() + else: + self.close() + + def __str__(self): + return "smart_open.s3.MultipartWriter(%r, %r)" % (self._bucket, self._key) + + def __repr__(self): + return "smart_open.s3.MultipartWriter(bucket=%r, key=%r, part_size=%r)" % ( + self._bucket, + self._key, + self._part_size, + ) + + +class SinglepartWriter(io.BufferedIOBase): + """Writes bytes to S3 using the single part API. + + Implements the io.BufferedIOBase interface of the standard library. + + This class buffers all of its input in memory until its `close` method is called. 
Only then will + the data be written to S3 and the buffer is released.""" + _buf = None # so `closed` property works in case __init__ fails and __del__ is called + + def __init__( + self, + bucket, + key, + client=None, + client_kwargs=None, + writebuffer=None, + ): + _initialize_boto3(self, client, client_kwargs, bucket, key) + + try: + self._client.head_bucket(Bucket=bucket) + except botocore.client.ClientError as e: + raise ValueError('the bucket %r does not exist, or is forbidden for access' % bucket) from e + + if writebuffer is None: + self._buf = io.BytesIO() + else: + self._buf = writebuffer + + self._total_bytes = 0 + + # + # This member is part of the io.BufferedIOBase interface. + # + self.raw = None + + def flush(self): + pass + + # + # Override some methods from io.IOBase. + # + def close(self): + logger.debug("close: called") + if self.closed: + return + + self._buf.seek(0) + + try: + self._client.put_object( + Bucket=self._bucket, + Key=self._key, + Body=self._buf, + ) + except botocore.client.ClientError as e: + raise ValueError( + 'the bucket %r does not exist, or is forbidden for access' % self._bucket) from e + + logger.debug("%s: direct upload finished", self) + self._buf = None + + @property + def closed(self): + return self._buf is None + + def writable(self): + """Return True if the stream supports writing.""" + return True + + def seekable(self): + """If False, seek(), tell() and truncate() will raise IOError. + + We offer only tell support, and no seek or truncate support.""" + return True + + def seek(self, offset, whence=constants.WHENCE_START): + """Unsupported.""" + raise io.UnsupportedOperation + + def truncate(self, size=None): + """Unsupported.""" + raise io.UnsupportedOperation + + def tell(self): + """Return the current stream position.""" + return self._total_bytes + + # + # io.BufferedIOBase methods. + # + def detach(self): + raise io.UnsupportedOperation("detach() not supported") + + def write(self, b): + """Write the given buffer (bytes, bytearray, memoryview or any buffer + interface implementation) into the buffer. Content of the buffer will be + written to S3 on close as a single-part upload. + + For more information about buffers, see https://docs.python.org/3/c-api/buffer.html""" + + length = self._buf.write(b) + self._total_bytes += length + return length + + def terminate(self): + self._buf = None + logger.debug('%s: terminated singlepart upload', self) + + # + # Internal methods. + # + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + self.terminate() + else: + self.close() + + def __str__(self): + return "smart_open.s3.SinglepartWriter(%r, %r)" % (self._bucket, self._key) + + def __repr__(self): + return "smart_open.s3.SinglepartWriter(bucket=%r, key=%r)" % (self._bucket, self._key) + + +def _accept_all(key): + return True + + +def iter_bucket( + bucket_name, + prefix='', + accept_key=None, + key_limit=None, + workers=16, + retries=3, + **session_kwargs): + """ + Iterate and download all S3 objects under `s3://bucket_name/prefix`. + + Parameters + ---------- + bucket_name: str + The name of the bucket. + prefix: str, optional + Limits the iteration to keys starting with the prefix. + accept_key: callable, optional + This is a function that accepts a key name (unicode string) and + returns True/False, signalling whether the given key should be downloaded. + The default behavior is to accept all keys. 
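+        For example (illustrative): ``accept_key=lambda key: key.endswith('.csv')``
+        restricts the iteration to CSV files.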
+    key_limit: int, optional
+        If specified, the iterator will stop after yielding this many results.
+    workers: int, optional
+        The number of subprocesses to use.
+    retries: int, optional
+        The number of times to retry a failed download.
+    session_kwargs: dict, optional
+        Keyword arguments to pass when creating a new session.
+        For a list of available names and values, see:
+        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session
+
+
+    Yields
+    ------
+    str
+        The full key name (does not include the bucket name).
+    bytes
+        The full contents of the key.
+
+    Notes
+    -----
+    The keys are processed in parallel, using `workers` processes (default: 16),
+    to greatly speed up downloads. If multiprocessing is not available (i.e.
+    _MULTIPROCESSING is False), this parameter will be ignored.
+
+    Examples
+    --------
+
+    >>> # get all JSON files under "mybucket/foo/"
+    >>> for key, content in iter_bucket(
+    ...         bucket_name, prefix='foo/',
+    ...         accept_key=lambda key: key.endswith('.json')):
+    ...     print(key, len(content))
+
+    >>> # limit to 10k files, using 32 parallel workers (default is 16)
+    >>> for key, content in iter_bucket(bucket_name, key_limit=10000, workers=32):
+    ...     print(key, len(content))
+    """
+    if accept_key is None:
+        accept_key = _accept_all
+
+    #
+    # If people insist on giving us bucket instances, silently extract the name
+    # before moving on. Works for boto3 as well as boto.
+    #
+    try:
+        bucket_name = bucket_name.name
+    except AttributeError:
+        pass
+
+    total_size, key_no = 0, -1
+    key_iterator = _list_bucket(
+        bucket_name,
+        prefix=prefix,
+        accept_key=accept_key,
+        **session_kwargs)
+    download_key = functools.partial(
+        _download_key,
+        bucket_name=bucket_name,
+        retries=retries,
+        **session_kwargs)
+
+    with smart_open.concurrency.create_pool(processes=workers) as pool:
+        result_iterator = pool.imap_unordered(download_key, key_iterator)
+        key_no = 0
+        while True:
+            try:
+                (key, content) = result_iterator.__next__()
+                if key_no % 1000 == 0:
+                    logger.info(
+                        "yielding key #%i: %s, size %i (total %.1fMB)",
+                        key_no, key, len(content), total_size / 1024.0 ** 2
+                    )
+                yield key, content
+                total_size += len(content)
+                if key_limit is not None and key_no + 1 >= key_limit:
+                    # we were asked to output only a limited number of keys => we're done
+                    break
+            except botocore.exceptions.ClientError as err:
+                #
+                # ignore 404 not found errors: they mean the object was deleted
+                # after we listed the contents of the bucket, but before we
+                # downloaded the object.
+                #
+                if not ('Error' in err.response and err.response['Error'].get('Code') == '404'):
+                    raise err
+            except StopIteration:
+                break
+            key_no += 1
+    logger.info("processed %i keys, total size %i", key_no + 1, total_size)
+
+
+def _list_bucket(
+        bucket_name,
+        prefix='',
+        accept_key=lambda k: True,
+        **session_kwargs):
+    session = boto3.session.Session(**session_kwargs)
+    client = session.client('s3')
+    ctoken = None
+
+    while True:
+        # list_objects_v2 doesn't like a None value for ContinuationToken
+        # so we don't set it if we don't have one.
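+        #
+        # Illustrative request shapes (hypothetical values):
+        #
+        #   first page:   client.list_objects_v2(Bucket=bucket_name, Prefix=prefix)
+        #   later pages:  client.list_objects_v2(Bucket=bucket_name, Prefix=prefix,
+        #                                        ContinuationToken=ctoken)
+        #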
+ if ctoken: + kwargs = dict(Bucket=bucket_name, Prefix=prefix, ContinuationToken=ctoken) + else: + kwargs = dict(Bucket=bucket_name, Prefix=prefix) + response = client.list_objects_v2(**kwargs) + try: + content = response['Contents'] + except KeyError: + pass + else: + for c in content: + key = c['Key'] + if accept_key(key): + yield key + ctoken = response.get('NextContinuationToken', None) + if not ctoken: + break + + +def _download_key(key_name, bucket_name=None, retries=3, **session_kwargs): + if bucket_name is None: + raise ValueError('bucket_name may not be None') + + # + # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html#multithreading-or-multiprocessing-with-resources + # + session = boto3.session.Session(**session_kwargs) + s3 = session.resource('s3') + bucket = s3.Bucket(bucket_name) + + # Sometimes, https://github.com/boto/boto/issues/2409 can happen + # because of network issues on either side. + # Retry up to 3 times to ensure its not a transient issue. + for x in range(retries + 1): + try: + content_bytes = _download_fileobj(bucket, key_name) + except botocore.client.ClientError: + # Actually fail on last pass through the loop + if x == retries: + raise + # Otherwise, try again, as this might be a transient timeout + pass + else: + return key_name, content_bytes + + +def _download_fileobj(bucket, key_name): + # + # This is a separate function only because it makes it easier to inject + # exceptions during tests. + # + buf = io.BytesIO() + bucket.download_fileobj(key_name, buf) + return buf.getvalue() diff --git a/.venv/lib/python3.11/site-packages/smart_open/smart_open_lib.py b/.venv/lib/python3.11/site-packages/smart_open/smart_open_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..64012c41f74962db8310ee4f0f80345cecbc6034 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/smart_open_lib.py @@ -0,0 +1,533 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +"""Implements the majority of smart_open's top-level API. + +The main functions are: + + * ``parse_uri()`` + * ``open()`` + +""" + +import collections +import locale +import logging +import os +import os.path as P +import pathlib +import urllib.parse +import warnings + +# +# This module defines a function called smart_open so we cannot use +# smart_open.submodule to reference to the submodules. +# +import smart_open.local_file as so_file +import smart_open.compression as so_compression +import smart_open.utils as so_utils + +from smart_open import doctools +from smart_open import transport + +# +# For backwards compatibility and keeping old unit tests happy. +# +from smart_open.compression import register_compressor # noqa: F401 +from smart_open.utils import check_kwargs as _check_kwargs # noqa: F401 +from smart_open.utils import inspect_kwargs as _inspect_kwargs # noqa: F401 + +logger = logging.getLogger(__name__) + +DEFAULT_ENCODING = locale.getpreferredencoding(do_setlocale=False) + + +def _sniff_scheme(uri_as_string): + """Returns the scheme of the URL only, as a string.""" + # + # urlsplit doesn't work on Windows -- it parses the drive as the scheme... + # no protocol given => assume a local file + # + if os.name == 'nt' and '://' not in uri_as_string: + uri_as_string = 'file://' + uri_as_string + + return urllib.parse.urlsplit(uri_as_string).scheme + + +def parse_uri(uri_as_string): + """ + Parse the given URI from a string. 
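+
+    For example (illustrative), ``parse_uri('s3://mybucket/mykey')`` returns a
+    namedtuple built from whatever dict the s3 transport's own ``parse_uri``
+    produces (the scheme plus transport-specific fields).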
+ + Parameters + ---------- + uri_as_string: str + The URI to parse. + + Returns + ------- + collections.namedtuple + The parsed URI. + + Notes + ----- + smart_open/doctools.py magic goes here + """ + scheme = _sniff_scheme(uri_as_string) + submodule = transport.get_transport(scheme) + as_dict = submodule.parse_uri(uri_as_string) + + # + # The conversion to a namedtuple is just to keep the old tests happy while + # I'm still refactoring. + # + Uri = collections.namedtuple('Uri', sorted(as_dict.keys())) + return Uri(**as_dict) + + +# +# To keep old unit tests happy while I'm refactoring. +# +_parse_uri = parse_uri + +_builtin_open = open + + +def open( + uri, + mode='r', + buffering=-1, + encoding=None, + errors=None, + newline=None, + closefd=True, + opener=None, + compression=so_compression.INFER_FROM_EXTENSION, + transport_params=None, + ): + r"""Open the URI object, returning a file-like object. + + The URI is usually a string in a variety of formats. + For a full list of examples, see the :func:`parse_uri` function. + + The URI may also be one of: + + - an instance of the pathlib.Path class + - a stream (anything that implements io.IOBase-like functionality) + + Parameters + ---------- + uri: str or object + The object to open. + mode: str, optional + Mimicks built-in open parameter of the same name. + buffering: int, optional + Mimicks built-in open parameter of the same name. + encoding: str, optional + Mimicks built-in open parameter of the same name. + errors: str, optional + Mimicks built-in open parameter of the same name. + newline: str, optional + Mimicks built-in open parameter of the same name. + closefd: boolean, optional + Mimicks built-in open parameter of the same name. Ignored. + opener: object, optional + Mimicks built-in open parameter of the same name. Ignored. + compression: str, optional (see smart_open.compression.get_supported_compression_types) + Explicitly specify the compression/decompression behavior. + transport_params: dict, optional + Additional parameters for the transport layer (see notes below). + + Returns + ------- + A file-like object. + + Notes + ----- + smart_open has several implementations for its transport layer (e.g. S3, HTTP). + Each transport layer has a different set of keyword arguments for overriding + default behavior. If you specify a keyword argument that is *not* supported + by the transport layer being used, smart_open will ignore that argument and + log a warning message. + + smart_open/doctools.py magic goes here + + See Also + -------- + - `Standard library reference `__ + - `smart_open README.rst + `__ + + """ + logger.debug('%r', locals()) + + if not isinstance(mode, str): + raise TypeError('mode should be a string') + + if compression not in so_compression.get_supported_compression_types(): + raise ValueError(f'invalid compression type: {compression}') + + if transport_params is None: + transport_params = {} + + fobj = _shortcut_open( + uri, + mode, + compression=compression, + buffering=buffering, + encoding=encoding, + errors=errors, + newline=newline, + ) + if fobj is not None: + return fobj + + # + # This is a work-around for the problem described in Issue #144. + # If the user has explicitly specified an encoding, then assume they want + # us to open the destination in text mode, instead of the default binary. + # + # If we change the default mode to be text, and match the normal behavior + # of Py2 and 3, then the above assumption will be unnecessary. 
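+    #
+    # For example (hypothetical call), smart_open.open('s3://bucket/key', 'rb',
+    # encoding='utf-8') is treated as a text-mode open: the 'b' is dropped
+    # below.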
+ # + if encoding is not None and 'b' in mode: + mode = mode.replace('b', '') + + if isinstance(uri, pathlib.Path): + uri = str(uri) + + explicit_encoding = encoding + encoding = explicit_encoding if explicit_encoding else DEFAULT_ENCODING + + # + # This is how we get from the filename to the end result. Decompression is + # optional, but it always accepts bytes and returns bytes. + # + # Decoding is also optional, accepts bytes and returns text. The diagram + # below is for reading, for writing, the flow is from right to left, but + # the code is identical. + # + # open as binary decompress? decode? + # filename ---------------> bytes -------------> bytes ---------> text + # binary decompressed decode + # + + try: + binary_mode = _get_binary_mode(mode) + except ValueError as ve: + raise NotImplementedError(ve.args[0]) + + binary = _open_binary_stream(uri, binary_mode, transport_params) + filename = ( + binary.name + # if name attribute is not string-like (e.g. ftp socket fileno)... + if isinstance(getattr(binary, "name", None), (str, bytes)) + # ...fall back to uri + else uri + ) + decompressed = so_compression.compression_wrapper( + binary, + binary_mode, + compression, + filename=filename, + ) + + if 'b' not in mode or explicit_encoding is not None: + decoded = _encoding_wrapper( + decompressed, + mode, + encoding=encoding, + errors=errors, + newline=newline, + ) + else: + decoded = decompressed + + # + # There are some useful methods in the binary readers, e.g. to_boto3, that get + # hidden by the multiple layers of wrapping we just performed. Promote + # them so they are visible to the user. + # + if decoded != binary: + promoted_attrs = ['to_boto3'] + for attr in promoted_attrs: + try: + setattr(decoded, attr, getattr(binary, attr)) + except AttributeError: + pass + + return so_utils.FileLikeProxy(decoded, binary) + + +def _get_binary_mode(mode_str): + # + # https://docs.python.org/3/library/functions.html#open + # + # The order of characters in the mode parameter appears to be unspecified. + # The implementation follows the examples, just to be safe. + # + mode = list(mode_str) + binmode = [] + + if 't' in mode and 'b' in mode: + raise ValueError("can't have text and binary mode at once") + + counts = [mode.count(x) for x in 'rwa'] + if sum(counts) > 1: + raise ValueError("must have exactly one of create/read/write/append mode") + + def transfer(char): + binmode.append(mode.pop(mode.index(char))) + + if 'a' in mode: + transfer('a') + elif 'w' in mode: + transfer('w') + elif 'r' in mode: + transfer('r') + else: + raise ValueError( + "Must have exactly one of create/read/write/append " + "mode and at most one plus" + ) + + if 'b' in mode: + transfer('b') + elif 't' in mode: + mode.pop(mode.index('t')) + binmode.append('b') + else: + binmode.append('b') + + if '+' in mode: + transfer('+') + + # + # There shouldn't be anything left in the mode list at this stage. + # If there is, then either we've missed something and the implementation + # of this function is broken, or the original input mode is invalid. + # + if mode: + raise ValueError('invalid mode: %r' % mode_str) + + return ''.join(binmode) + + +def _shortcut_open( + uri, + mode, + compression, + buffering=-1, + encoding=None, + errors=None, + newline=None, + ): + """Try to open the URI using the standard library io.open function. + + This can be much faster than the alternative of opening in binary mode and + then decoding. + + This is only possible under the following conditions: + + 1. Opening a local file; and + 2. 
Compression is disabled + + If it is not possible to use the built-in open for the specified URI, returns None. + + :param str uri: A string indicating what to open. + :param str mode: The mode to pass to the open function. + :param str compression: The compression type selected. + :returns: The opened file + :rtype: file + """ + if not isinstance(uri, str): + return None + + scheme = _sniff_scheme(uri) + if scheme not in (transport.NO_SCHEME, so_file.SCHEME): + return None + + local_path = so_file.extract_local_path(uri) + if compression == so_compression.INFER_FROM_EXTENSION: + _, extension = P.splitext(local_path) + if extension in so_compression.get_supported_extensions(): + return None + elif compression != so_compression.NO_COMPRESSION: + return None + + open_kwargs = {} + if encoding is not None: + open_kwargs['encoding'] = encoding + mode = mode.replace('b', '') + if newline is not None: + open_kwargs['newline'] = newline + + # + # binary mode of the builtin/stdlib open function doesn't take an errors argument + # + if errors and 'b' not in mode: + open_kwargs['errors'] = errors + + return _builtin_open(local_path, mode, buffering=buffering, **open_kwargs) + + +def _open_binary_stream(uri, mode, transport_params): + """Open an arbitrary URI in the specified binary mode. + + Not all modes are supported for all protocols. + + :arg uri: The URI to open. May be a string, or something else. + :arg str mode: The mode to open with. Must be rb, wb or ab. + :arg transport_params: Keyword argumens for the transport layer. + :returns: A named file object + :rtype: file-like object with a .name attribute + """ + if mode not in ('rb', 'rb+', 'wb', 'wb+', 'ab', 'ab+'): + # + # This should really be a ValueError, but for the sake of compatibility + # with older versions, which raise NotImplementedError, we do the same. + # + raise NotImplementedError('unsupported mode: %r' % mode) + + if isinstance(uri, int): + # + # We're working with a file descriptor. If we open it, its name is + # just the integer value, which isn't helpful. Unfortunately, there's + # no easy cross-platform way to go from a file descriptor to the filename, + # so we just give up here. The user will have to handle their own + # compression, etc. explicitly. + # + fobj = _builtin_open(uri, mode, closefd=False) + return fobj + + if not isinstance(uri, str): + raise TypeError("don't know how to handle uri %s" % repr(uri)) + + scheme = _sniff_scheme(uri) + submodule = transport.get_transport(scheme) + fobj = submodule.open_uri(uri, mode, transport_params) + if not hasattr(fobj, 'name'): + fobj.name = uri + + return fobj + + +def _encoding_wrapper(fileobj, mode, encoding=None, errors=None, newline=None): + """Decode bytes into text, if necessary. + + If mode specifies binary access, does nothing, unless the encoding is + specified. A non-null encoding implies text mode. + + :arg fileobj: must quack like a filehandle object. + :arg str mode: is the mode which was originally requested by the user. + :arg str encoding: The text encoding to use. If mode is binary, overrides mode. + :arg str errors: The method to use when handling encoding/decoding errors. + :returns: a file object + """ + logger.debug('encoding_wrapper: %r', locals()) + + # + # If the mode is binary, but the user specified an encoding, assume they + # want text. 
If we don't make this assumption, ignore the encoding and + # return bytes, smart_open behavior will diverge from the built-in open: + # + # open(filename, encoding='utf-8') returns a text stream in Py3 + # smart_open(filename, encoding='utf-8') would return a byte stream + # without our assumption, because the default mode is rb. + # + if 'b' in mode and encoding is None: + return fileobj + + if encoding is None: + encoding = DEFAULT_ENCODING + + fileobj = so_utils.TextIOWrapper( + fileobj, + encoding=encoding, + errors=errors, + newline=newline, + write_through=True, + ) + return fileobj + + +class patch_pathlib(object): + """Replace `Path.open` with `smart_open.open`""" + + def __init__(self): + self.old_impl = _patch_pathlib(open) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + _patch_pathlib(self.old_impl) + + +def _patch_pathlib(func): + """Replace `Path.open` with `func`""" + old_impl = pathlib.Path.open + pathlib.Path.open = func + return old_impl + + +def smart_open( + uri, + mode='rb', + buffering=-1, + encoding=None, + errors=None, + newline=None, + closefd=True, + opener=None, + ignore_extension=False, + **kwargs + ): + # + # This is a thin wrapper of smart_open.open. It's here for backward + # compatibility. It works exactly like smart_open.open when the passed + # parameters are identical. Otherwise, it raises a DeprecationWarning. + # + # For completeness, the main differences of the old smart_open function: + # + # 1. Default mode was read binary (mode='rb') + # 2. compression parameter was called ignore_extension + # 3. Transport parameters were passed directly as kwargs + # + url = 'https://github.com/RaRe-Technologies/smart_open/blob/develop/MIGRATING_FROM_OLDER_VERSIONS.rst' + if kwargs: + raise DeprecationWarning( + 'The following keyword parameters are not supported: %r. ' + 'See %s for more information.' % (sorted(kwargs), url) + ) + message = 'This function is deprecated. See %s for more information' % url + warnings.warn(message, category=DeprecationWarning) + + if ignore_extension: + compression = so_compression.NO_COMPRESSION + else: + compression = so_compression.INFER_FROM_EXTENSION + del kwargs, url, message, ignore_extension + return open(**locals()) + + +# +# Prevent failures with doctools from messing up the entire library. We don't +# expect such failures, but contributed modules (e.g. new transport mechanisms) +# may not be as polished. +# +try: + doctools.tweak_open_docstring(open) + doctools.tweak_parse_uri_docstring(parse_uri) +except Exception as ex: + logger.error( + 'Encountered a non-fatal error while building docstrings (see below). ' + 'help(smart_open) will provide incomplete information as a result. ' + 'For full help text, see ' + '.' + ) + logger.exception(ex) diff --git a/.venv/lib/python3.11/site-packages/smart_open/ssh.py b/.venv/lib/python3.11/site-packages/smart_open/ssh.py new file mode 100644 index 0000000000000000000000000000000000000000..1dc73cbe58e00f970c545cef17b766dbaad6bc38 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/ssh.py @@ -0,0 +1,297 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +"""Implements I/O streams over SSH. + +Examples +-------- + +>>> with open('/proc/version_signature', host='1.2.3.4') as conn: +... 
print(conn.read())
+b'Ubuntu 4.4.0-1061.70-aws 4.4.131'
+
+Similarly, from a command line::
+
+    $ python -c "from smart_open import ssh;print(ssh.open('/proc/version_signature', host='1.2.3.4').read())"
+    b'Ubuntu 4.4.0-1061.70-aws 4.4.131'
+
+"""
+
+import getpass
+import os
+import logging
+import urllib.parse
+
+from typing import (
+    Dict,
+    Callable,
+    Tuple,
+)
+
+try:
+    import paramiko
+except ImportError:
+    MISSING_DEPS = True
+
+import smart_open.utils
+
+logger = logging.getLogger(__name__)
+
+#
+# Global storage for SSH connections.
+#
+_SSH = {}
+
+SCHEMES = ("ssh", "scp", "sftp")
+"""Supported URL schemes."""
+
+DEFAULT_PORT = 22
+
+URI_EXAMPLES = (
+    'ssh://username@host/path/file',
+    'ssh://username@host//path/file',
+    'scp://username@host/path/file',
+    'sftp://username@host/path/file',
+)
+
+#
+# Global storage for SSH config files.
+#
+_SSH_CONFIG_FILES = [os.path.expanduser("~/.ssh/config")]
+
+
+def _unquote(text):
+    return text and urllib.parse.unquote(text)
+
+
+def _str2bool(string):
+    if string == "no":
+        return False
+    if string == "yes":
+        return True
+    raise ValueError(f"Expected 'yes' / 'no', got {string}.")
+
+
+#
+# The parameter names used by Paramiko (and smart_open) differ slightly from
+# those used in ~/.ssh/config, so we use a mapping to bridge the gap.
+#
+# The keys are option names as they appear in Paramiko (and smart_open).
+# The values are tuples containing:
+#
+# 1. their corresponding names in the ~/.ssh/config file
+# 2. a callable to convert the parameter value from a string to the appropriate type
+#
+_PARAMIKO_CONFIG_MAP: Dict[str, Tuple[str, Callable]] = {
+    "timeout": ("connecttimeout", float),
+    "compress": ("compression", _str2bool),
+    "gss_auth": ("gssapiauthentication", _str2bool),
+    "gss_kex": ("gssapikeyexchange", _str2bool),
+    "gss_deleg_creds": ("gssapidelegatecredentials", _str2bool),
+    "gss_trust_dns": ("gssapitrustdns", _str2bool),
+}
+
+
+def parse_uri(uri_as_string):
+    split_uri = urllib.parse.urlsplit(uri_as_string)
+    assert split_uri.scheme in SCHEMES
+    return dict(
+        scheme=split_uri.scheme,
+        uri_path=_unquote(split_uri.path),
+        user=_unquote(split_uri.username),
+        host=split_uri.hostname,
+        port=int(split_uri.port) if split_uri.port else None,
+        password=_unquote(split_uri.password),
+    )
+
+
+def open_uri(uri, mode, transport_params):
+    # `connect_kwargs` is a legitimate param *only* for sftp, so this filters it out of validation
+    # (otherwise every call with this present complains it's not valid)
+    params_to_validate = {k: v for k, v in transport_params.items() if k != 'connect_kwargs'}
+    smart_open.utils.check_kwargs(open, params_to_validate)
+    parsed_uri = parse_uri(uri)
+    uri_path = parsed_uri.pop('uri_path')
+    parsed_uri.pop('scheme')
+    return open(uri_path, mode, transport_params=transport_params, **parsed_uri)
+
+
+def _connect_ssh(hostname, username, port, password, transport_params):
+    ssh = paramiko.SSHClient()
+    ssh.load_system_host_keys()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    kwargs = transport_params.get('connect_kwargs', {}).copy()
+    if 'key_filename' not in kwargs:
+        kwargs.setdefault('password', password)
+    kwargs.setdefault('username', username)
+    ssh.connect(hostname, port, **kwargs)
+    return ssh
+
+
+def _maybe_fetch_config(host, username=None, password=None, port=None, transport_params=None):
+    # If all fields are set, return as-is.
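+    #
+    # Illustrative: a call like _maybe_fetch_config('myalias') leaves
+    # username/port/etc. as None, so the ~/.ssh/config lookup below fills
+    # in the blanks.
+    #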
+ if not any(arg is None for arg in (host, username, password, port, transport_params)): + return host, username, password, port, transport_params + + if not host: + raise ValueError('you must specify the host to connect to') + if not transport_params: + transport_params = {} + if "connect_kwargs" not in transport_params: + transport_params["connect_kwargs"] = {} + + # Attempt to load an OpenSSH config. + # + # Connections configured in this way are not guaranteed to perform exactly + # as they do in typical usage due to mismatches between the set of OpenSSH + # configuration options and those that Paramiko supports. We provide a best + # attempt, and support: + # + # - hostname -> address resolution + # - username inference + # - port inference + # - identityfile inference + # - connection timeout inference + # - compression selection + # - GSS configuration + # + connect_params = transport_params["connect_kwargs"] + config_files = [f for f in _SSH_CONFIG_FILES if os.path.exists(f)] + # + # This is the actual name of the host. The input host may actually be an + # alias. + # + actual_hostname = "" + + for config_filename in config_files: + try: + cfg = paramiko.SSHConfig.from_path(config_filename) + except PermissionError: + continue + + if host not in cfg.get_hostnames(): + continue + + cfg = cfg.lookup(host) + if username is None: + username = cfg.get("user", None) + + if not actual_hostname: + actual_hostname = cfg["hostname"] + + if port is None: + try: + port = int(cfg["port"]) + except (IndexError, ValueError): + # + # Nb. ignore missing/invalid port numbers + # + pass + + # + # Special case, as we can have multiple identity files, so we check + # that the identityfile list has len > 0. This should be redundant, but + # keeping it for safety. + # + if connect_params.get("key_filename") is None: + identityfile = cfg.get("identityfile", []) + if len(identityfile): + connect_params["key_filename"] = identityfile + + for param_name, (sshcfg_name, from_str) in _PARAMIKO_CONFIG_MAP.items(): + if connect_params.get(param_name) is None and sshcfg_name in cfg: + connect_params[param_name] = from_str(cfg[sshcfg_name]) + + # + # Continue working through other config files, if there are any, + # as they may contain more options for our host + # + + if port is None: + port = DEFAULT_PORT + + if not username: + username = getpass.getuser() + + if actual_hostname: + host = actual_hostname + + return host, username, password, port, transport_params + + +def open(path, mode='r', host=None, user=None, password=None, port=None, transport_params=None): + """Open a file on a remote machine over SSH. + + Expects authentication to be already set up via existing keys on the local machine. + + Parameters + ---------- + path: str + The path to the file to open on the remote machine. + mode: str, optional + The mode to use for opening the file. + host: str, optional + The hostname of the remote machine. May not be None. + user: str, optional + The username to use to login to the remote machine. + If None, defaults to the name of the current user. + password: str, optional + The password to use to login to the remote machine. + port: int, optional + The port to connect to. + transport_params: dict, optional + Any additional settings to be passed to paramiko.SSHClient.connect + + Returns + ------- + A file-like object. + + Important + --------- + If you specify a previously unseen host, then its host key will be added to + the local ~/.ssh/known_hosts *automatically*. 
+ + If ``username`` or ``password`` are specified in *both* the uri and + ``transport_params``, ``transport_params`` will take precedence + """ + host, user, password, port, transport_params = _maybe_fetch_config( + host, user, password, port, transport_params + ) + + key = (host, user) + + attempts = 2 + for attempt in range(attempts): + try: + ssh = _SSH[key] + # Validate that the cached connection is still an active connection + # and if not, refresh the connection + if not ssh.get_transport().active: + ssh.close() + ssh = _SSH[key] = _connect_ssh(host, user, port, password, transport_params) + except KeyError: + ssh = _SSH[key] = _connect_ssh(host, user, port, password, transport_params) + + try: + transport = ssh.get_transport() + sftp_client = transport.open_sftp_client() + break + except paramiko.SSHException as ex: + connection_timed_out = ex.args and ex.args[0] == 'SSH session not active' + if attempt == attempts - 1 or not connection_timed_out: + raise + + # + # Try again. Delete the connection from the cache to force a + # reconnect in the next attempt. + # + del _SSH[key] + + fobj = sftp_client.open(path, mode) + fobj.name = path + return fobj diff --git a/.venv/lib/python3.11/site-packages/smart_open/transport.py b/.venv/lib/python3.11/site-packages/smart_open/transport.py new file mode 100644 index 0000000000000000000000000000000000000000..086ea2b014b09cd7219d0112069e816aa605bb8c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/transport.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2020 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# +"""Maintains a registry of transport mechanisms. + +The main entrypoint is :func:`get_transport`. See also :file:`extending.md`. + +""" +import importlib +import logging + +import smart_open.local_file + +logger = logging.getLogger(__name__) + +NO_SCHEME = '' + +_REGISTRY = {NO_SCHEME: smart_open.local_file} +_ERRORS = {} +_MISSING_DEPS_ERROR = """You are trying to use the %(module)s functionality of smart_open +but you do not have the correct %(module)s dependencies installed. Try: + + pip install smart_open[%(module)s] + +""" + + +def register_transport(submodule): + """Register a submodule as a transport mechanism for ``smart_open``. + + This module **must** have: + + - `SCHEME` attribute (or `SCHEMES`, if the submodule supports multiple schemes) + - `open` function + - `open_uri` function + - `parse_uri' function + + Once registered, you can get the submodule by calling :func:`get_transport`. + + """ + global _REGISTRY, _ERRORS + module_name = submodule + if isinstance(submodule, str): + try: + submodule = importlib.import_module(submodule) + except ImportError: + return + else: + module_name = submodule.__name__ + # Save only the last module name piece + module_name = module_name.rsplit(".")[-1] + + if hasattr(submodule, "SCHEME"): + schemes = [submodule.SCHEME] + elif hasattr(submodule, "SCHEMES"): + schemes = submodule.SCHEMES + else: + raise ValueError("%r does not have a .SCHEME or .SCHEMES attribute" % submodule) + + for f in ("open", "open_uri", "parse_uri"): + assert hasattr(submodule, f), "%r is missing %r" % (submodule, f) + + for scheme in schemes: + assert scheme not in _REGISTRY + if getattr(submodule, "MISSING_DEPS", False): + _ERRORS[scheme] = module_name + else: + _REGISTRY[scheme] = submodule + + +def get_transport(scheme): + """Get the submodule that handles transport for the specified scheme. 
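+
+    For example (illustrative), ``get_transport('s3')`` returns the
+    ``smart_open.s3`` submodule, provided its optional dependencies are
+    installed.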
+ + This submodule must have been previously registered via :func:`register_transport`. + + """ + global _ERRORS, _MISSING_DEPS_ERROR, _REGISTRY, SUPPORTED_SCHEMES + expected = SUPPORTED_SCHEMES + readme_url = ( + "https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst" + ) + message = ( + "Unable to handle scheme %(scheme)r, expected one of %(expected)r. " + "Extra dependencies required by %(scheme)r may be missing. " + "See <%(readme_url)s> for details." % locals() + ) + if scheme in _ERRORS: + raise ImportError(_MISSING_DEPS_ERROR % dict(module=_ERRORS[scheme])) + if scheme in _REGISTRY: + return _REGISTRY[scheme] + raise NotImplementedError(message) + + +register_transport(smart_open.local_file) +register_transport("smart_open.azure") +register_transport("smart_open.ftp") +register_transport("smart_open.gcs") +register_transport("smart_open.hdfs") +register_transport("smart_open.http") +register_transport("smart_open.s3") +register_transport("smart_open.ssh") +register_transport("smart_open.webhdfs") + +SUPPORTED_SCHEMES = tuple(sorted(_REGISTRY.keys())) +"""The transport schemes that the local installation of ``smart_open`` supports.""" diff --git a/.venv/lib/python3.11/site-packages/smart_open/utils.py b/.venv/lib/python3.11/site-packages/smart_open/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..efbb9374f93f4a6509c6b4d32d9e2813413e187e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/utils.py @@ -0,0 +1,232 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2020 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +"""Helper functions for documentation, etc.""" + +import inspect +import io +import logging +import urllib.parse + +import wrapt + +logger = logging.getLogger(__name__) + +WORKAROUND_SCHEMES = ['s3', 's3n', 's3u', 's3a', 'gs'] +QUESTION_MARK_PLACEHOLDER = '///smart_open.utils.QUESTION_MARK_PLACEHOLDER///' + + +def inspect_kwargs(kallable): + # + # inspect.getargspec got deprecated in Py3.4, and calling it spews + # deprecation warnings that we'd prefer to avoid. Unfortunately, older + # versions of Python (<3.3) did not have inspect.signature, so we need to + # handle them the old-fashioned getargspec way. + # + try: + signature = inspect.signature(kallable) + except AttributeError: + try: + args, varargs, keywords, defaults = inspect.getargspec(kallable) + except TypeError: + # + # Happens under Py2.7 with mocking. + # + return {} + + if not defaults: + return {} + supported_keywords = args[-len(defaults):] + return dict(zip(supported_keywords, defaults)) + else: + return { + name: param.default + for name, param in signature.parameters.items() + if param.default != inspect.Parameter.empty + } + + +def check_kwargs(kallable, kwargs): + """Check which keyword arguments the callable supports. + + Parameters + ---------- + kallable: callable + A function or method to test + kwargs: dict + The keyword arguments to check. If the callable doesn't support any + of these, a warning message will get printed. + + Returns + ------- + dict + A dictionary of argument names and values supported by the callable. 
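+
+    Examples
+    --------
+    Illustrative only (hypothetical callable)::
+
+        def connect(host, timeout=10):
+            ...
+
+        check_kwargs(connect, {'timeout': 5, 'bogus': 1})
+        # -> {'timeout': 5}, logging a warning about 'bogus'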
+ """ + supported_keywords = sorted(inspect_kwargs(kallable)) + unsupported_keywords = [k for k in sorted(kwargs) if k not in supported_keywords] + supported_kwargs = {k: v for (k, v) in kwargs.items() if k in supported_keywords} + + if unsupported_keywords: + logger.warning('ignoring unsupported keyword arguments: %r', unsupported_keywords) + + return supported_kwargs + + +def clamp(value, minval=0, maxval=None): + """Clamp a numeric value to a specific range. + + Parameters + ---------- + value: numeric + The value to clamp. + + minval: numeric + The lower bound. + + maxval: numeric + The upper bound. + + Returns + ------- + numeric + The clamped value. It will be in the range ``[minval, maxval]``. + + """ + if maxval is not None: + value = min(value, maxval) + value = max(value, minval) + return value + + +def make_range_string(start=None, stop=None): + """Create a byte range specifier in accordance with RFC-2616. + + Parameters + ---------- + start: int, optional + The start of the byte range. If unspecified, stop indicated offset from EOF. + + stop: int, optional + The end of the byte range. If unspecified, indicates EOF. + + Returns + ------- + str + A byte range specifier. + + """ + # + # https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + # + if start is None and stop is None: + raise ValueError("make_range_string requires either a stop or start value") + start_str = '' if start is None else str(start) + stop_str = '' if stop is None else str(stop) + return 'bytes=%s-%s' % (start_str, stop_str) + + +def parse_content_range(content_range): + """Extract units, start, stop, and length from a content range header like "bytes 0-846981/846982". + + Assumes a properly formatted content-range header from S3. + See werkzeug.http.parse_content_range_header for a more robust version. + + Parameters + ---------- + content_range: str + The content-range header to parse. + + Returns + ------- + tuple (units: str, start: int, stop: int, length: int) + The units and three integers from the content-range header. + + """ + units, numbers = content_range.split(' ', 1) + range, length = numbers.split('/', 1) + start, stop = range.split('-', 1) + return units, int(start), int(stop), int(length) + + +def safe_urlsplit(url): + """This is a hack to prevent the regular urlsplit from splitting around question marks. + + A question mark (?) in a URL typically indicates the start of a + querystring, and the standard library's urlparse function handles the + querystring separately. Unfortunately, question marks can also appear + _inside_ the actual URL for some schemas like S3, GS. + + Replaces question marks with a special placeholder substring prior to + splitting. This work-around behavior is disabled in the unlikely event the + placeholder is already part of the URL. If this affects you, consider + changing the value of QUESTION_MARK_PLACEHOLDER to something more suitable. + + See Also + -------- + https://bugs.python.org/issue43882 + https://github.com/python/cpython/blob/3.7/Lib/urllib/parse.py + https://github.com/RaRe-Technologies/smart_open/issues/285 + https://github.com/RaRe-Technologies/smart_open/issues/458 + smart_open/utils.py:QUESTION_MARK_PLACEHOLDER + """ + sr = urllib.parse.urlsplit(url, allow_fragments=False) + + placeholder = None + if sr.scheme in WORKAROUND_SCHEMES and '?' in url and QUESTION_MARK_PLACEHOLDER not in url: + # + # This is safe because people will _almost never_ use the below + # substring in a URL. 
If they do, then they're asking for trouble, + # and this special handling will simply not happen for them. + # + placeholder = QUESTION_MARK_PLACEHOLDER + url = url.replace('?', placeholder) + sr = urllib.parse.urlsplit(url, allow_fragments=False) + + if placeholder is None: + return sr + + path = sr.path.replace(placeholder, '?') + return urllib.parse.SplitResult(sr.scheme, sr.netloc, path, '', '') + + +class TextIOWrapper(io.TextIOWrapper): + def __exit__(self, exc_type, exc_val, exc_tb): + """Call close on underlying buffer only when there was no exception. + + Without this patch, TextIOWrapper would call self.buffer.close() during + exception handling, which is unwanted for e.g. s3 and azure. They only call + self.close() when there was no exception (self.terminate() otherwise) to avoid + committing unfinished/failed uploads. + """ + if exc_type is None: + self.close() + + +class FileLikeProxy(wrapt.ObjectProxy): + __inner = ... # initialized before wrapt disallows __setattr__ on certain objects + + def __init__(self, outer, inner): + super().__init__(outer) + self.__inner = inner + + def __exit__(self, *args, **kwargs): + """Exit inner after exiting outer.""" + try: + return super().__exit__(*args, **kwargs) + finally: + self.__inner.__exit__(*args, **kwargs) + + def __next__(self): + return self.__wrapped__.__next__() + + def close(self): + try: + return self.__wrapped__.close() + finally: + if self.__inner != self.__wrapped__: # Don't close again if inner and wrapped are the same + self.__inner.close() diff --git a/.venv/lib/python3.11/site-packages/smart_open/version.py b/.venv/lib/python3.11/site-packages/smart_open/version.py new file mode 100644 index 0000000000000000000000000000000000000000..59f7aab65871522fc539dfc77e715881b31af100 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/version.py @@ -0,0 +1,5 @@ +__version__ = '7.1.0' + + +if __name__ == '__main__': + print(__version__) diff --git a/.venv/lib/python3.11/site-packages/smart_open/webhdfs.py b/.venv/lib/python3.11/site-packages/smart_open/webhdfs.py new file mode 100644 index 0000000000000000000000000000000000000000..75804d7c0d819b09cc74bbee0528ffda2294f1be --- /dev/null +++ b/.venv/lib/python3.11/site-packages/smart_open/webhdfs.py @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Radim Rehurek +# +# This code is distributed under the terms and conditions +# from the MIT License (MIT). +# + +"""Implements reading and writing to/from WebHDFS. + +The main entry point is the :func:`~smart_open.webhdfs.open` function. + +""" + +import io +import logging +import urllib.parse + +try: + import requests +except ImportError: + MISSING_DEPS = True + +from smart_open import utils, constants + +import http.client as httplib + +logger = logging.getLogger(__name__) + +SCHEME = 'webhdfs' + +URI_EXAMPLES = ( + 'webhdfs://host:port/path/file', +) + +MIN_PART_SIZE = 50 * 1024**2 # minimum part size for HDFS multipart uploads + + +def parse_uri(uri_as_str): + return dict(scheme=SCHEME, uri=uri_as_str) + + +def open_uri(uri, mode, transport_params): + kwargs = utils.check_kwargs(open, transport_params) + return open(uri, mode, **kwargs) + + +def open(http_uri, mode, min_part_size=MIN_PART_SIZE): + """ + Parameters + ---------- + http_uri: str + webhdfs url converted to http REST url + min_part_size: int, optional + For writing only. 
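+
+    Example (illustrative, hypothetical host and path)::
+
+        fin = open('webhdfs://namenode:50070/user/alice/data.bin', 'rb')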
+ + """ + if http_uri.startswith(SCHEME): + http_uri = _convert_to_http_uri(http_uri) + + if mode == constants.READ_BINARY: + fobj = BufferedInputBase(http_uri) + elif mode == constants.WRITE_BINARY: + fobj = BufferedOutputBase(http_uri, min_part_size=min_part_size) + else: + raise NotImplementedError("webhdfs support for mode %r not implemented" % mode) + + fobj.name = http_uri.split('/')[-1] + return fobj + + +def _convert_to_http_uri(webhdfs_url): + """ + Convert webhdfs uri to http url and return it as text + + Parameters + ---------- + webhdfs_url: str + A URL starting with webhdfs:// + """ + split_uri = urllib.parse.urlsplit(webhdfs_url) + netloc = split_uri.hostname + if split_uri.port: + netloc += ":{}".format(split_uri.port) + query = split_uri.query + if split_uri.username: + query += ( + ("&" if query else "") + "user.name=" + urllib.parse.quote(split_uri.username) + ) + + return urllib.parse.urlunsplit( + ("http", netloc, "/webhdfs/v1" + split_uri.path, query, "") + ) + + +# +# For old unit tests. +# +def convert_to_http_uri(parsed_uri): + return _convert_to_http_uri(parsed_uri.uri) + + +class BufferedInputBase(io.BufferedIOBase): + _buf = None # so `closed` property works in case __init__ fails and __del__ is called + + def __init__(self, uri): + self._uri = uri + + payload = {"op": "OPEN", "offset": 0} + self._response = requests.get(self._uri, params=payload, stream=True) + if self._response.status_code != httplib.OK: + raise WebHdfsException.from_response(self._response) + self._buf = b'' + + # + # Override some methods from io.IOBase. + # + def close(self): + """Flush and close this stream.""" + logger.debug("close: called") + if not self.closed: + self._buf = None + + @property + def closed(self): + return self._buf is None + + def readable(self): + """Return True if the stream can be read from.""" + return True + + def seekable(self): + """If False, seek(), tell() and truncate() will raise IOError. + + We offer only seek support, and no truncate support.""" + return False + + # + # io.BufferedIOBase methods. + # + def detach(self): + """Unsupported.""" + raise io.UnsupportedOperation + + def read(self, size=None): + if size is None: + self._buf, retval = b'', self._buf + self._response.raw.read() + return retval + elif size < len(self._buf): + self._buf, retval = self._buf[size:], self._buf[:size] + return retval + + try: + buffers = [self._buf] + total_read = 0 + while total_read < size: + raw_data = self._response.raw.read(io.DEFAULT_BUFFER_SIZE) + # some times read returns 0 length data without throwing a + # StopIteration exception. We break here if this happens. + if len(raw_data) == 0: + break + + total_read += len(raw_data) + buffers.append(raw_data) + except StopIteration: + pass + + self._buf = b"".join(buffers) + self._buf, retval = self._buf[size:], self._buf[:size] + return retval + + def read1(self, size=-1): + """This is the same as read().""" + return self.read(size=size) + + def readinto(self, b): + """Read up to len(b) bytes into b, and return the number of bytes + read.""" + data = self.read(len(b)) + if not data: + return 0 + b[:len(data)] = data + return len(data) + + def readline(self): + self._buf, retval = b'', self._buf + self._response.raw.readline() + return retval + + +class BufferedOutputBase(io.BufferedIOBase): + def __init__(self, uri, min_part_size=MIN_PART_SIZE): + """ + Parameters + ---------- + min_part_size: int, optional + For writing only. 
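+            Buffered writes are flushed to HDFS (via an APPEND request)
+            once at least this many bytes have accumulated.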
+ + """ + self._uri = uri + self._closed = False + self.min_part_size = min_part_size + # creating empty file first + payload = {"op": "CREATE", "overwrite": True} + init_response = requests.put(self._uri, params=payload, allow_redirects=False) + if not init_response.status_code == httplib.TEMPORARY_REDIRECT: + raise WebHdfsException.from_response(init_response) + uri = init_response.headers['location'] + response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'}) + if not response.status_code == httplib.CREATED: + raise WebHdfsException.from_response(response) + self.lines = [] + self.parts = 0 + self.chunk_bytes = 0 + self.total_size = 0 + + # + # This member is part of the io.BufferedIOBase interface. + # + self.raw = None + + # + # Override some methods from io.IOBase. + # + def writable(self): + """Return True if the stream supports writing.""" + return True + + # + # io.BufferedIOBase methods. + # + def detach(self): + raise io.UnsupportedOperation("detach() not supported") + + def _upload(self, data): + payload = {"op": "APPEND"} + init_response = requests.post(self._uri, params=payload, allow_redirects=False) + if not init_response.status_code == httplib.TEMPORARY_REDIRECT: + raise WebHdfsException.from_response(init_response) + uri = init_response.headers['location'] + response = requests.post(uri, data=data, + headers={'content-type': 'application/octet-stream'}) + if not response.status_code == httplib.OK: + raise WebHdfsException.from_response(response) + + def write(self, b): + """ + Write the given bytes (binary string) into the WebHDFS file from constructor. + + """ + if self._closed: + raise ValueError("I/O operation on closed file") + + if not isinstance(b, bytes): + raise TypeError("input must be a binary string") + + self.lines.append(b) + self.chunk_bytes += len(b) + self.total_size += len(b) + + if self.chunk_bytes >= self.min_part_size: + buff = b"".join(self.lines) + logger.info( + "uploading part #%i, %i bytes (total %.3fGB)", + self.parts, len(buff), self.total_size / 1024.0 ** 3 + ) + self._upload(buff) + logger.debug("upload of part #%i finished", self.parts) + self.parts += 1 + self.lines, self.chunk_bytes = [], 0 + + def close(self): + buff = b"".join(self.lines) + if buff: + logger.info( + "uploading last part #%i, %i bytes (total %.3fGB)", + self.parts, len(buff), self.total_size / 1024.0 ** 3 + ) + self._upload(buff) + logger.debug("upload of last part #%i finished", self.parts) + self._closed = True + + @property + def closed(self): + return self._closed + + +class WebHdfsException(Exception): + def __init__(self, msg="", status_code=None): + self.msg = msg + self.status_code = status_code + super(WebHdfsException, self).__init__(repr(self)) + + def __repr__(self): + return "{}(status_code={}, msg={!r})".format( + self.__class__.__name__, self.status_code, self.msg + ) + + @classmethod + def from_response(cls, response): + return cls(msg=response.text, status_code=response.status_code) diff --git a/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/LICENSE b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/LICENSE new file mode 100644 index 
0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/METADATA b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..a4a7bf6c6216a26e456707e03155fe8772c1ed25 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/METADATA @@ -0,0 +1,2279 @@ +Metadata-Version: 2.1 +Name: yarl +Version: 1.18.3 +Summary: Yet another URL library +Home-page: https://github.com/aio-libs/yarl +Author: Andrew Svetlov +Author-email: andrew.svetlov@gmail.com +Maintainer: aiohttp team +Maintainer-email: team@aiohttp.org +License: Apache-2.0 +Project-URL: Chat: Matrix, https://matrix.to/#/#aio-libs:matrix.org +Project-URL: Chat: Matrix Space, https://matrix.to/#/#aio-libs-space:matrix.org +Project-URL: CI: GitHub Workflows, https://github.com/aio-libs/yarl/actions?query=branch:master +Project-URL: Code of Conduct, https://github.com/aio-libs/.github/blob/master/CODE_OF_CONDUCT.md +Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/yarl +Project-URL: Docs: Changelog, https://yarl.aio-libs.org/en/latest/changes/ +Project-URL: Docs: RTD, https://yarl.aio-libs.org +Project-URL: GitHub: issues, https://github.com/aio-libs/yarl/issues +Project-URL: GitHub: repo, https://github.com/aio-libs/yarl +Keywords: cython,cext,yarl +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: NOTICE +Requires-Dist: idna>=2.0 +Requires-Dist: multidict>=4.0 +Requires-Dist: propcache>=0.2.0 + +yarl +==== + +The module provides a handy URL class for URL parsing and changing. + +.. image:: https://github.com/aio-libs/yarl/workflows/CI/badge.svg :target: https://github.com/aio-libs/yarl/actions?query=workflow%3ACI :align: right + +.. image:: https://codecov.io/gh/aio-libs/yarl/branch/master/graph/badge.svg :target: https://codecov.io/gh/aio-libs/yarl + +.. image:: https://img.shields.io/endpoint?url=https://codspeed.io/badge.json :target: https://codspeed.io/aio-libs/yarl + +.. image:: https://badge.fury.io/py/yarl.svg :target: https://badge.fury.io/py/yarl + +.. image:: https://readthedocs.org/projects/yarl/badge/?version=latest :target: https://yarl.aio-libs.org + +.. image:: https://img.shields.io/pypi/pyversions/yarl.svg :target: https://pypi.python.org/pypi/yarl +
.. image:: https://img.shields.io/matrix/aio-libs:matrix.org?label=Discuss%20on%20Matrix%20at%20%23aio-libs%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat :target: https://matrix.to/#/%23aio-libs:matrix.org :alt: Matrix Room — #aio-libs:matrix.org + +.. image:: https://img.shields.io/matrix/aio-libs-space:matrix.org?label=Discuss%20on%20Matrix%20at%20%23aio-libs-space%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat :target: https://matrix.to/#/%23aio-libs-space:matrix.org :alt: Matrix Space — #aio-libs-space:matrix.org + + +Introduction +------------ + +A URL is constructed from a ``str``: + +.. code-block:: pycon + + >>> from yarl import URL + >>> url = URL('https://www.python.org/~guido?arg=1#frag') + >>> url + URL('https://www.python.org/~guido?arg=1#frag') + +All URL parts: *scheme*, *user*, *password*, *host*, *port*, *path*, *query* and *fragment* are accessible by properties: + +.. code-block:: pycon + + >>> url.scheme + 'https' + >>> url.host + 'www.python.org' + >>> url.path + '/~guido' + >>> url.query_string + 'arg=1' + >>> url.query + <MultiDictProxy('arg': '1')> + >>> url.fragment + 'frag' + +All URL manipulations produce a new URL object: + +.. code-block:: pycon + + >>> url = URL('https://www.python.org') + >>> url / 'foo' / 'bar' + URL('https://www.python.org/foo/bar') + >>> url / 'foo' % {'bar': 'baz'} + URL('https://www.python.org/foo?bar=baz') + +Strings passed to the constructor and modification methods are automatically encoded, giving the canonical representation as a result: + +.. code-block:: pycon + + >>> url = URL('https://www.python.org/шлях') + >>> url + URL('https://www.python.org/%D1%88%D0%BB%D1%8F%D1%85') + +Regular properties are *percent-decoded*; use the ``raw_`` versions to get the *encoded* strings: + +.. code-block:: pycon + + >>> url.path + '/шлях' + + >>> url.raw_path + '/%D1%88%D0%BB%D1%8F%D1%85' + +A human-readable representation of the URL is available as ``.human_repr()``: + +.. code-block:: pycon + + >>> url.human_repr() + 'https://www.python.org/шлях' + +For full documentation please read https://yarl.aio-libs.org. + + +Installation +------------ + +:: + + $ pip install yarl + +The library is Python 3 only! + +PyPI contains binary wheels for Linux, Windows and MacOS. If you want to install ``yarl`` on another operating system where wheels are not provided, the tarball will be used to compile the library from the source code. It requires a C compiler and Python headers installed. + +To skip the compilation you must explicitly opt in by using a PEP 517 configuration setting ``pure-python``, or by setting the ``YARL_NO_EXTENSIONS`` environment variable to a non-empty value, e.g.: + +.. code-block:: console + + $ pip install yarl --config-settings=pure-python=true + +Please note that the pure-Python (uncompiled) version is much slower. However, PyPy always uses a pure-Python implementation, and, as such, it is unaffected by this variable. + +Dependencies +------------ + +``yarl`` requires the multidict_ and propcache_ libraries. + + +API documentation +------------------ + +The documentation is located at https://yarl.aio-libs.org. + + +Why isn't boolean supported by the URL query API? +------------------------------------------------- + +There is no standard for the string representation of boolean values. + +Some systems prefer ``true``/``false``, others like ``yes``/``no``, ``on``/``off``, ``Y``/``N``, ``1``/``0``, etc.
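+ +For example, an application that settles on ``true``/``false`` can do the conversion itself before building the query. A minimal sketch (hypothetical URL and parameter name): + +.. code-block:: pycon + + >>> from yarl import URL + >>> flag = True + >>> URL('https://example.com').with_query({'debug': str(flag).lower()}) + URL('https://example.com/?debug=true')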
+ +``yarl`` cannot make an unambiguous decision on how to serialize ``bool`` values because it is specific to how the end-user's application is built and would be different for different apps. The library doesn't accept booleans in the API; a user should convert bools into strings using their own preferred translation protocol. + + +Comparison with other URL libraries +------------------------------------ + +* furl (https://pypi.python.org/pypi/furl) + + The library has rich functionality but the ``furl`` object is mutable. + + I'm afraid to pass this object into foreign code: who knows whether the code will modify my URL in a terrible way while I just want to send a URL with handy helpers for accessing URL properties. + + ``furl`` has other non-obvious tricky things but the main objection is mutability. + +* URLObject (https://pypi.python.org/pypi/URLObject) + + URLObject is immutable, and that's pretty good. + + Every URL change generates a new URL object. + + But the library doesn't do any decode/encode transformations, leaving the end user to cope with these gory details. + + +Source code +----------- + +The project is hosted on GitHub_. + +Please file an issue on the `bug tracker <https://github.com/aio-libs/yarl/issues>`_ if you have found a bug or have a suggestion for improving the library. + +Discussion list +--------------- + +*aio-libs* google group: https://groups.google.com/forum/#!forum/aio-libs + +Feel free to post your questions and ideas here. + + +Authors and License +------------------- + +The ``yarl`` package is written by Andrew Svetlov. + +It's *Apache 2* licensed and freely available. + + +.. _GitHub: https://github.com/aio-libs/yarl + +.. _multidict: https://github.com/aio-libs/multidict + +.. _propcache: https://github.com/aio-libs/propcache + +========= +Changelog +========= + +.. You should *NOT* be adding new change log entries to this file, this file is managed by towncrier. You *may* edit previous change logs to fix problems like typo corrections or such. To add a new change log entry, please see https://pip.pypa.io/en/latest/development/#adding-a-news-entry we named the news folder "changes". + + WARNING: Don't drop the next directive! + +.. towncrier release notes start + +1.18.3 +====== + +*(2024-12-01)* + + +Bug fixes +--------- + +- Fixed uppercase ASCII hosts being rejected by ``URL.build()`` and ``yarl.URL.with_host()`` -- by `@bdraco `__ (see the sketch after the 1.18.1 entry below). + + *Related issues and pull requests on GitHub:* + `#954 `__, `#1442 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of multiple path properties on cache miss -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1443 `__. + + +---- + + +1.18.2 +====== + +*(2024-11-29)* + + +No significant changes. + + +---- + + +1.18.1 +====== + +*(2024-11-29)* + + +Miscellaneous internal changes +------------------------------ + +- Improved cache performance when ``~yarl.URL`` objects are constructed from ``yarl.URL.build()`` with ``encoded=True`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1432 `__. + +- Improved cache performance for operations that produce a new ``~yarl.URL`` object -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1434 `__, `#1436 `__.
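+ +A sketch of the 1.18.3 host fix above, with a hypothetical host name: uppercase input is accepted and the host is normalized to lowercase (lowercasing on build dates back to 1.5.0): + +.. code-block:: pycon + + >>> from yarl import URL + >>> URL.build(scheme='https', host='EXAMPLE.COM', path='/') + URL('https://example.com/')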
+ + +---- + + +1.18.0 +====== + +*(2024-11-21)* + + +Features +-------- + +- Added ``keep_query`` and ``keep_fragment`` flags in the ``yarl.URL.with_path()``, ``yarl.URL.with_name()`` and ``yarl.URL.with_suffix()`` methods, allowing users to optionally retain the query string and fragment in the resulting URL when replacing the path -- by `@paul-nameless `__. + + *Related issues and pull requests on GitHub:* + `#111 `__, `#1421 `__. + + +Contributor-facing changes +-------------------------- + +- Started running downstream ``aiohttp`` tests in CI -- by `@Cycloctane `__. + + *Related issues and pull requests on GitHub:* + `#1415 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of converting ``~yarl.URL`` to a string -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1422 `__. + + +---- + + +1.17.2 +====== + +*(2024-11-17)* + + +Bug fixes +--------- + +- Stopped implicitly allowing the use of Cython pre-release versions when building the distribution package -- by `@ajsanchezsanz `__ and `@markgreene74 `__. + + *Related issues and pull requests on GitHub:* + `#1411 `__, `#1412 `__. + +- Fixed a bug causing ``~yarl.URL.port`` to return the default port when the given port was zero -- by `@gmacon `__. + + *Related issues and pull requests on GitHub:* + `#1413 `__. + + +Features +-------- + +- Made error messages include details of the incorrect type when ``port`` is not an int in ``yarl.URL.build()`` -- by `@Cycloctane `__. + + *Related issues and pull requests on GitHub:* + `#1414 `__. + + +Packaging updates and notes for downstreams +------------------------------------------- + +- Stopped implicitly allowing the use of Cython pre-release versions when building the distribution package -- by `@ajsanchezsanz `__ and `@markgreene74 `__. + + *Related issues and pull requests on GitHub:* + `#1411 `__, `#1412 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of the ``yarl.URL.joinpath()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1418 `__. + + +---- + + +1.17.1 +====== + +*(2024-10-30)* + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of many ``~yarl.URL`` methods -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1396 `__, `#1397 `__, `#1398 `__. + +- Improved performance of passing a ``dict`` or ``str`` to ``yarl.URL.extend_query()`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1401 `__. + + +---- + + +1.17.0 +====== + +*(2024-10-28)* + + +Features +-------- + +- Added ``~yarl.URL.host_port_subcomponent`` which returns the RFC 3986 section 3.2.2 host and section 3.2.3 port subcomponent -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1375 `__. + + +---- + + +1.16.0 +====== + +*(2024-10-21)* + + +Bug fixes +--------- + +- Fixed blocking I/O to load Python code when creating a new ``~yarl.URL`` with non-ASCII characters in the network location part -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1342 `__. + + +Removals and backward incompatible breaking changes +--------------------------------------------------- + +- Migrated to using a single cache for encoding hosts -- by `@bdraco `__. + + Passing ``ip_address_size`` and ``host_validate_size`` to ``yarl.cache_configure()`` is deprecated in favor of the new ``encode_host_size`` parameter and will be removed in a future release.
For backwards compatibility, the old parameters affect the ``encode_host`` cache size. + + *Related issues and pull requests on GitHub:* + `#1348 `__, `#1357 `__, `#1363 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of constructing ``~yarl.URL`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1336 `__. + +- Improved performance of calling ``yarl.URL.build()`` and constructing unencoded ``~yarl.URL`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1345 `__. + +- Reworked the internal encoding cache to improve performance on cache hit -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1369 `__. + + +---- + + +1.15.5 +====== + +*(2024-10-18)* + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of the ``yarl.URL.joinpath()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1304 `__. + +- Improved performance of the ``yarl.URL.extend_query()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1305 `__. + +- Improved performance of the ``yarl.URL.origin()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1306 `__. + +- Improved performance of the ``yarl.URL.with_path()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1307 `__. + +- Improved performance of the ``yarl.URL.with_query()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1308 `__, `#1328 `__. + +- Improved performance of the ``yarl.URL.update_query()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1309 `__, `#1327 `__. + +- Improved performance of the ``yarl.URL.join()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1313 `__. + +- Improved performance of ``~yarl.URL`` equality checks -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1315 `__. + +- Improved performance of ``~yarl.URL`` methods that modify the network location -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1316 `__. + +- Improved performance of the ``yarl.URL.with_fragment()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1317 `__. + +- Improved performance of calculating the hash of ``~yarl.URL`` objects -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1318 `__. + +- Improved performance of the ``yarl.URL.relative()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1319 `__. + +- Improved performance of the ``yarl.URL.with_name()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1320 `__. + +- Improved performance of ``~yarl.URL.parent`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1321 `__. + +- Improved performance of the ``yarl.URL.with_scheme()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1322 `__. + + +---- + + +1.15.4 +====== + +*(2024-10-16)* + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of the quoter when all characters are safe -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1288 `__. + +- Improved performance of unquoting strings -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1292 `__, `#1293 `__. 
+ +- Improved performance of calling ``yarl.URL.build()`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1297 `__. + + +---- + + +1.15.3 +====== + +*(2024-10-15)* + + +Bug fixes +--------- + +- Fixed ``yarl.URL.build()`` failing to validate paths must start with a ``/`` when passing ``authority`` -- by `@bdraco `__. + + The validation only worked correctly when passing ``host``. + + *Related issues and pull requests on GitHub:* + `#1265 `__. + + +Removals and backward incompatible breaking changes +--------------------------------------------------- + +- Removed support for Python 3.8 as it has reached end of life -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1203 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of constructing ``~yarl.URL`` when the net location is only the host -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1271 `__. + + +---- + + +1.15.2 +====== + +*(2024-10-13)* + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of converting ``~yarl.URL`` to a string -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1234 `__. + +- Improved performance of ``yarl.URL.joinpath()`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1248 `__, `#1250 `__. + +- Improved performance of constructing query strings from ``~multidict.MultiDict`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1256 `__. + +- Improved performance of constructing query strings with ``int`` values -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1259 `__. + + +---- + + +1.15.1 +====== + +*(2024-10-12)* + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of calling ``yarl.URL.build()`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1222 `__. + +- Improved performance of all ``~yarl.URL`` methods that create new ``~yarl.URL`` objects -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1226 `__. + +- Improved performance of ``~yarl.URL`` methods that modify the network location -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1229 `__. + + +---- + + +1.15.0 +====== + +*(2024-10-11)* + + +Bug fixes +--------- + +- Fixed validation with ``yarl.URL.with_scheme()`` when passed scheme is not lowercase -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1189 `__. + + +Features +-------- + +- Started building ``armv7l`` wheels -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1204 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of constructing unencoded ``~yarl.URL`` objects -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1188 `__. + +- Added a cache for parsing hosts to reduce overhead of encoding ``~yarl.URL`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1190 `__. + +- Improved performance of constructing query strings from ``~collections.abc.Mapping`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1193 `__. + +- Improved performance of converting ``~yarl.URL`` objects to strings -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1198 `__. 
+ + +---- + + +1.14.0 +====== + +*(2024-10-08)* + + +Packaging updates and notes for downstreams +------------------------------------------- + +- Switched to using the ``propcache`` package for property caching -- by `@bdraco `__. + + The ``propcache`` package is derived from the property caching code in ``yarl`` and has been broken out to avoid maintaining it for multiple projects. + + *Related issues and pull requests on GitHub:* + `#1169 `__. + + +Contributor-facing changes +-------------------------- + +- Started testing with Hypothesis -- by `@webknjaz `__ and `@bdraco `__. + + Special thanks to `@Zac-HD `__ for helping us get started with this framework. + + *Related issues and pull requests on GitHub:* + `#860 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of ``yarl.URL.is_default_port()`` when no explicit port is set -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1168 `__. + +- Improved performance of converting ``~yarl.URL`` to a string when no explicit port is set -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1170 `__. + +- Improved performance of the ``yarl.URL.origin()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1175 `__. + +- Improved performance of encoding hosts -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1176 `__. + + +---- + + +1.13.1 +====== + +*(2024-09-27)* + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of calling ``yarl.URL.build()`` with ``authority`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1163 `__. + + +---- + + +1.13.0 +====== + +*(2024-09-26)* + + +Bug fixes +--------- + +- Started rejecting ASCII hostnames with invalid characters. For host strings that look like authority strings, the exception message includes advice on what to do instead -- by `@mjpieters `__. + + *Related issues and pull requests on GitHub:* + `#880 `__, `#954 `__. + +- Fixed IPv6 addresses missing brackets when the ``~yarl.URL`` was converted to a string -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1157 `__, `#1158 `__. + + +Features +-------- + +- Added ``~yarl.URL.host_subcomponent`` which returns the RFC 3986 section 3.2.2 host subcomponent (see the sketch after the 1.12.0 entry below) -- by `@bdraco `__. + + The only current practical difference between ``~yarl.URL.raw_host`` and ``~yarl.URL.host_subcomponent`` is that IPv6 addresses are returned bracketed. + + *Related issues and pull requests on GitHub:* + `#1159 `__. + + +---- + + +1.12.1 +====== + +*(2024-09-23)* + + +No significant changes. + + +---- + + +1.12.0 +====== + +*(2024-09-23)* + + +Features +-------- + +- Added ``~yarl.URL.path_safe`` to be able to fetch the path without ``%2F`` and ``%25`` decoded (see the sketch at the end of this entry) -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1150 `__. + + +Removals and backward incompatible breaking changes +--------------------------------------------------- + +- Restore decoding ``%2F`` (``/``) in ``URL.path`` -- by `@bdraco `__. + + This change restored the behavior before `#1057 `__. + + *Related issues and pull requests on GitHub:* + `#1151 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of processing paths -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1143 `__.
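+ +A quick sketch of the two additions above (1.13.0's ``host_subcomponent`` and 1.12.0's ``path_safe``), using hypothetical URLs; the behaviour follows the entry descriptions: + +.. code-block:: pycon + + >>> from yarl import URL + >>> URL('http://[::1]:8080/').host_subcomponent  # IPv6 comes back bracketed + '[::1]' + >>> url = URL('https://example.com/id%2Fpart')  # hypothetical URL + >>> url.path  # %2F is decoded (1.12.0 restore) + '/id/part' + >>> url.path_safe  # %2F and %25 stay encoded + '/id%2Fpart'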
+ + +---- + + +1.11.1 +====== + +*(2024-09-09)* + + +Bug fixes +--------- + +- Allowed scheme replacement for relative URLs if the scheme does not require a host -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#280 `__, `#1138 `__. + +- Allowed empty host for URL schemes other than the special schemes listed in the WHATWG URL spec -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1136 `__. + + +Features +-------- + +- Loosened restriction on integers as query string values to allow classes that implement ``__int__`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1139 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of normalizing paths -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1137 `__. + + +---- + + +1.11.0 +====== + +*(2024-09-08)* + + +Features +-------- + +- Added the ``URL.extend_query()`` method, which can be used to extend parameters without replacing same-named keys (see the sketch at the end of the 1.10.0 entry below) -- by `@bdraco `__. + + This method was primarily added to replace the inefficient hand-rolled method currently used in ``aiohttp``. + + *Related issues and pull requests on GitHub:* + `#1128 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of the Cython ``cached_property`` implementation -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1122 `__. + +- Simplified computing ports by removing unnecessary code -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1123 `__. + +- Improved performance of encoding non-IPv6 hosts -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1125 `__. + +- Improved performance of ``URL.build()`` when the path, query string, or fragment is an empty string -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1126 `__. + +- Improved performance of the ``URL.update_query()`` method -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1130 `__. + +- Improved performance of processing query string changes when arguments are ``str`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1131 `__. + + +---- + + +1.10.0 +====== + +*(2024-09-06)* + + +Bug fixes +--------- + +- Fixed joining a path when the existing path was empty -- by `@bdraco `__. + + A regression in ``URL.join()`` was introduced in `#1082 `__. + + *Related issues and pull requests on GitHub:* + `#1118 `__. + + +Features +-------- + +- Added the ``URL.without_query_params()`` method, to drop some parameters from the query string (also shown in the sketch below) -- by `@hongquan `__. + + *Related issues and pull requests on GitHub:* + `#774 `__, `#898 `__, `#1010 `__. + +- The previously protected types ``_SimpleQuery``, ``_QueryVariable``, and ``_Query`` are now available for use externally as ``SimpleQuery``, ``QueryVariable``, and ``Query`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1050 `__, `#1113 `__. + + +Contributor-facing changes +-------------------------- + +- Replaced all ``~typing.Optional`` with ``~typing.Union`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1095 `__. + + +Miscellaneous internal changes +------------------------------ + +- Significantly improved performance of parsing the network location -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1112 `__. + +- Added internal types to the cache to prevent future refactoring errors -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1117 `__.
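+ +The two query-editing additions above in action: ``extend_query()`` appends without replacing same-named keys, while ``without_query_params()`` drops the named keys. A minimal sketch with hypothetical URLs: + +.. code-block:: pycon + + >>> from yarl import URL + >>> URL('https://example.com/?a=1').extend_query({'a': 2}) + URL('https://example.com/?a=1&a=2') + >>> URL('https://example.com/?a=1&b=2').without_query_params('a') + URL('https://example.com/?b=2')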
+ + +---- + + +1.9.11 +====== + +*(2024-09-04)* + + +Bug fixes +--------- + +- Fixed a ``TypeError`` with ``MultiDictProxy`` and Python 3.8 -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1084 `__, `#1105 `__, `#1107 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of encoding hosts -- by `@bdraco `__. + + Previously, the library would unconditionally try to parse a host as an IP address. The library now avoids trying to parse a host as an IP address if the string is not in one of the formats described in RFC 3986 section 3.2.2. + + *Related issues and pull requests on GitHub:* + `#1104 `__. + + +---- + + +1.9.10 +====== + +*(2024-09-04)* + + +Bug fixes +--------- + +- ``URL.join()`` has been changed to match RFC 3986 and align with the ``/`` operation and ``URL.joinpath()`` when joining URLs with empty segments. Previously ``urllib.parse.urljoin`` was used, which has known issues with empty segments (`python/cpython#84774 `_). + + Due to the semantics of ``URL.join()``, joining a URL with a scheme requires making it relative, prefixing with ``./``. + + .. code-block:: pycon + + >>> URL("https://web.archive.org/web/").join(URL("./https://github.com/aio-libs/yarl")) + URL('https://web.archive.org/web/https://github.com/aio-libs/yarl') + + + Empty segments are honored in the base as well as the joined part. + + .. code-block:: pycon + + >>> URL("https://web.archive.org/web/https://").join(URL("github.com/aio-libs/yarl")) + URL('https://web.archive.org/web/https://github.com/aio-libs/yarl') + + + + -- by `@commonism `__ + + This change initially appeared in 1.9.5 but was reverted in 1.9.6 to resolve a problem with query string handling. + + *Related issues and pull requests on GitHub:* + `#1039 `__, `#1082 `__. + + +Features +-------- + +- Added ``~yarl.URL.absolute`` which is now preferred over ``URL.is_absolute()`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1100 `__. + + +---- + + +1.9.9 +===== + +*(2024-09-04)* + + +Bug fixes +--------- + +- Added missing type on ``~yarl.URL.port`` -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1097 `__. + + +---- + + +1.9.8 +===== + +*(2024-09-03)* + + +Features +-------- + +- Covered the ``~yarl.URL`` object with types -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1084 `__. + +- Cache parsing of IP addresses when encoding hosts -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1086 `__. + + +Contributor-facing changes +-------------------------- + +- Covered the ``~yarl.URL`` object with types -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1084 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of handling ports -- by `@bdraco `__. + + *Related issues and pull requests on GitHub:* + `#1081 `__. + + +---- + + +1.9.7 +===== + +*(2024-09-01)* + + +Removals and backward incompatible breaking changes +--------------------------------------------------- + +- Removed support for RFC 3986 section 3.2.3 port normalization when the scheme is not one of ``http``, ``https``, ``wss``, or ``ws`` -- by `@bdraco `__.
+ + Support for port normalization was recently added in `#1033 `__ and contained code that would do blocking I/O if the scheme was not one of the four listed above. The code has been removed because this library is intended to be safe for usage with ``asyncio``. + + *Related issues and pull requests on GitHub:* + `#1076 `__. + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of property caching -- by `@bdraco `__. + + The ``reify`` implementation from ``aiohttp`` was adapted to replace the internal ``cached_property`` implementation. + + *Related issues and pull requests on GitHub:* + `#1070 `__. + + +---- + + +1.9.6 +===== + +*(2024-08-30)* + + +Bug fixes +--------- + +- Reverted the RFC 3986 compatible ``URL.join()`` honoring of empty segments which was introduced in `#1039 `__. + + This change introduced a regression handling query string parameters with joined URLs. The change was reverted to maintain compatibility with the previous behavior. + + *Related issues and pull requests on GitHub:* + `#1067 `__. + + +---- + + +1.9.5 +===== + +*(2024-08-30)* + + +Bug fixes +--------- + +- Joining URLs with empty segments has been changed to match RFC 3986. + + Previously empty segments would be removed from the path, breaking use cases such as + + .. code-block:: python + + URL("https://web.archive.org/web/") / "https://github.com/" + + Now the ``/`` operation and ``URL.joinpath()`` keep empty segments, but do not introduce new empty segments, e.g. + + .. code-block:: python + + URL("https://example.org/") / "" + + does not introduce an empty segment. + + -- by `@commonism `__ and `@youtux `__ + + *Related issues and pull requests on GitHub:* + `#1026 `__. + +- The default protocol ports of well-known URI schemes are now taken into account during the normalization of the URL string representation in accordance with RFC 3986 section 3.2.3. + + Specified ports are removed from the ``str`` representation of a ``~yarl.URL`` if the port matches the scheme's default port (see the sketch after the removals list below) -- by `@commonism `__. + + *Related issues and pull requests on GitHub:* + `#1033 `__. + +- ``URL.join()`` has been changed to match RFC 3986 and align with the ``/`` operation and ``URL.joinpath()`` when joining URLs with empty segments. Previously ``urllib.parse.urljoin`` was used, which has known issues with empty segments (`python/cpython#84774 `_). + + Due to the semantics of ``URL.join()``, joining a URL with a scheme requires making it relative, prefixing with ``./``. + + .. code-block:: pycon + + >>> URL("https://web.archive.org/web/").join(URL("./https://github.com/aio-libs/yarl")) + URL('https://web.archive.org/web/https://github.com/aio-libs/yarl') + + + Empty segments are honored in the base as well as the joined part. + + .. code-block:: pycon + + >>> URL("https://web.archive.org/web/https://").join(URL("github.com/aio-libs/yarl")) + URL('https://web.archive.org/web/https://github.com/aio-libs/yarl') + + + + -- by `@commonism `__ + + *Related issues and pull requests on GitHub:* + `#1039 `__. + + +Removals and backward incompatible breaking changes +--------------------------------------------------- + +- Stopped decoding ``%2F`` (``/``) in ``URL.path``, as this could lead to code incorrectly treating it as a path separator -- by `@Dreamsorcerer `__. + + *Related issues and pull requests on GitHub:* + `#1057 `__. + +- Dropped support for Python 3.7 -- by `@Dreamsorcerer `__. + + *Related issues and pull requests on GitHub:* + `#1016 `__.
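+ +A quick check of the default-port normalization described in this entry, with a hypothetical host: explicit default ports disappear from the string form, non-default ports are kept: + +.. code-block:: pycon + + >>> from yarl import URL + >>> str(URL('https://example.com:443/path')) + 'https://example.com/path' + >>> str(URL('https://example.com:8443/path')) + 'https://example.com:8443/path'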
+ + +Improved documentation +---------------------- + +- On the ``Contributing docs`` page, a link to the ``Towncrier philosophy`` has been fixed. + + *Related issues and pull requests on GitHub:* + `#981 `__. + +- The pre-existing ``/`` magic method has been documented in the API reference -- by `@commonism `__. + + *Related issues and pull requests on GitHub:* + `#1026 `__. + + +Packaging updates and notes for downstreams +------------------------------------------- + +- A flaw in the logic for copying the project directory into a temporary folder, which led to infinite recursion when ``TMPDIR`` was set to a project subdirectory path, has been fixed. This was happening in Fedora and its downstreams due to the use of `pyproject-rpm-macros `__. It was only reproducible with ``pip wheel`` and was not affecting ``pyproject-build`` users. + + -- by `@hroncok `__ and `@webknjaz `__ + + *Related issues and pull requests on GitHub:* + `#992 `__, `#1014 `__. + +- Added support for Python 3.13 and started publishing non-free-threaded wheels. + + *Related issues and pull requests on GitHub:* + `#1054 `__. + + +Contributor-facing changes +-------------------------- + +- The CI/CD setup has been updated to test ``arm64`` wheels under macOS 14, except for Python 3.7, which is unsupported in that environment -- by `@webknjaz `__. + + *Related issues and pull requests on GitHub:* + `#1015 `__. + +- Removed unused type ignores and casts -- by `@hauntsaninja `__. + + *Related issues and pull requests on GitHub:* + `#1031 `__. + + +Miscellaneous internal changes +------------------------------ + +- ``port``, ``scheme``, and ``raw_host`` are now ``cached_property`` -- by `@bdraco `__. + + ``aiohttp`` accesses these properties quite often, which caused ``urllib`` to build the ``_hostinfo`` property every time. ``port``, ``scheme``, and ``raw_host`` are now cached properties, which will improve performance. + + *Related issues and pull requests on GitHub:* + `#1044 `__, `#1058 `__. + + +---- + + +1.9.4 (2023-12-06) +================== + +Bug fixes +--------- + +- Started raising ``TypeError`` when a string value is passed into ``yarl.URL.build()`` as the ``port`` argument -- by `@commonism `__. + + Previously the empty string as port would create malformed URLs when rendered as string representations. (`#883 `__) + + +Packaging updates and notes for downstreams +------------------------------------------- + +- The leading ``--`` has been dropped from the `PEP 517 `__ in-tree build backend config setting names. ``--pure-python`` is now just ``pure-python`` -- by `@webknjaz `__. + + The usage now looks as follows: + + .. code-block:: console + + $ python -m build \ --config-setting=pure-python=true \ --config-setting=with-cython-tracing=true + + (`#963 `__) + + +Contributor-facing changes +-------------------------- + +- A step-by-step ``Release Guide`` has been added, describing how to release *yarl* -- by `@webknjaz `__. + + This is primarily targeting maintainers. (`#960 `__) +- Coverage collection has been implemented for the Cython modules -- by `@webknjaz `__. + + It will also be reported to Codecov from any non-release CI jobs. + + To measure coverage in a development environment, *yarl* can be installed in editable mode: + + .. code-block:: console + + $ python -Im pip install -e . + + An editable install produces the C-files required for the Cython coverage plugin to map the measurements back to the PYX-files.
+ + `#961 `__ + +- It is now possible to request line tracing in Cython builds using the ``with-cython-tracing`` `PEP 517 `__ config setting -- `@webknjaz `__. + + This can be used in CI and development environments to measure coverage on Cython modules, but is not normally useful to the end-users or downstream packagers. + + Here's a usage example: + + .. code-block:: console + + $ python -Im pip install . --config-settings=with-cython-tracing=true + + For editable installs, this setting is on by default. Otherwise, it's off unless requested explicitly. + + The following produces C-files required for the Cython coverage plugin to map the measurements back to the PYX-files: + + .. code-block:: console + + $ python -Im pip install -e . + + Alternatively, the ``YARL_CYTHON_TRACING=1`` environment variable can be set to do the same as the `PEP 517 `__ config setting. + + `#962 `__ + + +1.9.3 (2023-11-20) +================== + +Bug fixes +--------- + +- Stopped dropping trailing slashes in ``yarl.URL.joinpath()`` -- by `@gmacon `__. (`#862 `__, `#866 `__) +- Started accepting string subclasses in ``yarl.URL.__truediv__()`` operations (``URL / segment``) -- by `@mjpieters `__. (`#871 `__, `#884 `__) +- Fixed the human representation of URLs with square brackets in usernames and passwords -- by `@mjpieters `__. (`#876 `__, `#882 `__) +- Updated type hints to include ``URL.missing_port()``, ``URL.__bytes__()`` and the ``encoding`` argument to ``yarl.URL.joinpath()`` -- by `@mjpieters `__. (`#891 `__) + + +Packaging updates and notes for downstreams +------------------------------------------- + +- Integrated Cython 3 to enable building *yarl* under Python 3.12 -- by `@mjpieters `__. (`#829 `__, `#881 `__) +- Declared modern ``setuptools.build_meta`` as the `PEP 517 `__ build backend in ``pyproject.toml`` explicitly -- by `@webknjaz `__. (`#886 `__) +- Converted most of the packaging setup into a declarative ``setup.cfg`` config -- by `@webknjaz `__. (`#890 `__) +- The packaging was migrated from an old-fashioned ``setup.py`` to an in-tree `PEP 517 `__ build backend -- by `@webknjaz `__. + + Whenever the end-users or downstream packagers need to build ``yarl`` from source (a Git checkout or an sdist), they may pass a ``config_settings`` flag ``--pure-python``. If this flag is not set, a C-extension will be built and included into the distribution. + + Here is how this can be done with ``pip``: + + .. code-block:: console + + $ python -m pip install . --config-settings=--pure-python=false + + This will also work with ``-e | --editable``. + + The same can be achieved via ``pypa/build``: + + .. code-block:: console + + $ python -m build --config-setting=--pure-python=false + + Adding ``-w | --wheel`` can force ``pypa/build`` to produce a wheel from source directly, as opposed to building an ``sdist`` and then building from it. (`#893 `__) + + .. attention:: + + v1.9.3 was the only version using the ``--pure-python`` setting name. Later versions dropped the ``--`` prefix, making it just ``pure-python``. + +- Declared Python 3.12 supported officially in the distribution package metadata -- by `@edgarrmondragon `__. (`#942 `__) + + +Contributor-facing changes +-------------------------- + +- A regression test for no-host URLs was added per `#821 `__ and RFC 3986 -- by `@kenballus `__. (`#821 `__, `#822 `__) +- Started testing *yarl* against Python 3.12 in CI -- by `@mjpieters `__.
(`#881 `__) +- All Python 3.12 jobs are now marked as required to pass in CI -- by `@edgarrmondragon `__. (`#942 `__) +- MyST is now integrated in Sphinx -- by `@webknjaz `__. + + This allows the contributors to author new documents in Markdown when they have difficulties with writing raw RST. (`#953 `__) + + +1.9.2 (2023-04-25) +================== + +Bugfixes +-------- + +- Fix regression with ``yarl.URL.__truediv__()`` and absolute URLs with empty paths causing the raw path to lack the leading ``/``. (`#854 `_) + + +1.9.1 (2023-04-21) +================== + +Bugfixes +-------- + +- Marked tests that fail on older Python patch releases (< 3.7.10, < 3.8.8 and < 3.9.2) as expected to fail due to missing a security fix for CVE-2021-23336. (`#850 `_) + + +1.9.0 (2023-04-19) +================== + +This release was never published to PyPI, due to issues with the build process. + +Features +-------- + +- Added ``URL.joinpath(*elements)``, to create a new URL appending multiple path elements. (`#704 `_) +- Made ``URL.__truediv__()`` return ``NotImplemented`` if called with an unsupported type — by `@michaeljpeters `__. (`#832 `_) + + +Bugfixes +-------- + +- Path normalization for absolute URLs no longer raises a ``ValueError`` exception when ``..`` segments would otherwise go beyond the URL path root. (`#536 `_) +- Fixed an issue with ``update_query()`` not getting rid of the query when the argument is ``None``. (`#792 `_) +- Added some input restrictions on the ``with_port()`` function to prevent invalid boolean inputs or out-of-valid-range port inputs; handled incorrect ``0`` port representation. (`#793 `_) +- Made ``yarl.URL.build()`` raise a ``TypeError`` if the ``host`` argument is ``None`` — by `@paulpapacz `__. (`#808 `_) +- Fixed an issue with ``update_query()`` getting rid of the query when the argument is empty but not ``None``. (`#845 `_) + + +Misc +---- + +- `#220 `_ + + +1.8.2 (2022-12-03) +================== + +This is the first release that started shipping wheels for Python 3.11. + + +1.8.1 (2022-08-01) +================== + +Misc +---- + +- `#694 `_, `#699 `_, `#700 `_, `#701 `_, `#702 `_, `#703 `_, `#739 `_ + + +1.8.0 (2022-08-01) +================== + +Features +-------- + +- Added ``URL.raw_suffix``, ``URL.suffix``, ``URL.raw_suffixes``, ``URL.suffixes``, ``URL.with_suffix``. (`#613 `_) + + +Improved Documentation +---------------------- + +- Fixed broken internal references to ``yarl.URL.human_repr()``. (`#665 `_) +- Fixed broken external references to ``multidict:index`` docs. (`#665 `_) + + +Deprecations and Removals +------------------------- + +- Dropped Python 3.6 support. (`#672 `_) + + +Misc +---- + +- `#646 `_, `#699 `_, `#701 `_ + + +1.7.2 (2021-11-01) +================== + +Bugfixes +-------- + +- Changed call in ``with_port()`` to stop re-encoding parts of the URL that were already encoded. (`#623 `_) + + +1.7.1 (2021-10-07) +================== + +Bugfixes +-------- + +- Fix 1.7.0 build error + +1.7.0 (2021-10-06) +================== + +Features +-------- + +- Add ``__bytes__()`` magic method so that ``bytes(url)`` will work and use optimal ASCII encoding. (`#582 `_) +- Started shipping platform-specific arm64 wheels for Apple Silicon. (`#622 `_) +- Started shipping platform-specific wheels with the ``musl`` tag targeting typical Alpine Linux runtimes. (`#622 `_) +- Added support for Python 3.10. (`#622 `_) + + +1.6.3 (2020-11-14) +================== + +Bugfixes +-------- + +- No longer lose characters when decoding incorrect percent-sequences (like ``%e2%82%f8``).
All non-decodable percent-sequences are now preserved. + `#517 `_ +- Provide x86 Windows wheels. + `#535 `_ + + +---- + + +1.6.2 (2020-10-12) +================== + + +Bugfixes +-------- + +- Provide generated ``.c`` files in TarBall distribution. + `#530 `_ + +1.6.1 (2020-10-12) +================== + +Features +-------- + +- Provide wheels for ``aarch64``, ``i686``, ``ppc64le``, ``s390x`` architectures on + Linux as well as ``x86_64``. + `#507 `_ +- Provide wheels for Python 3.9. + `#526 `_ + +Bugfixes +-------- + +- ``human_repr()`` now always produces valid representation equivalent to the original URL (if the original URL is valid). + `#511 `_ +- Fixed requoting a single percent followed by a percent-encoded character in the Cython implementation. + `#514 `_ +- Fix ValueError when decoding ``%`` which is not followed by two hexadecimal digits. + `#516 `_ +- Fix decoding ``%`` followed by a space and hexadecimal digit. + `#520 `_ +- Fix annotation of ``with_query()``/``update_query()`` methods for ``key=[val1, val2]`` case. + `#528 `_ + +Removal +------- + +- Drop Python 3.5 support; Python 3.6 is the minimal supported Python version. + + +---- + + +1.6.0 (2020-09-23) +================== + +Features +-------- + +- Allow for int and float subclasses in query, while still denying bool. + `#492 `_ + + +Bugfixes +-------- + +- Do not requote arguments in ``URL.build()``, ``with_xxx()`` and in ``/`` operator. + `#502 `_ +- Keep IPv6 brackets in ``origin()``. + `#504 `_ + + +---- + + +1.5.1 (2020-08-01) +================== + +Bugfixes +-------- + +- Fix including relocated internal ``yarl._quoting_c`` C-extension into published PyPI dists. + `#485 `_ + + +Misc +---- + +- `#484 `_ + + +---- + + +1.5.0 (2020-07-26) +================== + +Features +-------- + +- Convert host to lowercase on URL building. + `#386 `_ +- Allow using ``mod`` operator (``%``) for updating query string (an alias for ``update_query()`` method). + `#435 `_ +- Allow use of sequences such as ``list`` and ``tuple`` in the values + of a mapping such as ``dict`` to represent that a key has many values:: + + url = URL("http://example.com") + assert url.with_query({"a": [1, 2]}) == URL("http://example.com/?a=1&a=2") + + `#443 `_ +- Support ``URL.build()`` with scheme and path (creates a relative URL). + `#464 `_ +- Cache slow IDNA encode/decode calls. + `#476 `_ +- Add ``@final`` / ``Final`` type hints + `#477 `_ +- Support URL authority/raw_authority properties and authority argument of ``URL.build()`` method. + `#478 `_ +- Hide the library implementation details, make the exposed public list very clean. + `#483 `_ + + +Bugfixes +-------- + +- Fix tests with newer Python (3.7.6, 3.8.1 and 3.9.0+). + `#409 `_ +- Fix a bug where query component, passed in a form of mapping or sequence, is unquoted in unexpected way. + `#426 `_ +- Hide ``Query`` and ``QueryVariable`` type aliases in ``__init__.pyi``, now they are prefixed with underscore. + `#431 `_ +- Keep IPv6 brackets after updating port/user/password. + `#451 `_ + + +---- + + +1.4.2 (2019-12-05) +================== + +Features +-------- + +- Workaround for missing ``str.isascii()`` in Python 3.6 + `#389 `_ + + +---- + + +1.4.1 (2019-11-29) +================== + +* Fix regression, make the library work on Python 3.5 and 3.6 again. 
+ +1.4.0 (2019-11-29) +================== + +* Distinguish an empty password in URL from a password not provided at all (#262) + +* Fixed annotations for optional parameters of ``URL.build`` (#309) + +* Use None as default value of ``user`` parameter of ``URL.build`` (#309) + +* Enforce building C Accelerated modules when installing from source tarball, use + ``YARL_NO_EXTENSIONS`` environment variable for falling back to (slower) Pure Python + implementation (#329) + +* Drop Python 3.5 support + +* Fix quoting of plus in path by pure python version (#339) + +* Don't create a new URL if fragment is unchanged (#292) + +* Included in error message the path that produces starting slash forbidden error (#376) + +* Skip slow IDNA encoding for ASCII-only strings (#387) + + +1.3.0 (2018-12-11) +================== + +* Fix annotations for ``query`` parameter (#207) + +* An incoming query sequence can have int variables (the same as for + Mapping type) (#208) + +* Add ``URL.explicit_port`` property (#218) + +* Give a friendlier error when port can't be converted to int (#168) + +* ``bool(URL())`` now returns ``False`` (#272) + +1.2.6 (2018-06-14) +================== + +* Drop Python 3.4 trove classifier (#205) + +1.2.5 (2018-05-23) +================== + +* Fix annotations for ``build`` (#199) + +1.2.4 (2018-05-08) +================== + +* Fix annotations for ``cached_property`` (#195) + +1.2.3 (2018-05-03) +================== + +* Accept ``str`` subclasses in ``URL`` constructor (#190) + +1.2.2 (2018-05-01) +================== + +* Fix build + +1.2.1 (2018-04-30) +================== + +* Pin minimal required Python to 3.5.3 (#189) + +1.2.0 (2018-04-30) +================== + +* Forbid inheritance, replace ``__init__`` with ``__new__`` (#171) + +* Support PEP-561 (provide type hinting marker) (#182) + +1.1.1 (2018-02-17) +================== + +* Fix performance regression: don't encode empty ``netloc`` (#170) + +1.1.0 (2018-01-21) +================== + +* Make pure Python quoter consistent with Cython version (#162) + +1.0.0 (2018-01-15) +================== + +* Use fast path if quoted string does not need requoting (#154) + +* Speed up quoting/unquoting by ``_Quoter`` and ``_Unquoter`` classes (#155) + +* Drop ``yarl.quote`` and ``yarl.unquote`` public functions (#155) + +* Add custom string writer, reuse static buffer if available (#157) + Code is 50-80 times faster than Pure Python version (was 4-5 times faster) + +* Don't recode IP zone (#144) + +* Support ``encoded=True`` in ``yarl.URL.build()`` (#158) + +* Fix updating query with multiple keys (#160) + +0.18.0 (2018-01-10) +=================== + +* Fallback to IDNA 2003 if domain name is not IDNA 2008 compatible (#152) + +0.17.0 (2017-12-30) +=================== + +* Use IDNA 2008 for domain name processing (#149) + +0.16.0 (2017-12-07) +=================== + +* Fix raising ``TypeError`` by ``url.query_string()`` after + ``url.with_query({})`` (empty mapping) (#141) + +0.15.0 (2017-11-23) +=================== + +* Add ``raw_path_qs`` attribute (#137) + +0.14.2 (2017-11-14) +=================== + +* Restore ``strict`` parameter as no-op in ``quote`` / ``unquote`` + +0.14.1 (2017-11-13) +=================== + +* Restore ``strict`` parameter as no-op for sake of compatibility with + aiohttp 2.2 + +0.14.0 (2017-11-11) +=================== + +* Drop strict mode (#123) + +* Fix ``"ValueError: Unallowed PCT %"`` when there's a ``"%"`` in the URL (#124) + +0.13.0 (2017-10-01) +=================== + +* Document ``encoded`` parameter (#102) + +* Support 
relative URLs like ``'?key=value'`` (#100) + +* Unsafe encoding for QS fixed. Encode ``;`` character in value parameter (#104) + +* Process passwords without user names (#95) + +0.12.0 (2017-06-26) +=================== + +* Properly support paths without leading slash in ``URL.with_path()`` (#90) + +* Enable type annotation checks + +0.11.0 (2017-06-26) +=================== + +* Normalize path (#86) + +* Clear query and fragment parts in ``.with_path()`` (#85) + +0.10.3 (2017-06-13) +=================== + +* Prevent double URL arguments unquoting (#83) + +0.10.2 (2017-05-05) +=================== + +* Unexpected hash behavior (#75) + + +0.10.1 (2017-05-03) +=================== + +* Unexpected compare behavior (#73) + +* Do not quote or unquote + if not a query string. (#74) + + +0.10.0 (2017-03-14) +=================== + +* Added ``URL.build`` class method (#58) + +* Added ``path_qs`` attribute (#42) + + +0.9.8 (2017-02-16) +================== + +* Do not quote ``:`` in path + + +0.9.7 (2017-02-16) +================== + +* Load from pickle without _cache (#56) + +* Percent-encoded pluses in path variables become spaces (#59) + + +0.9.6 (2017-02-15) +================== + +* Revert backward incompatible change (BaseURL) + + +0.9.5 (2017-02-14) +================== + +* Fix BaseURL rich comparison support + + +0.9.4 (2017-02-14) +================== + +* Use BaseURL + + +0.9.3 (2017-02-14) +================== + +* Added BaseURL + + +0.9.2 (2017-02-08) +================== + +* Remove debug print + + +0.9.1 (2017-02-07) +================== + +* Do not lose tail chars (#45) + + +0.9.0 (2017-02-07) +================== + +* Allow to quote ``%`` in non strict mode (#21) + +* Incorrect parsing of query parameters with %3B (;) inside (#34) + +* Fix core dumps (#41) + +* ``tmpbuf`` - compiling error (#43) + +* Added ``URL.update_path()`` method + +* Added ``URL.update_query()`` method (#47) + + +0.8.1 (2016-12-03) +================== + +* Fix broken aiohttp: revert back ``quote`` / ``unquote``. 
+
+
+0.8.0 (2016-12-03)
+==================
+
+* Support more verbose error messages in ``.with_query()`` (#24)
+
+* Don't percent-encode ``@`` and ``:`` in path (#32)
+
+* Don't expose ``yarl.quote`` and ``yarl.unquote``; these functions are
+  part of the private API
+
+0.7.1 (2016-11-18)
+==================
+
+* Accept not only ``str`` but also all classes inheriting from ``str`` (#25)
+
+0.7.0 (2016-11-07)
+==================
+
+* Accept ``int`` as a value for ``.with_query()``
+
+0.6.0 (2016-11-07)
+==================
+
+* Explicitly use UTF-8 encoding in ``setup.py`` (#20)
+* Properly unquote non-UTF-8 strings (#19)
+
+0.5.3 (2016-11-02)
+==================
+
+* Use indexes instead of ``typing.NamedTuple`` fields on URL construction
+
+0.5.2 (2016-11-02)
+==================
+
+* Inline the ``_encode`` class method
+
+0.5.1 (2016-11-02)
+==================
+
+* Make URL construction faster by removing extra classmethod calls
+
+0.5.0 (2016-11-02)
+==================
+
+* Add Cython optimization for quoting/unquoting
+* Provide binary wheels
+
+0.4.3 (2016-09-29)
+==================
+
+* Fix typing stubs
+
+0.4.2 (2016-09-29)
+==================
+
+* Expose ``quote()`` and ``unquote()`` as public API
+
+0.4.1 (2016-09-28)
+==================
+
+* Support empty values in query (``'/path?arg'``)
+
+0.4.0 (2016-09-27)
+==================
+
+* Introduce ``relative()`` (#16)
+
+0.3.2 (2016-09-27)
+==================
+
+* Typo fixes (#15)
+
+0.3.1 (2016-09-26)
+==================
+
+* Support a sequence of pairs as the ``with_query()`` parameter
+
+0.3.0 (2016-09-26)
+==================
+
+* Introduce ``is_default_port()``
+
+0.2.1 (2016-09-26)
+==================
+
+* Raise ``ValueError`` for URLs like ``'http://:8080/'``
+
+0.2.0 (2016-09-18)
+==================
+
+* Avoid doubling slashes when joining paths (#13)
+
+* Appending a path starting with a slash is forbidden (#12)
+
+0.1.4 (2016-09-09)
+==================
+
+* Add ``kwargs`` support for ``with_query()`` (#10)
+
+0.1.3 (2016-09-07)
+==================
+
+* Document ``with_query()``, ``with_fragment()`` and ``origin()``
+
+* Allow ``None`` for ``with_query()`` and ``with_fragment()``
+
+0.1.2 (2016-09-07)
+==================
+
+* Fix links, tune the docs theme.
+
+0.1.1 (2016-09-06)
+==================
+
+* Update README; the old version used an obsolete API
+
+0.1.0 (2016-09-06)
+==================
+
+* The library was deeply refactored; bytes are gone, but all
+  accepted strings are encoded if needed.
+
+0.0.1 (2016-08-30)
+==================
+
+* The first release.
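The ``with_query()`` entries spread across the early releases (kwargs support in #10, ``int`` values in 0.7.0, ``None`` to clear in 0.1.3) plus ``is_default_port()`` from 0.3.0 can be summarized in one illustrative sketch; the example URL is made up::

    from yarl import URL

    url = URL("http://example.com/path?a=1")

    # with_query() accepts keyword arguments (#10) and int values (0.7.0);
    # it replaces the whole query.
    assert str(url.with_query(page=2)) == "http://example.com/path?page=2"

    # None clears the query entirely (0.1.3).
    assert str(url.with_query(None)) == "http://example.com/path"

    # is_default_port() (0.3.0) checks the port against the scheme's default.
    assert URL("http://example.com:80/").is_default_port()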
diff --git a/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/NOTICE b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/NOTICE
new file mode 100644
index 0000000000000000000000000000000000000000..fa53b2b138df881c4c95239d0e4bede831b36ab5
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/NOTICE
@@ -0,0 +1,13 @@
+   Copyright 2016-2021, Andrew Svetlov and aio-libs team
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/RECORD b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..62171f349fe656c39ad72f633325900eaf3d6f6b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/RECORD
@@ -0,0 +1,27 @@
+yarl-1.18.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+yarl-1.18.3.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+yarl-1.18.3.dist-info/METADATA,sha256=uCw3M98Beq35Z_oabzdjvJfOVVHsUKQLJkHFYlsLOAQ,69168
+yarl-1.18.3.dist-info/NOTICE,sha256=VtasbIEFwKUTBMIdsGDjYa-ajqCvmnXCOcKLXRNpODg,609
+yarl-1.18.3.dist-info/RECORD,,
+yarl-1.18.3.dist-info/WHEEL,sha256=9BFfIe-Zq441iQ0ehutX65O5faGDpmB1Uw3WaQGk4f0,151
+yarl-1.18.3.dist-info/top_level.txt,sha256=vf3SJuQh-k7YtvsUrV_OPOrT9Kqn0COlk7IPYyhtGkQ,5
+yarl/__init__.py,sha256=kOikSKDR4e054Sr-uNvv5edPUBIKHZ7Z-v4jYcffZFs,281
+yarl/__pycache__/__init__.cpython-311.pyc,,
+yarl/__pycache__/_parse.cpython-311.pyc,,
+yarl/__pycache__/_path.cpython-311.pyc,,
+yarl/__pycache__/_query.cpython-311.pyc,,
+yarl/__pycache__/_quoters.cpython-311.pyc,,
+yarl/__pycache__/_quoting.cpython-311.pyc,,
+yarl/__pycache__/_quoting_py.cpython-311.pyc,,
+yarl/__pycache__/_url.cpython-311.pyc,,
+yarl/_parse.py,sha256=b7EwD9mfrwh-1gOivqIo7674TFrnYjvlTMGy0jqyByc,6719
+yarl/_path.py,sha256=A0FJUylZyzmlT0a3UDOBbK-EzZXCAYuQQBvG9eAC9hs,1291
+yarl/_query.py,sha256=uOIglvIOHqxt_QPSFBVc_nTJZmnysEo3E0if0u064s8,4014
+yarl/_quoters.py,sha256=IX1lZ0Dxz4yYjVCKAFZqq4QK8fBwYRoF9tURTX_eWDU,1079
+yarl/_quoting.py,sha256=_Kyqs76exTwTY4HVChMOBvxYM2-ymb4dsApoiaJFCUs,509
+yarl/_quoting_c.cpython-311-x86_64-linux-gnu.so,sha256=uSljlnIm0-pqINXra6mKuB1OwmSaVDLFNAtxS0RRpC0,1040232
+yarl/_quoting_c.pyi,sha256=78wyjRGZNgeGX2VzQZlM6bLZEJSCoImGz-mll39VoW4,411
+yarl/_quoting_c.pyx,sha256=qZ3AaFlE3yDqd2tdBwszXfd3FydhoSRusso0pHqilGU,13106
+yarl/_quoting_py.py,sha256=DxBm600yabcmB3YanNbhH1OPJO-Dp2icEWzkGIDT0jw,6361
+yarl/_url.py,sha256=n0Xt2EmyrMdQ8vK15Z_LsirgHxb5g9L6g7LFmy-gw3g,54413
+yarl/py.typed,sha256=ay5OMO475PlcZ_Fbun9maHW7Y6MBTk0UXL4ztHx3Iug,14
diff --git a/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..8b0363604372a86351ec451bb3c5afa26234e6f0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: setuptools (75.6.0)
+Root-Is-Purelib: false
+Tag: cp311-cp311-manylinux_2_17_x86_64
+Tag: cp311-cp311-manylinux2014_x86_64
+
diff --git a/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e93e8bddefb14a8a753f7ecab6b934fd899cd9e5
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/yarl-1.18.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+yarl
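The RECORD hunk above lists one ``path,hash,size`` row per installed file; the hash field is an urlsafe base64-encoded SHA-256 digest with ``=`` padding stripped. A sketch of verifying one row, assuming the environment lives at the site-packages path shown in the diff::

    import base64
    import hashlib
    from pathlib import Path

    def record_hash(path: Path) -> str:
        # RECORD stores "sha256=" plus the urlsafe-base64 digest, padding stripped.
        digest = hashlib.sha256(path.read_bytes()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    # Assumed location of the environment; adjust for your checkout.
    site = Path(".venv/lib/python3.11/site-packages")

    # Compare against the row: yarl/py.typed,sha256=...,14
    assert record_hash(site / "yarl" / "py.typed") == "sha256=ay5OMO475PlcZ_Fbun9maHW7Y6MBTk0UXL4ztHx3Iug"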