diff --git a/.gitattributes b/.gitattributes index bb9674e4f1166f098431c8c8c4245ef3dbe12c70..2b2e1f180d8b67fd4edbdedd16608b3b0cf6e835 100644 --- a/.gitattributes +++ b/.gitattributes @@ -801,3 +801,5 @@ evalkit_eagle/lib/python3.10/site-packages/pandas/core/__pycache__/frame.cpython evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_eagle/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so filter=lfs diff=lfs merge=lfs -text +evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda124.so filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..766220fa0617d0aede86d460aa2743f09fad78e3 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a0ea612edfdf28fd3e8164271395b7485af1961 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..e7e213da599a5b3b1cab110765ccd9990243a080 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/consts.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/consts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c61635787f6625fa31ae09ae9571dbb9307221f Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/consts.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cuda_specs.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cuda_specs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ebe6b366853da5ba9a6128817b1052f103de29a Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cuda_specs.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89913685cf7e1e47092f2e0517c7bff3311dac42 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d93f6e5e8f7e5c8b9b4e26db9e13c4c59ff86e49 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc differ diff 
--git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..216d21dd7ce73b29e1e13f4fb894afd5f91c500a Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/cuda.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/cuda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78ea8498c5f916d3bf3c7072d15c4f2995013248 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/cuda.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/main.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4dacf22bf182231ead220e5a7de5a65a77728a2 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/main.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/utils.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/utils.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3acab7b1d1c62b9f99e3fcc1bda574f418312edf Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/utils.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/cuda.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/cuda.py new file mode 100644 index 0000000000000000000000000000000000000000..45dc98deaaf6338cc7c406e7ecb872b7d8133edd --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/cuda.py @@ -0,0 +1,176 @@ +import logging +import os +from pathlib import Path +from typing import Dict, Iterable, Iterator + +import torch + +from bitsandbytes.cextension import get_cuda_bnb_library_path +from bitsandbytes.consts import NONPYTORCH_DOC_URL +from bitsandbytes.cuda_specs import CUDASpecs +from bitsandbytes.diagnostics.utils import print_dedented + +CUDART_PATH_PREFERRED_ENVVARS = ("CONDA_PREFIX", "LD_LIBRARY_PATH") + +CUDART_PATH_IGNORED_ENVVARS = { + "DBUS_SESSION_BUS_ADDRESS", # hardware related + "GOOGLE_VM_CONFIG_LOCK_FILE", # GCP: requires elevated permissions, causing problems in VMs and Jupyter notebooks + "HOME", # Linux shell default + "LESSCLOSE", + "LESSOPEN", # related to the `less` command + "MAIL", # something related to emails + "OLDPWD", + "PATH", # this is for finding binaries, not libraries + "PWD", # PWD: this is how the shell keeps track of the current working dir + "SHELL", # binary for currently invoked shell + "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated + "SSH_TTY", + "TMUX", # Terminal Multiplexer + "XDG_DATA_DIRS", # XDG: Desktop environment stuff + "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff + "XDG_RUNTIME_DIR", + "_", # current Python interpreter +} + +CUDA_RUNTIME_LIB_PATTERNS = ( + "cudart64*.dll", # Windows + "libcudart*.so*", # libcudart.so, libcudart.so.11.0, libcudart.so.12.0, libcudart.so.12.1, libcudart.so.12.2 etc. 
+ "nvcuda*.dll", # Windows +) + +logger = logging.getLogger(__name__) + + +def find_cuda_libraries_in_path_list(paths_list_candidate: str) -> Iterable[Path]: + for dir_string in paths_list_candidate.split(os.pathsep): + if not dir_string: + continue + if os.sep not in dir_string: + continue + try: + dir = Path(dir_string) + try: + if not dir.exists(): + logger.warning(f"The directory listed in your path is found to be non-existent: {dir}") + continue + except OSError: # Assume an esoteric error trying to poke at the directory + pass + for lib_pattern in CUDA_RUNTIME_LIB_PATTERNS: + for pth in dir.glob(lib_pattern): + if pth.is_file(): + yield pth + except (OSError, PermissionError): + pass + + +def is_relevant_candidate_env_var(env_var: str, value: str) -> bool: + return ( + env_var in CUDART_PATH_PREFERRED_ENVVARS # is a preferred location + or ( + os.sep in value # might contain a path + and env_var not in CUDART_PATH_IGNORED_ENVVARS # not ignored + and "CONDA" not in env_var # not another conda envvar + and "BASH_FUNC" not in env_var # not a bash function defined via envvar + and "\n" not in value # likely e.g. a script or something? + ) + ) + + +def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]: + return {env_var: value for env_var, value in os.environ.items() if is_relevant_candidate_env_var(env_var, value)} + + +def find_cudart_libraries() -> Iterator[Path]: + """ + Searches for a cuda installations, in the following order of priority: + 1. active conda env + 2. LD_LIBRARY_PATH + 3. any other env vars, while ignoring those that + - are known to be unrelated + - don't contain the path separator `/` + + If multiple libraries are found in part 3, we optimistically try one, + while giving a warning message. 
+ """ + candidate_env_vars = get_potentially_lib_path_containing_env_vars() + + for envvar in CUDART_PATH_PREFERRED_ENVVARS: + if envvar in candidate_env_vars: + directory = candidate_env_vars[envvar] + yield from find_cuda_libraries_in_path_list(directory) + candidate_env_vars.pop(envvar) + + for env_var, value in candidate_env_vars.items(): + yield from find_cuda_libraries_in_path_list(value) + + +def print_cuda_diagnostics(cuda_specs: CUDASpecs) -> None: + print( + f"PyTorch settings found: CUDA_VERSION={cuda_specs.cuda_version_string}, " + f"Highest Compute Capability: {cuda_specs.highest_compute_capability}.", + ) + + binary_path = get_cuda_bnb_library_path(cuda_specs) + if not binary_path.exists(): + print_dedented( + f""" + Library not found: {binary_path}. Maybe you need to compile it from source? + If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION`, + for example, `make CUDA_VERSION=113`. + + The CUDA version for the compile might depend on your conda install, if using conda. + Inspect CUDA version via `conda list | grep cuda`. + """, + ) + + cuda_major, cuda_minor = cuda_specs.cuda_version_tuple + if cuda_major < 11: + print_dedented( + """ + WARNING: CUDA versions lower than 11 are currently not supported for LLM.int8(). + You will be only to use 8-bit optimizers and quantization routines! + """, + ) + + print(f"To manually override the PyTorch CUDA version please see: {NONPYTORCH_DOC_URL}") + + # 7.5 is the minimum CC for int8 tensor cores + if not cuda_specs.has_imma: + print_dedented( + """ + WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! 
+ If you run into issues with 8-bit matmul, you can try 4-bit quantization: + https://huggingface.co/blog/4bit-transformers-bitsandbytes + """, + ) + + # TODO: + # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) + # (2) Multiple CUDA versions installed + + +def print_cuda_runtime_diagnostics() -> None: + cudart_paths = list(find_cudart_libraries()) + if not cudart_paths: + print("CUDA SETUP: WARNING! CUDA runtime files not found in any environmental path.") + elif len(cudart_paths) > 1: + print_dedented( + f""" + Found duplicate CUDA runtime files (see below). + + We select the PyTorch default CUDA runtime, which is {torch.version.cuda}, + but this might mismatch with the CUDA version that is needed for bitsandbytes. + To override this behavior set the `BNB_CUDA_VERSION=` environmental variable. + + For example, if you want to use the CUDA version 122, + BNB_CUDA_VERSION=122 python ... + + OR set the environmental variable in your .bashrc: + export BNB_CUDA_VERSION=122 + + In the case of a manual override, make sure you set LD_LIBRARY_PATH, e.g. 
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2, + """, + ) + for pth in cudart_paths: + print(f"* Found CUDA runtime at: {pth}") diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/main.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/main.py new file mode 100644 index 0000000000000000000000000000000000000000..1ce096f697f228d3ff08f5a5e68e74ab01628999 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/main.py @@ -0,0 +1,85 @@ +import sys +import traceback + +import torch + +from bitsandbytes.consts import PACKAGE_GITHUB_URL +from bitsandbytes.cuda_specs import get_cuda_specs +from bitsandbytes.diagnostics.cuda import ( + print_cuda_diagnostics, + print_cuda_runtime_diagnostics, +) +from bitsandbytes.diagnostics.utils import print_dedented, print_header + + +def sanity_check(): + from bitsandbytes.cextension import lib + + if lib is None: + print_dedented( + """ + Couldn't load the bitsandbytes library, likely due to missing binaries. + Please ensure bitsandbytes is properly installed. + + For source installations, compile the binaries with `cmake -DCOMPUTE_BACKEND=cuda -S .`. + See the documentation for more details if needed. + + Trying a simple check anyway, but this will likely fail... + """, + ) + + from bitsandbytes.optim import Adam + + p = torch.nn.Parameter(torch.rand(10, 10).cuda()) + a = torch.rand(10, 10).cuda() + p1 = p.data.sum().item() + adam = Adam([p]) + out = a * p + loss = out.sum() + loss.backward() + adam.step() + p2 = p.data.sum().item() + assert p1 != p2 + + +def main(): + print_header("") + print_header("BUG REPORT INFORMATION") + print_header("") + + print_header("OTHER") + cuda_specs = get_cuda_specs() + print("CUDA specs:", cuda_specs) + if not torch.cuda.is_available(): + print("Torch says CUDA is not available. Possible reasons:") + print("1. CUDA driver not installed") + print("2. CUDA not installed") + print("3. 
You have multiple conflicting CUDA libraries") + if cuda_specs: + print_cuda_diagnostics(cuda_specs) + print_cuda_runtime_diagnostics() + print_header("") + print_header("DEBUG INFO END") + print_header("") + print("Checking that the library is importable and CUDA is callable...") + try: + sanity_check() + print("SUCCESS!") + print("Installation was successful!") + return + except ImportError: + print( + f"WARNING: {__package__} is currently running as CPU-only!\n" + "Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n" + f"If you think that this is so erroneously,\nplease report an issue!", + ) + except Exception: + traceback.print_exc() + print_dedented( + f""" + Above we output some debug information. + Please provide this info when creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose + WARNING: Please be sure to sanitize sensitive info from the output before posting it. + """, + ) + sys.exit(1) diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/utils.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..770209b9da04298a4c3cd8abec8fe228702d43d0 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/utils.py @@ -0,0 +1,12 @@ +import textwrap + +HEADER_WIDTH = 60 + + +def print_header(txt: str, width: int = HEADER_WIDTH, filler: str = "+") -> None: + txt = f" {txt} " if txt else "" + print(txt.center(width, filler)) + + +def print_dedented(text): + print("\n".join(textwrap.dedent(text).strip().split("\n"))) diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so new file mode 100644 index 0000000000000000000000000000000000000000..edd05ad5606ec9050e1ff374f7113b2ebaef22dc --- /dev/null +++ 
b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:906a547b09630095e0355cc3780c84169e1b222a00571b0f738760da5f3df665 +size 25822992 diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda124.so b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda124.so new file mode 100644 index 0000000000000000000000000000000000000000..6bc1ce49fab162d721ada6d5453560456afaec65 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda124.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06689764b9601734d0e8f5bec701255fd38d4480de704d637800c8874348717a +size 25123696 diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..20aff67a33d3d3f2f323379378fae99bd39ba725 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py @@ -0,0 +1,26 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+from .modules import ( + Embedding, + Embedding4bit, + Embedding8bit, + EmbeddingFP4, + EmbeddingNF4, + Int8Params, + Linear4bit, + Linear8bitLt, + LinearFP4, + LinearNF4, + OutlierAwareLinear, + Params4bit, + StableEmbedding, + SwitchBackLinearBnb, +) +from .triton_based_modules import ( + StandardLinear, + SwitchBackLinear, + SwitchBackLinearGlobal, + SwitchBackLinearVectorwise, +) diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6a176d5eb4d65ebeba0ad3cbee1e40f2c6b2624 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6405d18cdfacb7ff1b2e7106316308450a256e8 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81f65df63148afafc3b9de9ec95c495ffa4d1120 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/modules.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/modules.py new file mode 100644 index 
0000000000000000000000000000000000000000..e63cd8db911a2b6c8d6814c4f3d93df76fbdd50f --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/modules.py @@ -0,0 +1,1061 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import copy +from typing import Any, Dict, Optional, TypeVar, Union, overload +import warnings + +import torch +from torch import Tensor, device, dtype, nn +import torch.nn.functional as F + +import bitsandbytes as bnb +from bitsandbytes.autograd._functions import get_tile_inds, undo_layout +from bitsandbytes.functional import QuantState +from bitsandbytes.optim import GlobalOptimManager +from bitsandbytes.utils import ( + INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING, + OutlierTracer, +) + +T = TypeVar("T", bound="torch.nn.Module") + + +class StableEmbedding(torch.nn.Embedding): + """ + Custom embedding layer designed to improve stability during training for NLP tasks by using 32-bit optimizer states. It is designed to reduce gradient variations that can result from quantization. This embedding layer is initialized with Xavier uniform initialization followed by layer normalization. + + Example: + + ``` + # Initialize StableEmbedding layer with vocabulary size 1000, embedding dimension 300 + embedding_layer = StableEmbedding(num_embeddings=1000, embedding_dim=300) + + # Reset embedding parameters + embedding_layer.reset_parameters() + + # Perform a forward pass with input tensor + input_tensor = torch.tensor([1, 2, 3]) + output_embedding = embedding_layer(input_tensor) + ``` + + Attributes: + norm (`torch.nn.LayerNorm`): Layer normalization applied after the embedding. + + Methods: + reset_parameters(): Reset embedding parameters using Xavier uniform initialization. + forward(input: Tensor) -> Tensor: Forward pass through the stable embedding layer. 
+ """ + + def __init__( + self, + num_embeddings: int, + embedding_dim: int, + padding_idx: Optional[int] = None, + max_norm: Optional[float] = None, + norm_type: float = 2.0, + scale_grad_by_freq: bool = False, + sparse: bool = False, + _weight: Optional[Tensor] = None, + device=None, + dtype=None, + ) -> None: + """ + Args: + num_embeddings (`int`): + The number of unique embeddings (vocabulary size). + embedding_dim (`int`): + The dimensionality of the embedding. + padding_idx (`Optional[int]`): + Pads the output with zeros at the given index. + max_norm (`Optional[float]`): + Renormalizes embeddings to have a maximum L2 norm. + norm_type (`float`, defaults to `2.0`): + The p-norm to compute for the `max_norm` option. + scale_grad_by_freq (`bool`, defaults to `False`): + Scale gradient by frequency during backpropagation. + sparse (`bool`, defaults to `False`): + Computes dense gradients. Set to `True` to compute sparse gradients instead. + _weight (`Optional[Tensor]`): + Pretrained embeddings. + """ + super().__init__( + num_embeddings, + embedding_dim, + padding_idx, + max_norm, + norm_type, + scale_grad_by_freq, + sparse, + _weight, + device, + dtype, + ) + self.norm = torch.nn.LayerNorm(embedding_dim, device=device) + GlobalOptimManager.get_instance().register_module_override(self, "weight", {"optim_bits": 32}) + + def reset_parameters(self) -> None: + torch.nn.init.xavier_uniform_(self.weight) + self._fill_padding_idx_with_zero() + + """ !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding + to make the Layer compatible with Pytorch < 1.9. + This means that if this changes in future PyTorch releases this need to change too + which is cumbersome. However, with this we can ensure compatibility with previous + PyTorch releases. 
+ """ + + def _fill_padding_idx_with_zero(self) -> None: + if self.padding_idx is not None: + with torch.no_grad(): + self.weight[self.padding_idx].fill_(0) + + def forward(self, input: Tensor) -> Tensor: + emb = F.embedding( + input, + self.weight, + self.padding_idx, + self.max_norm, + self.norm_type, + self.scale_grad_by_freq, + self.sparse, + ) + + # always apply layer norm in full precision + emb = emb.to(torch.get_default_dtype()) + + return self.norm(emb).to(self.weight.dtype) + + +class Embedding(torch.nn.Embedding): + """ + Embedding class to store and retrieve word embeddings from their indices. + """ + + def __init__( + self, + num_embeddings: int, + embedding_dim: int, + padding_idx: Optional[int] = None, + max_norm: Optional[float] = None, + norm_type: float = 2.0, + scale_grad_by_freq: bool = False, + sparse: bool = False, + _weight: Optional[Tensor] = None, + device: Optional[device] = None, + ) -> None: + """ + Args: + num_embeddings (`int`): + The number of unique embeddings (vocabulary size). + embedding_dim (`int`): + The dimensionality of the embedding. + padding_idx (`Optional[int]`): + Pads the output with zeros at the given index. + max_norm (`Optional[float]`): + Renormalizes embeddings to have a maximum L2 norm. + norm_type (`float`, defaults to `2.0`): + The p-norm to compute for the `max_norm` option. + scale_grad_by_freq (`bool`, defaults to `False`): + Scale gradient by frequency during backpropagation. + sparse (`bool`, defaults to `False`): + Computes dense gradients. Set to `True` to compute sparse gradients instead. + _weight (`Optional[Tensor]`): + Pretrained embeddings. 
+ """ + super().__init__( + num_embeddings, + embedding_dim, + padding_idx, + max_norm, + norm_type, + scale_grad_by_freq, + sparse, + _weight, + device=device, + ) + GlobalOptimManager.get_instance().register_module_override(self, "weight", {"optim_bits": 32}) + + def reset_parameters(self) -> None: + torch.nn.init.xavier_uniform_(self.weight) + self._fill_padding_idx_with_zero() + + """ !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding + to make the Layer compatible with Pytorch < 1.9. + This means that if this changes in future PyTorch releases this need to change too + which is cumbersome. However, with this we can ensure compatibility with previous + PyTorch releases. + """ + + def _fill_padding_idx_with_zero(self) -> None: + if self.padding_idx is not None: + with torch.no_grad(): + self.weight[self.padding_idx].fill_(0) + + def forward(self, input: Tensor) -> Tensor: + emb = F.embedding( + input, + self.weight, + self.padding_idx, + self.max_norm, + self.norm_type, + self.scale_grad_by_freq, + self.sparse, + ) + + return emb + + +class Params4bit(torch.nn.Parameter): + def __new__( + cls, + data: Optional[torch.Tensor] = None, + requires_grad=False, # quantized weights should be frozen by default + quant_state: Optional[QuantState] = None, + blocksize: int = 64, + compress_statistics: bool = True, + quant_type: str = "fp4", + quant_storage: torch.dtype = torch.uint8, + module: Optional["Linear4bit"] = None, + bnb_quantized: bool = False, + ) -> "Params4bit": + if data is None: + data = torch.empty(0) + + self = torch.Tensor._make_subclass(cls, data, requires_grad) + self.blocksize = blocksize + self.compress_statistics = compress_statistics + self.quant_type = quant_type + self.quant_state = quant_state + self.quant_storage = quant_storage + self.bnb_quantized = bnb_quantized + self.data = data + self.module = module + return self + + def __getstate__(self): + state = self.__dict__.copy() + state["data"] = self.data + 
state["requires_grad"] = self.requires_grad + return state + + def __setstate__(self, state): + self.requires_grad = state["requires_grad"] + self.blocksize = state["blocksize"] + self.compress_statistics = state["compress_statistics"] + self.quant_type = state["quant_type"] + self.quant_state = state["quant_state"] + self.data = state["data"] + self.quant_storage = state["quant_storage"] + self.bnb_quantized = state["bnb_quantized"] + self.module = state["module"] + + def __deepcopy__(self, memo): + new_instance = type(self).__new__(type(self)) + state = self.__getstate__() + new_instance.__setstate__(state) + new_instance.quant_state = copy.deepcopy(state["quant_state"]) + new_instance.data = copy.deepcopy(state["data"]) + return new_instance + + def __copy__(self): + new_instance = type(self).__new__(type(self)) + state = self.__getstate__() + new_instance.__setstate__(state) + return new_instance + + @classmethod + def from_prequantized( + cls, + data: torch.Tensor, + quantized_stats: Dict[str, Any], + requires_grad: bool = False, + device="cuda", + module: Optional["Linear4bit"] = None, + **kwargs, + ) -> "Params4bit": + self = torch.Tensor._make_subclass(cls, data.to(device)) + self.requires_grad = requires_grad + self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device) + self.blocksize = self.quant_state.blocksize + self.compress_statistics = self.quant_state.nested + self.quant_type = self.quant_state.quant_type + self.bnb_quantized = True + + self.quant_storage = data.dtype + self.module = module + + if self.module is not None: + self.module.quant_state = self.quant_state + + return self + + def _quantize(self, device): + w = self.data.contiguous().to(device) + w_4bit, quant_state = bnb.functional.quantize_4bit( + w, + blocksize=self.blocksize, + compress_statistics=self.compress_statistics, + quant_type=self.quant_type, + quant_storage=self.quant_storage, + ) + self.data = w_4bit + self.quant_state = quant_state + if self.module is 
not None: + self.module.quant_state = quant_state + self.bnb_quantized = True + return self + + def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False): + return self.to(device="cuda" if device is None else device, non_blocking=non_blocking) + + @overload + def to( + self: T, + device: Optional[Union[int, device]] = ..., + dtype: Optional[Union[dtype, str]] = ..., + non_blocking: bool = ..., + ) -> T: ... + + @overload + def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ... + + @overload + def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: ... + + def to(self, *args, **kwargs): + device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs) + + if device is not None and device.type == "cuda" and not self.bnb_quantized: + return self._quantize(device) + else: + if self.quant_state is not None: + self.quant_state.to(device) + + new_param = Params4bit( + super().to(device=device, dtype=dtype, non_blocking=non_blocking), + requires_grad=self.requires_grad, + quant_state=self.quant_state, + blocksize=self.blocksize, + compress_statistics=self.compress_statistics, + quant_type=self.quant_type, + quant_storage=self.quant_storage, + ) + + return new_param + + +def fix_4bit_weight_quant_state_from_module(module: Union["Embedding4bit", "Linear4bit"]): + if getattr(module.weight, "quant_state", None) is not None: + return + + if getattr(module, "quant_state", None) is None: + warnings.warn( + "FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.", + ) + + # the quant state got lost when the parameter got converted. 
This happens for example for fsdp + # since we registered the module, we can recover the state here + assert module.weight.shape[1] == 1 + if not isinstance(module.weight, Params4bit): + module.weight = Params4bit(module.weight, quant_storage=module.quant_storage, bnb_quantized=True) + module.weight.quant_state = module.quant_state + + +class Linear4bit(nn.Linear): + """ + This class is the base module for the 4-bit quantization algorithm presented in [QLoRA](https://arxiv.org/abs/2305.14314). + QLoRA 4-bit linear layers uses blockwise k-bit quantization under the hood, with the possibility of selecting various + compute datatypes such as FP4 and NF4. + + In order to quantize a linear layer one should first load the original fp16 / bf16 weights into + the Linear4bit module, then call `quantized_module.to("cuda")` to quantize the fp16 / bf16 weights. + + Example: + + ```python + import torch + import torch.nn as nn + + import bitsandbytes as bnb + from bnb.nn import Linear4bit + + fp16_model = nn.Sequential( + nn.Linear(64, 64), + nn.Linear(64, 64) + ) + + quantized_model = nn.Sequential( + Linear4bit(64, 64), + Linear4bit(64, 64) + ) + + quantized_model.load_state_dict(fp16_model.state_dict()) + quantized_model = quantized_model.to(0) # Quantization happens here + ``` + """ + + def __init__( + self, + input_features, + output_features, + bias=True, + compute_dtype=None, + compress_statistics=True, + quant_type="fp4", + quant_storage=torch.uint8, + device=None, + ): + """ + Initialize Linear4bit class. + + Args: + input_features (`str`): + Number of input features of the linear layer. + output_features (`str`): + Number of output features of the linear layer. + bias (`bool`, defaults to `True`): + Whether the linear class uses the bias term as well. 
+ """ + super().__init__(input_features, output_features, bias, device) + self.weight = Params4bit( + self.weight.data, + requires_grad=False, + compress_statistics=compress_statistics, + quant_type=quant_type, + quant_storage=quant_storage, + module=self, + ) + # self.persistent_buffers = [] # TODO consider as way to save quant state + self.compute_dtype = compute_dtype + self.compute_type_is_set = False + self.quant_state = None + self.quant_storage = quant_storage + + def set_compute_type(self, x): + if x.dtype in [torch.float32, torch.bfloat16]: + # the input is in a dtype that is safe to compute in, we switch + # to this type for speed and stability + self.compute_dtype = x.dtype + elif x.dtype == torch.float16: + # we take the compoute dtype passed into the layer + if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]): + # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast + # warn the user about this + warnings.warn( + "Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.", + ) + warnings.filterwarnings("ignore", message=".*inference.") + if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]): + warnings.warn( + "Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.", + ) + warnings.filterwarnings("ignore", message=".*inference or training") + + def _save_to_state_dict(self, destination, prefix, keep_vars): + """ + save weight and bias, + then fill state_dict with components of quant_state + """ + super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias + + if getattr(self.weight, "quant_state", None) is not None: + for k, v in self.weight.quant_state.as_dict(packed=True).items(): + destination[prefix + "weight." 
+ k] = v if keep_vars else v.detach() + + def forward(self, x: torch.Tensor): + fix_4bit_weight_quant_state_from_module(self) + + # weights are cast automatically as Int8Params, but the bias has to be cast manually + if self.bias is not None and self.bias.dtype != x.dtype: + self.bias.data = self.bias.data.to(x.dtype) + + if not self.compute_type_is_set: + self.set_compute_type(x) + self.compute_type_is_set = True + + inp_dtype = x.dtype + if self.compute_dtype is not None: + x = x.to(self.compute_dtype) + + bias = None if self.bias is None else self.bias.to(self.compute_dtype) + + return bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state).to(inp_dtype) + + +class LinearFP4(Linear4bit): + """ + Implements the FP4 data type. + """ + + def __init__( + self, + input_features, + output_features, + bias=True, + compute_dtype=None, + compress_statistics=True, + quant_storage=torch.uint8, + device=None, + ): + """ + Args: + input_features (`str`): + Number of input features of the linear layer. + output_features (`str`): + Number of output features of the linear layer. + bias (`bool`, defaults to `True`): + Whether the linear class uses the bias term as well. + """ + super().__init__( + input_features, + output_features, + bias, + compute_dtype, + compress_statistics, + "fp4", + quant_storage, + device, + ) + + +class LinearNF4(Linear4bit): + """Implements the NF4 data type. + + Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that + is normalized into the range [-1, 1]. + + For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314) + + Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in + the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236. 
+ """ + + def __init__( + self, + input_features, + output_features, + bias=True, + compute_dtype=None, + compress_statistics=True, + quant_storage=torch.uint8, + device=None, + ): + """ + Args: + input_features (`str`): + Number of input features of the linear layer. + output_features (`str`): + Number of output features of the linear layer. + bias (`bool`, defaults to `True`): + Whether the linear class uses the bias term as well. + """ + super().__init__( + input_features, + output_features, + bias, + compute_dtype, + compress_statistics, + "nf4", + quant_storage, + device, + ) + + +class Int8Params(torch.nn.Parameter): + def __new__( + cls, + data: Optional[torch.Tensor] = None, + requires_grad=True, + has_fp16_weights=False, + CB: Optional[torch.Tensor] = None, + SCB: Optional[torch.Tensor] = None, + ): + if data is None: + data = torch.empty(0) + obj = torch.Tensor._make_subclass(cls, data, requires_grad) + obj.CB = CB + obj.SCB = SCB + obj.has_fp16_weights = has_fp16_weights + return obj + + def cuda(self, device): + if self.has_fp16_weights: + return super().cuda(device) + else: + # We quantize the weight and store in 8bit row-major + B = self.data.contiguous().half().cuda(device) + CB, SCB, _ = bnb.functional.int8_vectorwise_quant(B) + self.data = CB + self.CB = CB + self.SCB = SCB + + return self + + def __deepcopy__(self, memo): + # adjust this if new arguments are added to the constructor + new_instance = type(self).__new__( + type(self), + data=copy.deepcopy(self.data, memo), + requires_grad=self.requires_grad, + has_fp16_weights=self.has_fp16_weights, + CB=copy.deepcopy(self.CB, memo), + SCB=copy.deepcopy(self.SCB, memo), + ) + return new_instance + + @overload + def to( + self: T, + device: Optional[Union[int, device]] = ..., + dtype: Optional[Union[dtype, str]] = ..., + non_blocking: bool = ..., + ) -> T: ... + + @overload + def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ... 
+ + @overload + def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: ... + + def to(self, *args, **kwargs): + device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs) + + if device is not None and device.type == "cuda" and self.data.device.type == "cpu": + return self.cuda(device) + else: + new_param = Int8Params( + super().to(device=device, dtype=dtype, non_blocking=non_blocking), + requires_grad=self.requires_grad, + has_fp16_weights=self.has_fp16_weights, + ) + new_param.CB = self.CB + new_param.SCB = self.SCB + + return new_param + + +def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + weight = state_dict.get(f"{prefix}weight") + if weight is None: + # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing + return + weight_format = state_dict.pop(f"{prefix}weight_format", "row") + + if isinstance(weight_format, torch.Tensor): + weight_format = weight_format.item() + + # For new weights format storage type, we explicitly check + # if weights_format is on the mapping + if isinstance(weight_format, int) and weight_format not in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING: + raise ValueError(f"Expected supported weight format - got {weight_format}") + elif isinstance(weight_format, int) and weight_format in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING: + weight_format = INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING[weight_format] + + if weight_format != "row": + tile_indices = get_tile_inds(weight_format, weight.device) + state_dict[f"{prefix}weight"] = undo_layout(weight, tile_indices) + + +class Embedding8bit(nn.Embedding): + """ + This class implements [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm for embedding layer + + Quantization API is similar to Linear8bitLt: + ```python + import torch + import torch.nn as nn + + from bitsandbytes.nn import Embedding8bit + + fp16_module = nn.Embedding(128, 64) + int8_module = 
Embedding8bit(128, 64) + + int8_module.load_state_dict(fp16_module.state_dict()) + + int8_module = int8_module.to(0) # Quantization happens here + ``` + """ + + def __init__(self, num_embeddings, embedding_dim, device=None, dtype=None): + super().__init__(num_embeddings, embedding_dim, device=device, dtype=dtype) + self.dtype = self.weight.data.dtype + + self.weight = Int8Params(self.weight.data, has_fp16_weights=False, requires_grad=False) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + raise NotImplementedError("Saving Embedding8bit module is not implemented") + + def forward(self, input: Tensor) -> Tensor: + if not hasattr(self.weight, "SCB"): + raise RuntimeError("Embedding layer is not quantized. Please call .cuda() or .to(device) first.") + + rows = self.weight.data + row_stats = self.weight.SCB + + assert rows.shape == (self.num_embeddings, self.embedding_dim) + assert row_stats.shape == (self.num_embeddings,) + + compressed_output = F.embedding(input, rows) + compressed_output_stats = F.embedding(input, row_stats.view(self.num_embeddings, 1)) + + output = compressed_output * (compressed_output_stats / 127.0) + + return output.to(self.dtype) + + +class Embedding4bit(nn.Embedding): + """ + This is the base class similar to Linear4bit. It implements the 4-bit quantization algorithm presented in + [QLoRA](https://arxiv.org/abs/2305.14314) for embeddings. 
+ + Quantization API is similar to Linear4bit: + ```python + import torch + import torch.nn as nn + + from bitsandbytes.nn import Embedding4bit + + fp16_module = nn.Embedding(128, 64) + quantized_module = Embedding4bit(128, 64) + + quantized_module.load_state_dict(fp16_module.state_dict()) + + quantized_module = quantized_module.to(0) # Quantization happens here + ``` + """ + + def __init__( + self, + num_embeddings, + embedding_dim, + dtype=None, + quant_type="fp4", + quant_storage=torch.uint8, + device=None, + ): + super().__init__(num_embeddings, embedding_dim, device=device, dtype=dtype) + self.dtype = self.weight.data.dtype + + self.weight = Params4bit( + self.weight.data, + requires_grad=False, + compress_statistics=None, + quant_type=quant_type, + quant_storage=quant_storage, + module=self, + ) + + blocksize = self.weight.blocksize + + if embedding_dim % blocksize != 0: + warnings.warn( + f"Embedding size {embedding_dim} is not divisible by block size {blocksize}. " + "This will lead to slow inference.", + ) + + def _forward_with_partial_dequantize(self, input: Tensor): + assert self.embedding_dim % self.weight.quant_state.blocksize == 0 + + w_4bit_uint8 = self.weight.data.view(torch.uint8).view(self.num_embeddings * self.embedding_dim // 2, 1) + + output_4bit = torch.nn.functional.embedding( + weight=w_4bit_uint8.view(self.num_embeddings, self.embedding_dim // 2), + input=input, + ).view(-1, 1) + assert output_4bit.shape == (input.numel() * self.embedding_dim // 2, 1) + + blocks_per_emb = self.embedding_dim // self.weight.blocksize + + absmax = self.weight.quant_state.absmax + assert absmax.shape == (self.num_embeddings * blocks_per_emb,) + + output_absmax = torch.nn.functional.embedding( + weight=absmax.view(self.num_embeddings, blocks_per_emb), + input=input, + ).view( + -1, + ) + assert output_absmax.shape == (input.numel() * blocks_per_emb,) + + output_quant_state = copy.deepcopy(self.weight.quant_state) + output_quant_state.absmax = output_absmax + 
output_quant_state.shape = torch.Size((*input.shape, self.embedding_dim)) + + output = bnb.functional.dequantize_4bit(output_4bit, output_quant_state) + assert output.shape == (*input.shape, self.embedding_dim) + + return output.to(self.dtype) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + raise NotImplementedError("Saving Embedding4bit module is not implemented") + + def forward(self, input: Tensor) -> Tensor: + fix_4bit_weight_quant_state_from_module(self) + + if self.embedding_dim % self.weight.quant_state.blocksize == 0: + return self._forward_with_partial_dequantize(input) + + dequantized_weight = bnb.functional.dequantize_4bit(self.weight.data, self.weight.quant_state) + + return torch.nn.functional.embedding( + weight=dequantized_weight, + input=input, + ).to(self.dtype) + + +class EmbeddingFP4(Embedding4bit): + def __init__( + self, + num_embeddings, + embedding_dim, + dtype=None, + quant_storage=torch.uint8, + device=None, + ): + super().__init__( + num_embeddings, + embedding_dim, + dtype=dtype, + quant_type="fp4", + quant_storage=quant_storage, + device=device, + ) + + +class EmbeddingNF4(Embedding4bit): + def __init__( + self, + num_embeddings, + embedding_dim, + dtype=None, + quant_storage=torch.uint8, + device=None, + ): + super().__init__( + num_embeddings, + embedding_dim, + dtype=dtype, + quant_type="nf4", + quant_storage=quant_storage, + device=device, + ) + + +class Linear8bitLt(nn.Linear): + """ + This class is the base module for the [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm. + To read more about it, have a look at the paper. + + In order to quantize a linear layer one should first load the original fp16 / bf16 weights into + the Linear8bitLt module, then call `int8_module.to("cuda")` to quantize the fp16 weights. 
+ + Example: + + ```python + import torch + import torch.nn as nn + + import bitsandbytes as bnb + from bnb.nn import Linear8bitLt + + fp16_model = nn.Sequential( + nn.Linear(64, 64), + nn.Linear(64, 64) + ) + + int8_model = nn.Sequential( + Linear8bitLt(64, 64, has_fp16_weights=False), + Linear8bitLt(64, 64, has_fp16_weights=False) + ) + + int8_model.load_state_dict(fp16_model.state_dict()) + int8_model = int8_model.to(0) # Quantization happens here + ``` + """ + + def __init__( + self, + input_features: int, + output_features: int, + bias=True, + has_fp16_weights=True, + threshold=0.0, + index=None, + device=None, + ): + """ + Initialize Linear8bitLt class. + + Args: + input_features (`int`): + Number of input features of the linear layer. + output_features (`int`): + Number of output features of the linear layer. + bias (`bool`, defaults to `True`): + Whether the linear class uses the bias term as well. + """ + super().__init__(input_features, output_features, bias, device) + self.state = bnb.MatmulLtState() + self.index = index + + self.state.threshold = threshold + self.state.has_fp16_weights = has_fp16_weights + + if threshold > 0.0 and not has_fp16_weights: + self.state.use_pool = True + + self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) + self._register_load_state_dict_pre_hook(maybe_rearrange_weight) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + + # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data + scb_name = "SCB" + + # case 1: .cuda was called, SCB is in self.weight + param_from_weight = getattr(self.weight, scb_name) + # case 2: self.init_8bit_state was called, SCB is in self.state + param_from_state = getattr(self.state, scb_name) + + key_name = prefix + f"{scb_name}" + + # We now only save in row-major. 
This format information is stored for backwards compatibility. + format_name = prefix + "weight_format" + + if not self.state.has_fp16_weights: + if param_from_weight is not None: + destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach() + destination[format_name] = torch.tensor(0, dtype=torch.uint8) + elif param_from_state is not None: + destination[key_name] = param_from_state if keep_vars else param_from_state.detach() + destination[format_name] = torch.tensor(0, dtype=torch.uint8) + + def _load_from_state_dict( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + super()._load_from_state_dict( + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ) + unexpected_copy = list(unexpected_keys) + + for key in unexpected_copy: + input_name = key[len(prefix) :] + if input_name == "SCB": + if self.weight.SCB is None: + # buffers not yet initialized, can't access them directly without quantizing first + raise RuntimeError( + "Loading a quantized checkpoint into non-quantized Linear8bitLt is " + "not supported. 
Please call module.cuda() before module.load_state_dict()", + ) + + input_param = state_dict[key] + self.weight.SCB.copy_(input_param) + + if self.state.SCB is not None: + self.state.SCB = self.weight.SCB + + unexpected_keys.remove(key) + + def init_8bit_state(self): + self.state.CB = self.weight.CB + self.state.SCB = self.weight.SCB + self.weight.CB = None + self.weight.SCB = None + + def forward(self, x: torch.Tensor): + self.state.is_training = self.training + if self.weight.CB is not None: + self.init_8bit_state() + + # weights are cast automatically as Int8Params, but the bias has to be cast manually + if self.bias is not None and self.bias.dtype != x.dtype: + self.bias.data = self.bias.data.to(x.dtype) + + out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state) + + if not self.state.has_fp16_weights and self.state.CB is not None: + self.weight.data = self.state.CB + + return out + + +class OutlierAwareLinear(nn.Linear): + def __init__(self, input_features, output_features, bias=True, device=None): + super().__init__(input_features, output_features, bias, device) + self.outlier_dim = None + self.is_quantized = False + + def forward_with_outliers(self, x, outlier_idx): + raise NotImplementedError("Please override the `forward_with_outliers(self, x, outlier_idx)` function") + + def quantize_weight(self, w, outlier_idx): + raise NotImplementedError("Please override the `quantize_weights(self, w, outlier_idx)` function") + + def forward(self, x): + if self.outlier_dim is None: + tracer = OutlierTracer.get_instance() + if not tracer.is_initialized(): + print("Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer") + outlier_idx = tracer.get_outliers(self.weight) + # print(outlier_idx, tracer.get_hvalue(self.weight)) + self.outlier_dim = outlier_idx + + if not self.is_quantized: + w = self.quantize_weight(self.weight, self.outlier_dim) + self.weight.data.copy_(w) + self.is_quantized = True + + +class 
SwitchBackLinearBnb(nn.Linear): + def __init__( + self, + input_features, + output_features, + bias=True, + has_fp16_weights=True, + memory_efficient_backward=False, + threshold=0.0, + index=None, + device=None, + ): + super().__init__(input_features, output_features, bias, device) + self.state = bnb.MatmulLtState() + self.index = index + + self.state.threshold = threshold + self.state.has_fp16_weights = has_fp16_weights + self.state.memory_efficient_backward = memory_efficient_backward + if threshold > 0.0 and not has_fp16_weights: + self.state.use_pool = True + + self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) + + def init_8bit_state(self): + self.state.CB = self.weight.CB + self.state.SCB = self.weight.SCB + self.weight.CB = None + self.weight.SCB = None + + def forward(self, x): + self.state.is_training = self.training + + if self.weight.CB is not None: + self.init_8bit_state() + + out = bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..aa849494266596317232256f90f118a4614d02ca --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py @@ -0,0 +1,264 @@ +from functools import partial + +import torch +import torch.nn as nn + +from bitsandbytes.triton.dequantize_rowwise import dequantize_rowwise +from bitsandbytes.triton.int8_matmul_mixed_dequantize import ( + int8_matmul_mixed_dequantize, +) +from bitsandbytes.triton.int8_matmul_rowwise_dequantize import ( + int8_matmul_rowwise_dequantize, +) +from bitsandbytes.triton.quantize_columnwise_and_transpose import ( + quantize_columnwise_and_transpose, +) +from bitsandbytes.triton.quantize_global import ( + quantize_global, + 
quantize_global_transpose, +) +from bitsandbytes.triton.quantize_rowwise import quantize_rowwise +from bitsandbytes.triton.triton_utils import is_triton_available + + +class _switchback_global(torch.autograd.Function): + @staticmethod + def forward(ctx, X_3D, W, bias): + # reshape input to [N * L, D] + X = X_3D.view(-1, X_3D.size(-1)) + + # rowwise quantize for X, global quantize for W + X_int8, state_X = quantize_rowwise(X) + W_int8, state_W = quantize_global(W) + + # save for backward. + ctx.save_for_backward = X, W + + # matmult, fused dequant and add bias + # call "mixed" because we are mixing rowwise quantized and global quantized + return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1) + + @staticmethod + def backward(ctx, G_3D): + # reshape input to [N_out * L, D] + G = G_3D.reshape(-1, G_3D.size(-1)) + + grad_X = grad_W = grad_bias = None + + X, W = ctx.save_for_backward + if ctx.needs_input_grad[0]: + # rowwise quantize for G, global quantize for W + # for W, we also fuse the transpose operation because only A @ B^T is supported + # so we transpose once then call .t() in the matmul + G_int8, state_G = quantize_rowwise(G) + W_int8, state_W = quantize_global_transpose(W) + grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view( + *G_3D.size()[:-1], + -1, + ) + if ctx.needs_input_grad[1]: + # backward pass uses standard weight grad + grad_W = torch.matmul(G.t(), X.to(G.dtype)) + if ctx.needs_input_grad[2]: + grad_bias = G.sum(dim=0) + + return grad_X, grad_W, grad_bias + + +class _switchback_vectorrize(torch.autograd.Function): + @staticmethod + def forward(ctx, X_3D, W, bias): + # reshape input to [N * L, D] + X = X_3D.view(-1, X_3D.size(-1)) + + ctx.save_for_backward = X, W + # rowwise quantize for X + # columnwise quantize for W (first rowwise, transpose later) + X_int8, state_X = quantize_rowwise(X) + W_int8, state_W = quantize_rowwise(W) + + # matmult, fused dequant and 
add bias + # call kernel which expects rowwise quantized X and W + return int8_matmul_rowwise_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1) + + @staticmethod + def backward(ctx, G_3D): + X, W = ctx.save_for_backward + + G = G_3D.reshape(-1, G_3D.size(-1)) + + grad_X = grad_W = grad_bias = None + + if ctx.needs_input_grad[0]: + # rowwise quantize for G, columnwise quantize for W and fused transpose + # we call .t() for weight later because only A @ B^T is supported + G_int8, state_G = quantize_rowwise(G) + W_int8, state_W = quantize_columnwise_and_transpose(W) + grad_X = int8_matmul_rowwise_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view( + *G_3D.size()[:-1], + -1, + ) + if ctx.needs_input_grad[1]: + # backward pass uses standard weight grad + grad_W = torch.matmul(G.t(), X.to(G.dtype)) + if ctx.needs_input_grad[2]: + grad_bias = G.sum(dim=0) + + return grad_X, grad_W, grad_bias + + +class _switchback_global_mem_efficient(torch.autograd.Function): + @staticmethod + def forward(ctx, X_3D, W, bias): + # reshape input to [N * L, D] + X = X_3D.view(-1, X_3D.size(-1)) + X_3D_sz = X_3D.size() + + # rowwise quantize for X, global quantize for W + X_int8, state_X = quantize_rowwise(X) + del X + W_int8, state_W = quantize_global(W) + + # save for backward. 
+ ctx.save_for_backward = X_int8, state_X, W_int8, state_W + + # matmult, fused dequant and add bias + # call "mixed" because we are mixing rowwise quantized and global quantized + return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D_sz[:-1], -1) + + @staticmethod + def backward(ctx, G_3D): + # reshape input to [N_out * L, D] + G = G_3D.reshape(-1, G_3D.size(-1)) + G_3D_sz = G_3D.size() + + grad_X = grad_W = grad_bias = None + + X_int8, state_X, W_int8, state_W = ctx.save_for_backward + if ctx.needs_input_grad[1]: + real_X = dequantize_rowwise(X_int8, state_X) + del X_int8 + grad_W = torch.matmul(G.t(), real_X.to(G.dtype)) + del real_X + if ctx.needs_input_grad[2]: + grad_bias = G.sum(dim=0) + if ctx.needs_input_grad[0]: + G_int8, state_G = quantize_rowwise(G) + del G + W_int8 = W_int8.t().contiguous() + grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(*G_3D_sz[:-1], -1) + + return grad_X, grad_W, grad_bias + + +class SwitchBackLinear(nn.Linear): + def __init__( + self, + in_features: int, + out_features: int, + bias: bool = True, + device=None, + dtype=None, + vector_wise_quantization: bool = False, + mem_efficient: bool = False, + ): + super().__init__(in_features, out_features, bias, device, dtype) + + if not is_triton_available(): + raise ImportError("""Could not import triton. Please install triton to use SwitchBackLinear. + Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower""") + + # By default, we use the global quantization. 
+ self.vector_wise_quantization = vector_wise_quantization + if self.vector_wise_quantization: + self._fn = _switchback_vectorrize + if mem_efficient: + print("mem efficient is not supported for vector-wise quantization.") + exit(1) + else: + if mem_efficient: + self._fn = _switchback_global_mem_efficient + else: + self._fn = _switchback_global + + def prepare_for_eval(self): + # If we just want to do eval, we can pre-quantize the weights instead of doing it on the forward pass. + # Note this is experimental and not tested thoroughly. + # Note this needs to be explicitly called with something like + # def cond_prepare(m): + # if hasattr(m, "prepare_for_eval"): + # m.prepare_for_eval() + # model.apply(cond_prepare) + print("=> preparing for eval.") + if self.vector_wise_quantization: + W_int8, state_W = quantize_rowwise(self.weight) + else: + W_int8, state_W = quantize_global(self.weight) + + self.register_buffer("W_int8", W_int8) + self.register_buffer("state_W", state_W) + + del self.weight + + def forward(self, x): + if self.training: + return self._fn.apply(x, self.weight, self.bias) + else: + # If it hasn't been "prepared for eval", run the standard forward pass. + if not hasattr(self, "W_int8"): + return self._fn.apply(x, self.weight, self.bias) + + # Otherwise, use pre-computed weights. 
+ X = x.view(-1, x.size(-1)) + X_int8, state_X = quantize_rowwise(X) + + if self.vector_wise_quantization: + return int8_matmul_rowwise_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view( + *x.size()[:-1], + -1, + ) + else: + return int8_matmul_mixed_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view( + *x.size()[:-1], + -1, + ) + + +SwitchBackLinearGlobal = partial(SwitchBackLinear, vector_wise_quantization=False) +SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True) +SwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True) + + +# This is just the standard linear function. +class StandardLinearFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, input, weight, bias=None): + X = input.view(-1, input.size(-1)) + + ctx.save_for_backward(X, weight, bias) + output = input.matmul(weight.t()) + if bias is not None: + output += bias.unsqueeze(0).expand_as(output) + return output.view(*input.size()[:-1], -1) + + @staticmethod + def backward(ctx, grad_output_3D): + input, weight, bias = ctx.saved_tensors + + grad_output = grad_output_3D.reshape(-1, grad_output_3D.size(-1)) + + grad_input = grad_weight = grad_bias = None + + if ctx.needs_input_grad[0]: + grad_input = grad_output.matmul(weight.to(grad_output.dtype)).view(*grad_output_3D.size()[:-1], -1) + if ctx.needs_input_grad[1]: + grad_weight = grad_output.t().matmul(input.to(grad_output.dtype)) + if bias is not None and ctx.needs_input_grad[2]: + grad_bias = grad_output.sum(0) + + return grad_input, grad_weight, grad_bias + + +class StandardLinear(nn.Linear): + def forward(self, x): + return StandardLinearFunction.apply(x, self.weight, self.bias) diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..10f85348ca3e2cab5e47b0c832b7e42c7f524c8f Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0927d12305d83cdf0aa38184c6f38ccf6ff2b1a Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..31db4f282d6eff1c60d1b5696a9dc035d232b82b --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__init__.py @@ -0,0 +1,6 @@ +from . 
import nn +from .autograd._functions import ( + matmul_fp8_global, + matmul_fp8_mixed, + switchback_bnb, +) diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f886190cc465c010d4433cb09dc3fbd627db3d66 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c437036b6473eebceb20e6fdcdd60e700f6d31ce Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..333c855ee808df41812a4fd0777fbe4d91320f67 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py 
b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..d9718382ba17e15e45e651d58c5686d60696ae96 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py @@ -0,0 +1,396 @@ +from functools import reduce # Required in Python 3 +import operator +from typing import Optional +import warnings + +import torch + +from bitsandbytes.autograd._functions import GlobalOutlierPooler, MatmulLtState +import bitsandbytes.functional as F + + +# math.prod not compatible with python < 3.8 +def prod(iterable): + return reduce(operator.mul, iterable, 1) + + +class MatMulFP8Mixed(torch.autograd.Function): + # forward is the same, but we added the fallback for pre-turing GPUs + # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") + + @staticmethod + def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024): + # default of pytorch behavior if inputs are empty + ctx.is_empty = False + if prod(A.shape) == 0: + ctx.is_empty = True + ctx.A = A + ctx.B = B + + B_shape = B.shape + if A.shape[-1] == B_shape[0]: + return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device) + else: + return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device) + + # 1. Dequantize + # 2. MatmulnN + cA, state = F.quantize_blockwise(A, code=fw_code, blocksize=bsz) + fp8A = F.dequantize_blockwise(cA, state, blocksize=bsz).to(A.dtype) + + cB, state = F.quantize(B.float(), code=fw_code) + fp8B = F.dequantize(cB, state).to(B.dtype) + + output = torch.matmul(fp8A, fp8B) + + # output is half + + # 3. Save state + ctx.fw_code = fw_code + ctx.bw_code = bw_code + ctx.bsz = bsz + ctx.bsz2 = bsz2 + ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype + + if any(ctx.needs_input_grad[:2]): + # NOTE: we send back A, and re-quant. 
+ ctx.tensors = (A, fp8B) + else: + ctx.tensors = (None, None) + + return output + + @staticmethod + def backward(ctx, grad_output): + if ctx.is_empty: + return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None + + req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad + A, B = ctx.tensors + + grad_A, grad_B = None, None + + # TODO: Fix blocksize to be output_dim + cgrad_out, state = F.quantize_blockwise(grad_output, code=ctx.bw_code, blocksize=ctx.bsz2) + fp8out = F.dequantize_blockwise(cgrad_out, state, blocksize=ctx.bsz2).to(grad_output.dtype) + + # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code) + # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype) + + # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous() + # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector') + # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose + # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2]) + + # not supported by PyTorch. 
TODO: create work-around + if req_gradA: + grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype) + + if req_gradB: + if len(A.shape) == 3: + At = A.transpose(2, 1).contiguous() + else: + At = A.transpose(1, 0).contiguous() + # cA, state = F.quantize(At.float(), code=ctx.fw_code) + # fp8At = F.dequantize(cA, state).to(A.dtype) + grad_B = torch.matmul(At.to(grad_output.dtype), grad_output).to(B.dtype) + + return grad_A, grad_B, None, None, None, None, None + + +class MatMulFP8Global(torch.autograd.Function): + # forward is the same, but we added the fallback for pre-turing GPUs + # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") + + @staticmethod + def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024): + # default of pytorch behavior if inputs are empty + ctx.is_empty = False + if prod(A.shape) == 0: + ctx.is_empty = True + ctx.A = A + ctx.B = B + + B_shape = B.shape + if A.shape[-1] == B_shape[0]: + return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device) + else: + return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device) + + # 1. Dequantize + # 2. MatmulnN + cA, state = F.quantize(A.float(), code=fw_code) + fp8A = F.dequantize(cA, state).to(A.dtype) + + cB, state = F.quantize(B.float(), code=fw_code) + fp8B = F.dequantize(cB, state).to(B.dtype) + + output = torch.matmul(fp8A, fp8B) + + # output is half + + # 3. Save state + ctx.fw_code = fw_code + ctx.bw_code = bw_code + ctx.bsz = bsz + ctx.bsz2 = bsz2 + ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype + + if any(ctx.needs_input_grad[:2]): + # NOTE: we send back A, and re-quant. 
+ ctx.tensors = (A, fp8B) + else: + ctx.tensors = (None, None) + + return output + + @staticmethod + def backward(ctx, grad_output): + if ctx.is_empty: + return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None + + req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad + A, B = ctx.tensors + + grad_A, grad_B = None, None + + # TODO: Fix blocksize to be output_dim + cgrad_out, state = F.quantize(grad_output.float(), code=ctx.bw_code) + fp8out = F.dequantize(cgrad_out, state).to(grad_output.dtype) + + # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code) + # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype) + + # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous() + # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector') + # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose + # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2]) + + # not supported by PyTorch. 
TODO: create work-around + if req_gradA: + grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype) + + if req_gradB: + if len(A.shape) == 3: + At = A.transpose(2, 1).contiguous() + else: + At = A.transpose(1, 0).contiguous() + cA, state = F.quantize(At.float(), code=ctx.fw_code) + fp8At = F.dequantize(cA, state).to(A.dtype) + grad_B = torch.matmul(fp8At.to(fp8out.dtype), fp8out).to(B.dtype) + + return grad_A, grad_B, None, None, None, None, None + + +class SwitchBackBnb(torch.autograd.Function): + @staticmethod + def forward(ctx, A, B, out=None, bias=None, state: Optional[MatmulLtState] = None): + state = state or MatmulLtState() + + # default to pytorch behavior if inputs are empty + ctx.is_empty = False + if prod(A.shape) == 0: + ctx.is_empty = True + ctx.A = A + ctx.B = B + ctx.bias = bias + if A.shape[-1] == B.shape[0]: + return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device) + else: + return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device) + + # 1. Quantize A + # 2. Quantize B + # 3. Matmul + # 4. Mixed-precision decomposition matmul + # 5. Save state + input_shape = A.shape + if state.outlier_pool is None: + state.outlier_pool = GlobalOutlierPooler.get_instance() + + # Cast A to fp16 + if A.dtype != torch.float16: + warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization") + + # 1. Quantize A + if len(A.shape) == 3: + A = A.view(-1, A.shape[-1]).contiguous() + CA, CAt, SCA, SCAt, outlier_cols = F.int8_double_quant(A.to(torch.float16), threshold=state.threshold) + + if state.threshold > 0.0 and outlier_cols is not None: + if state.has_fp16_weights: + idx = outlier_cols + CA[:, idx] = 0 + subA = A[:, idx] + state.subB = B[:, idx].t().contiguous() + state.idx = idx + else: + if state.SB is None: + state.SB = (state.CB.shape, "row") + else: + if not state.has_fp16_weights and state.SB is None: + state.SB = (state.CB.shape, "row") + subA = None + + # 2. 
Quantize B + if state.has_fp16_weights: + # print('B shape', B.shape) + has_grad = True if (getattr(B, "grad", None) is not None) else False + is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1) + if is_transposed: + B = B.contiguous() + + if (state.is_training and not has_grad) or state.SB is None: + state.reset_grads() + ( + state.CB, + state.CBt, + state.SCB, + state.SCBt, + _, + ) = F.int8_double_quant(B.to(torch.float16)) + state.SB = (state.CB.shape, "row") + else: + has_grad = False + + if outlier_cols is not None and not state.has_fp16_weights: + # extract outliers + state.idx = outlier_cols + outliers = state.CB[:, state.idx.long()].clone() + state.subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype) + CA[:, state.idx.long()] = 0 + + subA = A[:, state.idx.long()] + + shapeB = state.SB[0] + + if len(input_shape) == 3: + output_shape = (input_shape[0], input_shape[1], shapeB[0]) + else: + output_shape = (input_shape[0], shapeB[0]) + + # 3. Matmul + out32 = F.int8_linear_matmul(CA, state.CB) + # we apply the fused bias here + + if bias is None or bias.dtype == torch.float16: + output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=bias).to(A.dtype) + else: # apply bias separately + output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=None).to(A.dtype) + output.add_(bias) + + # 4. Mixed-precision decomposition matmul + if outlier_cols is not None and subA is not None: + output += torch.matmul(subA, state.subB) + + # 5. 
Save state + ctx.state = state + + ctx.grad_shape = input_shape + ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype + + if any(ctx.needs_input_grad[:2]): + ctx.tensors = (CAt, subA, A) + ctx.tensor_states = (SCAt, state.idx) + else: + ctx.tensors = [None, None, None] + ctx.tensor_states = (None, None) + ctx.save_for_backward(None, None) + + clone_func = torch.clone if len(output_shape) == 3 else lambda x: x + return clone_func(output.view(output_shape)) + + @staticmethod + def backward(ctx, grad_output): + if ctx.is_empty: + bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias) + return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None + + req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad + CAt, subA, A = ctx.tensors + SCAt, idx = ctx.tensor_states + state = ctx.state + grad_A = grad_B = grad_bias = None + + if req_gradBias: + # compute grad_bias first before changing grad_output dtype + grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias) + + # Cast grad_output to fp16 + if len(grad_output.shape) == 3: + grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous() + + Cgrad, Cgradt, SCgrad, SCgradt, outlier_cols = F.int8_double_quant(grad_output.to(torch.float16)) + + if req_gradB: + # print('back A shape', A.shape) + # print('grad output t shape', grad_output.t().shape) + grad_B = torch.matmul(grad_output.t(), A) + + if req_gradA: + if state.CB is not None: + CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0)) + grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A) + else: + raise Exception("State must contain either CBt or CB matrix for backward") + + return grad_A, grad_B, None, grad_bias, None + + +def get_block_sizes(input_matrix, weight_matrix): + input_features = input_matrix.shape[-1] + output_features = weight_matrix.shape[0] if weight_matrix.shape[1] == input_features else 
weight_matrix.shape[1] + array = [4096, 2048, 1024, 512, 256, 128, 64, 0] + bsz, bsz2 = 1024, 1024 + for i, k in enumerate(array): + if input_features > array[i + 1]: + bsz = k + break + for i, k in enumerate(array): + if output_features > array[i + 1]: + bsz2 = k + break + + return bsz, bsz2 + + +def matmul_fp8_global( + A: torch.Tensor, + B: torch.Tensor, + fw_code: torch.Tensor, + bw_code: torch.Tensor, + out: Optional[torch.Tensor] = None, + bsz: int = -1, + bsz2: int = -1, +): + if bsz == -1 or bsz2 == -1: + bsz, bsz2 = get_block_sizes(A, B) + return MatMulFP8Global.apply(A, B, out, fw_code, bw_code, bsz, bsz2) + + +def matmul_fp8_mixed( + A: torch.Tensor, + B: torch.Tensor, + fw_code: torch.Tensor, + bw_code: torch.Tensor, + out: Optional[torch.Tensor] = None, + bsz: int = -1, + bsz2: int = -1, +): + if bsz == -1 or bsz2 == -1: + bsz, bsz2 = get_block_sizes(A, B) + return MatMulFP8Mixed.apply(A, B, out, fw_code, bw_code, bsz, bsz2) + + +def switchback_bnb( + A: torch.Tensor, + B: torch.Tensor, + out: Optional[torch.Tensor] = None, + state: Optional[MatmulLtState] = None, + threshold=0.0, + bias=None, +): + state = state or MatmulLtState() + if threshold > 0.0: + state.threshold = threshold + return SwitchBackBnb.apply(A, B, out, bias, state) diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4170112184deb9958d9846230a0e9a664c3f652b --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py @@ -0,0 +1 @@ +from .modules import LinearFP8Global, LinearFP8Mixed diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d6275d6e6c6e377254003fe150c6d9a0e002ff81 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27125ad0125d8da2653cf33ac3d0c1b11472fabb Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..57c0f33584a48b55158bdb03e085b131f1805a08 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py @@ -0,0 +1,76 @@ +from typing import TypeVar + +import torch +from torch import nn + +import bitsandbytes as bnb + +T = TypeVar("T", bound="torch.nn.Module") + + +class LinearFP8Mixed(nn.Linear): + def __init__(self, input_features, output_features, bias=True): + super().__init__(input_features, output_features, bias) + self.bw_code = None + self.fw_code = None + array = [4096, 2048, 1024, 512, 256, 128, 64, 0] + for i, k in enumerate(array): + if input_features > array[i + 1]: + self.bsz = k + break + for i, k in enumerate(array): + if output_features > array[i + 1]: + self.bsz2 = k + break + + def forward(self, x: torch.Tensor): + if self.fw_code is None: + self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device) + self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device) + + out = bnb.research.matmul_fp8_mixed( + x, + self.weight.t(), + fw_code=self.fw_code, + 
bw_code=self.bw_code, + bsz=self.bsz, + bsz2=self.bsz2, + ) + if self.bias is not None: + out += self.bias + + return out + + +class LinearFP8Global(nn.Linear): + def __init__(self, input_features, output_features, bias=True): + super().__init__(input_features, output_features, bias) + self.bw_code = None + self.fw_code = None + array = [4096, 2048, 1024, 512, 256, 128, 64, 0] + for i, k in enumerate(array): + if input_features > array[i + 1]: + self.bsz = k + break + for i, k in enumerate(array): + if output_features > array[i + 1]: + self.bsz2 = k + break + + def forward(self, x: torch.Tensor): + if self.fw_code is None: + self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device) + self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device) + + out = bnb.matmul_fp8_global( + x, + self.weight.t(), + fw_code=self.fw_code, + bw_code=self.bw_code, + bsz=self.bsz, + bsz2=self.bsz2, + ) + if self.bias is not None: + out += self.bias + + return out diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..203e3c365f1ca63199d4d791d65d3a5b478ec54e Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..bfa2385f692af4a6dcbddfbb7f1e361d0067fa14 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequantize.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e745be59daf7e90bc0e0877bea37e9789e3f8ba Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequantize.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a15e716a18af3a56dee4034824a4c29c9134cfd6 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6ac506ae4fe03bac882e8d62e33edaf344bc2bd Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc 
b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe33d9841a465aa852627c8e8b6dcfbc757cc7a3 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..317bb64729d9e31849b779419550711d2a292e64 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fc1a1f35aeb8d1319d9e2dc3c82d164ca4b74b2 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py new file mode 100644 index 0000000000000000000000000000000000000000..26eab84f210519a9e3b1066e3aca83930d8722a8 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py @@ -0,0 +1,64 @@ +import math + +import torch + +from bitsandbytes.triton.triton_utils import is_triton_available + +if not is_triton_available(): + + def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor): + return None +else: + import triton + import triton.language as 
tl + + # rowwise quantize + + # TODO: autotune this better. + @triton.autotune( + configs=[ + triton.Config({}, num_stages=1, num_warps=8), + triton.Config({}, num_stages=2, num_warps=8), + triton.Config({}, num_stages=4, num_warps=8), + triton.Config({}, num_stages=8, num_warps=8), + triton.Config({}, num_stages=1), + triton.Config({}, num_stages=2), + triton.Config({}, num_stages=4), + triton.Config({}, num_stages=8), + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + ], + key=["n_elements"], + ) + @triton.jit + def _dequantize_rowwise( + x_ptr, + state_x, + output_ptr, + inv_127, + n_elements, + BLOCK_SIZE: tl.constexpr, + P2: tl.constexpr, + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + arange = tl.arange(0, P2) + offsets = block_start + arange + row_mask = arange < BLOCK_SIZE + x = tl.load(x_ptr + offsets, mask=row_mask) + max_val = tl.load(state_x + pid) + output = max_val * x * inv_127 + tl.store(output_ptr + offsets, output, mask=row_mask) + + def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor): + output = torch.empty(*x.shape, device=x.device, dtype=torch.float16) + + P2 = int(2 ** (math.ceil(math.log2(x.shape[1])))) + + assert x.is_cuda and output.is_cuda + n_elements = output.numel() + grid = lambda meta: (x.shape[0],) + _dequantize_rowwise[grid](x, state_x, output, 1.0 / 127, n_elements, BLOCK_SIZE=x.shape[1], P2=P2) + return output diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequantize.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequantize.py new file mode 100644 index 0000000000000000000000000000000000000000..583371d915c5a30879f41475f5ac8558a8a9040d --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequantize.py @@ -0,0 +1,205 @@ +import torch + +from bitsandbytes.triton.triton_utils import 
is_triton_available + +if not is_triton_available(): + + def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias): + return None +else: + import triton + import triton.language as tl + from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time + + # This is a matmul kernel based on triton.ops.matmul + # It is modified to support rowwise quantized input and global quantized weight + # It's purpose is fused matmul then dequantize + # It does support bias. + + def init_to_zero(name): + return lambda nargs: nargs[name].zero_() + + def get_configs_io_bound(): + configs = [] + for num_stages in [2, 3, 4, 5, 6]: + for block_m in [16, 32]: + for block_k in [32, 64]: + for block_n in [32, 64, 128, 256]: + num_warps = 2 if block_n <= 64 else 4 + configs.append( + triton.Config( + {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k, "SPLIT_K": 1}, + num_stages=num_stages, + num_warps=num_warps, + ), + ) + # split_k + for split_k in [2, 4, 8, 16]: + configs.append( + triton.Config( + {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k, "SPLIT_K": split_k}, + num_stages=num_stages, + num_warps=num_warps, + pre_hook=init_to_zero("C"), + ), + ) + return configs + + @triton.autotune( + configs=[ + # basic configs for compute-bound matmuls + triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, 
"SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2), + # good for int8 + triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2), + *get_configs_io_bound(), + ], + key=["M", "N", "K"], + prune_configs_by={"early_config_prune": early_config_prune, "perf_model": estimate_matmul_time, "top_k": 10}, + ) + @triton.heuristics( + { + "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0, + }, + ) + @triton.jit + def _int8_matmul_mixed_dequantize( + A, + B, + C, + bias, + state_x_ptr, + state_w_ptr, + M, + N, + K, + divfactor: tl.constexpr, + has_bias: tl.constexpr, + stride_am, + stride_ak, + stride_bk, + stride_bn, + stride_cm, + stride_cn, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLOCK_K: tl.constexpr, + GROUP_M: tl.constexpr, + SPLIT_K: tl.constexpr, + EVEN_K: tl.constexpr, + ACC_TYPE: tl.constexpr, + ): + # matrix multiplication + pid = tl.program_id(0) 
+ pid_z = tl.program_id(1) + grid_m = tl.cdiv(M, BLOCK_M) + grid_n = tl.cdiv(N, BLOCK_N) + # re-order program ID for better L2 performance + width = GROUP_M * grid_n + group_id = pid // width + group_size = min(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // (group_size) + # do matrix multiplication + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) + # pointers + A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) + B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) + + # rematerialize rm and rn to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + + w_factor = tl.load(state_w_ptr) + x_factor = tl.load(state_x_ptr + ram)[:, None] + + # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE) + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32) + for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)): + if EVEN_K: + a = tl.load(A) + b = tl.load(B) + else: + k_remaining = K - k * (BLOCK_K * SPLIT_K) + a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0) + b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0) + acc += tl.dot(a, b) + A += BLOCK_K * SPLIT_K * stride_ak + B += BLOCK_K * SPLIT_K * stride_bk + + acc = w_factor * (x_factor * (acc * divfactor)) + acc = acc.to(C.dtype.element_ty) + + # conditionally add bias + if has_bias: + bias = tl.load(bias + rn).to(C.dtype.element_ty) + acc = acc + bias[None, :] + + C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) + mask = (rm < M)[:, None] & (rn < N)[None, :] + # handles write-back with reduction-splitting + if SPLIT_K == 1: + tl.store(C, acc, mask=mask) + else: + tl.atomic_add(C, acc, mask=mask) + + def int8_matmul_mixed_dequantize(a, b, 
state_x, state_w, bias): + device = a.device + divfactor = 1.0 / (127.0 * 127.0) + has_bias = 0 if bias is None else 1 + # handle non-contiguous inputs if necessary + if a.stride(0) > 1 and a.stride(1) > 1: + a = a.contiguous() + if b.stride(0) > 1 and b.stride(1) > 1: + b = b.contiguous() + # checks constraints + assert a.shape[1] == b.shape[0], "incompatible dimensions" + M, K = a.shape + _, N = b.shape + # allocates output + c = torch.empty((M, N), device=device, dtype=torch.float16) + # accumulator types + ACC_TYPE = tl.float32 # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32 + # launch int8_matmul_mixed_dequantize kernel + grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]), META["SPLIT_K"]) + _int8_matmul_mixed_dequantize[grid]( + a, + b, + c, + bias, + state_x, + state_w, + M, + N, + K, + divfactor, + has_bias, + a.stride(0), + a.stride(1), + b.stride(0), + b.stride(1), + c.stride(0), + c.stride(1), + GROUP_M=8, + ACC_TYPE=ACC_TYPE, + ) + return c diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py new file mode 100644 index 0000000000000000000000000000000000000000..e3d192dedd9ab62e7689cbb99fb55ba1bb075843 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py @@ -0,0 +1,206 @@ +import torch + +from bitsandbytes.triton.triton_utils import is_triton_available + +if not is_triton_available(): + + def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias): + return None +else: + import triton + import triton.language as tl + from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time + + # This is a matmul kernel based on triton.ops.matmul + # It is modified to support rowwise quantized input and columnwise quantized weight + # It's purpose is fused matmul 
then dequantize + # It does support bias. + + def init_to_zero(name): + return lambda nargs: nargs[name].zero_() + + def get_configs_io_bound(): + configs = [] + for num_stages in [2, 3, 4, 5, 6]: + for block_m in [16, 32]: + for block_k in [32, 64]: + for block_n in [32, 64, 128, 256]: + num_warps = 2 if block_n <= 64 else 4 + configs.append( + triton.Config( + {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k, "SPLIT_K": 1}, + num_stages=num_stages, + num_warps=num_warps, + ), + ) + # split_k + for split_k in [2, 4, 8, 16]: + configs.append( + triton.Config( + {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k, "SPLIT_K": split_k}, + num_stages=num_stages, + num_warps=num_warps, + pre_hook=init_to_zero("C"), + ), + ) + return configs + + @triton.autotune( + configs=[ + # basic configs for compute-bound matmuls + triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2), + # good for int8 + triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, 
"SPLIT_K": 1}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2), + *get_configs_io_bound(), + ], + key=["M", "N", "K"], + prune_configs_by={"early_config_prune": early_config_prune, "perf_model": estimate_matmul_time, "top_k": 10}, + ) + @triton.heuristics( + { + "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0, + }, + ) + @triton.jit + def _int8_matmul_rowwise_dequantize( + A, + B, + C, + bias, + state_x_ptr, + state_w_ptr, + M, + N, + K, + divfactor, + has_bias: tl.constexpr, + stride_am, + stride_ak, + stride_bk, + stride_bn, + stride_cm, + stride_cn, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLOCK_K: tl.constexpr, + GROUP_M: tl.constexpr, + SPLIT_K: tl.constexpr, + EVEN_K: tl.constexpr, + ACC_TYPE: tl.constexpr, + ): + # matrix multiplication + pid = tl.program_id(0) + pid_z = tl.program_id(1) + grid_m = tl.cdiv(M, BLOCK_M) + grid_n = tl.cdiv(N, BLOCK_N) + # re-order program ID for better L2 performance + width = GROUP_M * grid_n + group_id = pid // width + group_size = min(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // (group_size) + # do matrix multiplication + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, 
BLOCK_N) + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) + # pointers + A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) + B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) + + # rematerialize rm and rn to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + + w_factor = tl.load(state_w_ptr + rbn)[None, :] + x_factor = tl.load(state_x_ptr + ram)[:, None] + + # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE) + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32) + for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)): + if EVEN_K: + a = tl.load(A) + b = tl.load(B) + else: + k_remaining = K - k * (BLOCK_K * SPLIT_K) + a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0) + b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0) + acc += tl.dot(a, b) + A += BLOCK_K * SPLIT_K * stride_ak + B += BLOCK_K * SPLIT_K * stride_bk + + acc = w_factor * (x_factor * (acc * divfactor)) + acc = acc.to(C.dtype.element_ty) + + if has_bias: + bias = tl.load(bias + rn).to(C.dtype.element_ty) + acc = acc + bias[None, :] + + C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) + mask = (rm < M)[:, None] & (rn < N)[None, :] + # handles write-back with reduction-splitting + if SPLIT_K == 1: + tl.store(C, acc, mask=mask) + else: + tl.atomic_add(C, acc, mask=mask) + + def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias): + divfactor = 1.0 / (127.0 * 127.0) + + has_bias = 0 if bias is None else 1 + + device = a.device + # handle non-contiguous inputs if necessary + if a.stride(0) > 1 and a.stride(1) > 1: + a = a.contiguous() + if b.stride(0) > 1 and b.stride(1) > 1: + b = b.contiguous() + # checks constraints + assert a.shape[1] == b.shape[0], "incompatible dimensions" + M, K = a.shape + _, N = b.shape + # allocates output + c = torch.empty((M, N), 
device=device, dtype=torch.float16) + # accumulator types + ACC_TYPE = tl.float32 # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32 + # launch int8_matmul_rowwise_dequantize kernel + grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]), META["SPLIT_K"]) + _int8_matmul_rowwise_dequantize[grid]( + a, + b, + c, + bias, + state_x, + state_w, + M, + N, + K, + divfactor, + has_bias, + a.stride(0), + a.stride(1), + b.stride(0), + b.stride(1), + c.stride(0), + c.stride(1), + GROUP_M=8, + ACC_TYPE=ACC_TYPE, + ) + return c diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py new file mode 100644 index 0000000000000000000000000000000000000000..b8eeffd0c6a67502b30e2a213845a9762f2106ee --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py @@ -0,0 +1,75 @@ +import math + +import torch + +from bitsandbytes.triton.triton_utils import is_triton_available + +if not is_triton_available(): + + def quantize_columnwise_and_transpose(x: torch.Tensor): + return None +else: + import triton + import triton.language as tl + + # This kernel does fused columnwise quantization and transpose. + + # TODO: autotune this better. 
+ @triton.autotune( + configs=[ + triton.Config({}, num_stages=1), + triton.Config({}, num_stages=2), + triton.Config({}, num_stages=4), + triton.Config({}, num_stages=8), + triton.Config({}, num_stages=16), + triton.Config({}, num_stages=1, num_warps=8), + triton.Config({}, num_stages=2, num_warps=8), + triton.Config({}, num_stages=4, num_warps=8), + triton.Config({}, num_stages=8, num_warps=8), + triton.Config({}, num_stages=16, num_warps=8), + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + ], + key=["n_elements"], + ) + @triton.jit + def _quantize_columnwise_and_transpose( + x_ptr, + output_ptr, + output_maxs, + n_elements, + M: tl.constexpr, + N: tl.constexpr, + BLOCK_SIZE: tl.constexpr, + P2: tl.constexpr, + ): + pid = tl.program_id(axis=0) + block_start = pid + p2_arange = tl.arange(0, P2) + p2_arange_mask = p2_arange < M + arange = p2_arange * N + offsets = block_start + arange + x = tl.load(x_ptr + offsets, mask=p2_arange_mask) + abs_x = tl.abs(x) + max_val = tl.max(tl.where(p2_arange_mask, abs_x, 0), axis=0) + output = tl.libdevice.llrint(127.0 * (x / max_val)) + + new_start = pid * M + new_offsets = new_start + p2_arange + tl.store(output_ptr + new_offsets, output, mask=p2_arange_mask) + tl.store(output_maxs + pid, max_val) + + def quantize_columnwise_and_transpose(x: torch.Tensor): + M, N = x.shape + output = torch.empty(N, M, device=x.device, dtype=torch.int8) + output_maxs = torch.empty(x.shape[1], device=x.device, dtype=torch.float16) + + P2 = int(2 ** (math.ceil(math.log2(M)))) + + assert x.is_cuda and output.is_cuda + n_elements = output.numel() + grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) + _quantize_columnwise_and_transpose[grid](x, output, output_maxs, n_elements, M, N, BLOCK_SIZE=M, P2=P2) + return output, output_maxs diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py 
b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py new file mode 100644 index 0000000000000000000000000000000000000000..f35bdd3040c575ea5cf171b2c6342c61c9740f12 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py @@ -0,0 +1,124 @@ +import torch + +from bitsandbytes.triton.triton_utils import is_triton_available + +if not is_triton_available(): + + def quantize_global_transpose(input): + return None + + def quantize_global(x: torch.Tensor): + return None +else: + import triton + import triton.language as tl + + # global quantize + @triton.autotune( + configs=[ + triton.Config({"BLOCK_SIZE": 1024}, num_warps=4), + triton.Config({"BLOCK_SIZE": 2048}, num_stages=1), + ], + key=["n_elements"], + ) + @triton.jit + def _quantize_global( + x_ptr, + absmax_inv_ptr, + output_ptr, + n_elements, + BLOCK_SIZE: tl.constexpr, + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(x_ptr + offsets, mask=mask) + absmax_inv = tl.load(absmax_inv_ptr) + output = tl.libdevice.llrint(127.0 * (x * absmax_inv)) + tl.store(output_ptr + offsets, output, mask=mask) + + def quantize_global(x: torch.Tensor): + absmax = x.abs().max().unsqueeze(0) + absmax_inv = 1.0 / absmax + output = torch.empty(*x.shape, device="cuda", dtype=torch.int8) + assert x.is_cuda and output.is_cuda + n_elements = output.numel() + grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) + _quantize_global[grid](x, absmax_inv, output, n_elements) + return output, absmax + + # global quantize and transpose + @triton.autotune( + configs=[ + triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "GROUP_M": 8}, num_warps=4), + triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "GROUP_M": 8}, num_warps=4), + # ... 
+ ], + key=["M", "N"], + ) + @triton.jit + def _quantize_global_transpose( + A, + absmax_inv_ptr, + B, + stride_am, + stride_an, + stride_bn, + stride_bm, + M, + N, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + GROUP_M: tl.constexpr, + ): + pid = tl.program_id(0) + grid_m = (M + BLOCK_M - 1) // BLOCK_M + grid_n = (N + BLOCK_N - 1) // BLOCK_N + + width = GROUP_M * grid_n + group_id = pid // width + group_size = min(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // group_size + + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + A = A + (rm[:, None] * stride_am + rn[None, :] * stride_an) + mask = (rm < M)[:, None] & (rn < N)[None, :] + a = tl.load(A, mask=mask) + absmax_inv = tl.load(absmax_inv_ptr) + + # rematerialize to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + B = B + (rm[:, None] * stride_bm + rn[None, :] * stride_bn) + mask = (rm < M)[:, None] & (rn < N)[None, :] + + output = tl.libdevice.llrint(127.0 * (a * absmax_inv)) + + tl.store(B, output, mask=mask) + + def quantize_global_transpose(input): + absmax = input.abs().max().unsqueeze(0) + absmax_inv = 1.0 / absmax + M, N = input.shape + out = torch.empty(N, M, device="cuda", dtype=torch.int8) + + assert out.size(0) == N and out.size(1) == M + assert input.stride(0) == 1 or input.stride(1) == 1 + assert out.stride(0) == 1 or out.stride(1) == 1 + + grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) + _quantize_global_transpose[grid]( + input, + absmax_inv, + out, + input.stride(0), + input.stride(1), + out.stride(0), + out.stride(1), + M, + N, + ) + return out, absmax diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_rowwise.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_rowwise.py new file mode 100644 index 
0000000000000000000000000000000000000000..f92ace02c2d4c7c452eb1e935f13e8e0098c88e3 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_rowwise.py @@ -0,0 +1,67 @@ +import math + +import torch + +from bitsandbytes.triton.triton_utils import is_triton_available + +if not is_triton_available(): + + def quantize_rowwise(x: torch.Tensor): + return None +else: + import triton + import triton.language as tl + + # rowwise quantize + + # TODO: autotune this better. + @triton.autotune( + configs=[ + triton.Config({}, num_stages=1, num_warps=8), + triton.Config({}, num_stages=2, num_warps=8), + triton.Config({}, num_stages=4, num_warps=8), + triton.Config({}, num_stages=8, num_warps=8), + triton.Config({}, num_stages=1), + triton.Config({}, num_stages=2), + triton.Config({}, num_stages=4), + triton.Config({}, num_stages=8), + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + ], + key=["n_elements"], + ) + @triton.jit + def _quantize_rowwise( + x_ptr, + output_ptr, + output_maxs, + n_elements, + BLOCK_SIZE: tl.constexpr, + P2: tl.constexpr, + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + arange = tl.arange(0, P2) + offsets = block_start + arange + row_mask = arange < BLOCK_SIZE + x = tl.load(x_ptr + offsets, mask=row_mask) + + abs_x = tl.abs(x) + max_val = tl.max(tl.where(row_mask, abs_x, 0), axis=0) + output = tl.libdevice.llrint(127.0 * (x / max_val)) + tl.store(output_ptr + offsets, output, mask=row_mask) + tl.store(output_maxs + pid, max_val) + + def quantize_rowwise(x: torch.Tensor): + output = torch.empty(*x.shape, device=x.device, dtype=torch.int8) + output_maxs = torch.empty(x.shape[0], device=x.device, dtype=torch.float16) + + P2 = int(2 ** (math.ceil(math.log2(x.shape[1])))) + + assert x.is_cuda and output.is_cuda + n_elements = output.numel() + grid = lambda meta: (x.shape[0],) + _quantize_rowwise[grid](x, output, 
output_maxs, n_elements, BLOCK_SIZE=x.shape[1], P2=P2) + return output, output_maxs diff --git a/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/triton_utils.py b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/triton_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6bbdbf1c1f907448f5e4984980e2be660a901545 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/triton_utils.py @@ -0,0 +1,5 @@ +import importlib + + +def is_triton_available(): + return importlib.util.find_spec("triton") is not None diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..726c76cb2449bf6d0c16df95a92a2395d65d727e --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__init__.py @@ -0,0 +1,326 @@ +# module pyparsing.py +# +# Copyright (c) 2003-2022 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +__doc__ = """ +pyparsing module - Classes and methods to define and execute parsing grammars +============================================================================= + +The pyparsing module is an alternative approach to creating and +executing simple grammars, vs. the traditional lex/yacc approach, or the +use of regular expressions. With pyparsing, you don't need to learn +a new syntax for defining grammars or matching expressions - the parsing +module provides a library of classes that you use to construct the +grammar directly in Python. + +Here is a program to parse "Hello, World!" (or any greeting of the form +``", !"``), built up using :class:`Word`, +:class:`Literal`, and :class:`And` elements +(the :meth:`'+'` operators create :class:`And` expressions, +and the strings are auto-converted to :class:`Literal` expressions):: + + from pyparsing import Word, alphas + + # define grammar of a greeting + greet = Word(alphas) + "," + Word(alphas) + "!" + + hello = "Hello, World!" + print(hello, "->", greet.parse_string(hello)) + +The program outputs the following:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + +The Python representation of the grammar is quite readable, owing to the +self-explanatory class names, and the use of :class:`'+'`, +:class:`'|'`, :class:`'^'` and :class:`'&'` operators. + +The :class:`ParseResults` object returned from +:class:`ParserElement.parse_string` can be +accessed as a nested list, a dictionary, or an object with named +attributes. + +The pyparsing module handles some of the problems that are typically +vexing when writing text parsers: + + - extra or missing whitespace (the above program will also handle + "Hello,World!", "Hello , World !", etc.) 
+ - quoted strings + - embedded comments + + +Getting Started - +----------------- +Visit the classes :class:`ParserElement` and :class:`ParseResults` to +see the base classes that most other pyparsing +classes inherit from. Use the docstrings for examples of how to: + + - construct literal match expressions from :class:`Literal` and + :class:`CaselessLiteral` classes + - construct character word-group expressions using the :class:`Word` + class + - see how to create repetitive expressions using :class:`ZeroOrMore` + and :class:`OneOrMore` classes + - use :class:`'+'`, :class:`'|'`, :class:`'^'`, + and :class:`'&'` operators to combine simple expressions into + more complex ones + - associate names with your parsed results using + :class:`ParserElement.set_results_name` + - access the parsed data, which is returned as a :class:`ParseResults` + object + - find some helpful expression short-cuts like :class:`DelimitedList` + and :class:`one_of` + - find more useful common expressions in the :class:`pyparsing_common` + namespace class +""" +from typing import NamedTuple + + +class version_info(NamedTuple): + major: int + minor: int + micro: int + releaselevel: str + serial: int + + @property + def __version__(self): + return ( + f"{self.major}.{self.minor}.{self.micro}" + + ( + f"{'r' if self.releaselevel[0] == 'c' else ''}{self.releaselevel[0]}{self.serial}", + "", + )[self.releaselevel == "final"] + ) + + def __str__(self): + return f"{__name__} {self.__version__} / {__version_time__}" + + def __repr__(self): + return f"{__name__}.{type(self).__name__}({', '.join('{}={!r}'.format(*nv) for nv in zip(self._fields, self))})" + + +__version_info__ = version_info(3, 2, 1, "final", 1) +__version_time__ = "31 Dec 2024 20:41 UTC" +__version__ = __version_info__.__version__ +__versionTime__ = __version_time__ +__author__ = "Paul McGuire " + +from .util import * +from .exceptions import * +from .actions import * +from .core import __diag__, __compat__ +from .results import * 
+from .core import * +from .core import _builtin_exprs as core_builtin_exprs +from .helpers import * +from .helpers import _builtin_exprs as helper_builtin_exprs + +from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode +from .testing import pyparsing_test as testing +from .common import ( + pyparsing_common as common, + _builtin_exprs as common_builtin_exprs, +) + +# Compatibility synonyms +if "pyparsing_unicode" not in globals(): + pyparsing_unicode = unicode # type: ignore[misc] +if "pyparsing_common" not in globals(): + pyparsing_common = common +if "pyparsing_test" not in globals(): + pyparsing_test = testing + +core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs + + +__all__ = [ + "__version__", + "__version_time__", + "__author__", + "__compat__", + "__diag__", + "And", + "AtLineStart", + "AtStringStart", + "CaselessKeyword", + "CaselessLiteral", + "CharsNotIn", + "CloseMatch", + "Combine", + "DelimitedList", + "Dict", + "Each", + "Empty", + "FollowedBy", + "Forward", + "GoToColumn", + "Group", + "IndentedBlock", + "Keyword", + "LineEnd", + "LineStart", + "Literal", + "Located", + "PrecededBy", + "MatchFirst", + "NoMatch", + "NotAny", + "OneOrMore", + "OnlyOnce", + "OpAssoc", + "Opt", + "Optional", + "Or", + "ParseBaseException", + "ParseElementEnhance", + "ParseException", + "ParseExpression", + "ParseFatalException", + "ParseResults", + "ParseSyntaxException", + "ParserElement", + "PositionToken", + "QuotedString", + "RecursiveGrammarException", + "Regex", + "SkipTo", + "StringEnd", + "StringStart", + "Suppress", + "Tag", + "Token", + "TokenConverter", + "White", + "Word", + "WordEnd", + "WordStart", + "ZeroOrMore", + "Char", + "alphanums", + "alphas", + "alphas8bit", + "any_close_tag", + "any_open_tag", + "autoname_elements", + "c_style_comment", + "col", + "common_html_entity", + "condition_as_parse_action", + "counted_array", + "cpp_style_comment", + "dbl_quoted_string", + "dbl_slash_comment", + "delimited_list", 
+ "dict_of", + "empty", + "hexnums", + "html_comment", + "identchars", + "identbodychars", + "infix_notation", + "java_style_comment", + "line", + "line_end", + "line_start", + "lineno", + "make_html_tags", + "make_xml_tags", + "match_only_at_col", + "match_previous_expr", + "match_previous_literal", + "nested_expr", + "null_debug_action", + "nums", + "one_of", + "original_text_for", + "printables", + "punc8bit", + "pyparsing_common", + "pyparsing_test", + "pyparsing_unicode", + "python_style_comment", + "quoted_string", + "remove_quotes", + "replace_with", + "replace_html_entity", + "rest_of_line", + "sgl_quoted_string", + "srange", + "string_end", + "string_start", + "token_map", + "trace_parse_action", + "ungroup", + "unicode_set", + "unicode_string", + "with_attribute", + "with_class", + # pre-PEP8 compatibility names + "__versionTime__", + "anyCloseTag", + "anyOpenTag", + "cStyleComment", + "commonHTMLEntity", + "conditionAsParseAction", + "countedArray", + "cppStyleComment", + "dblQuotedString", + "dblSlashComment", + "delimitedList", + "dictOf", + "htmlComment", + "indentedBlock", + "infixNotation", + "javaStyleComment", + "lineEnd", + "lineStart", + "locatedExpr", + "makeHTMLTags", + "makeXMLTags", + "matchOnlyAtCol", + "matchPreviousExpr", + "matchPreviousLiteral", + "nestedExpr", + "nullDebugAction", + "oneOf", + "opAssoc", + "originalTextFor", + "pythonStyleComment", + "quotedString", + "removeQuotes", + "replaceHTMLEntity", + "replaceWith", + "restOfLine", + "sglQuotedString", + "stringEnd", + "stringStart", + "tokenMap", + "traceParseAction", + "unicodeString", + "withAttribute", + "withClass", + "common", + "unicode", + "testing", +] diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73fd22a5138dce590cbab2cc32d284d4e3aed561 Binary files 
/dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/actions.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/actions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..788abbeaafb0b1bef84a1a0ef9ad3e3ef09791d6 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/actions.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/common.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62623415cde25689901d816e1f338973dd7f3fd3 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/common.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/exceptions.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeb69bbac6df8b94b5180c682e7d437024755fa7 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/exceptions.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/helpers.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48f4cdf4af40bce71d9564ff31554a404be4feff Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/helpers.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/testing.cpython-310.pyc 
b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c78277ed241f34fab2ff5794519d60d98a24b1e Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/testing.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/unicode.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/unicode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffca1ca2c5f46d0a3e2f7113e7d2eb25c9482be9 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/unicode.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/util.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e147af1f29209d6ff96ab9c7bb462913a2af11d Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/__pycache__/util.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/actions.py b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/actions.py new file mode 100644 index 0000000000000000000000000000000000000000..f491aab986e92c2b9baad972fa75272eae77d333 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/actions.py @@ -0,0 +1,219 @@ +# actions.py +from __future__ import annotations + +from typing import Union, Callable, Any + +from .exceptions import ParseException +from .util import col, replaced_by_pep8 +from .results import ParseResults + + +ParseAction = Union[ + Callable[[], Any], + Callable[[ParseResults], Any], + Callable[[int, ParseResults], Any], + Callable[[str, int, ParseResults], Any], +] + + +class OnlyOnce: + """ + Wrapper for parse actions, to ensure they are only called once. 
+ Note: parse action signature must include all 3 arguments. + """ + + def __init__(self, method_call: Callable[[str, int, ParseResults], Any]): + from .core import _trim_arity + + self.callable = _trim_arity(method_call) + self.called = False + + def __call__(self, s: str, l: int, t: ParseResults) -> ParseResults: + if not self.called: + results = self.callable(s, l, t) + self.called = True + return results + raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") + + def reset(self): + """ + Allow the associated parse action to be called once more. + """ + + self.called = False + + +def match_only_at_col(n: int) -> ParseAction: + """ + Helper method for defining parse actions that require matching at + a specific column in the input text. + """ + + def verify_col(strg: str, locn: int, toks: ParseResults) -> None: + if col(locn, strg) != n: + raise ParseException(strg, locn, f"matched token not at column {n}") + + return verify_col + + +def replace_with(repl_str: str) -> ParseAction: + """ + Helper method for common parse actions that simply return + a literal value. Especially useful when used with + :class:`transform_string` (). + + Example:: + + num = Word(nums).set_parse_action(lambda toks: int(toks[0])) + na = one_of("N/A NA").set_parse_action(replace_with(math.nan)) + term = na | num + + term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234] + """ + return lambda s, l, t: [repl_str] + + +def remove_quotes(s: str, l: int, t: ParseResults) -> Any: + """ + Helper parse action for removing quotation marks from parsed + quoted strings. 
+ + Example:: + + # by default, quotation marks are included in parsed results + quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] + + # use remove_quotes to strip quotation marks from parsed results + quoted_string.set_parse_action(remove_quotes) + quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] + """ + return t[0][1:-1] + + +def with_attribute(*args: tuple[str, str], **attr_dict) -> ParseAction: + """ + Helper to create a validating parse action to be used with start + tags created with :class:`make_xml_tags` or + :class:`make_html_tags`. Use ``with_attribute`` to qualify + a starting tag with a required attribute value, to avoid false + matches on common tags such as ```` or ``
``. + + Call ``with_attribute`` with a series of attribute names and + values. Specify the list of filter attributes names and values as: + + - keyword arguments, as in ``(align="right")``, or + - as an explicit dict with ``**`` operator, when an attribute + name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` + - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` + + For attribute names with a namespace prefix, you must use the second + form. Attribute names are matched insensitive to upper/lower case. + + If just testing for ``class`` (with or without a namespace), use + :class:`with_class`. + + To verify that the attribute exists, but without specifying a value, + pass ``with_attribute.ANY_VALUE`` as the value. + + Example:: + + html = ''' +
+ Some text +
1 4 0 1 0
+
1,3 2,3 1,1
+
this has no type
+
+ ''' + div,div_end = make_html_tags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().set_parse_action(with_attribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + attrs_list: list[tuple[str, str]] = [] + if args: + attrs_list.extend(args) + else: + attrs_list.extend(attr_dict.items()) + + def pa(s: str, l: int, tokens: ParseResults) -> None: + for attrName, attrValue in attrs_list: + if attrName not in tokens: + raise ParseException(s, l, "no matching attribute " + attrName) + if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: # type: ignore [attr-defined] + raise ParseException( + s, + l, + f"attribute {attrName!r} has value {tokens[attrName]!r}, must be {attrValue!r}", + ) + + return pa + + +with_attribute.ANY_VALUE = object() # type: ignore [attr-defined] + + +def with_class(classname: str, namespace: str = "") -> ParseAction: + """ + Simplified version of :class:`with_attribute` when + matching on a div class - made difficult because ``class`` is + a reserved word in Python. + + Example:: + + html = ''' +
+ Some text +
1 4 0 1 0
+
1,3 2,3 1,1
+
this <div> has no class
+
+ + ''' + div,div_end = make_html_tags("div") + div_grid = div().set_parse_action(with_class("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = f"{namespace}:class" if namespace else "class" + return with_attribute(**{classattr: classname}) + + +# Compatibility synonyms +# fmt: off +replaceWith = replaced_by_pep8("replaceWith", replace_with) +removeQuotes = replaced_by_pep8("removeQuotes", remove_quotes) +withAttribute = replaced_by_pep8("withAttribute", with_attribute) +withClass = replaced_by_pep8("withClass", with_class) +matchOnlyAtCol = replaced_by_pep8("matchOnlyAtCol", match_only_at_col) +# fmt: on diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/core.py b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/core.py new file mode 100644 index 0000000000000000000000000000000000000000..b884e2d4a40b4702a28ad38faee68bf86f3f3ebe --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/core.py @@ -0,0 +1,6226 @@ +# +# core.py +# +from __future__ import annotations + +import collections.abc +from collections import deque +import os +import typing +from typing import ( + Any, + Callable, + Generator, + NamedTuple, + Sequence, + TextIO, + Union, + cast, +) +from abc import ABC, abstractmethod +from enum import Enum +import string +import copy +import warnings +import re +import sys +from collections.abc import Iterable +import traceback +import types +from operator import itemgetter +from functools import wraps +from threading import RLock +from pathlib import Path + +from .util import ( + _FifoCache, + _UnboundedCache, + __config_flags, + 
_collapse_string_to_ranges, + _escape_regex_range_chars, + _bslash, + _flatten, + LRUMemo as _LRUMemo, + UnboundedMemo as _UnboundedMemo, + replaced_by_pep8, +) +from .exceptions import * +from .actions import * +from .results import ParseResults, _ParseResultsWithOffset +from .unicode import pyparsing_unicode + +_MAX_INT = sys.maxsize +str_type: tuple[type, ...] = (str, bytes) + +# +# Copyright (c) 2003-2022 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +from functools import cached_property + + +class __compat__(__config_flags): + """ + A cross-version compatibility configuration for pyparsing features that will be + released in a future version. By setting values in this configuration to True, + those features can be enabled in prior versions for compatibility development + and testing. 
+ + - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping + of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; + maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 + behavior + """ + + _type_desc = "compatibility" + + collect_all_And_tokens = True + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _fixed_names = """ + collect_all_And_tokens + """.split() + + +class __diag__(__config_flags): + _type_desc = "diagnostic" + + warn_multiple_tokens_in_named_alternation = False + warn_ungrouped_named_tokens_in_collection = False + warn_name_set_on_empty_Forward = False + warn_on_parse_using_empty_Forward = False + warn_on_assignment_to_Forward = False + warn_on_multiple_string_args_to_oneof = False + warn_on_match_first_with_lshift_operator = False + enable_debug_on_named_expressions = False + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _warning_names = [name for name in _all_names if name.startswith("warn")] + _debug_names = [name for name in _all_names if name.startswith("enable_debug")] + + @classmethod + def enable_all_warnings(cls) -> None: + for name in cls._warning_names: + cls.enable(name) + + +class Diagnostics(Enum): + """ + Diagnostic configuration (all default to disabled) + + - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results + name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions + - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results + name is defined on a containing expression with ungrouped subexpressions that also + have results names + - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined + with a results name, but has no contents defined + - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a 
:class:`Forward` is + defined in a grammar but has never had an expression attached to it + - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined + but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` + - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is + incorrectly called with multiple str arguments + - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent + calls to :class:`ParserElement.set_name` + + Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. + All warnings can be enabled by calling :class:`enable_all_warnings`. + """ + + warn_multiple_tokens_in_named_alternation = 0 + warn_ungrouped_named_tokens_in_collection = 1 + warn_name_set_on_empty_Forward = 2 + warn_on_parse_using_empty_Forward = 3 + warn_on_assignment_to_Forward = 4 + warn_on_multiple_string_args_to_oneof = 5 + warn_on_match_first_with_lshift_operator = 6 + enable_debug_on_named_expressions = 7 + + +def enable_diag(diag_enum: Diagnostics) -> None: + """ + Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.enable(diag_enum.name) + + +def disable_diag(diag_enum: Diagnostics) -> None: + """ + Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.disable(diag_enum.name) + + +def enable_all_warnings() -> None: + """ + Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). 
+ """ + __diag__.enable_all_warnings() + + +# hide abstract class +del __config_flags + + +def _should_enable_warnings( + cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] +) -> bool: + enable = bool(warn_env_var) + for warn_opt in cmd_line_warn_options: + w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( + ":" + )[:5] + if not w_action.lower().startswith("i") and ( + not (w_message or w_category or w_module) or w_module == "pyparsing" + ): + enable = True + elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): + enable = False + return enable + + +if _should_enable_warnings( + sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") +): + enable_all_warnings() + + +# build list of single arg builtins, that can be used as parse actions +# fmt: off +_single_arg_builtins = { + sum, len, sorted, reversed, list, tuple, set, any, all, min, max +} +# fmt: on + +_generatorType = types.GeneratorType +ParseImplReturnType = tuple[int, Any] +PostParseReturnType = Union[ParseResults, Sequence[ParseResults]] + +ParseCondition = Union[ + Callable[[], bool], + Callable[[ParseResults], bool], + Callable[[int, ParseResults], bool], + Callable[[str, int, ParseResults], bool], +] +ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] +DebugStartAction = Callable[[str, int, "ParserElement", bool], None] +DebugSuccessAction = Callable[ + [str, int, int, "ParserElement", ParseResults, bool], None +] +DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] + + +alphas: str = string.ascii_uppercase + string.ascii_lowercase +identchars: str = pyparsing_unicode.Latin1.identchars +identbodychars: str = pyparsing_unicode.Latin1.identbodychars +nums: str = "0123456789" +hexnums: str = nums + "ABCDEFabcdef" +alphanums: str = alphas + nums +printables: str = "".join([c for c in string.printable if c not in string.whitespace]) + + +class 
_ParseActionIndexError(Exception): + """ + Internal wrapper around IndexError so that IndexErrors raised inside + parse actions aren't misinterpreted as IndexErrors raised inside + ParserElement parseImpl methods. + """ + + def __init__(self, msg: str, exc: BaseException): + self.msg: str = msg + self.exc: BaseException = exc + + +_trim_arity_call_line: traceback.StackSummary = None # type: ignore[assignment] +pa_call_line_synth = () + + +def _trim_arity(func, max_limit=3): + """decorator to trim function calls to match the arity of the target""" + global _trim_arity_call_line, pa_call_line_synth + + if func in _single_arg_builtins: + return lambda s, l, t: func(t) + + limit = 0 + found_arity = False + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + # fmt: off + LINE_DIFF = 9 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
+ _trim_arity_call_line = _trim_arity_call_line or traceback.extract_stack(limit=2)[-1] + pa_call_line_synth = pa_call_line_synth or (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) + + def wrapper(*args): + nonlocal found_arity, limit + if found_arity: + return func(*args[limit:]) + while 1: + try: + ret = func(*args[limit:]) + found_arity = True + return ret + except TypeError as te: + # re-raise TypeErrors if they did not come from our arity testing + if found_arity: + raise + else: + tb = te.__traceback__ + frames = traceback.extract_tb(tb, limit=2) + frame_summary = frames[-1] + trim_arity_type_error = ( + [frame_summary[:2]][-1][:2] == pa_call_line_synth + ) + del tb + + if trim_arity_type_error: + if limit < max_limit: + limit += 1 + continue + + raise + except IndexError as ie: + # wrap IndexErrors inside a _ParseActionIndexError + raise _ParseActionIndexError( + "IndexError raised in parse action", ie + ).with_traceback(None) + # fmt: on + + # copy func name to wrapper for sensible debug output + # (can't use functools.wraps, since that messes with function signature) + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + wrapper.__name__ = func_name + wrapper.__doc__ = func.__doc__ + + return wrapper + + +def condition_as_parse_action( + fn: ParseCondition, message: typing.Optional[str] = None, fatal: bool = False +) -> ParseAction: + """ + Function to convert a simple predicate function that returns ``True`` or ``False`` + into a parse action. Can be used in places when a parse action is required + and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition + to an operator level in :class:`infix_notation`). 
+ + Optional keyword arguments: + + - ``message`` - define a custom message to be used in the raised exception + - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; + otherwise will raise :class:`ParseException` + + """ + msg = message if message is not None else "failed user-defined condition" + exc_type = ParseFatalException if fatal else ParseException + fn = _trim_arity(fn) + + @wraps(fn) + def pa(s, l, t): + if not bool(fn(s, l, t)): + raise exc_type(s, l, msg) + + return pa + + +def _default_start_debug_action( + instring: str, loc: int, expr: ParserElement, cache_hit: bool = False +): + cache_hit_str = "*" if cache_hit else "" + print( + ( + f"{cache_hit_str}Match {expr} at loc {loc}({lineno(loc, instring)},{col(loc, instring)})\n" + f" {line(loc, instring)}\n" + f" {' ' * (col(loc, instring) - 1)}^" + ) + ) + + +def _default_success_debug_action( + instring: str, + startloc: int, + endloc: int, + expr: ParserElement, + toks: ParseResults, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print(f"{cache_hit_str}Matched {expr} -> {toks.as_list()}") + + +def _default_exception_debug_action( + instring: str, + loc: int, + expr: ParserElement, + exc: Exception, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print(f"{cache_hit_str}Match {expr} failed, {type(exc).__name__} raised: {exc}") + + +def null_debug_action(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + + +class ParserElement(ABC): + """Abstract base level parser element class.""" + + DEFAULT_WHITE_CHARS: str = " \n\t\r" + verbose_stacktrace: bool = False + _literalStringClass: type = None # type: ignore[assignment] + + @staticmethod + def set_default_whitespace_chars(chars: str) -> None: + r""" + Overrides the default whitespace chars + + Example:: + + # default whitespace chars are space, and newline + Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 
'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.set_default_whitespace_chars(" \t") + Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def'] + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + # update whitespace all parse expressions defined in this module + for expr in _builtin_exprs: + if expr.copyDefaultWhiteChars: + expr.whiteChars = set(chars) + + @staticmethod + def inline_literals_using(cls: type) -> None: + """ + Set class to be used for inclusion of string literals into a parser. + + Example:: + + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inline_literals_using(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] + """ + ParserElement._literalStringClass = cls + + @classmethod + def using_each(cls, seq, **class_kwargs): + """ + Yields a sequence of ``class(obj, **class_kwargs)`` for obj in seq. 
+ + Example:: + + LPAR, RPAR, LBRACE, RBRACE, SEMI = Suppress.using_each("(){};") + + """ + yield from (cls(obj, **class_kwargs) for obj in seq) + + class DebugActions(NamedTuple): + debug_try: typing.Optional[DebugStartAction] + debug_match: typing.Optional[DebugSuccessAction] + debug_fail: typing.Optional[DebugExceptionAction] + + def __init__(self, savelist: bool = False): + self.parseAction: list[ParseAction] = list() + self.failAction: typing.Optional[ParseFailAction] = None + self.customName: str = None # type: ignore[assignment] + self._defaultName: typing.Optional[str] = None + self.resultsName: str = None # type: ignore[assignment] + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + self.copyDefaultWhiteChars = True + # used when checking for left-recursion + self.mayReturnEmpty = False + self.keepTabs = False + self.ignoreExprs: list[ParserElement] = list() + self.debug = False + self.streamlined = False + # optimize exception handling for subclasses that don't advance parse index + self.mayIndexError = True + self.errmsg: Union[str, None] = "" + # mark results names as modal (report only last) or cumulative (list all) + self.modalResults = True + # custom debug actions + self.debugActions = self.DebugActions(None, None, None) + # avoid redundant calls to preParse + self.callPreparse = True + self.callDuringTry = False + self.suppress_warnings_: list[Diagnostics] = [] + self.show_in_diagram = True + + def suppress_warning(self, warning_type: Diagnostics) -> ParserElement: + """ + Suppress warnings emitted for a particular diagnostic on this expression. 
+ + Example:: + + base = pp.Forward() + base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) + + # statement would normally raise a warning, but is now suppressed + print(base.parse_string("x")) + + """ + self.suppress_warnings_.append(warning_type) + return self + + def visit_all(self): + """General-purpose method to yield all expressions and sub-expressions + in a grammar. Typically just for internal use. + """ + to_visit = deque([self]) + seen = set() + while to_visit: + cur = to_visit.popleft() + + # guard against looping forever through recursive grammars + if cur in seen: + continue + seen.add(cur) + + to_visit.extend(cur.recurse()) + yield cur + + def copy(self) -> ParserElement: + """ + Make a copy of this :class:`ParserElement`. Useful for defining + different parse actions for the same parsing pattern, using copies of + the original parse element. + + Example:: + + integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") + integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + + print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M")) + + prints:: + + [5120, 100, 655360, 268435456] + + Equivalent form of ``expr.copy()`` is just ``expr()``:: + + integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + """ + cpy = copy.copy(self) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + return cpy + + def set_results_name( + self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False + ) -> ParserElement: + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. 
+ + Normally, results names are assigned as you would assign keys in a dict: + any existing value is overwritten by later values. If it is necessary to + keep all values captured for a particular results name, call ``set_results_name`` + with ``list_all_matches`` = True. + + NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + ``expr("name")`` in place of ``expr.set_results_name("name")`` + - see :class:`__call__`. If ``list_all_matches`` is required, use + ``expr("name*")``. + + Example:: + + integer = Word(nums) + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + listAllMatches = listAllMatches or list_all_matches + return self._setResultsName(name, listAllMatches) + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if name is None: + return self + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + list_all_matches = True + newself.resultsName = name + newself.modalResults = not list_all_matches + return newself + + def set_break(self, break_flag: bool = True) -> ParserElement: + """ + Method to invoke the Python pdb debugger when this element is + about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to + disable. 
+ """ + if break_flag: + _parseMethod = self._parse + + def breaker(instring, loc, do_actions=True, callPreParse=True): + # this call to breakpoint() is intentional, not a checkin error + breakpoint() + return _parseMethod(instring, loc, do_actions, callPreParse) + + breaker._originalParseMethod = _parseMethod # type: ignore [attr-defined] + self._parse = breaker # type: ignore [method-assign] + elif hasattr(self._parse, "_originalParseMethod"): + self._parse = self._parse._originalParseMethod # type: ignore [method-assign] + return self + + def set_parse_action(self, *fns: ParseAction, **kwargs: Any) -> ParserElement: + """ + Define one or more actions to perform when successfully matching parse element definition. + + Parse actions can be called to perform data conversions, do extra validation, + update external data structures, or enhance or replace the parsed tokens. + Each parse action ``fn`` is a callable method with 0-3 arguments, called as + ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + + - ``s`` = the original string being parsed (see note below) + - ``loc`` = the location of the matching substring + - ``toks`` = a list of the matched tokens, packaged as a :class:`ParseResults` object + + The parsed tokens are passed to the parse action as ParseResults. They can be + modified in place using list-style append, extend, and pop operations to update + the parsed list elements; and with dictionary-style item set and del operations + to add, update, or remove any named results. If the tokens are modified in place, + it is not necessary to return them with a return statement. + + Parse actions can also completely replace the given tokens, with another ``ParseResults`` + object, or with some entirely different object (common for parse actions that perform data + conversions). 
A convenient way to build a new parse result is to define the values + using a dict, and then create the return value using :class:`ParseResults.from_dict`. + + If None is passed as the ``fn`` parse action, all previously added parse actions for this + expression are cleared. + + Optional keyword arguments: + + - ``call_during_try`` = (default= ``False``) indicate if parse action should be run during + lookaheads and alternate testing. For parse actions that have side effects, it is + important to only call the parse action once it is determined that it is being + called as part of a successful parse. For parse actions that perform additional + validation, then call_during_try should be passed as True, so that the validation + code is included in the preliminary "try" parses. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`parse_string` for more + information on parsing strings containing ```` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. 
+ + Example:: + + # parse dates in the form YYYY/MM/DD + + # use parse action to convert toks from str to int at parse time + def convert_to_int(toks): + return int(toks[0]) + + # use a parse action to verify that the date is a valid date + def is_valid_date(instring, loc, toks): + from datetime import date + year, month, day = toks[::2] + try: + date(year, month, day) + except ValueError: + raise ParseException(instring, loc, "invalid date given") + + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + # add parse actions + integer.set_parse_action(convert_to_int) + date_str.set_parse_action(is_valid_date) + + # note that integer fields are now ints, not strings + date_str.run_tests(''' + # successful parse - note that integer fields were converted to ints + 1999/12/31 + + # fail - invalid date + 1999/13/31 + ''') + """ + if list(fns) == [None]: + self.parseAction.clear() + return self + + if not all(callable(fn) for fn in fns): + raise TypeError("parse actions must be callable") + self.parseAction[:] = [_trim_arity(fn) for fn in fns] + self.callDuringTry = kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + + return self + + def add_parse_action(self, *fns: ParseAction, **kwargs: Any) -> ParserElement: + """ + Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`. + + See examples in :class:`copy`. + """ + self.parseAction += [_trim_arity(fn) for fn in fns] + self.callDuringTry = self.callDuringTry or kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def add_condition(self, *fns: ParseCondition, **kwargs: Any) -> ParserElement: + """Add a boolean predicate function to expression's list of parse actions. See + :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, + functions passed to ``add_condition`` need to return boolean success/fail of the condition. 
+ + Optional keyword arguments: + + - ``message`` = define a custom message to be used in the raised exception + - ``fatal`` = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise + ParseException + - ``call_during_try`` = boolean to indicate if this method should be called during internal tryParse calls, + default=False + + Example:: + + integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), + (line:1, col:1) + """ + for fn in fns: + self.parseAction.append( + condition_as_parse_action( + fn, + message=str(kwargs.get("message")), + fatal=bool(kwargs.get("fatal", False)), + ) + ) + + self.callDuringTry = self.callDuringTry or kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def set_fail_action(self, fn: ParseFailAction) -> ParserElement: + """ + Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + ``fn(s, loc, expr, err)`` where: + + - ``s`` = string being parsed + - ``loc`` = location where expression match was attempted and failed + - ``expr`` = the parse expression that failed + - ``err`` = the exception thrown + + The function returns no value. 
It may throw :class:`ParseFatalException` + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def _skipIgnorables(self, instring: str, loc: int) -> int: + if not self.ignoreExprs: + return loc + exprsFound = True + ignore_expr_fns = [e._parse for e in self.ignoreExprs] + last_loc = loc + while exprsFound: + exprsFound = False + for ignore_fn in ignore_expr_fns: + try: + while 1: + loc, dummy = ignore_fn(instring, loc) + exprsFound = True + except ParseException: + pass + # check if all ignore exprs matched but didn't actually advance the parse location + if loc == last_loc: + break + last_loc = loc + return loc + + def preParse(self, instring: str, loc: int) -> int: + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + + if self.skipWhitespace: + instrlen = len(instring) + white_chars = self.whiteChars + while loc < instrlen and instring[loc] in white_chars: + loc += 1 + + return loc + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + return loc, [] + + def postParse(self, instring, loc, tokenlist): + return tokenlist + + # @profile + def _parseNoCache( + self, instring, loc, do_actions=True, callPreParse=True + ) -> tuple[int, ParseResults]: + debugging = self.debug # and do_actions) + len_instring = len(instring) + + if debugging or self.failAction: + # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) + try: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.debugActions.debug_try: + self.debugActions.debug_try(instring, tokens_start, self, False) + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, do_actions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, do_actions) + except Exception as 
err: + # print("Exception raised:", err) + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + if self.failAction: + self.failAction(instring, tokens_start, self, err) + raise + else: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, do_actions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, do_actions) + + tokens = self.postParse(instring, loc, tokens) + + ret_tokens = ParseResults( + tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults + ) + if self.parseAction and (do_actions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) # type: ignore [call-arg, arg-type] + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + asList=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + except Exception as err: + # print "Exception raised in user parse action:", err + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + raise + else: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) # type: ignore [call-arg, arg-type] + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + asList=self.saveAsList + and 
isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + if debugging: + # print("Matched", self, "->", ret_tokens.as_list()) + if self.debugActions.debug_match: + self.debugActions.debug_match( + instring, tokens_start, loc, self, ret_tokens, False + ) + + return loc, ret_tokens + + def try_parse( + self, + instring: str, + loc: int, + *, + raise_fatal: bool = False, + do_actions: bool = False, + ) -> int: + try: + return self._parse(instring, loc, do_actions=do_actions)[0] + except ParseFatalException: + if raise_fatal: + raise + raise ParseException(instring, loc, self.errmsg, self) + + def can_parse_next(self, instring: str, loc: int, do_actions: bool = False) -> bool: + try: + self.try_parse(instring, loc, do_actions=do_actions) + except (ParseException, IndexError): + return False + else: + return True + + # cache for left-recursion in Forward references + recursion_lock = RLock() + recursion_memos: collections.abc.MutableMapping[ + tuple[int, Forward, bool], tuple[int, Union[ParseResults, Exception]] + ] = {} + + class _CacheType(typing.Protocol): + """ + Class to be used for packrat and left-recursion cacheing of results + and exceptions. + """ + + not_in_cache: bool + + def get(self, *args) -> typing.Any: ... + + def set(self, *args) -> None: ... + + def clear(self) -> None: ... + + class NullCache(dict): + """ + A null cache type for initialization of the packrat_cache class variable. + If/when enable_packrat() is called, this null cache will be replaced by a + proper _CacheType class instance. + """ + + not_in_cache: bool = True + + def get(self, *args) -> typing.Any: ... + + def set(self, *args) -> None: ... + + def clear(self) -> None: ... 
+ + # class-level argument cache for optimizing repeated calls when backtracking + # through recursive expressions + packrat_cache: _CacheType = NullCache() + packrat_cache_lock = RLock() + packrat_cache_stats = [0, 0] + + # this method gets repeatedly called during backtracking with the same arguments - + # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression + def _parseCache( + self, instring, loc, do_actions=True, callPreParse=True + ) -> tuple[int, ParseResults]: + HIT, MISS = 0, 1 + lookup = (self, instring, loc, callPreParse, do_actions) + with ParserElement.packrat_cache_lock: + cache = ParserElement.packrat_cache + value = cache.get(lookup) + if value is cache.not_in_cache: + ParserElement.packrat_cache_stats[MISS] += 1 + try: + value = self._parseNoCache(instring, loc, do_actions, callPreParse) + except ParseBaseException as pe: + # cache a copy of the exception, without the traceback + cache.set(lookup, pe.__class__(*pe.args)) + raise + else: + cache.set(lookup, (value[0], value[1].copy(), loc)) + return value + else: + ParserElement.packrat_cache_stats[HIT] += 1 + if self.debug and self.debugActions.debug_try: + try: + self.debugActions.debug_try(instring, loc, self, cache_hit=True) # type: ignore [call-arg] + except TypeError: + pass + if isinstance(value, Exception): + if self.debug and self.debugActions.debug_fail: + try: + self.debugActions.debug_fail( + instring, loc, self, value, cache_hit=True # type: ignore [call-arg] + ) + except TypeError: + pass + raise value + + value = cast(tuple[int, ParseResults, int], value) + loc_, result, endloc = value[0], value[1].copy(), value[2] + if self.debug and self.debugActions.debug_match: + try: + self.debugActions.debug_match( + instring, loc_, endloc, self, result, cache_hit=True # type: ignore [call-arg] + ) + except TypeError: + pass + + return loc_, result + + _parse = _parseNoCache + + @staticmethod + def reset_cache() -> None: + 
ParserElement.packrat_cache.clear() + ParserElement.packrat_cache_stats[:] = [0] * len( + ParserElement.packrat_cache_stats + ) + ParserElement.recursion_memos.clear() + + _packratEnabled = False + _left_recursion_enabled = False + + @staticmethod + def disable_memoization() -> None: + """ + Disables active Packrat or Left Recursion parsing and their memoization + + This method also works if neither Packrat nor Left Recursion are enabled. + This makes it safe to call before activating Packrat nor Left Recursion + to clear any previous settings. + """ + ParserElement.reset_cache() + ParserElement._left_recursion_enabled = False + ParserElement._packratEnabled = False + ParserElement._parse = ParserElement._parseNoCache + + @staticmethod + def enable_left_recursion( + cache_size_limit: typing.Optional[int] = None, *, force=False + ) -> None: + """ + Enables "bounded recursion" parsing, which allows for both direct and indirect + left-recursion. During parsing, left-recursive :class:`Forward` elements are + repeatedly matched with a fixed recursion depth that is gradually increased + until finding the longest match. + + Example:: + + import pyparsing as pp + pp.ParserElement.enable_left_recursion() + + E = pp.Forward("E") + num = pp.Word(pp.nums) + # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... + E <<= E + '+' - num | num + + print(E.parse_string("1+2+3")) + + Recursion search naturally memoizes matches of ``Forward`` elements and may + thus skip reevaluation of parse actions during backtracking. This may break + programs with parse actions which rely on strict ordering of side-effects. + + Parameters: + + - ``cache_size_limit`` - (default=``None``) - memoize at most this many + ``Forward`` elements during matching; if ``None`` (the default), + memoize all ``Forward`` elements. + + Bounded Recursion parsing works similar but not identical to Packrat parsing, + thus the two cannot be used together. 
Use ``force=True`` to disable any + previous, conflicting settings. + """ + if force: + ParserElement.disable_memoization() + elif ParserElement._packratEnabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + if cache_size_limit is None: + ParserElement.recursion_memos = _UnboundedMemo() + elif cache_size_limit > 0: + ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) # type: ignore[assignment] + else: + raise NotImplementedError(f"Memo size of {cache_size_limit}") + ParserElement._left_recursion_enabled = True + + @staticmethod + def enable_packrat( + cache_size_limit: Union[int, None] = 128, *, force: bool = False + ) -> None: + """ + Enables "packrat" parsing, which adds memoizing to the parsing logic. + Repeated parse attempts at the same string location (which happens + often in many complex grammars) can immediately return a cached value, + instead of re-executing parsing/validating code. Memoizing is done of + both valid results and parsing exceptions. + + Parameters: + + - ``cache_size_limit`` - (default= ``128``) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. + + This speedup may break existing programs that use parse actions that + have side-effects. For this reason, packrat parsing is disabled when + you first import pyparsing. To activate the packrat feature, your + program must call the class method :class:`ParserElement.enable_packrat`. + For best results, call ``enable_packrat()`` immediately after + importing pyparsing. + + Example:: + + import pyparsing + pyparsing.ParserElement.enable_packrat() + + Packrat parsing works similar but not identical to Bounded Recursion parsing, + thus the two cannot be used together. Use ``force=True`` to disable any + previous, conflicting settings. 
+ """ + if force: + ParserElement.disable_memoization() + elif ParserElement._left_recursion_enabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + + if ParserElement._packratEnabled: + return + + ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = _UnboundedCache() + else: + ParserElement.packrat_cache = _FifoCache(cache_size_limit) + ParserElement._parse = ParserElement._parseCache + + def parse_string( + self, instring: str, parse_all: bool = False, *, parseAll: bool = False + ) -> ParseResults: + """ + Parse a string with respect to the parser definition. This function is intended as the primary interface to the + client code. + + :param instring: The input string to be parsed. + :param parse_all: If set, the entire input string must match the grammar. + :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. + :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. + :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or + an object with attributes if the given parser includes results names. + + If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This + is also equivalent to ending the grammar with :class:`StringEnd`\\ (). + + To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are + converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). 
If the input string + contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string + being parsed, one can ensure a consistent view of the input string by doing one of the following: + + - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), + - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the + parse action's ``s`` argument, or + - explicitly expand the tabs in your input string before calling ``parse_string``. + + Examples: + + By default, partial matches are OK. + + >>> res = Word('a').parse_string('aaaaabaaa') + >>> print(res) + ['aaaaa'] + + The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children + directly to see more examples. + + It raises an exception if parse_all flag is set and instring does not match the whole grammar. + + >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) + Traceback (most recent call last): + ... 
+ pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) + """ + parseAll = parse_all or parseAll + + ParserElement.reset_cache() + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + if not self.keepTabs: + instring = instring.expandtabs() + try: + loc, tokens = self._parse(instring, 0) + if parseAll: + loc = self.preParse(instring, loc) + se = Empty() + StringEnd().set_debug(False) + se._parse(instring, loc) + except _ParseActionIndexError as pa_exc: + raise pa_exc.exc + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + raise exc.with_traceback(None) + else: + return tokens + + def scan_string( + self, + instring: str, + max_matches: int = _MAX_INT, + overlap: bool = False, + always_skip_whitespace=True, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> Generator[tuple[ParseResults, int, int], None, None]: + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + ``max_matches`` argument, to clip scanning after 'n' matches are found. If + ``overlap`` is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See :class:`parse_string` for more information on parsing + strings with embedded tabs. 
+ + Example:: + + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens, start, end in Word(alphas).scan_string(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + maxMatches = min(maxMatches, max_matches) + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = str(instring).expandtabs() + instrlen = len(instring) + loc = 0 + if always_skip_whitespace: + preparser = Empty() + preparser.ignoreExprs = self.ignoreExprs + preparser.whiteChars = self.whiteChars + preparseFn = preparser.preParse + else: + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + try: + while loc <= instrlen and matches < maxMatches: + try: + preloc: int = preparseFn(instring, loc) + nextLoc: int + tokens: ParseResults + nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) + except ParseException: + loc = preloc + 1 + else: + if nextLoc > loc: + matches += 1 + if debug: + print( + { + "tokens": tokens.asList(), + "start": preloc, + "end": nextLoc, + } + ) + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn(instring, loc) + if nextloc > loc: + loc = nextLoc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc + 1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def transform_string(self, instring: str, *, debug: bool = False) -> str: + """ + Extension to :class:`scan_string`, to modify matching text with modified tokens that may + be returned from a parse action. To use ``transform_string``, define a grammar and + attach a parse action to it that modifies the returned token list. 
+ Invoking ``transform_string()`` on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. ``transform_string()`` returns the resulting transformed string. + + Example:: + + wd = Word(alphas) + wd.set_parse_action(lambda toks: toks[0].title()) + + print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) + + prints:: + + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ + out: list[str] = [] + lastE = 0 + # force preservation of s, to minimize unwanted transformation of string, and to + # keep string locs straight between transform_string and scan_string + self.keepTabs = True + try: + for t, s, e in self.scan_string(instring, debug=debug): + if s > lastE: + out.append(instring[lastE:s]) + lastE = e + + if not t: + continue + + if isinstance(t, ParseResults): + out += t.as_list() + elif isinstance(t, Iterable) and not isinstance(t, str_type): + out.extend(t) + else: + out.append(t) + + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join([str(s) for s in _flatten(out)]) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def search_string( + self, + instring: str, + max_matches: int = _MAX_INT, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> ParseResults: + """ + Another extension to :class:`scan_string`, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + ``max_matches`` argument, to clip searching after 'n' matches are found. 
+ + Example:: + + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) + + prints:: + + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + maxMatches = min(maxMatches, max_matches) + try: + return ParseResults( + [ + t + for t, s, e in self.scan_string( + instring, maxMatches, always_skip_whitespace=False, debug=debug + ) + ] + ) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def split( + self, + instring: str, + maxsplit: int = _MAX_INT, + include_separators: bool = False, + *, + includeSeparators=False, + ) -> Generator[str, None, None]: + """ + Generator method to split a string using the given expression as a separator. + May be called with optional ``maxsplit`` argument, to limit the number of splits; + and the optional ``include_separators`` argument (default= ``False``), if the separating + matching text should be included in the split results. 
+ + Example:: + + punc = one_of(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + + prints:: + + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + includeSeparators = includeSeparators or include_separators + last = 0 + for t, s, e in self.scan_string(instring, max_matches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other) -> ParserElement: + """ + Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement` + converts them to :class:`Literal`\\ s by default. + + Example:: + + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print(hello, "->", greet.parse_string(hello)) + + prints:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + + ``...`` may be used as a parse expression as a short form of :class:`SkipTo`:: + + Literal('start') + ... + Literal('end') + + is equivalent to:: + + Literal('start') + SkipTo('end')("_skipped*") + Literal('end') + + Note that the skipped text is returned with '_skipped' as a results name, + and to support having multiple skips in the same parser, the value returned is + a list of all skipped text. 
+ """ + if other is Ellipsis: + return _PendingSkip(self) + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return And([self, other]) + + def __radd__(self, other) -> ParserElement: + """ + Implementation of ``+`` operator when left operand is not a :class:`ParserElement` + """ + if other is Ellipsis: + return SkipTo(self)("_skipped*") + self + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other + self + + def __sub__(self, other) -> ParserElement: + """ + Implementation of ``-`` operator, returns :class:`And` with error stop + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self + And._ErrorStop() + other + + def __rsub__(self, other) -> ParserElement: + """ + Implementation of ``-`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other - self + + def __mul__(self, other) -> ParserElement: + """ + Implementation of ``*`` operator, allows use of ``expr * 3`` in place of + ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer + tuple, similar to ``{min, max}`` multipliers in regular expressions. 
Tuples + may also include ``None`` as in: + + - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` + + Note that ``expr*(None, n)`` does not raise an exception if + more than n exprs exist in the input stream; that is, + ``expr*(None, n)`` does not enforce a maximum number of expr + occurrences. If this behavior is desired, then write + ``expr*(None, n) + ~expr`` + """ + if other is Ellipsis: + other = (0, None) + elif isinstance(other, tuple) and other[:1] == (Ellipsis,): + other = ((0,) + other[1:] + (None,))[:2] + + if not isinstance(other, (int, tuple)): + return NotImplemented + + if isinstance(other, int): + minElements, optElements = other, 0 + else: + other = tuple(o if o is not Ellipsis else None for o in other) + other = (other + (None, None))[:2] + if other[0] is None: + other = (0, other[1]) + if isinstance(other[0], int) and other[1] is None: + if other[0] == 0: + return ZeroOrMore(self) + if other[0] == 1: + return OneOrMore(self) + else: + return self * other[0] + ZeroOrMore(self) + elif isinstance(other[0], int) and isinstance(other[1], int): + minElements, optElements = other + optElements -= minElements + else: + return NotImplemented + + if minElements < 0: + raise ValueError("cannot multiply ParserElement by negative value") + if optElements < 0: + raise ValueError( + "second tuple value must be greater or equal to first tuple value" + ) + if minElements == optElements == 0: + return And([]) + + if optElements: + + def makeOptionalList(n): + if n > 1: + return Opt(self + makeOptionalList(n - 1)) + else: + return Opt(self) + + if minElements: + if minElements == 1: + ret = self + makeOptionalList(optElements) + else: + ret = And([self] * 
minElements) + makeOptionalList(optElements) + else: + ret = makeOptionalList(optElements) + else: + if minElements == 1: + ret = self + else: + ret = And([self] * minElements) + return ret + + def __rmul__(self, other) -> ParserElement: + return self.__mul__(other) + + def __or__(self, other) -> ParserElement: + """ + Implementation of ``|`` operator - returns :class:`MatchFirst` + """ + if other is Ellipsis: + return _PendingSkip(self, must_skip=True) + + if isinstance(other, str_type): + # `expr | ""` is equivalent to `Opt(expr)` + if other == "": + return Opt(self) + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return MatchFirst([self, other]) + + def __ror__(self, other) -> ParserElement: + """ + Implementation of ``|`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other | self + + def __xor__(self, other) -> ParserElement: + """ + Implementation of ``^`` operator - returns :class:`Or` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return Or([self, other]) + + def __rxor__(self, other) -> ParserElement: + """ + Implementation of ``^`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other ^ self + + def __and__(self, other) -> ParserElement: + """ + Implementation of ``&`` operator - returns :class:`Each` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return Each([self, other]) + + def __rand__(self, other) -> ParserElement: + """ + Implementation of ``&`` 
operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other & self + + def __invert__(self) -> ParserElement: + """ + Implementation of ``~`` operator - returns :class:`NotAny` + """ + return NotAny(self) + + # disable __iter__ to override legacy use of sequential access to __getitem__ to + # iterate over a sequence + __iter__ = None + + def __getitem__(self, key): + """ + use ``[]`` indexing notation as a short form for expression repetition: + + - ``expr[n]`` is equivalent to ``expr*n`` + - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` + - ``expr[n, ...]`` or ``expr[n,]`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` + - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` + + ``None`` may be used in place of ``...``. + + Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception + if more than ``n`` ``expr``\\ s exist in the input stream. If this behavior is + desired, then write ``expr[..., n] + ~expr``. + + For repetition with a stop_on expression, use slice notation: + + - ``expr[...: end_expr]`` and ``expr[0, ...: end_expr]`` are equivalent to ``ZeroOrMore(expr, stop_on=end_expr)`` + - ``expr[1, ...: end_expr]`` is equivalent to ``OneOrMore(expr, stop_on=end_expr)`` + + """ + + stop_on_defined = False + stop_on = NoMatch() + if isinstance(key, slice): + key, stop_on = key.start, key.stop + if key is None: + key = ... 
+ stop_on_defined = True + elif isinstance(key, tuple) and isinstance(key[-1], slice): + key, stop_on = (key[0], key[1].start), key[1].stop + stop_on_defined = True + + # convert single arg keys to tuples + if isinstance(key, str_type): + key = (key,) + try: + iter(key) + except TypeError: + key = (key, key) + + if len(key) > 2: + raise TypeError( + f"only 1 or 2 index arguments supported ({key[:5]}{f'... [{len(key)}]' if len(key) > 5 else ''})" + ) + + # clip to 2 elements + ret = self * tuple(key[:2]) + ret = typing.cast(_MultipleMatch, ret) + + if stop_on_defined: + ret.stopOn(stop_on) + + return ret + + def __call__(self, name: typing.Optional[str] = None) -> ParserElement: + """ + Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. + + If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be + passed as ``True``. + + If ``name`` is omitted, same as calling :class:`copy`. + + Example:: + + # these are equivalent + userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") + userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") + """ + if name is not None: + return self._setResultsName(name) + + return self.copy() + + def suppress(self) -> ParserElement: + """ + Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from + cluttering up returned output. + """ + return Suppress(self) + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Enables the skipping of whitespace before matching the characters in the + :class:`ParserElement`'s defined pattern. + + :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any) + """ + self.skipWhitespace = True + return self + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Disables the skipping of whitespace before matching the characters in the + :class:`ParserElement`'s defined pattern. 
This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars. + + :param recursive: If true (the default), also disable whitespace skipping in child elements (if any) + """ + self.skipWhitespace = False + return self + + def set_whitespace_chars( + self, chars: Union[set[str], str], copy_defaults: bool = False + ) -> ParserElement: + """ + Overrides the default whitespace chars + """ + self.skipWhitespace = True + self.whiteChars = set(chars) + self.copyDefaultWhiteChars = copy_defaults + return self + + def parse_with_tabs(self) -> ParserElement: + """ + Overrides default behavior to expand ```` s to spaces before parsing the input string. + Must be called before ``parse_string`` when the input grammar contains elements that + match ```` characters. + """ + self.keepTabs = True + return self + + def ignore(self, other: ParserElement) -> ParserElement: + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + + Example:: + + patt = Word(alphas)[...] 
+ patt.parse_string('ablaj /* comment */ lskjd') + # -> ['ablaj'] + + patt.ignore(c_style_comment) + patt.parse_string('ablaj /* comment */ lskjd') + # -> ['ablaj', 'lskjd'] + """ + if isinstance(other, str_type): + other = Suppress(other) + + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + self.ignoreExprs.append(other) + else: + self.ignoreExprs.append(Suppress(other.copy())) + return self + + def set_debug_actions( + self, + start_action: DebugStartAction, + success_action: DebugSuccessAction, + exception_action: DebugExceptionAction, + ) -> ParserElement: + """ + Customize display of debugging messages while doing pattern matching: + + - ``start_action`` - method to be called when an expression is about to be parsed; + should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)`` + + - ``success_action`` - method to be called when an expression has successfully parsed; + should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserELement, parsed_tokens: ParseResults, cache_hit: bool)`` + + - ``exception_action`` - method to be called when expression fails to parse; + should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)`` + """ + self.debugActions = self.DebugActions( + start_action or _default_start_debug_action, # type: ignore[truthy-function] + success_action or _default_success_debug_action, # type: ignore[truthy-function] + exception_action or _default_exception_debug_action, # type: ignore[truthy-function] + ) + self.debug = True + return self + + def set_debug(self, flag: bool = True, recurse: bool = False) -> ParserElement: + """ + Enable display of debugging messages while doing pattern matching. + Set ``flag`` to ``True`` to enable, ``False`` to disable. + Set ``recurse`` to ``True`` to set the debug flag on this expression and all sub-expressions. 
+ + Example:: + + wd = Word(alphas).set_name("alphaword") + integer = Word(nums).set_name("numword") + term = wd | integer + + # turn on debugging for wd + wd.set_debug() + + term[1, ...].parse_string("abc 123 xyz 890") + + prints:: + + Match alphaword at loc 0(1,1) + Matched alphaword -> ['abc'] + Match alphaword at loc 3(1,4) + Exception raised:Expected alphaword (at char 4), (line:1, col:5) + Match alphaword at loc 7(1,8) + Matched alphaword -> ['xyz'] + Match alphaword at loc 11(1,12) + Exception raised:Expected alphaword (at char 12), (line:1, col:13) + Match alphaword at loc 15(1,16) + Exception raised:Expected alphaword (at char 15), (line:1, col:16) + + The output shown is that produced by the default debug actions - custom debug actions can be + specified using :class:`set_debug_actions`. Prior to attempting + to match the ``wd`` expression, the debugging message ``"Match at loc (,)"`` + is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` + message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression, + which makes debugging and exception messages easier to understand - for instance, the default + name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``. + """ + if recurse: + for expr in self.visit_all(): + expr.set_debug(flag, recurse=False) + return self + + if flag: + self.set_debug_actions( + _default_start_debug_action, + _default_success_debug_action, + _default_exception_debug_action, + ) + else: + self.debug = False + return self + + @property + def default_name(self) -> str: + if self._defaultName is None: + self._defaultName = self._generateDefaultName() + return self._defaultName + + @abstractmethod + def _generateDefaultName(self) -> str: + """ + Child classes must define this method, which defines how the ``default_name`` is set. 
+ """ + + def set_name(self, name: typing.Optional[str]) -> ParserElement: + """ + Define name for this expression, makes debugging and exception messages clearer. If + `__diag__.enable_debug_on_named_expressions` is set to True, setting a name will also + enable debug for this expression. + + If `name` is None, clears any custom name for this expression, and clears the + debug flag is it was enabled via `__diag__.enable_debug_on_named_expressions`. + + Example:: + + integer = Word(nums) + integer.parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) + + integer.set_name("integer") + integer.parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + """ + self.customName = name # type: ignore[assignment] + self.errmsg = f"Expected {str(self)}" + + if __diag__.enable_debug_on_named_expressions: + self.set_debug(name is not None) + + return self + + @property + def name(self) -> str: + # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name + return self.customName if self.customName is not None else self.default_name + + @name.setter + def name(self, new_name) -> None: + self.set_name(new_name) + + def __str__(self) -> str: + return self.name + + def __repr__(self) -> str: + return str(self) + + def streamline(self) -> ParserElement: + self.streamlined = True + self._defaultName = None + return self + + def recurse(self) -> list[ParserElement]: + return [] + + def _checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.recurse(): + e._checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None) -> None: + """ + Check defined expressions for valid structure, check for infinite recursive definitions. 
+ """ + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + self._checkRecursion([]) + + def parse_file( + self, + file_or_filename: Union[str, Path, TextIO], + encoding: str = "utf-8", + parse_all: bool = False, + *, + parseAll: bool = False, + ) -> ParseResults: + """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. + """ + parseAll = parseAll or parse_all + try: + file_or_filename = typing.cast(TextIO, file_or_filename) + file_contents = file_or_filename.read() + except AttributeError: + file_or_filename = typing.cast(str, file_or_filename) + with open(file_or_filename, "r", encoding=encoding) as f: + file_contents = f.read() + try: + return self.parse_string(file_contents, parseAll) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def __eq__(self, other): + if self is other: + return True + elif isinstance(other, str_type): + return self.matches(other, parse_all=True) + elif isinstance(other, ParserElement): + return vars(self) == vars(other) + return False + + def __hash__(self): + return id(self) + + def matches( + self, test_string: str, parse_all: bool = True, *, parseAll: bool = True + ) -> bool: + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. 
    def run_tests(
        self,
        tests: Union[str, list[str]],
        parse_all: bool = True,
        comment: typing.Optional[Union[ParserElement, str]] = "#",
        full_dump: bool = True,
        print_results: bool = True,
        failure_tests: bool = False,
        post_parse: typing.Optional[
            Callable[[str, ParseResults], typing.Optional[str]]
        ] = None,
        file: typing.Optional[TextIO] = None,
        with_line_numbers: bool = False,
        *,
        parseAll: bool = True,
        fullDump: bool = True,
        printResults: bool = True,
        failureTests: bool = False,
        postParse: typing.Optional[
            Callable[[str, ParseResults], typing.Optional[str]]
        ] = None,
    ) -> tuple[bool, list[tuple[str, Union[ParseResults, Exception]]]]:
        """
        Execute the parse expression on a series of test strings, showing each
        test, the parsed results or where the parse failed. Quick and easy way to
        run a parse expression against a list of sample strings.

        Parameters:

        - ``tests`` - a list of separate test strings, or a multiline string of test strings
        - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
        - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
          string; pass None to disable comment filtering
        - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
          if False, only dump nested list
        - ``print_results`` - (default= ``True``) prints test output to stdout
        - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
        - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
          `fn(test_string, parse_results)` and returns a string to be added to the test output
        - ``file`` - (default= ``None``) optional file-like object to which test output will be written;
          if None, will default to ``sys.stdout``
        - ``with_line_numbers`` - (default= ``False``) show test strings with line and column numbers

        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if ``failure_tests`` is True), and the results contain a list of lines of each
        test's output

        Example::

            number_expr = pyparsing_common.number.copy()

            result = number_expr.run_tests('''
                # unsigned integer
                100
                # negative integer
                -100
                # float with scientific notation
                6.02e23
                # integer with scientific notation
                1e-12
                ''')
            print("Success" if result[0] else "Failed!")

            result = number_expr.run_tests('''
                # stray character
                100Z
                # missing leading digit before '.'
                -.100
                # too many '.'
                3.14.159
                ''', failure_tests=True)
            print("Success" if result[0] else "Failed!")

        prints::

            # unsigned integer
            100
            [100]

            # negative integer
            -100
            [-100]

            # float with scientific notation
            6.02e23
            [6.02e+23]

            # integer with scientific notation
            1e-12
            [1e-12]

            Success

            # stray character
            100Z
               ^
            FAIL: Expected end of text (at char 3), (line:1, col:4)

            # missing leading digit before '.'
            -.100
            ^
            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)

            # too many '.'
            3.14.159
                ^
            FAIL: Expected end of text (at char 4), (line:1, col:5)

            Success

        Each test string must be on a single line. If you want to test a string that spans multiple
        lines, create a test like this::

            expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")

        (Note that this is a raw string literal, you must include the leading ``'r'``.)
        """
        from .testing import pyparsing_test

        # reconcile legacy camelCase kwargs with the PEP8 names; the "and"/"or"
        # folding lets either spelling override the matching default
        parseAll = parseAll and parse_all
        fullDump = fullDump and full_dump
        printResults = printResults and print_results
        failureTests = failureTests or failure_tests
        postParse = postParse or post_parse
        if isinstance(tests, str_type):
            # a multiline string becomes one stripped test per line
            tests = typing.cast(str, tests)
            line_strip = type(tests).strip
            tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()]
        comment_specified = comment is not None
        if comment_specified:
            if isinstance(comment, str_type):
                comment = typing.cast(str, comment)
                comment = Literal(comment)
        comment = typing.cast(ParserElement, comment)
        if file is None:
            file = sys.stdout
        print_ = file.write

        result: Union[ParseResults, Exception]
        allResults: list[tuple[str, Union[ParseResults, Exception]]] = []
        comments: list[str] = []
        success = True
        # expression that converts literal "\n" marks in a test string to real newlines
        NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string)
        BOM = "\ufeff"
        nlstr = "\n"
        for t in tests:
            # collect comment lines (and blank lines that follow comments)
            # so they can be echoed above the next real test
            if comment_specified and comment.matches(t, False) or comments and not t:
                comments.append(
                    pyparsing_test.with_line_numbers(t) if with_line_numbers else t
                )
                continue
            if not t:
                continue
            out = [
                f"{nlstr}{nlstr.join(comments) if comments else ''}",
                pyparsing_test.with_line_numbers(t) if with_line_numbers else t,
            ]
            comments.clear()
            try:
                # convert newline marks to actual newlines, and strip leading BOM if present
                t = NL.transform_string(t.lstrip(BOM))
                result = self.parse_string(t, parse_all=parseAll)
            except ParseBaseException as pe:
                fatal = "(FATAL) " if isinstance(pe, ParseFatalException) else ""
                out.append(pe.explain())
                out.append(f"FAIL: {fatal}{pe}")
                if ParserElement.verbose_stacktrace:
                    out.extend(traceback.format_tb(pe.__traceback__))
                # a parse failure only counts as success when failures are expected
                success = success and failureTests
                result = pe
            except Exception as exc:
                tag = "FAIL-EXCEPTION"

                # see if this exception was raised in a parse action
                tb = exc.__traceback__
                it = iter(traceback.walk_tb(tb))
                for f, line in it:
                    if (f.f_code.co_filename, line) == pa_call_line_synth:
                        next_f = next(it)[0]
                        tag += f" (raised in parse action {next_f.f_code.co_name!r})"
                        break

                out.append(f"{tag}: {type(exc).__name__}: {exc}")
                if ParserElement.verbose_stacktrace:
                    out.extend(traceback.format_tb(exc.__traceback__))
                success = success and failureTests
                result = exc
            else:
                # parse succeeded; an unexpected success fails a failure-test run
                success = success and not failureTests
                if postParse is not None:
                    try:
                        pp_value = postParse(t, result)
                        if pp_value is not None:
                            if isinstance(pp_value, ParseResults):
                                out.append(pp_value.dump())
                            else:
                                out.append(str(pp_value))
                        else:
                            out.append(result.dump())
                    except Exception as e:
                        # post-parse callbacks must not abort the test run
                        out.append(result.dump(full=fullDump))
                        out.append(
                            f"{postParse.__name__} failed: {type(e).__name__}: {e}"
                        )
                else:
                    out.append(result.dump(full=fullDump))
                out.append("")

            if printResults:
                print_("\n".join(out))

            allResults.append((t, result))

        return success, allResults
Union[TextIO, Path, str], + vertical: int = 3, + show_results_names: bool = False, + show_groups: bool = False, + embed: bool = False, + **kwargs, + ) -> None: + """ + Create a railroad diagram for the parser. + + Parameters: + + - ``output_html`` (str or file-like object) - output target for generated + diagram HTML + - ``vertical`` (int) - threshold for formatting multiple alternatives vertically + instead of horizontally (default=3) + - ``show_results_names`` - bool flag whether diagram should show annotations for + defined results names + - ``show_groups`` - bool flag whether groups should be highlighted with an unlabeled surrounding box + - ``embed`` - bool flag whether generated HTML should omit , , and tags to embed + the resulting HTML in an enclosing HTML source + - ``head`` - str containing additional HTML to insert into the section of the generated code; + can be used to insert custom CSS styling + - ``body`` - str containing additional HTML to insert at the beginning of the section of the + generated code + + Additional diagram-formatting keyword arguments can also be included; + see railroad.Diagram class. 
+ """ + + try: + from .diagram import to_railroad, railroad_to_html + except ImportError as ie: + raise Exception( + "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams" + ) from ie + + self.streamline() + + railroad = to_railroad( + self, + vertical=vertical, + show_results_names=show_results_names, + show_groups=show_groups, + diagram_kwargs=kwargs, + ) + if not isinstance(output_html, (str, Path)): + # we were passed a file-like object, just write to it + output_html.write(railroad_to_html(railroad, embed=embed, **kwargs)) + return + + with open(output_html, "w", encoding="utf-8") as diag_file: + diag_file.write(railroad_to_html(railroad, embed=embed, **kwargs)) + + # Compatibility synonyms + # fmt: off + inlineLiteralsUsing = staticmethod(replaced_by_pep8("inlineLiteralsUsing", inline_literals_using)) + setDefaultWhitespaceChars = staticmethod(replaced_by_pep8( + "setDefaultWhitespaceChars", set_default_whitespace_chars + )) + disableMemoization = staticmethod(replaced_by_pep8("disableMemoization", disable_memoization)) + enableLeftRecursion = staticmethod(replaced_by_pep8("enableLeftRecursion", enable_left_recursion)) + enablePackrat = staticmethod(replaced_by_pep8("enablePackrat", enable_packrat)) + resetCache = staticmethod(replaced_by_pep8("resetCache", reset_cache)) + + setResultsName = replaced_by_pep8("setResultsName", set_results_name) + setBreak = replaced_by_pep8("setBreak", set_break) + setParseAction = replaced_by_pep8("setParseAction", set_parse_action) + addParseAction = replaced_by_pep8("addParseAction", add_parse_action) + addCondition = replaced_by_pep8("addCondition", add_condition) + setFailAction = replaced_by_pep8("setFailAction", set_fail_action) + tryParse = replaced_by_pep8("tryParse", try_parse) + parseString = replaced_by_pep8("parseString", parse_string) + scanString = replaced_by_pep8("scanString", scan_string) + transformString = replaced_by_pep8("transformString", transform_string) + searchString = 
class _PendingSkip(ParserElement):
    # internal placeholder class to hold a place where '...' is added to a parser element,
    # once another ParserElement is added, this placeholder will be replaced with a SkipTo
    def __init__(self, expr: ParserElement, must_skip: bool = False):
        super().__init__()
        self.anchor = expr
        self.must_skip = must_skip

    def _generateDefaultName(self) -> str:
        # render as "<anchor> ..." by building "<anchor> Empty" and renaming Empty
        return str(self.anchor + Empty()).replace("Empty", "...")

    def __add__(self, other) -> ParserElement:
        # resolve the pending '...' into a SkipTo(other) with results name "_skipped"
        skipper = SkipTo(other).set_name("...")("_skipped*")
        if self.must_skip:

            # parse action for the "anchor matched" branch: drop empty/placeholder
            # skipped text so a zero-length skip leaves no trace
            def must_skip(t):
                if not t._skipped or t._skipped.as_list() == [""]:
                    del t[0]
                    t.pop("_skipped", None)

            # parse action for the "anchor did not match" branch: replace the
            # trailing empty skip with a "missing <anchor>" marker
            def show_skip(t):
                if t._skipped.as_list()[-1:] == [""]:
                    t.pop("_skipped")
                    t["_skipped"] = f"missing <{self.anchor!r}>"

            # note: '+' binds tighter than '|', so this tries anchor-then-skip
            # first, and falls back to skip-only
            return (
                self.anchor + skipper().add_parse_action(must_skip)
                | skipper().add_parse_action(show_skip)
            ) + other

        return self.anchor + skipper + other

    def __repr__(self):
        return self.defaultName

    def parseImpl(self, *args) -> ParseImplReturnType:
        # a _PendingSkip must be combined with a following expression before use
        raise Exception(
            "use of `...` expression without following SkipTo target expression"
        )
class NoMatch(Token):
    """
    A token that never matches, regardless of input.
    """

    def __init__(self):
        super().__init__()
        # flagged as able to return empty, but parseImpl always raises
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # unconditionally fail at the current location
        raise ParseException(instring, loc, self.errmsg, self)
class Empty(Literal):
    """
    An empty token; always matches, consuming no input.
    """

    def __init__(self, match_string="", *, matchString=""):
        # arguments are accepted only for signature compatibility with
        # Literal; an Empty always behaves as a zero-length match
        super().__init__("")
        self.mayReturnEmpty = True
        self.mayIndexError = False

    def _generateDefaultName(self) -> str:
        return "Empty"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # succeed without advancing and without producing tokens
        return loc, []


class _SingleCharLiteral(Literal):
    # internal Literal specialization for 1-character match strings,
    # selected by Literal.__new__ for speed
    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        if instring[loc] == self.firstMatchChar:
            return loc + 1, self.match
        raise ParseException(instring, loc, self.errmsg, self)


ParserElement._literalStringClass = Literal
    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # Match self.match at loc, but only when it is not immediately
        # preceded or followed by an identifier character; on failure the
        # error message and location are refined to say which side broke
        # the keyword boundary.
        errmsg = self.errmsg or ""
        errloc = loc
        if self.caseless:
            # compare uppercased slices against the precomputed caselessmatch
            if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
                if loc == 0 or instring[loc - 1].upper() not in self.identChars:
                    if (
                        loc >= len(instring) - self.matchLen
                        or instring[loc + self.matchLen].upper() not in self.identChars
                    ):
                        return loc + self.matchLen, self.match

                    # followed by keyword char
                    # NOTE(review): this message omits the word "keyword"
                    # unlike the exact-match branch below — looks like an
                    # inconsistency, confirm before changing
                    errmsg += ", was immediately followed by keyword character"
                    errloc = loc + self.matchLen
                else:
                    # preceded by keyword char
                    errmsg += ", keyword was immediately preceded by keyword character"
                    errloc = loc - 1
            # else no match just raise plain exception

        elif (
            instring[loc] == self.firstMatchChar
            and self.matchLen == 1
            or instring.startswith(self.match, loc)
        ):
            if loc == 0 or instring[loc - 1] not in self.identChars:
                if (
                    loc >= len(instring) - self.matchLen
                    or instring[loc + self.matchLen] not in self.identChars
                ):
                    return loc + self.matchLen, self.match

                # followed by keyword char
                errmsg += ", keyword was immediately followed by keyword character"
                errloc = loc + self.matchLen
            else:
                # preceded by keyword char
                errmsg += ", keyword was immediately preceded by keyword character"
                errloc = loc - 1
            # else no match just raise plain exception

        raise ParseException(instring, errloc, errmsg, self)
class CloseMatch(Token):
    """A variation on :class:`Literal` which matches "close" matches,
    that is, strings with at most 'n' mismatching characters.
    :class:`CloseMatch` takes parameters:

    - ``match_string`` - string to be matched
    - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters
    - ``max_mismatches`` - (``default=1``) maximum number of
      mismatches allowed to count as a match

    The results from a successful parse will contain the matched text
    from the input string and the following named results:

    - ``mismatches`` - a list of the positions within the
      match_string where mismatches were found
    - ``original`` - the original match_string used to compare
      against the input string

    If ``mismatches`` is an empty list, then the match was an exact
    match.

    Example::

        patt = CloseMatch("ATCATCGAATGGA")
        patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2)
        patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    """

    def __init__(
        self,
        match_string: str,
        max_mismatches: typing.Optional[int] = None,
        *,
        maxMismatches: int = 1,
        caseless=False,
    ):
        # the PEP8 name wins when given; otherwise fall back to the legacy kwarg
        maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches
        super().__init__()
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = f"Expected {self.match_string!r} (with up to {self.maxMismatches} mismatches)"
        self.caseless = caseless
        self.mayIndexError = False
        self.mayReturnEmpty = False

    def _generateDefaultName(self) -> str:
        return f"{type(self).__name__}:{self.match_string!r}"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        start = loc
        instrlen = len(instring)
        maxloc = start + len(self.match_string)

        # only attempt the comparison if enough input remains
        if maxloc <= instrlen:
            match_string = self.match_string
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches

            # compare character-by-character, recording mismatch positions;
            # bail out via break as soon as the mismatch budget is exceeded
            for match_stringloc, s_m in enumerate(
                zip(instring[loc:maxloc], match_string)
            ):
                src, mat = s_m
                if self.caseless:
                    src, mat = src.lower(), mat.lower()

                if src != mat:
                    mismatches.append(match_stringloc)
                    if len(mismatches) > maxMismatches:
                        break
            else:
                # for/else: loop ran to completion, so this is a close-enough match;
                # match_stringloc holds the last compared index
                loc = start + match_stringloc + 1
                results = ParseResults([instring[start:loc]])
                results["original"] = match_string
                results["mismatches"] = mismatches
                return loc, results

        raise ParseException(instring, loc, self.errmsg, self)
    def __init__(
        self,
        init_chars: str = "",
        body_chars: typing.Optional[str] = None,
        min: int = 1,
        max: int = 0,
        exact: int = 0,
        as_keyword: bool = False,
        exclude_chars: typing.Optional[str] = None,
        *,
        initChars: typing.Optional[str] = None,
        bodyChars: typing.Optional[str] = None,
        asKeyword: bool = False,
        excludeChars: typing.Optional[str] = None,
    ):
        # reconcile legacy camelCase kwargs with the PEP8 names
        initChars = initChars or init_chars
        bodyChars = bodyChars or body_chars
        asKeyword = asKeyword or as_keyword
        excludeChars = excludeChars or exclude_chars
        super().__init__()
        if not initChars:
            raise ValueError(
                f"invalid {type(self).__name__}, initChars cannot be empty string"
            )

        # excluded characters are removed from both initial and body sets
        initChars_set = set(initChars)
        if excludeChars:
            excludeChars_set = set(excludeChars)
            initChars_set -= excludeChars_set
            if bodyChars:
                bodyChars = "".join(set(bodyChars) - excludeChars_set)
        self.initChars = initChars_set
        self.initCharsOrig = "".join(sorted(initChars_set))

        # body characters default to the initial characters when not given
        if bodyChars:
            self.bodyChars = set(bodyChars)
            self.bodyCharsOrig = "".join(sorted(bodyChars))
        else:
            self.bodyChars = initChars_set
            self.bodyCharsOrig = self.initCharsOrig

        self.maxSpecified = max > 0

        if min < 1:
            raise ValueError(
                "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
            )

        if self.maxSpecified and min > max:
            raise ValueError(
                f"invalid args, if min and max both specified min must be <= max (min={min}, max={max})"
            )

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        # exact overrides both min and max
        if exact > 0:
            min = max = exact
            self.maxLen = exact
            self.minLen = exact

        self.errmsg = f"Expected {self.name}"
        self.mayIndexError = False
        self.asKeyword = asKeyword
        if self.asKeyword:
            self.errmsg += " as a keyword"

        # see if we can make a regex for this Word
        # (a space in either set would interfere with whitespace skipping,
        # so in that case no regex is built and the character-scan parseImpl
        # defined on the class is used instead)
        if " " not in (self.initChars | self.bodyChars):
            if len(self.initChars) == 1:
                re_leading_fragment = re.escape(self.initCharsOrig)
            else:
                re_leading_fragment = f"[{_collapse_string_to_ranges(self.initChars)}]"

            if self.bodyChars == self.initChars:
                # single character class with a repeat quantifier
                if max == 0 and self.minLen == 1:
                    repeat = "+"
                elif max == 1:
                    repeat = ""
                else:
                    if self.minLen != self.maxLen:
                        repeat = f"{{{self.minLen},{'' if self.maxLen == _MAX_INT else self.maxLen}}}"
                    else:
                        repeat = f"{{{self.minLen}}}"
                self.reString = f"{re_leading_fragment}{repeat}"
            else:
                # leading character class plus body class; quantifiers on the
                # body are reduced by 1 since the leading char is matched apart
                if max == 1:
                    re_body_fragment = ""
                    repeat = ""
                else:
                    re_body_fragment = f"[{_collapse_string_to_ranges(self.bodyChars)}]"
                    if max == 0 and self.minLen == 1:
                        repeat = "*"
                    elif max == 2:
                        repeat = "?" if min <= 1 else ""
                    else:
                        if min != max:
                            repeat = f"{{{min - 1 if min > 0 else ''},{max - 1 if max > 0 else ''}}}"
                        else:
                            repeat = f"{{{min - 1 if min > 0 else ''}}}"

                self.reString = f"{re_leading_fragment}{re_body_fragment}{repeat}"

            if self.asKeyword:
                self.reString = rf"\b{self.reString}\b"

            try:
                self.re = re.compile(self.reString)
            except re.error:
                # fall back to the character-scan parseImpl
                self.re = None  # type: ignore[assignment]
            else:
                # regex compiled OK - use the faster regex-based parseImpl
                self.re_match = self.re.match
                self.parseImpl = self.parseImpl_regex  # type: ignore[method-assign]
throw_exception = True + + if throw_exception: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + def parseImpl_regex(self, instring, loc, do_actions=True) -> ParseImplReturnType: + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + + +class Char(Word): + """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, + when defining a match of any single character in a string of + characters. + """ + + def __init__( + self, + charset: str, + as_keyword: bool = False, + exclude_chars: typing.Optional[str] = None, + *, + asKeyword: bool = False, + excludeChars: typing.Optional[str] = None, + ): + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__( + charset, exact=1, as_keyword=asKeyword, exclude_chars=excludeChars + ) + + +class Regex(Token): + r"""Token for matching strings that match a given regular + expression. Defined with string specifying the regular expression in + a form recognized by the stdlib Python `re module `_. + If the given regex contains named groups (defined using ``(?P...)``), + these will be preserved as named :class:`ParseResults`. + + If instead of the Python stdlib ``re`` module you wish to use a different RE module + (such as the ``regex`` module), you can do so by building your ``Regex`` object with + a compiled RE that was compiled using ``regex``. 
+ + Example:: + + realnum = Regex(r"[+-]?\d+\.\d*") + # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + + # named fields in a regex will be returned as named results + date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') + + # the Regex class will accept re's compiled using the regex module + import regex + parser = pp.Regex(regex.compile(r'[0-9]')) + """ + + def __init__( + self, + pattern: Any, + flags: Union[re.RegexFlag, int] = 0, + as_group_list: bool = False, + as_match: bool = False, + *, + asGroupList: bool = False, + asMatch: bool = False, + ): + """The parameters ``pattern`` and ``flags`` are passed + to the ``re.compile()`` function as-is. See the Python + `re module `_ module for an + explanation of the acceptable patterns and flags. + """ + super().__init__() + asGroupList = asGroupList or as_group_list + asMatch = asMatch or as_match + + if isinstance(pattern, str_type): + if not pattern: + raise ValueError("null string passed to Regex; use Empty() instead") + + self._re = None + self.reString = self.pattern = pattern + + elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): + self._re = pattern + self.pattern = self.reString = pattern.pattern + + elif callable(pattern): + # defer creating this pattern until we really need it + self.pattern = pattern + self._re = None + + else: + raise TypeError( + "Regex may only be constructed with a string or a compiled RE object," + " or a callable that takes no arguments and returns a string or a" + " compiled RE object" + ) + + self.flags = flags + self.errmsg = f"Expected {self.name}" + self.mayIndexError = False + self.asGroupList = asGroupList + self.asMatch = asMatch + if self.asGroupList: + self.parseImpl = self.parseImplAsGroupList # type: ignore [method-assign] + if self.asMatch: + self.parseImpl = self.parseImplAsMatch # type: ignore [method-assign] + + 
@cached_property + def re(self) -> re.Pattern: + if self._re: + return self._re + + if callable(self.pattern): + # replace self.pattern with the string returned by calling self.pattern() + self.pattern = cast(Callable[[], str], self.pattern)() + + # see if we got a compiled RE back instead of a str - if so, we're done + if hasattr(self.pattern, "pattern") and hasattr(self.pattern, "match"): + self._re = cast(re.Pattern[str], self.pattern) + self.pattern = self.reString = self._re.pattern + return self._re + + try: + self._re = re.compile(self.pattern, self.flags) + return self._re + except re.error: + raise ValueError(f"invalid pattern ({self.pattern!r}) passed to Regex") + + @cached_property + def re_match(self) -> Callable[[str, int], Any]: + return self.re.match + + @cached_property + def mayReturnEmpty(self) -> bool: # type: ignore[override] + return self.re_match("", 0) is not None + + def _generateDefaultName(self) -> str: + unescaped = repr(self.pattern).replace("\\\\", "\\") + return f"Re:({unescaped})" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = ParseResults(result.group()) + d = result.groupdict() + + for k, v in d.items(): + ret[k] = v + + return loc, ret + + def parseImplAsGroupList(self, instring, loc, do_actions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.groups() + return loc, ret + + def parseImplAsMatch(self, instring, loc, do_actions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result + return loc, ret + + def sub(self, repl: str) -> ParserElement: + r""" + Return :class:`Regex` with an attached parse action to transform the parsed + result as if 
called using `re.sub(expr, repl, string) `_. + + Example:: + + make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") + print(make_html.transform_string("h1:main title:")) + # prints "

main title

" + """ + if self.asGroupList: + raise TypeError("cannot use sub() with Regex(as_group_list=True)") + + if self.asMatch and callable(repl): + raise TypeError( + "cannot use sub() with a callable with Regex(as_match=True)" + ) + + if self.asMatch: + + def pa(tokens): + return tokens[0].expand(repl) + + else: + + def pa(tokens): + return self.re.sub(repl, tokens[0]) + + return self.add_parse_action(pa) + + +class QuotedString(Token): + r""" + Token for matching strings that are delimited by quoting characters. + + Defined with the following parameters: + + - ``quote_char`` - string of one or more characters defining the + quote delimiting string + - ``esc_char`` - character to re_escape quotes, typically backslash + (default= ``None``) + - ``esc_quote`` - special quote sequence to re_escape an embedded quote + string (such as SQL's ``""`` to re_escape an embedded ``"``) + (default= ``None``) + - ``multiline`` - boolean indicating whether quotes can span + multiple lines (default= ``False``) + - ``unquote_results`` - boolean indicating whether the matched text + should be unquoted (default= ``True``) + - ``end_quote_char`` - string of one or more characters defining the + end of the quote delimited string (default= ``None`` => same as + quote_char) + - ``convert_whitespace_escapes`` - convert escaped whitespace + (``'\t'``, ``'\n'``, etc.) 
to actual whitespace + (default= ``True``) + + Example:: + + qs = QuotedString('"') + print(qs.search_string('lsjdf "This is the quote" sldjf')) + complex_qs = QuotedString('{{', end_quote_char='}}') + print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) + sql_qs = QuotedString('"', esc_quote='""') + print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + + prints:: + + [['This is the quote']] + [['This is the "quote"']] + [['This is the quote with "embedded" quotes']] + """ + + ws_map = dict(((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))) + + def __init__( + self, + quote_char: str = "", + esc_char: typing.Optional[str] = None, + esc_quote: typing.Optional[str] = None, + multiline: bool = False, + unquote_results: bool = True, + end_quote_char: typing.Optional[str] = None, + convert_whitespace_escapes: bool = True, + *, + quoteChar: str = "", + escChar: typing.Optional[str] = None, + escQuote: typing.Optional[str] = None, + unquoteResults: bool = True, + endQuoteChar: typing.Optional[str] = None, + convertWhitespaceEscapes: bool = True, + ): + super().__init__() + esc_char = escChar or esc_char + esc_quote = escQuote or esc_quote + unquote_results = unquoteResults and unquote_results + end_quote_char = endQuoteChar or end_quote_char + convert_whitespace_escapes = ( + convertWhitespaceEscapes and convert_whitespace_escapes + ) + quote_char = quoteChar or quote_char + + # remove white space from quote chars + quote_char = quote_char.strip() + if not quote_char: + raise ValueError("quote_char cannot be the empty string") + + if end_quote_char is None: + end_quote_char = quote_char + else: + end_quote_char = end_quote_char.strip() + if not end_quote_char: + raise ValueError("end_quote_char cannot be the empty string") + + self.quote_char: str = quote_char + self.quote_char_len: int = len(quote_char) + self.first_quote_char: str = quote_char[0] + self.end_quote_char: str = end_quote_char + 
self.end_quote_char_len: int = len(end_quote_char) + self.esc_char: str = esc_char or "" + self.has_esc_char: bool = esc_char is not None + self.esc_quote: str = esc_quote or "" + self.unquote_results: bool = unquote_results + self.convert_whitespace_escapes: bool = convert_whitespace_escapes + self.multiline = multiline + self.re_flags = re.RegexFlag(0) + + # fmt: off + # build up re pattern for the content between the quote delimiters + inner_pattern: list[str] = [] + + if esc_quote: + inner_pattern.append(rf"(?:{re.escape(esc_quote)})") + + if esc_char: + inner_pattern.append(rf"(?:{re.escape(esc_char)}.)") + + if len(self.end_quote_char) > 1: + inner_pattern.append( + "(?:" + + "|".join( + f"(?:{re.escape(self.end_quote_char[:i])}(?!{re.escape(self.end_quote_char[i:])}))" + for i in range(len(self.end_quote_char) - 1, 0, -1) + ) + + ")" + ) + + if self.multiline: + self.re_flags |= re.MULTILINE | re.DOTALL + inner_pattern.append( + rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}" + rf"{(_escape_regex_range_chars(self.esc_char) if self.has_esc_char else '')}])" + ) + else: + inner_pattern.append( + rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}\n\r" + rf"{(_escape_regex_range_chars(self.esc_char) if self.has_esc_char else '')}])" + ) + + self.pattern = "".join( + [ + re.escape(self.quote_char), + "(?:", + '|'.join(inner_pattern), + ")*", + re.escape(self.end_quote_char), + ] + ) + + if self.unquote_results: + if self.convert_whitespace_escapes: + self.unquote_scan_re = re.compile( + rf"({'|'.join(re.escape(k) for k in self.ws_map)})" + rf"|(\\[0-7]{3}|\\0|\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4})" + rf"|({re.escape(self.esc_char)}.)" + rf"|(\n|.)", + flags=self.re_flags, + ) + else: + self.unquote_scan_re = re.compile( + rf"({re.escape(self.esc_char)}.)" + rf"|(\n|.)", + flags=self.re_flags + ) + # fmt: on + + try: + self.re = re.compile(self.pattern, self.re_flags) + self.reString = self.pattern + self.re_match = self.re.match + except 
re.error: + raise ValueError(f"invalid pattern {self.pattern!r} passed to Regex") + + self.errmsg = f"Expected {self.name}" + self.mayIndexError = False + self.mayReturnEmpty = True + + def _generateDefaultName(self) -> str: + if self.quote_char == self.end_quote_char and isinstance( + self.quote_char, str_type + ): + return f"string enclosed in {self.quote_char!r}" + + return f"quoted string, starting with {self.quote_char} ending with {self.end_quote_char}" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + # check first character of opening quote to see if that is a match + # before doing the more complicated regex match + result = ( + instring[loc] == self.first_quote_char + and self.re_match(instring, loc) + or None + ) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + # get ending loc and matched string from regex matching result + loc = result.end() + ret = result.group() + + def convert_escaped_numerics(s: str) -> str: + if s == "0": + return "\0" + if s.isdigit() and len(s) == 3: + return chr(int(s, base=8)) + elif s.startswith(("u", "x")): + return chr(int(s[1:], base=16)) + else: + return s + + if self.unquote_results: + # strip off quotes + ret = ret[self.quote_char_len : -self.end_quote_char_len] + + if isinstance(ret, str_type): + # fmt: off + if self.convert_whitespace_escapes: + # as we iterate over matches in the input string, + # collect from whichever match group of the unquote_scan_re + # regex matches (only 1 group will match at any given time) + ret = "".join( + # match group 1 matches \t, \n, etc. 
+ self.ws_map[match.group(1)] if match.group(1) + # match group 2 matches escaped octal, null, hex, and Unicode + # sequences + else convert_escaped_numerics(match.group(2)[1:]) if match.group(2) + # match group 3 matches escaped characters + else match.group(3)[-1] if match.group(3) + # match group 4 matches any character + else match.group(4) + for match in self.unquote_scan_re.finditer(ret) + ) + else: + ret = "".join( + # match group 1 matches escaped characters + match.group(1)[-1] if match.group(1) + # match group 2 matches any character + else match.group(2) + for match in self.unquote_scan_re.finditer(ret) + ) + # fmt: on + + # replace escaped quotes + if self.esc_quote: + ret = ret.replace(self.esc_quote, self.end_quote_char) + + return loc, ret + + +class CharsNotIn(Token): + """Token for matching words composed of characters *not* in a given + set (will include whitespace in matched characters if not listed in + the provided exclusion set - see example). Defined with string + containing all disallowed characters, and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. 
+ + Example:: + + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(DelimitedList(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) + + prints:: + + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + + def __init__( + self, + not_chars: str = "", + min: int = 1, + max: int = 0, + exact: int = 0, + *, + notChars: str = "", + ): + super().__init__() + self.skipWhitespace = False + self.notChars = not_chars or notChars + self.notCharsSet = set(self.notChars) + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use" + " Opt(CharsNotIn()) if zero-length char group is permitted" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.errmsg = f"Expected {self.name}" + self.mayReturnEmpty = self.minLen == 0 + self.mayIndexError = False + + def _generateDefaultName(self) -> str: + not_chars_str = _collapse_string_to_ranges(self.notChars) + if len(not_chars_str) > 16: + return f"!W:({self.notChars[: 16 - 3]}...)" + else: + return f"!W:({self.notChars})" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + notchars = self.notCharsSet + if instring[loc] in notchars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + maxlen = min(start + self.maxLen, len(instring)) + while loc < maxlen and instring[loc] not in notchars: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class White(Token): + """Special matching class for matching whitespace. Normally, + whitespace is ignored by pyparsing grammars. This class is included + when some whitespace structures are significant. Define with + a string containing the whitespace characters to be matched; default + is ``" \\t\\r\\n"``. 
Also takes optional ``min``, + ``max``, and ``exact`` arguments, as defined for the + :class:`Word` class. + """ + + whiteStrs = { + " ": "", + "\t": "", + "\n": "", + "\r": "", + "\f": "", + "\u00A0": "", + "\u1680": "", + "\u180E": "", + "\u2000": "", + "\u2001": "", + "\u2002": "", + "\u2003": "", + "\u2004": "", + "\u2005": "", + "\u2006": "", + "\u2007": "", + "\u2008": "", + "\u2009": "", + "\u200A": "", + "\u200B": "", + "\u202F": "", + "\u205F": "", + "\u3000": "", + } + + def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0): + super().__init__() + self.matchWhite = ws + self.set_whitespace_chars( + "".join(c for c in self.whiteStrs if c not in self.matchWhite), + copy_defaults=True, + ) + # self.leave_whitespace() + self.mayReturnEmpty = True + self.errmsg = f"Expected {self.name}" + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + def _generateDefaultName(self) -> str: + return "".join(White.whiteStrs[c] for c in self.matchWhite) + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if instring[loc] not in self.matchWhite: + raise ParseException(instring, loc, self.errmsg, self) + start = loc + loc += 1 + maxloc = start + self.maxLen + maxloc = min(maxloc, len(instring)) + while loc < maxloc and instring[loc] in self.matchWhite: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class PositionToken(Token): + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + + +class GoToColumn(PositionToken): + """Token to advance to a specific column of input text; useful for + tabular report scraping. 
+ """ + + def __init__(self, colno: int): + super().__init__() + self.col = colno + + def preParse(self, instring: str, loc: int) -> int: + if col(loc, instring) == self.col: + return loc + + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + while ( + loc < instrlen + and instring[loc].isspace() + and col(loc, instring) != self.col + ): + loc += 1 + + return loc + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + thiscol = col(loc, instring) + if thiscol > self.col: + raise ParseException(instring, loc, "Text not in expected column", self) + newloc = loc + self.col - thiscol + ret = instring[loc:newloc] + return newloc, ret + + +class LineStart(PositionToken): + r"""Matches if current position is at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (LineStart() + 'AAA' + rest_of_line).search_string(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + + def __init__(self): + super().__init__() + self.leave_whitespace() + self.orig_whiteChars = set() | self.whiteChars + self.whiteChars.discard("\n") + self.skipper = Empty().set_whitespace_chars(self.whiteChars) + self.set_name("start of line") + + def preParse(self, instring: str, loc: int) -> int: + if loc == 0: + return loc + + ret = self.skipper.preParse(instring, loc) + + if "\n" in self.orig_whiteChars: + while instring[ret : ret + 1] == "\n": + ret = self.skipper.preParse(instring, ret + 1) + + return ret + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) + + +class LineEnd(PositionToken): + """Matches if current position is at the end of a line within the + parse string + """ + + def __init__(self): + super().__init__() + 
self.whiteChars.discard("\n") + self.set_whitespace_chars(self.whiteChars, copy_defaults=False) + self.set_name("end of line") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if loc < len(instring): + if instring[loc] == "\n": + return loc + 1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + + +class StringStart(PositionToken): + """Matches if current position is at the beginning of the parse + string + """ + + def __init__(self): + super().__init__() + self.set_name("start of text") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + # see if entire string up to here is just whitespace and ignoreables + if loc != 0 and loc != self.preParse(instring, 0): + raise ParseException(instring, loc, self.errmsg, self) + + return loc, [] + + +class StringEnd(PositionToken): + """ + Matches if current position is at the end of the parse string + """ + + def __init__(self): + super().__init__() + self.set_name("end of text") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + if loc == len(instring): + return loc + 1, [] + if loc > len(instring): + return loc, [] + + raise ParseException(instring, loc, self.errmsg, self) + + +class WordStart(PositionToken): + """Matches if the current position is at the beginning of a + :class:`Word`, and is not preceded by any character in a given + set of ``word_chars`` (default= ``printables``). To emulate the + ``\b`` behavior of regular expressions, use + ``WordStart(alphanums)``. ``WordStart`` will also match at + the beginning of the string being parsed, or at the beginning of + a line. 
+ """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.set_name("start of a word") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if loc != 0: + if ( + instring[loc - 1] in self.wordChars + or instring[loc] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class WordEnd(PositionToken): + """Matches if the current position is at the end of a :class:`Word`, + and is not followed by any character in a given set of ``word_chars`` + (default= ``printables``). To emulate the ``\b`` behavior of + regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` + will also match at the end of the string being parsed, or at the end + of a line. + """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.set_name("end of a word") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + instrlen = len(instring) + if instrlen > 0 and loc < instrlen: + if ( + instring[loc] in self.wordChars + or instring[loc - 1] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class Tag(Token): + """ + A meta-element for inserting a named result into the parsed + tokens that may be checked later in a parse action or while + processing the parsed results. Accepts an optional tag value, + defaulting to `True`. + + Example:: + + end_punc = "." | ("!" 
+ Tag("enthusiastic"))) + greeting = "Hello," + Word(alphas) + end_punc + + result = greeting.parse_string("Hello, World.") + print(result.dump()) + + result = greeting.parse_string("Hello, World!") + print(result.dump()) + + prints:: + + ['Hello,', 'World', '.'] + + ['Hello,', 'World', '!'] + - enthusiastic: True + """ + + def __init__(self, tag_name: str, value: Any = True): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + self.leave_whitespace() + self.tag_name = tag_name + self.tag_value = value + self.add_parse_action(self._add_tag) + + def _add_tag(self, tokens: ParseResults): + tokens[self.tag_name] = self.tag_value + + def _generateDefaultName(self) -> str: + return f"{type(self).__name__}:{self.tag_name}={self.tag_value!r}" + + +class ParseExpression(ParserElement): + """Abstract subclass of ParserElement, for combining and + post-processing parsed tokens. + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + super().__init__(savelist) + self.exprs: list[ParserElement] + if isinstance(exprs, _generatorType): + exprs = list(exprs) + + if isinstance(exprs, str_type): + self.exprs = [self._literalStringClass(exprs)] + elif isinstance(exprs, ParserElement): + self.exprs = [exprs] + elif isinstance(exprs, Iterable): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if any(isinstance(expr, str_type) for expr in exprs): + exprs = ( + self._literalStringClass(e) if isinstance(e, str_type) else e + for e in exprs + ) + self.exprs = list(exprs) + else: + try: + self.exprs = list(exprs) + except TypeError: + self.exprs = [exprs] + self.callPreparse = False + + def recurse(self) -> list[ParserElement]: + return self.exprs[:] + + def append(self, other) -> ParserElement: + self.exprs.append(other) + self._defaultName = None + return self + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``leave_whitespace`` defined in base class, 
and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().leave_whitespace(recursive) + + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().ignore_whitespace(recursive) + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + else: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + return self + + def _generateDefaultName(self) -> str: + return f"{type(self).__name__}:({self.exprs})" + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) + if len(self.exprs) == 2: + other = self.exprs[0] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = other.exprs[:] + [self.exprs[1]] + self._defaultName = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = self.exprs[:-1] + other.exprs[:] + self._defaultName = None + 
self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = f"Expected {self}" + + return self + + def validate(self, validateTrace=None) -> None: + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + tmp = (validateTrace if validateTrace is not None else [])[:] + [self] + for e in self.exprs: + e.validate(tmp) + self._checkRecursion([]) + + def copy(self) -> ParserElement: + ret = super().copy() + ret = typing.cast(ParseExpression, ret) + ret.exprs = [e.copy() for e in self.exprs] + return ret + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if not ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + return super()._setResultsName(name, list_all_matches) + + for e in self.exprs: + if ( + isinstance(e, ParserElement) + and e.resultsName + and ( + Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ) + ): + warning = ( + "warn_ungrouped_named_tokens_in_collection:" + f" setting results name {name!r} on {type(self).__name__} expression" + f" collides with {e.resultsName!r} on contained expression" + ) + warnings.warn(warning, stacklevel=3) + break + + return super()._setResultsName(name, list_all_matches) + + # Compatibility synonyms + # fmt: off + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + # fmt: on + + +class And(ParseExpression): + """ + Requires all given :class:`ParserElement` s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the ``'+'`` operator. + May also be constructed using the ``'-'`` operator, which will + suppress backtracking. 
+ + Example:: + + integer = Word(nums) + name_expr = Word(alphas)[1, ...] + + expr = And([integer("id"), name_expr("name"), integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.leave_whitespace() + + def _generateDefaultName(self) -> str: + return "-" + + def __init__( + self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True + ): + exprs: list[ParserElement] = list(exprs_arg) + if exprs and Ellipsis in exprs: + tmp: list[ParserElement] = [] + for i, expr in enumerate(exprs): + if expr is not Ellipsis: + tmp.append(expr) + continue + + if i < len(exprs) - 1: + skipto_arg: ParserElement = typing.cast( + ParseExpression, (Empty() + exprs[i + 1]) + ).exprs[-1] + tmp.append(SkipTo(skipto_arg)("_skipped*")) + continue + + raise Exception("cannot construct And with sequence ending in ...") + exprs[:] = tmp + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + if not isinstance(self.exprs[0], White): + self.set_whitespace_chars( + self.exprs[0].whiteChars, + copy_defaults=self.exprs[0].copyDefaultWhiteChars, + ) + self.skipWhitespace = self.exprs[0].skipWhitespace + else: + self.skipWhitespace = False + else: + self.mayReturnEmpty = True + self.callPreparse = True + + def streamline(self) -> ParserElement: + # collapse any _PendingSkip's + if self.exprs and any( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1] + ): + deleted_expr_marker = NoMatch() + for i, e in enumerate(self.exprs[:-1]): + if e is deleted_expr_marker: + continue + if ( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + ): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = deleted_expr_marker + self.exprs = [e for e in self.exprs 
if e is not deleted_expr_marker] + + super().streamline() + + # link any IndentedBlocks to the prior expression + prev: ParserElement + cur: ParserElement + for prev, cur in zip(self.exprs, self.exprs[1:]): + # traverse cur or any first embedded expr of cur looking for an IndentedBlock + # (but watch out for recursive grammar) + seen = set() + while True: + if id(cur) in seen: + break + seen.add(id(cur)) + if isinstance(cur, IndentedBlock): + prev.add_parse_action( + lambda s, l, t, cur_=cur: setattr( + cur_, "parent_anchor", col(l, s) + ) + ) + break + subs = cur.recurse() + next_first = next(iter(subs), None) + if next_first is None: + break + cur = typing.cast(ParserElement, next_first) + + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl(self, instring, loc, do_actions=True): + # pass False as callPreParse arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse( + instring, loc, do_actions, callPreParse=False + ) + errorStop = False + for e in self.exprs[1:]: + # if isinstance(e, And._ErrorStop): + if type(e) is And._ErrorStop: + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse(instring, loc, do_actions) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException( + instring, len(instring), self.errmsg, self + ) + else: + loc, exprtokens = e._parse(instring, loc, do_actions) + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # And([self, other]) + + def _checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in 
self.exprs: + e._checkRecursion(subRecCheckList) + if not e.mayReturnEmpty: + break + + def _generateDefaultName(self) -> str: + inner = " ".join(str(e) for e in self.exprs) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return f"{{{inner}}}" + + +class Or(ParseExpression): + """Requires that at least one :class:`ParserElement` is found. If + two expressions match, the expression that matches the longest + string will be used. May be constructed using the ``'^'`` + operator. + + Example:: + + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums)) + print(number.search_string("123 3.1416 789")) + + prints:: + + [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + return self + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + maxExcLoc = -1 + maxException = None + matches: list[tuple[int, ParserElement]] = [] + fatals: list[ParseFatalException] = [] + if all(e.callPreparse for e in self.exprs): + loc = self.preParse(instring, loc) + for e in self.exprs: + try: + loc2 = e.try_parse(instring, loc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parser_element = e + fatals.append(pfe) + maxException = None + maxExcLoc = -1 + except ParseException as err: + if 
not fatals: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + # re-evaluate all matches in descending order of length of match, in case attached actions + # might change whether or how much they match of the input. + matches.sort(key=itemgetter(0), reverse=True) + + if not do_actions: + # no further conditions or parse actions to change the selection of + # alternative, so the first match will be the best match + best_expr = matches[0][1] + return best_expr._parse(instring, loc, do_actions) + + longest: tuple[int, typing.Optional[ParseResults]] = -1, None + for loc1, expr1 in matches: + if loc1 <= longest[0]: + # already have a longer match than this one will deliver, we are done + return longest + + try: + loc2, toks = expr1._parse(instring, loc, do_actions) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + else: + if loc2 >= loc1: + return loc2, toks + # didn't match as much as before + elif loc2 > longest[0]: + longest = loc2, toks + + if longest != (-1, None): + return longest + + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element)))) + max_fatal = fatals[0] + raise max_fatal + + if maxException is not None: + # infer from this check that all alternatives failed at the current position + # so emit this collective error message instead of any single error message + if maxExcLoc == loc: + maxException.msg = self.errmsg or "" + raise maxException + + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ixor__(self, 
other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # Or([self, other]) + + def _generateDefaultName(self) -> str: + return f"{{{' ^ '.join(str(e) for e in self.exprs)}}}" + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warning = ( + "warn_multiple_tokens_in_named_alternation:" + f" setting results name {name!r} on {type(self).__name__} expression" + " will return a list of all parsed tokens in an And alternative," + " in prior versions only the first token was returned; enclose" + " contained argument in Group" + ) + warnings.warn(warning, stacklevel=3) + + return super()._setResultsName(name, list_all_matches) + + +class MatchFirst(ParseExpression): + """Requires that at least one :class:`ParserElement` is found. If + more than one expression matches, the first one listed is the one that will + match. May be constructed using the ``'|'`` operator. + + Example:: + + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) + print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + if self.exprs: + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + maxExcLoc = -1 + maxException = None + + for e in self.exprs: + try: + return e._parse(instring, loc, do_actions) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parser_element = e + raise + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + + if maxException is not None: + # infer from this check that all alternatives failed at the current position + # so emit this collective error message instead of any individual error message + if maxExcLoc == loc: + maxException.msg = self.errmsg or "" + raise maxException + + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ior__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # MatchFirst([self, other]) 
+ + def _generateDefaultName(self) -> str: + return f"{{{' | '.join(str(e) for e in self.exprs)}}}" + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warning = ( + "warn_multiple_tokens_in_named_alternation:" + f" setting results name {name!r} on {type(self).__name__} expression" + " will return a list of all parsed tokens in an And alternative," + " in prior versions only the first token was returned; enclose" + " contained argument in Group" + ) + warnings.warn(warning, stacklevel=3) + + return super()._setResultsName(name, list_all_matches) + + +class Each(ParseExpression): + """Requires all given :class:`ParserElement` s to be found, but in + any order. Expressions may be separated by whitespace. + + May be constructed using the ``'&'`` operator. 
+ + Example:: + + color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) + + shape_spec.run_tests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + + prints:: + + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + self.skipWhitespace = True + self.initExprGroups = True + self.saveAsList = True + + def __iand__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # 
Each([self, other]) + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if self.initExprGroups: + self.opt1map = dict( + (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) + ) + opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] + opt2 = [ + e + for e in self.exprs + if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) + ] + self.optionals = opt1 + opt2 + self.multioptionals = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, _MultipleMatch) + ] + self.multirequired = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, OneOrMore) + ] + self.required = [ + e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) + ] + self.required += self.multirequired + self.initExprGroups = False + + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + multis = self.multioptionals[:] + matchOrder: list[ParserElement] = [] + + keepMatching = True + failed: list[ParserElement] = [] + fatals: list[ParseFatalException] = [] + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + multis + failed.clear() + fatals.clear() + for e in tmpExprs: + try: + tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parser_element = e + fatals.append(pfe) + failed.append(e) + except ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e), e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + # look for any ParseFatalExceptions + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if 
fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element)))) + max_fatal = fatals[0] + raise max_fatal + + if tmpReqd: + missing = ", ".join([str(e) for e in tmpReqd]) + raise ParseException( + instring, + loc, + f"Missing one or more required elements ({missing})", + ) + + # add any unmatched Opts, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] + + total_results = ParseResults([]) + for e in matchOrder: + loc, results = e._parse(instring, loc, do_actions) + total_results += results + + return loc, total_results + + def _generateDefaultName(self) -> str: + return f"{{{' & '.join(str(e) for e in self.exprs)}}}" + + +class ParseElementEnhance(ParserElement): + """Abstract subclass of :class:`ParserElement`, for combining and + post-processing parsed tokens. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): + super().__init__(savelist) + if isinstance(expr, str_type): + expr_str = typing.cast(str, expr) + if issubclass(self._literalStringClass, Token): + expr = self._literalStringClass(expr_str) # type: ignore[call-arg] + elif issubclass(type(self), self._literalStringClass): + expr = Literal(expr_str) + else: + expr = self._literalStringClass(Literal(expr_str)) # type: ignore[assignment, call-arg] + expr = typing.cast(ParserElement, expr) + self.expr = expr + if expr is not None: + self.mayIndexError = expr.mayIndexError + self.mayReturnEmpty = expr.mayReturnEmpty + self.set_whitespace_chars( + expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars + ) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def recurse(self) -> list[ParserElement]: + return [self.expr] if self.expr is not None else [] + + def parseImpl(self, instring, loc, do_actions=True): + if self.expr is None: + raise 
ParseException(instring, loc, "No expression defined", self) + + try: + return self.expr._parse(instring, loc, do_actions, callPreParse=False) + except ParseSyntaxException: + raise + except ParseBaseException as pbe: + pbe.pstr = pbe.pstr or instring + pbe.loc = pbe.loc or loc + pbe.parser_element = pbe.parser_element or self + if not isinstance(self, Forward) and self.customName is not None: + if self.errmsg: + pbe.msg = self.errmsg + raise + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + super().leave_whitespace(recursive) + + if recursive: + if self.expr is not None: + self.expr = self.expr.copy() + self.expr.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + super().ignore_whitespace(recursive) + + if recursive: + if self.expr is not None: + self.expr = self.expr.copy() + self.expr.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + if not isinstance(other, Suppress) or other not in self.ignoreExprs: + super().ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + + return self + + def streamline(self) -> ParserElement: + super().streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def _checkRecursion(self, parseElementList): + if self in parseElementList: + raise RecursiveGrammarException(parseElementList + [self]) + subRecCheckList = parseElementList[:] + [self] + if self.expr is not None: + self.expr._checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None) -> None: + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + if validateTrace is None: + validateTrace = [] + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self) -> str: + return 
f"{type(self).__name__}:({self.expr})" + + # Compatibility synonyms + # fmt: off + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + # fmt: on + + +class IndentedBlock(ParseElementEnhance): + """ + Expression to match one or more expressions at a given indentation level. + Useful for parsing text where structure is implied by indentation (like Python source code). + """ + + class _Indent(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = f"expected indent at column {ref_col}" + self.add_condition(lambda s, l, t: col(l, s) == ref_col) + + class _IndentGreater(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = f"expected indent at column greater than {ref_col}" + self.add_condition(lambda s, l, t: col(l, s) > ref_col) + + def __init__( + self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True + ): + super().__init__(expr, savelist=True) + # if recursive: + # raise NotImplementedError("IndentedBlock with recursive is not implemented") + self._recursive = recursive + self._grouped = grouped + self.parent_anchor = 1 + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + # advance parse position to non-whitespace by using an Empty() + # this should be the column to be used for all subsequent indented lines + anchor_loc = Empty().preParse(instring, loc) + + # see if self.expr matches at the current location - if not it will raise an exception + # and no further work is necessary + self.expr.try_parse(instring, anchor_loc, do_actions=do_actions) + + indent_col = col(anchor_loc, instring) + peer_detect_expr = self._Indent(indent_col) + + inner_expr = Empty() + peer_detect_expr + self.expr + if self._recursive: + sub_indent = self._IndentGreater(indent_col) + nested_block = IndentedBlock( + self.expr, recursive=self._recursive, grouped=self._grouped + ) + 
            # (tail of IndentedBlock.parseImpl, continued: recursive sub-block branch)
            nested_block.set_debug(self.debug)
            nested_block.parent_anchor = indent_col
            inner_expr += Opt(sub_indent + nested_block)

        # tag the synthesized expression with its id and indent column to aid debugging
        inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}")
        block = OneOrMore(inner_expr)

        # the block is terminated by a dedent back to the parent's anchor column,
        # or by the end of the input string
        trailing_undent = self._Indent(self.parent_anchor) | StringEnd()

        # wrap matched lines in a Group unless grouping was disabled in __init__
        if self._grouped:
            wrapper = Group
        else:
            wrapper = lambda expr: expr  # type: ignore[misc, assignment]
        return (wrapper(block) + Optional(trailing_undent)).parseImpl(
            instring, anchor_loc, do_actions
        )


class AtStringStart(ParseElementEnhance):
    """Matches if expression matches at the beginning of the parse
    string::

        AtStringStart(Word(nums)).parse_string("123")
        # prints ["123"]

        AtStringStart(Word(nums)).parse_string("    123")
        # raises ParseException
    """

    def __init__(self, expr: Union[ParserElement, str]):
        super().__init__(expr)
        # disable whitespace pre-parsing so the position check below sees the
        # raw parse location
        self.callPreparse = False

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # only location 0 is the start of the string
        if loc != 0:
            raise ParseException(instring, loc, "not found at string start")
        return super().parseImpl(instring, loc, do_actions)


class AtLineStart(ParseElementEnhance):
    r"""Matches if an expression matches at the beginning of a line within
    the parse string

    Example::

        test = '''\
        AAA this line
        AAA and this line
        AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (AtLineStart('AAA') + rest_of_line).search_string(test):
            print(t)

    prints::

        ['AAA', ' this line']
        ['AAA', ' and this line']

    """

    def __init__(self, expr: Union[ParserElement, str]):
        super().__init__(expr)
        # disable whitespace pre-parsing so the column check below sees the
        # raw parse location
        self.callPreparse = False

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # col() is 1-based: column 1 means loc is the first character of a line
        if col(loc, instring) != 1:
            raise ParseException(instring, loc, "not found at line start")
        return super().parseImpl(instring, loc, do_actions)


class FollowedBy(ParseElementEnhance):
    """Lookahead matching of the given parse expression.
+ ``FollowedBy`` does *not* advance the parsing position within + the input string, it only verifies that the specified parse + expression matches at the current position. ``FollowedBy`` + always returns a null token list. If any results names are defined + in the lookahead expression, those *will* be returned for access by + name. + + Example:: + + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + + attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() + + prints:: + + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + # by using self._expr.parse and deleting the contents of the returned ParseResults list + # we keep any named results that were defined in the FollowedBy expression + _, ret = self.expr._parse(instring, loc, do_actions=do_actions) + del ret[:] + + return loc, ret + + +class PrecededBy(ParseElementEnhance): + """Lookbehind matching of the given parse expression. + ``PrecededBy`` does not advance the parsing position within the + input string, it only verifies that the specified parse expression + matches prior to the current position. ``PrecededBy`` always + returns a null token list, but if a results name is defined on the + given expression, it is returned. 
+ + Parameters: + + - ``expr`` - expression that must match prior to the current parse + location + - ``retreat`` - (default= ``None``) - (int) maximum number of characters + to lookbehind prior to the current parse location + + If the lookbehind expression is a string, :class:`Literal`, + :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` + with a specified exact or maximum length, then the retreat + parameter is not required. Otherwise, retreat must be specified to + give a maximum number of characters to look back from + the current parse position for a lookbehind match. + + Example:: + + # VB-style variable names with type prefixes + int_var = PrecededBy("#") + pyparsing_common.identifier + str_var = PrecededBy("$") + pyparsing_common.identifier + + """ + + def __init__(self, expr: Union[ParserElement, str], retreat: int = 0): + super().__init__(expr) + self.expr = self.expr().leave_whitespace() + self.mayReturnEmpty = True + self.mayIndexError = False + self.exact = False + if isinstance(expr, str_type): + expr = typing.cast(str, expr) + retreat = len(expr) + self.exact = True + elif isinstance(expr, (Literal, Keyword)): + retreat = expr.matchLen + self.exact = True + elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: + retreat = expr.maxLen + self.exact = True + elif isinstance(expr, PositionToken): + retreat = 0 + self.exact = True + self.retreat = retreat + self.errmsg = f"not preceded by {expr}" + self.skipWhitespace = False + self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) + + def parseImpl(self, instring, loc=0, do_actions=True) -> ParseImplReturnType: + if self.exact: + if loc < self.retreat: + raise ParseException(instring, loc, self.errmsg, self) + start = loc - self.retreat + _, ret = self.expr._parse(instring, start) + return loc, ret + + # retreat specified a maximum lookbehind window, iterate + test_expr = self.expr + StringEnd() + instring_slice = instring[max(0, loc - self.retreat) : loc] + 
        # (tail of PrecededBy.parseImpl: variable-length lookbehind)
        # no match yet - seed the failure with the generic "not preceded by" error
        last_expr: ParseBaseException = ParseException(instring, loc, self.errmsg, self)

        # try progressively larger lookbehind windows, shortest first; the
        # for/else raises the most recent failure only if no window matched
        for offset in range(1, min(loc, self.retreat + 1) + 1):
            try:
                # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
                _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)
            except ParseBaseException as pbe:
                last_expr = pbe
            else:
                break
        else:
            raise last_expr

        # lookbehind succeeded; parse position is unchanged
        return loc, ret


class Located(ParseElementEnhance):
    """
    Decorates a returned token with its starting and ending
    locations in the input string.

    This helper adds the following results names:

    - ``locn_start`` - location where matched expression begins
    - ``locn_end`` - location where matched expression ends
    - ``value`` - the actual parsed results

    Be careful if the input text contains ``<TAB>`` characters, you
    may want to call :class:`ParserElement.parse_with_tabs`

    Example::

        wd = Word(alphas)
        for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
            print(match)

    prints::

        [0, ['ljsdf'], 5]
        [8, ['lksdjjf'], 15]
        [18, ['lkkjj'], 23]

    """

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # remember the start position; expr._parse advances loc to the match end
        start = loc
        loc, tokens = self.expr._parse(instring, start, do_actions, callPreParse=False)
        # results are [start, tokens, end], also retrievable by results name
        ret_tokens = ParseResults([start, tokens, loc])
        ret_tokens["locn_start"] = start
        ret_tokens["value"] = tokens
        ret_tokens["locn_end"] = loc
        if self.resultsName:
            # must return as a list, so that the name will be attached to the complete group
            return loc, [ret_tokens]
        else:
            return loc, ret_tokens


class NotAny(ParseElementEnhance):
    """
    Lookahead to disallow matching with the given parse expression.
    ``NotAny`` does *not* advance the parsing position within the
    input string, it only verifies that the specified parse expression
    does *not* match at the current position.  Also, ``NotAny`` does
    *not* skip over leading whitespace. ``NotAny`` always returns
    a null token list. 
May be constructed using the ``'~'`` operator. + + Example:: + + AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) + + # take care not to mistake keywords for identifiers + ident = ~(AND | OR | NOT) + Word(alphas) + boolean_term = Opt(NOT) + ident + + # very crude boolean expression - to support parenthesis groups and + # operation hierarchy, use infix_notation + boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] + + # integers that are followed by "." are actually floats + integer = Word(nums) + ~Char(".") + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + # do NOT use self.leave_whitespace(), don't want to propagate to exprs + # self.leave_whitespace() + self.skipWhitespace = False + + self.mayReturnEmpty = True + self.errmsg = f"Found unwanted token, {self.expr}" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if self.expr.can_parse_next(instring, loc, do_actions=do_actions): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + def _generateDefaultName(self) -> str: + return f"~{{{self.expr}}}" + + +class _MultipleMatch(ParseElementEnhance): + def __init__( + self, + expr: Union[str, ParserElement], + stop_on: typing.Optional[Union[ParserElement, str]] = None, + *, + stopOn: typing.Optional[Union[ParserElement, str]] = None, + ): + super().__init__(expr) + stopOn = stopOn or stop_on + self.saveAsList = True + ender = stopOn + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.stopOn(ender) + + def stopOn(self, ender) -> ParserElement: + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None + return self + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = 
self.not_ender.try_parse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse(instring, loc, do_actions) + try: + hasIgnoreExprs = not not self.ignoreExprs + while 1: + if check_ender: + try_not_ender(instring, loc) + if hasIgnoreExprs: + preloc = self_skip_ignorables(instring, loc) + else: + preloc = loc + loc, tmptokens = self_expr_parse(instring, preloc, do_actions) + tokens += tmptokens + except (ParseException, IndexError): + pass + + return loc, tokens + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + for e in [self.expr] + self.expr.recurse(): + if ( + isinstance(e, ParserElement) + and e.resultsName + and ( + Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ) + ): + warning = ( + "warn_ungrouped_named_tokens_in_collection:" + f" setting results name {name!r} on {type(self).__name__} expression" + f" collides with {e.resultsName!r} on contained expression" + ) + warnings.warn(warning, stacklevel=3) + break + + return super()._setResultsName(name, list_all_matches) + + +class OneOrMore(_MultipleMatch): + """ + Repetition of one or more of the given expression. + + Parameters: + + - ``expr`` - expression that must match one or more times + - ``stop_on`` - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) + + text = "shape: SQUARE posn: upper left color: BLACK" + attr_expr[1, ...].parse_string(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']]

        # use stop_on attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
        OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]

        # could also be written as
        (attr_expr * (1,)).parse_string(text).pprint()
    """

    def _generateDefaultName(self) -> str:
        # "{expr}..." denotes one-or-more repetition in generated names
        return f"{{{self.expr}}}..."


class ZeroOrMore(_MultipleMatch):
    """
    Optional repetition of zero or more of the given expression.

    Parameters:

    - ``expr`` - expression that must match zero or more times
    - ``stop_on`` - expression for a terminating sentinel
      (only required if the sentinel would ordinarily match the repetition
      expression) - (default= ``None``)

    Example: similar to :class:`OneOrMore`
    """

    def __init__(
        self,
        expr: Union[str, ParserElement],
        stop_on: typing.Optional[Union[ParserElement, str]] = None,
        *,
        stopOn: typing.Optional[Union[ParserElement, str]] = None,
    ):
        # accept both the pep8 `stop_on` and the camelCase `stopOn` spellings
        super().__init__(expr, stopOn=stopOn or stop_on)
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        try:
            return super().parseImpl(instring, loc, do_actions)
        except (ParseException, IndexError):
            # zero matches is still a success: swallow the failure and return
            # empty results at the original location
            return loc, ParseResults([], name=self.resultsName)

    def _generateDefaultName(self) -> str:
        # "[expr]..." denotes zero-or-more repetition in generated names
        return f"[{self.expr}]..."


class DelimitedList(ParseElementEnhance):
    def __init__(
        self,
        expr: Union[str, ParserElement],
        delim: Union[str, ParserElement] = ",",
        combine: bool = False,
        min: typing.Optional[int] = None,
        max: typing.Optional[int] = None,
        *,
        allow_trailing_delim: bool = False,
    ):
        """Helper to define a delimited list of expressions - the delimiter
        defaults to ','. 
By default, the list elements and delimiters can + have intervening whitespace, and comments, but this can be + overridden by passing ``combine=True`` in the constructor. If + ``combine`` is set to ``True``, the matching tokens are + returned as a single token string, with the delimiters included; + otherwise, the matching tokens are returned as a list of tokens, + with the delimiters suppressed. + + If ``allow_trailing_delim`` is set to True, then the list may end with + a delimiter. + + Example:: + + DelimitedList(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] + DelimitedList(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] + """ + if isinstance(expr, str_type): + expr = ParserElement._literalStringClass(expr) + expr = typing.cast(ParserElement, expr) + + if min is not None and min < 1: + raise ValueError("min must be greater than 0") + + if max is not None and min is not None and max < min: + raise ValueError("max must be greater than, or equal to min") + + self.content = expr + self.raw_delim = str(delim) + self.delim = delim + self.combine = combine + if not combine: + self.delim = Suppress(delim) + self.min = min or 1 + self.max = max + self.allow_trailing_delim = allow_trailing_delim + + delim_list_expr = self.content + (self.delim + self.content) * ( + self.min - 1, + None if self.max is None else self.max - 1, + ) + if self.allow_trailing_delim: + delim_list_expr += Opt(self.delim) + + if self.combine: + delim_list_expr = Combine(delim_list_expr) + + super().__init__(delim_list_expr, savelist=True) + + def _generateDefaultName(self) -> str: + content_expr = self.content.streamline() + return f"{content_expr} [{self.raw_delim} {content_expr}]..." + + +class _NullToken: + def __bool__(self): + return False + + def __str__(self): + return "" + + +class Opt(ParseElementEnhance): + """ + Optional matching of the given expression. 
+ + Parameters: + + - ``expr`` - expression that must match zero or more times + - ``default`` (optional) - value to be returned if the optional expression is not found. + + Example:: + + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) + zip.run_tests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + + prints:: + + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) + """ + + __optionalNotMatched = _NullToken() + + def __init__( + self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched + ): + super().__init__(expr, savelist=False) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + self_expr = self.expr + try: + loc, tokens = self_expr._parse( + instring, loc, do_actions, callPreParse=False + ) + except (ParseException, IndexError): + default_value = self.defaultValue + if default_value is not self.__optionalNotMatched: + if self_expr.resultsName: + tokens = ParseResults([default_value]) + tokens[self_expr.resultsName] = default_value + else: + tokens = [default_value] # type: ignore[assignment] + else: + tokens = [] # type: ignore[assignment] + return loc, tokens + + def _generateDefaultName(self) -> str: + inner = str(self.expr) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return f"[{inner}]" + + +Optional = Opt + + +class SkipTo(ParseElementEnhance): + """ + Token for skipping over all undefined text until the matched + expression is found. 
+ + Parameters: + + - ``expr`` - target expression marking the end of the data to be skipped + - ``include`` - if ``True``, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element + list) (default= ``False``). + - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and + comments) that might contain false matches to the target expression + - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, + the :class:`SkipTo` is not a match + + Example:: + + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quoted_string) + string_data.set_parse_action(token_map(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.search_string(report): + print tkt.dump() + + prints:: + + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: '6' + - desc: 'Intermittent system crash' + - issue_num: '101' + - sev: 'Critical' + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: '14' + - desc: "Spelling error on Login ('log|n')" + - issue_num: '94' + - sev: 'Cosmetic' + ['79', 'Minor', 'System slow when running too many reports', '47'] + - 
days_open: '47' + - desc: 'System slow when running too many reports' + - issue_num: '79' + - sev: 'Minor' + """ + + def __init__( + self, + other: Union[ParserElement, str], + include: bool = False, + ignore: typing.Optional[Union[ParserElement, str]] = None, + fail_on: typing.Optional[Union[ParserElement, str]] = None, + *, + failOn: typing.Optional[Union[ParserElement, str]] = None, + ): + super().__init__(other) + failOn = failOn or fail_on + self.ignoreExpr = ignore + self.mayReturnEmpty = True + self.mayIndexError = False + self.includeMatch = include + self.saveAsList = False + if isinstance(failOn, str_type): + self.failOn = self._literalStringClass(failOn) + else: + self.failOn = failOn + self.errmsg = f"No match found for {self.expr}" + self.ignorer = Empty().leave_whitespace() + self._update_ignorer() + + def _update_ignorer(self): + # rebuild internal ignore expr from current ignore exprs and assigned ignoreExpr + self.ignorer.ignoreExprs.clear() + for e in self.expr.ignoreExprs: + self.ignorer.ignore(e) + if self.ignoreExpr: + self.ignorer.ignore(self.ignoreExpr) + + def ignore(self, expr): + super().ignore(expr) + self._update_ignorer() + + def parseImpl(self, instring, loc, do_actions=True): + startloc = loc + instrlen = len(instring) + self_expr_parse = self.expr._parse + self_failOn_canParseNext = ( + self.failOn.canParseNext if self.failOn is not None else None + ) + ignorer_try_parse = self.ignorer.try_parse if self.ignorer.ignoreExprs else None + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if ignorer_try_parse is not None: + # advance past ignore expressions + prev_tmploc = tmploc + while 1: + try: + tmploc = ignorer_try_parse(instring, tmploc) + except ParseBaseException: + break + # see if all ignorers matched, but didn't actually ignore anything + if tmploc == prev_tmploc: + break + prev_tmploc = tmploc 
+ + try: + self_expr_parse(instring, tmploc, do_actions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return values + loc = tmploc + skiptext = instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = self_expr_parse(instring, loc, do_actions, callPreParse=False) + skipresult += mat + + return loc, skipresult + + +class Forward(ParseElementEnhance): + """ + Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the ``Forward`` + variable using the ``'<<'`` operator. + + Note: take care when assigning to ``Forward`` not to overlook + precedence of operators. + + Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: + + fwd_expr << a | b | c + + will actually be evaluated as:: + + (fwd_expr << a) | b | c + + thereby leaving b and c out as parseable alternatives. It is recommended that you + explicitly group the values inserted into the ``Forward``:: + + fwd_expr << (a | b | c) + + Converting to use the ``'<<='`` operator instead will avoid this problem. + + See :class:`ParseResults.pprint` for an example of a recursive + parser created using ``Forward``. 
+ """ + + def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None): + self.caller_frame = traceback.extract_stack(limit=2)[0] + super().__init__(other, savelist=False) # type: ignore[arg-type] + self.lshift_line = None + + def __lshift__(self, other) -> Forward: + if hasattr(self, "caller_frame"): + del self.caller_frame + if isinstance(other, str_type): + other = self._literalStringClass(other) + + if not isinstance(other, ParserElement): + return NotImplemented + + self.expr = other + self.streamlined = other.streamlined + self.mayIndexError = self.expr.mayIndexError + self.mayReturnEmpty = self.expr.mayReturnEmpty + self.set_whitespace_chars( + self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars + ) + self.skipWhitespace = self.expr.skipWhitespace + self.saveAsList = self.expr.saveAsList + self.ignoreExprs.extend(self.expr.ignoreExprs) + self.lshift_line = traceback.extract_stack(limit=2)[-2] # type: ignore[assignment] + return self + + def __ilshift__(self, other) -> Forward: + if not isinstance(other, ParserElement): + return NotImplemented + + return self << other + + def __or__(self, other) -> ParserElement: + caller_line = traceback.extract_stack(limit=2)[-2] + if ( + __diag__.warn_on_match_first_with_lshift_operator + and caller_line == self.lshift_line + and Diagnostics.warn_on_match_first_with_lshift_operator + not in self.suppress_warnings_ + ): + warnings.warn( + "warn_on_match_first_with_lshift_operator:" + " using '<<' operator with '|' is probably an error, use '<<='", + stacklevel=2, + ) + ret = super().__or__(other) + return ret + + def __del__(self): + # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' + if ( + self.expr is None + and __diag__.warn_on_assignment_to_Forward + and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ + ): + warnings.warn_explicit( + "warn_on_assignment_to_Forward:" + " Forward defined here but no expression attached 
later using '<<=' or '<<'", + UserWarning, + filename=self.caller_frame.filename, + lineno=self.caller_frame.lineno, + ) + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if ( + self.expr is None + and __diag__.warn_on_parse_using_empty_Forward + and Diagnostics.warn_on_parse_using_empty_Forward + not in self.suppress_warnings_ + ): + # walk stack until parse_string, scan_string, search_string, or transform_string is found + parse_fns = ( + "parse_string", + "scan_string", + "search_string", + "transform_string", + ) + tb = traceback.extract_stack(limit=200) + for i, frm in enumerate(reversed(tb), start=1): + if frm.name in parse_fns: + stacklevel = i + 1 + break + else: + stacklevel = 2 + warnings.warn( + "warn_on_parse_using_empty_Forward:" + " Forward expression was never assigned a value, will not parse any input", + stacklevel=stacklevel, + ) + if not ParserElement._left_recursion_enabled: + return super().parseImpl(instring, loc, do_actions) + # ## Bounded Recursion algorithm ## + # Recursion only needs to be processed at ``Forward`` elements, since they are + # the only ones that can actually refer to themselves. The general idea is + # to handle recursion stepwise: We start at no recursion, then recurse once, + # recurse twice, ..., until more recursion offers no benefit (we hit the bound). + # + # The "trick" here is that each ``Forward`` gets evaluated in two contexts + # - to *match* a specific recursion level, and + # - to *search* the bounded recursion level + # and the two run concurrently. The *search* must *match* each recursion level + # to find the best possible match. This is handled by a memo table, which + # provides the previous match to the next level match attempt. + # + # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. + # + # There is a complication since we not only *parse* but also *transform* via + # actions: We do not want to run the actions too often while expanding. 
Thus, + # we expand using `do_actions=False` and only run `do_actions=True` if the next + # recursion level is acceptable. + with ParserElement.recursion_lock: + memo = ParserElement.recursion_memos + try: + # we are parsing at a specific recursion expansion - use it as-is + prev_loc, prev_result = memo[loc, self, do_actions] + if isinstance(prev_result, Exception): + raise prev_result + return prev_loc, prev_result.copy() + except KeyError: + act_key = (loc, self, True) + peek_key = (loc, self, False) + # we are searching for the best recursion expansion - keep on improving + # both `do_actions` cases must be tracked separately here! + prev_loc, prev_peek = memo[peek_key] = ( + loc - 1, + ParseException( + instring, loc, "Forward recursion without base case", self + ), + ) + if do_actions: + memo[act_key] = memo[peek_key] + while True: + try: + new_loc, new_peek = super().parseImpl(instring, loc, False) + except ParseException: + # we failed before getting any match – do not hide the error + if isinstance(prev_peek, Exception): + raise + new_loc, new_peek = prev_loc, prev_peek + # the match did not get better: we are done + if new_loc <= prev_loc: + if do_actions: + # replace the match for do_actions=False as well, + # in case the action did backtrack + prev_loc, prev_result = memo[peek_key] = memo[act_key] + del memo[peek_key], memo[act_key] + return prev_loc, copy.copy(prev_result) + del memo[peek_key] + return prev_loc, copy.copy(prev_peek) + # the match did get better: see if we can improve further + if do_actions: + try: + memo[act_key] = super().parseImpl(instring, loc, True) + except ParseException as e: + memo[peek_key] = memo[act_key] = (new_loc, e) + raise + prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + self.skipWhitespace = False + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + self.skipWhitespace = True + return self + + def 
streamline(self) -> ParserElement: + if not self.streamlined: + self.streamlined = True + if self.expr is not None: + self.expr.streamline() + return self + + def validate(self, validateTrace=None) -> None: + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + if validateTrace is None: + validateTrace = [] + + if self not in validateTrace: + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self) -> str: + # Avoid infinite recursion by setting a temporary _defaultName + self._defaultName = ": ..." + + # Use the string representation of main expression. + retString = "..." + try: + if self.expr is not None: + retString = str(self.expr)[:1000] + else: + retString = "None" + finally: + return f"{type(self).__name__}: {retString}" + + def copy(self) -> ParserElement: + if self.expr is not None: + return super().copy() + else: + ret = Forward() + ret <<= self + return ret + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + # fmt: off + if ( + __diag__.warn_name_set_on_empty_Forward + and Diagnostics.warn_name_set_on_empty_Forward not in self.suppress_warnings_ + and self.expr is None + ): + warning = ( + "warn_name_set_on_empty_Forward:" + f" setting results name {name!r} on {type(self).__name__} expression" + " that has no contained expression" + ) + warnings.warn(warning, stacklevel=3) + # fmt: on + + return super()._setResultsName(name, list_all_matches) + + # Compatibility synonyms + # fmt: off + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + # fmt: on + + +class TokenConverter(ParseElementEnhance): + """ + Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results. 
+ """ + + def __init__(self, expr: Union[ParserElement, str], savelist=False): + super().__init__(expr) # , savelist) + self.saveAsList = False + + +class Combine(TokenConverter): + """Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the + input string; this can be disabled by specifying + ``'adjacent=False'`` in the constructor. + + Example:: + + real = Word(nums) + '.' + Word(nums) + print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] + # will also erroneously match the following + print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] + + real = Combine(Word(nums) + '.' + Word(nums)) + print(real.parse_string('3.1416')) # -> ['3.1416'] + # no match when there are internal spaces + print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) + """ + + def __init__( + self, + expr: ParserElement, + join_string: str = "", + adjacent: bool = True, + *, + joinString: typing.Optional[str] = None, + ): + super().__init__(expr) + joinString = joinString if joinString is not None else join_string + # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself + if adjacent: + self.leave_whitespace() + self.adjacent = adjacent + self.skipWhitespace = True + self.joinString = joinString + self.callPreparse = True + + def ignore(self, other) -> ParserElement: + if self.adjacent: + ParserElement.ignore(self, other) + else: + super().ignore(other) + return self + + def postParse(self, instring, loc, tokenlist): + retToks = tokenlist.copy() + del retToks[:] + retToks += ParseResults( + ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults + ) + + if self.resultsName and retToks.haskeys(): + return [retToks] + else: + return retToks + + +class Group(TokenConverter): + """Converter to return the matched tokens as a list - useful for + returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. 
+ + The optional ``aslist`` argument when set to True will return the + parsed tokens as a Python list instead of a pyparsing ParseResults. + + Example:: + + ident = Word(alphas) + num = Word(nums) + term = ident | num + func = ident + Opt(DelimitedList(term)) + print(func.parse_string("fn a, b, 100")) + # -> ['fn', 'a', 'b', '100'] + + func = ident + Group(Opt(DelimitedList(term))) + print(func.parse_string("fn a, b, 100")) + # -> ['fn', ['a', 'b', '100']] + """ + + def __init__(self, expr: ParserElement, aslist: bool = False): + super().__init__(expr) + self.saveAsList = True + self._asPythonList = aslist + + def postParse(self, instring, loc, tokenlist): + if self._asPythonList: + return ParseResults.List( + tokenlist.asList() + if isinstance(tokenlist, ParseResults) + else list(tokenlist) + ) + + return [tokenlist] + + +class Dict(TokenConverter): + """Converter to return a repetitive expression as a list, but also + as a dictionary. Each element can also be referenced using the first + token in the expression as its key. Useful for tabular report + scraping when the first column can be used as a item key. + + The optional ``asdict`` argument when set to True will return the + parsed tokens as a Python dict instead of a pyparsing ParseResults. 
+ + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + + # print attributes as plain groups + print(attr_expr[1, ...].parse_string(text).dump()) + + # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names + result = Dict(Group(attr_expr)[1, ...]).parse_string(text) + print(result.dump()) + + # access named fields as dict entries, or output as dict + print(result['shape']) + print(result.as_dict()) + + prints:: + + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: 'light blue' + - posn: 'upper left' + - shape: 'SQUARE' + - texture: 'burlap' + SQUARE + {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} + + See more examples at :class:`ParseResults` of accessing fields by results name. 
+ """ + + def __init__(self, expr: ParserElement, asdict: bool = False): + super().__init__(expr) + self.saveAsList = True + self._asPythonDict = asdict + + def postParse(self, instring, loc, tokenlist): + for i, tok in enumerate(tokenlist): + if len(tok) == 0: + continue + + ikey = tok[0] + if isinstance(ikey, int): + ikey = str(ikey).strip() + + if len(tok) == 1: + tokenlist[ikey] = _ParseResultsWithOffset("", i) + + elif len(tok) == 2 and not isinstance(tok[1], ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) + + else: + try: + dictvalue = tok.copy() # ParseResults(i) + except Exception: + exc = TypeError( + "could not extract dict values from parsed results" + " - Dict expression must contain Grouped expressions" + ) + raise exc from None + + del dictvalue[0] + + if len(dictvalue) != 1 or ( + isinstance(dictvalue, ParseResults) and dictvalue.haskeys() + ): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) + else: + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) + + if self._asPythonDict: + return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() + + return [tokenlist] if self.resultsName else tokenlist + + +class Suppress(TokenConverter): + """Converter for ignoring the results of a parsed expression. + + Example:: + + source = "a, b, c,d" + wd = Word(alphas) + wd_list1 = wd + (',' + wd)[...] + print(wd_list1.parse_string(source)) + + # often, delimiters that are useful during parsing are just in the + # way afterward - use Suppress to keep them out of the parsed output + wd_list2 = wd + (Suppress(',') + wd)[...] + print(wd_list2.parse_string(source)) + + # Skipped text (using '...') can be suppressed as well + source = "lead in START relevant text END trailing text" + start_marker = Keyword("START") + end_marker = Keyword("END") + find_body = Suppress(...) + start_marker + ... 
+ end_marker + print(find_body.parse_string(source) + + prints:: + + ['a', ',', 'b', ',', 'c', ',', 'd'] + ['a', 'b', 'c', 'd'] + ['START', 'relevant text ', 'END'] + + (See also :class:`DelimitedList`.) + """ + + def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): + if expr is ...: + expr = _PendingSkip(NoMatch()) + super().__init__(expr) + + def __add__(self, other) -> ParserElement: + if isinstance(self.expr, _PendingSkip): + return Suppress(SkipTo(other)) + other + + return super().__add__(other) + + def __sub__(self, other) -> ParserElement: + if isinstance(self.expr, _PendingSkip): + return Suppress(SkipTo(other)) - other + + return super().__sub__(other) + + def postParse(self, instring, loc, tokenlist): + return [] + + def suppress(self) -> ParserElement: + return self + + +def trace_parse_action(f: ParseAction) -> ParseAction: + """Decorator for debugging parse actions. + + When the parse action is called, this decorator will print + ``">> entering method-name(line:, , )"``. + When the parse action completes, the decorator will print + ``"<<"`` followed by the returned value, or any exception that the parse action raised. + + Example:: + + wd = Word(alphas) + + @trace_parse_action + def remove_duplicate_chars(tokens): + return ''.join(sorted(set(''.join(tokens)))) + + wds = wd[1, ...].set_parse_action(remove_duplicate_chars) + print(wds.parse_string("slkdjs sld sldd sdlf sdljf")) + + prints:: + + >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) + < 3: + thisFunc = f"{type(paArgs[0]).__name__}.{thisFunc}" + sys.stderr.write(f">>entering {thisFunc}(line: {line(l, s)!r}, {l}, {t!r})\n") + try: + ret = f(*paArgs) + except Exception as exc: + sys.stderr.write( + f"< str: + r"""Helper to easily define string ranges for use in :class:`Word` + construction. 
Borrows syntax from regexp ``'[]'`` string range + definitions:: + + srange("[0-9]") -> "0123456789" + srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" + srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" + + The input string must be enclosed in []'s, and the returned string + is the expanded character set joined into a single string. The + values enclosed in the []'s may be: + + - a single character + - an escaped character with a leading backslash (such as ``\-`` + or ``\]``) + - an escaped hex character with a leading ``'\x'`` + (``\x21``, which is a ``'!'`` character) (``\0x##`` + is also supported for backwards compatibility) + - an escaped octal character with a leading ``'\0'`` + (``\041``, which is a ``'!'`` character) + - a range of any of the above, separated by a dash (``'a-z'``, + etc.) + - any combination of the above (``'aeiouy'``, + ``'a-zA-Z0-9_$'``, etc.) + """ + _expanded = lambda p: ( + p + if not isinstance(p, ParseResults) + else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) + ) + try: + return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) + except Exception as e: + return "" + + +def token_map(func, *args) -> ParseAction: + """Helper to define a parse action by mapping a function to all + elements of a :class:`ParseResults` list. If any additional args are passed, + they are forwarded to the given function as additional arguments + after the token, as in + ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, + which will convert the parsed data to an integer using base 16. 
+ + Example (compare the last to example in :class:`ParserElement.transform_string`:: + + hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) + hex_ints.run_tests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).set_parse_action(token_map(str.upper)) + upperword[1, ...].run_tests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).set_parse_action(token_map(str.title)) + wd[1, ...].set_parse_action(' '.join).run_tests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + + prints:: + + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + + def pa(s, l, t): + return [func(tokn, *args) for tokn in t] + + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + pa.__name__ = func_name + + return pa + + +def autoname_elements() -> None: + """ + Utility to simplify mass-naming of parser elements, for + generating railroad diagram with named subdiagrams. 
+ """ + calling_frame = sys._getframe(1) + if calling_frame is None: + return + calling_frame = typing.cast(types.FrameType, calling_frame) + for name, var in calling_frame.f_locals.items(): + if isinstance(var, ParserElement) and not var.customName: + var.set_name(name) + + +dbl_quoted_string = Combine( + Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' +).set_name("string enclosed in double quotes") + +sgl_quoted_string = Combine( + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" +).set_name("string enclosed in single quotes") + +quoted_string = Combine( + (Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name( + "double quoted string" + ) + | (Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name( + "single quoted string" + ) +).set_name("quoted string using single or double quotes") + +python_quoted_string = Combine( + (Regex(r'"""(?:[^"\\]|""(?!")|"(?!"")|\\.)*', flags=re.MULTILINE) + '"""').set_name( + "multiline double quoted string" + ) + ^ ( + Regex(r"'''(?:[^'\\]|''(?!')|'(?!'')|\\.)*", flags=re.MULTILINE) + "'''" + ).set_name("multiline single quoted string") + ^ (Regex(r'"(?:[^"\n\r\\]|(?:\\")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name( + "double quoted string" + ) + ^ (Regex(r"'(?:[^'\n\r\\]|(?:\\')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name( + "single quoted string" + ) +).set_name("Python quoted string") + +unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") + + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs: list[ParserElement] = [ + v for v in vars().values() if isinstance(v, ParserElement) +] + +# Compatibility synonyms +# fmt: off +sglQuotedString = sgl_quoted_string +dblQuotedString = dbl_quoted_string +quotedString = quoted_string 
+unicodeString = unicode_string +lineStart = line_start +lineEnd = line_end +stringStart = string_start +stringEnd = string_end +nullDebugAction = replaced_by_pep8("nullDebugAction", null_debug_action) +traceParseAction = replaced_by_pep8("traceParseAction", trace_parse_action) +conditionAsParseAction = replaced_by_pep8("conditionAsParseAction", condition_as_parse_action) +tokenMap = replaced_by_pep8("tokenMap", token_map) +# fmt: on diff --git a/evalkit_eagle/lib/python3.10/site-packages/pyparsing/diagram/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/diagram/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..56526b741b87902c40902e5b53a4d087ce2b39b8 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/pyparsing/diagram/__init__.py @@ -0,0 +1,744 @@ +# mypy: ignore-errors +from __future__ import annotations + +import itertools +import railroad +import pyparsing +import dataclasses +import typing +from typing import ( + Generic, + TypeVar, + Callable, + Iterable, +) +from jinja2 import Template +from io import StringIO +import inspect +import re + + +jinja2_template_source = """\ +{% if not embed %} + + + +{% endif %} + {% if not head %} + + {% else %} + {{ head | safe }} + {% endif %} +{% if not embed %} + + +{% endif %} + +{{ body | safe }} +{% for diagram in diagrams %} +
+

{{ diagram.title }}

+
{{ diagram.text }}
+
+ {{ diagram.svg }} +
+
+{% endfor %} +{% if not embed %} + + +{% endif %} +""" + +template = Template(jinja2_template_source) + + +_bookmark_lookup = {} +_bookmark_ids = itertools.count(start=1) + +def _make_bookmark(s: str) -> str: + """ + Converts a string into a valid HTML bookmark (ID or anchor name). + """ + if s in _bookmark_lookup: + return _bookmark_lookup[s] + + # Replace invalid characters with hyphens and ensure only valid characters + bookmark = re.sub(r'[^a-zA-Z0-9-]+', '-', s) + + # Ensure it starts with a letter by adding 'z' if necessary + if not bookmark[:1].isalpha(): + bookmark = f"z{bookmark}" + + # Convert to lowercase and strip hyphens + bookmark = bookmark.lower().strip('-') + + _bookmark_lookup[s] = bookmark = f"{bookmark}-{next(_bookmark_ids):04d}" + + return bookmark + + +def _collapse_verbose_regex(regex_str: str) -> str: + if "\n" not in regex_str: + return regex_str + collapsed = pyparsing.Regex(r"#.*$").suppress().transform_string(regex_str) + collapsed = re.sub(r"\s*\n\s*", "", collapsed) + return collapsed + + +@dataclasses.dataclass +class NamedDiagram: + """ + A simple structure for associating a name with a railroad diagram + """ + + name: str + index: int + diagram: railroad.DiagramItem = None + + @property + def bookmark(self): + bookmark = _make_bookmark(self.name) + return bookmark + + +T = TypeVar("T") + + +class EachItem(railroad.Group): + """ + Custom railroad item to compose a: + - Group containing a + - OneOrMore containing a + - Choice of the elements in the Each + with the group label indicating that all must be matched + """ + + all_label = "[ALL]" + + def __init__(self, *items): + choice_item = railroad.Choice(len(items) - 1, *items) + one_or_more_item = railroad.OneOrMore(item=choice_item) + super().__init__(one_or_more_item, label=self.all_label) + + +class AnnotatedItem(railroad.Group): + """ + Simple subclass of Group that creates an annotation label + """ + + def __init__(self, label: str, item): + super().__init__(item=item, 
class EditablePartial(Generic[T]):
    """
    A mutable stand-in for a deferred call: like ``functools.partial``, but
    its args/kwargs can be edited after creation.

    The railroad constructors transform their inputs as they run, so the
    real objects cannot be built until the whole tree has been assembled.
    """

    def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    @classmethod
    def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
        """
        Mirror an ordinary call, e.g.
        ``EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)``.
        """
        return EditablePartial(func=func, args=list(args), kwargs=kwargs)

    @property
    def name(self):
        # The deferred object's name, as supplied via the 'name' keyword
        return self.kwargs["name"]

    def __call__(self) -> T:
        """Perform the deferred call and return its result."""
        call_args = list(self.args)
        call_kwargs = dict(self.kwargs)

        # Convenience hack: a varargs parameter (e.g. *items) may have been
        # supplied as a keyword (items=[...]); splice it back into the
        # positional arguments before calling.
        spec = inspect.getfullargspec(self.func)
        if spec.varargs in call_kwargs:
            call_args += call_kwargs.pop(spec.varargs)

        return self.func(*call_args, **call_kwargs)
def to_railroad(
    element: pyparsing.ParserElement,
    diagram_kwargs: typing.Optional[dict] = None,
    vertical: int = 3,
    show_results_names: bool = False,
    show_groups: bool = False,
) -> list[NamedDiagram]:
    """
    Convert a pyparsing element tree into a list of diagrams.

    This is the recommended entry point when the railroad tree is wanted
    before it is rendered to HTML.

    :param element: base element of the parser being diagrammed
    :param diagram_kwargs: kwargs forwarded to each ``Diagram()`` constructor
    :param vertical: number of alternatives at which lists switch from
        horizontal to vertical layout
    :param show_results_names: annotate results names on the diagram
    :param show_groups: draw an unlabeled box around Group elements
    """
    # Walk the whole tree under the root, accumulating state as we go
    state = ConverterState(diagram_kwargs=diagram_kwargs or {})
    _to_diagram_element(
        element,
        lookup=state,
        parent=None,
        vertical=vertical,
        show_results_names=show_results_names,
        show_groups=show_groups,
    )

    # Force extraction of the root if it has not happened already
    root_id = id(element)
    if root_id in state:
        if not element.customName:
            state[root_id].name = ""
        state[root_id].mark_for_extraction(root_id, state, force=True)

    partial_diagrams = list(state.diagrams.values())
    if len(partial_diagrams) > 1:
        # drop duplicate names, and SkipTo ("...") stubs which are
        # uninformative as stand-alone sub-diagrams
        seen_names = set()
        kept = []
        for candidate in partial_diagrams:
            if candidate.name == "...":
                continue
            if candidate.name is not None and candidate.name not in seen_names:
                seen_names.add(candidate.name)
                kept.append(candidate)
    else:
        # a lone diagram is always displayed, even when unnamed
        kept = partial_diagrams

    resolved = [resolve_partial(partial) for partial in kept]
    return sorted(resolved, key=lambda diag: diag.index)
@dataclasses.dataclass
class ElementState:
    """
    Book-keeping recorded for one pyparsing element during conversion.
    """

    #: the pyparsing element being tracked
    element: pyparsing.ParserElement
    #: the railroad output, still in unbuilt EditablePartial form
    converted: EditablePartial
    #: parent railroad partial, kept so a duplicated element can be extracted
    parent: EditablePartial
    #: discovery order; used to sort diagrams extracted from this element
    number: int
    #: display name of the element
    name: str = None
    #: position of this element among its parent's children
    parent_index: typing.Optional[int] = None
    #: set once the element is seen twice and must become a sub-diagram
    extract: bool = False
    #: set once all of this element's children have been converted
    complete: bool = False

    def mark_for_extraction(
        self, el_id: int, state: ConverterState, name: str = None, force: bool = False
    ):
        """
        Called on the second sighting of this element, so it should
        eventually be extracted into its own sub-diagram.

        :param el_id: id of the element
        :param state: element/diagram state tracker
        :param name: explicit name to use for this element's text
        :param force: extract immediately regardless of completeness; only
            used for the root element once conversion is known to be done
        """
        self.extract = True

        # First non-empty source wins: explicit name, then the element's
        # custom name, then the empty string
        if not self.name:
            self.name = name or self.element.customName or ""

        # Extraction may need to wait until the children are attached, and
        # trivial elements (string literals etc.) are not worth extracting
        if force or (self.complete and _worth_extracting(self.element)):
            state.extract_into_diagram(el_id)
class ConverterState:
    """
    Mutable state shared across the recursive element-tree conversion.
    """

    def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
        # per-element tracking info, keyed by id(element)
        self._element_diagram_states: dict[int, ElementState] = {}
        # finished (extracted) sub-diagrams, keyed by id(element)
        self.diagrams: dict[int, EditablePartial[NamedDiagram]] = {}
        # counter feeding the names of otherwise-unnamed diagrams
        self.unnamed_index: int = 1
        # running counter used to order diagrams
        self.index: int = 0
        # kwargs forwarded to every railroad.Diagram constructor
        self.diagram_kwargs: dict = diagram_kwargs or {}
        self.extracted_diagram_names: set[str] = set()

    def __setitem__(self, key: int, value: ElementState):
        self._element_diagram_states[key] = value

    def __getitem__(self, key: int) -> ElementState:
        return self._element_diagram_states[key]

    def __delitem__(self, key: int):
        del self._element_diagram_states[key]

    def __contains__(self, key: int):
        return key in self._element_diagram_states

    def get(self, key, default=None):
        return self._element_diagram_states.get(key, default)

    def generate_unnamed(self) -> int:
        """Return the next serial used to name an otherwise-unnamed diagram."""
        self.unnamed_index += 1
        return self.unnamed_index

    def generate_index(self) -> int:
        """Return the next serial used to order diagrams."""
        self.index += 1
        return self.index

    def extract_into_diagram(self, el_id: int):
        """
        Handle a token seen twice in the same tree: replace its inline
        definition with a NonTerminal link and record a new sub-diagram.
        """
        position = self[el_id]

        # Swap the inline definition for a link to the extracted diagram
        if position.parent:
            link = EditablePartial.from_call(
                railroad.NonTerminal,
                text=position.name,
                href=f"#{_make_bookmark(position.name)}",
            )
            parent_kwargs = position.parent.kwargs
            if "item" in parent_kwargs:
                parent_kwargs["item"] = link
            elif "items" in parent_kwargs:
                parent_kwargs["items"][position.parent_index] = link

        # For Groups, lift out the inner content but keep the group's title
        if position.converted.func == railroad.Group:
            content = position.converted.kwargs["item"]
        else:
            content = position.converted

        self.diagrams[el_id] = EditablePartial.from_call(
            NamedDiagram,
            name=position.name,
            diagram=EditablePartial.from_call(
                railroad.Diagram, content, **self.diagram_kwargs
            ),
            index=position.number,
        )

        del self[el_id]
Simply, if any of its children + themselves have children, then its complex enough to extract + """ + children = element.recurse() + return any(child.recurse() for child in children) + + +def _apply_diagram_item_enhancements(fn): + """ + decorator to ensure enhancements to a diagram item (such as results name annotations) + get applied on return from _to_diagram_element (we do this since there are several + returns in _to_diagram_element) + """ + + def _inner( + element: pyparsing.ParserElement, + parent: typing.Optional[EditablePartial], + lookup: ConverterState = None, + vertical: int = None, + index: int = 0, + name_hint: str = None, + show_results_names: bool = False, + show_groups: bool = False, + ) -> typing.Optional[EditablePartial]: + ret = fn( + element, + parent, + lookup, + vertical, + index, + name_hint, + show_results_names, + show_groups, + ) + + # apply annotation for results name, if present + if show_results_names and ret is not None: + element_results_name = element.resultsName + if element_results_name: + # add "*" to indicate if this is a "list all results" name + modal_tag = "" if element.modalResults else "*" + ret = EditablePartial.from_call( + railroad.Group, + item=ret, + label=f"{repr(element_results_name)}{modal_tag}", + ) + + return ret + + return _inner + + +def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): + non_diagramming_exprs = ( + pyparsing.ParseElementEnhance, + pyparsing.PositionToken, + pyparsing.And._ErrorStop, + ) + return [ + e + for e in exprs + if not isinstance(e, non_diagramming_exprs) + ] + + +@_apply_diagram_item_enhancements +def _to_diagram_element( + element: pyparsing.ParserElement, + parent: typing.Optional[EditablePartial], + lookup: ConverterState = None, + vertical: int = None, + index: int = 0, + name_hint: str = None, + show_results_names: bool = False, + show_groups: bool = False, + show_hidden: bool = False, +) -> typing.Optional[EditablePartial]: + """ + Recursively converts a PyParsing Element 
@_apply_diagram_item_enhancements
def _to_diagram_element(
    element: pyparsing.ParserElement,
    parent: typing.Optional[EditablePartial],
    lookup: ConverterState = None,
    vertical: int = None,
    index: int = 0,
    name_hint: str = None,
    show_results_names: bool = False,
    show_groups: bool = False,
    show_hidden: bool = False,
) -> typing.Optional[EditablePartial]:
    """
    Recursively convert a pyparsing element into an (unbuilt) railroad element.

    :param element: the pyparsing element to convert
    :param parent: the parent railroad partial in the output tree
    :param lookup: shared converter state tracking duplicates and sub-diagrams
    :param index: position of this element within its parent's children
    :param vertical: threshold number of alternatives before stacking
        alternatives vertically instead of horizontally
    :param name_hint: overrides the generated name when provided
    :param show_results_names: annotate results names on the diagram
    :param show_groups: draw an unlabeled box around Group elements
    :param show_hidden: include elements that are normally hidden
    :returns: an EditablePartial for the converted element, or None if hidden
    """
    children = element.recurse()
    name = name_hint or element.customName or type(element).__name__
    # id() provides a unique, hashable key per element instance
    el_id = id(element)
    results_name = element.resultsName

    # Bypass wrapper elements (Forward/Located) that add nothing visual,
    # handing any useful name down to the wrapped child.
    if not element.customName:
        if isinstance(
            element,
            (
                # pyparsing.TokenConverter,
                pyparsing.Forward,
                pyparsing.Located,
            ),
        ):
            if children:
                # propagate our name only if the child has none of its own
                propagated_name = name if not children[0].customName else None
                return _to_diagram_element(
                    element.expr,
                    parent=parent,
                    lookup=lookup,
                    vertical=vertical,
                    index=index,
                    name_hint=propagated_name,
                    show_results_names=show_results_names,
                    show_groups=show_groups,
                )

    # Only complex elements are duplicate-tracked; simple ones are re-drawn
    if _worth_extracting(element):
        prior = lookup.get(el_id)
        if prior and prior.name is not None:
            # Second sighting: extract it and link to the new sub-diagram
            prior.mark_for_extraction(el_id, lookup, name=name_hint)
            return EditablePartial.from_call(
                railroad.NonTerminal,
                text=prior.name,
                href=f"#{_make_bookmark(prior.name)}",
            )
        elif el_id in lookup.diagrams:
            # Already extracted: emit a marker linking to its sub-diagram
            text = lookup.diagrams[el_id].kwargs["name"]
            return EditablePartial.from_call(
                railroad.NonTerminal, text=text, href=f"#{_make_bookmark(text)}"
            )

    # Normally-hidden elements are dropped unless explicitly requested
    if not element.show_in_diagram and not show_hidden:
        return None

    # Pick the railroad construct matching this pyparsing element type;
    # items=[] / item="" are placeholders filled by the child loop below.
    if isinstance(element, pyparsing.And):
        if not children:
            return None
        # ``expr * N`` yields N identical children - draw as a counted repeat
        if len(set((e.name, e.resultsName) for e in children)) == 1 and len(children) > 2:
            out = EditablePartial.from_call(
                railroad.OneOrMore, item="", repeat=str(len(children))
            )
        elif _should_vertical(vertical, children):
            out = EditablePartial.from_call(railroad.Stack, items=[])
        else:
            out = EditablePartial.from_call(railroad.Sequence, items=[])
    elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
        if not children:
            return None
        if _should_vertical(vertical, children):
            out = EditablePartial.from_call(railroad.Choice, 0, items=[])
        else:
            out = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
    elif isinstance(element, pyparsing.Each):
        if not children:
            return None
        out = EditablePartial.from_call(EachItem, items=[])
    elif isinstance(element, pyparsing.NotAny):
        out = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
    elif isinstance(element, pyparsing.FollowedBy):
        out = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
    elif isinstance(element, pyparsing.PrecededBy):
        out = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
    elif isinstance(element, pyparsing.Group):
        if show_groups:
            out = EditablePartial.from_call(AnnotatedItem, label="", item="")
        else:
            out = EditablePartial.from_call(
                railroad.Group, item=None, label=results_name
            )
    elif isinstance(element, pyparsing.TokenConverter):
        label = type(element).__name__.lower()
        if label == "tokenconverter":
            out = EditablePartial.from_call(railroad.Sequence, items=[])
        else:
            out = EditablePartial.from_call(AnnotatedItem, label=label, item="")
    elif isinstance(element, pyparsing.Opt):
        out = EditablePartial.from_call(railroad.Optional, item="")
    elif isinstance(element, pyparsing.OneOrMore):
        if element.not_ender is not None:
            # rewrite the stop-on form as (~ender + expr)[1, ...] and recurse
            passthrough = [
                parent,
                lookup,
                vertical,
                index,
                name_hint,
                show_results_names,
                show_groups,
            ]
            return _to_diagram_element(
                (~element.not_ender.expr + element.expr)[1, ...].set_name(element.name),
                *passthrough,
            )
        out = EditablePartial.from_call(railroad.OneOrMore, item=None)
    elif isinstance(element, pyparsing.ZeroOrMore):
        if element.not_ender is not None:
            # rewrite the stop-on form as (~ender + expr)[...] and recurse
            passthrough = [
                parent,
                lookup,
                vertical,
                index,
                name_hint,
                show_results_names,
                show_groups,
            ]
            return _to_diagram_element(
                (~element.not_ender.expr + element.expr)[...].set_name(element.name),
                *passthrough,
            )
        out = EditablePartial.from_call(railroad.ZeroOrMore, item="")
    elif isinstance(element, pyparsing.Empty) and not element.customName:
        # unnamed Empty elements carry no information - drop them
        out = None
    elif isinstance(element, pyparsing.ParseElementEnhance):
        out = EditablePartial.from_call(railroad.Sequence, items=[])
    elif len(children) > 0 and not results_name:
        out = EditablePartial.from_call(railroad.Group, item="", label=name)
    elif isinstance(element, pyparsing.Regex):
        collapsed_patt = _collapse_verbose_regex(element.pattern)
        out = EditablePartial.from_call(railroad.Terminal, collapsed_patt)
    elif len(children) > 0:
        out = EditablePartial.from_call(railroad.Sequence, items=[])
    else:
        out = EditablePartial.from_call(railroad.Terminal, element.defaultName)

    if out is None:
        return

    # Record our position in the tree so this element can be extracted later
    lookup[el_id] = ElementState(
        element=element,
        converted=out,
        parent=parent,
        parent_index=index,
        number=lookup.generate_index(),
    )
    if element.customName:
        lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)

    slot = 0
    for child in children:
        # Reserve an index so extraction can target the child even before
        # it has been attached to the parent
        if "items" in out.kwargs:
            out.kwargs["items"].insert(slot, None)

        converted_child = _to_diagram_element(
            child,
            parent=out,
            lookup=lookup,
            vertical=vertical,
            index=slot,
            show_results_names=show_results_names,
            show_groups=show_groups,
        )

        if converted_child is not None:
            if "item" in out.kwargs:
                out.kwargs["item"] = converted_child
            elif "items" in out.kwargs:
                # if the child was extracted meanwhile, this slot already
                # holds a nonterminal; assigning here is correct either way
                out.kwargs["items"][slot] = converted_child
            slot += 1
        elif "items" in out.kwargs:
            # hidden child: remove its reserved placeholder from the parent
            del out.kwargs["items"][slot]

    # If every child vanished, fall back to a plain named terminal
    if out and (
        ("items" in out.kwargs and len(out.kwargs["items"]) == 0)
        or ("item" in out.kwargs and out.kwargs["item"] is None)
    ):
        out = EditablePartial.from_call(railroad.Terminal, name)

    # All children are attached now
    if el_id in lookup:
        lookup[el_id].complete = True

    # Deferred extraction: marked earlier but had to wait for the children
    if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
        lookup.extract_into_diagram(el_id)
        if out is not None:
            text = lookup.diagrams[el_id].kwargs["name"]
            out = EditablePartial.from_call(
                railroad.NonTerminal, text=text, href=f"#{_make_bookmark(text)}"
            )

    return out
None: + msg, pstr = pstr, "" + + self.loc = loc + self.msg = msg + self.pstr = pstr + self.parser_element = elem + self.args = (pstr, loc, msg) + + @staticmethod + def explain_exception(exc: Exception, depth: int = 16) -> str: + """ + Method to take an exception and translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - exc - exception raised during parsing (need not be a ParseException, in support + of Python exceptions that might be raised in a parse action) + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + """ + import inspect + from .core import ParserElement + + if depth is None: + depth = sys.getrecursionlimit() + ret: list[str] = [] + if isinstance(exc, ParseBaseException): + ret.append(exc.line) + ret.append(f"{' ' * (exc.column - 1)}^") + ret.append(f"{type(exc).__name__}: {exc}") + + if depth <= 0 or exc.__traceback__ is None: + return "\n".join(ret) + + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen: set[int] = set() + for ff in callers[-depth:]: + frm = ff[0] + + f_self = frm.f_locals.get("self", None) + if isinstance(f_self, ParserElement): + if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")): + continue + if id(f_self) in seen: + continue + seen.add(id(f_self)) + + self_type = type(f_self) + ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}") + + elif f_self is not None: + self_type = type(f_self) + ret.append(f"{self_type.__module__}.{self_type.__name__}") + + else: + code = frm.f_code + if code.co_name in ("wrapper", ""): + continue + + ret.append(code.co_name) + + depth -= 1 + if not depth: + break 
+ + return "\n".join(ret) + + @classmethod + def _from_exception(cls, pe) -> ParseBaseException: + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element) + + @cached_property + def line(self) -> str: + """ + Return the line of text where the exception occurred. + """ + return line(self.loc, self.pstr) + + @cached_property + def lineno(self) -> int: + """ + Return the 1-based line number of text where the exception occurred. + """ + return lineno(self.loc, self.pstr) + + @cached_property + def col(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + @cached_property + def column(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + @cached_property + def found(self) -> str: + if not self.pstr: + return "" + + if self.loc >= len(self.pstr): + return "end of text" + + # pull out next word at error location + found_match = _exception_word_extractor.match(self.pstr, self.loc) + if found_match is not None: + found_text = found_match.group(0) + else: + found_text = self.pstr[self.loc : self.loc + 1] + + return repr(found_text).replace(r"\\", "\\") + + # pre-PEP8 compatibility + @property + def parserElement(self): + return self.parser_element + + @parserElement.setter + def parserElement(self, elem): + self.parser_element = elem + + def copy(self): + return copy.copy(self) + + def formatted_message(self) -> str: + found_phrase = f", found {self.found}" if self.found else "" + return f"{self.msg}{found_phrase} (at char {self.loc}), (line:{self.lineno}, col:{self.column})" + + def __str__(self) -> str: + return self.formatted_message() + + def __repr__(self): + return str(self) + + def mark_input_line( + self, marker_string: 
typing.Optional[str] = None, *, markerString: str = ">!<" + ) -> str: + """ + Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. + """ + markerString = marker_string if marker_string is not None else markerString + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = f"{line_str[:line_column]}{markerString}{line_str[line_column:]}" + return line_str.strip() + + def explain(self, depth: int = 16) -> str: + """ + Method to translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Example:: + + # an expression to parse 3 integers + expr = pp.Word(pp.nums) * 3 + try: + # a failing parse - the third integer is prefixed with "A" + expr.parse_string("123 456 A789") + except pp.ParseException as pe: + print(pe.explain(depth=0)) + + prints:: + + 123 456 A789 + ^ + ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `set_name` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + Note: pyparsing's default truncation of exception tracebacks may also truncate the + stack of expressions that are displayed in the ``explain`` output. 
class ParseException(ParseBaseException):
    """
    The exception raised when a parse expression fails to match the input
    string.

    Example::

        integer = Word(nums).set_name("integer")
        try:
            integer.parse_string("ABC")
        except ParseException as pe:
            print(pe)
            print(f"column: {pe.column}")

    prints::

        Expected integer (at char 0), (line:1, col:1) column: 1

    """


class ParseFatalException(ParseBaseException):
    """
    A user-throwable exception that stops all parsing immediately, raised
    when the content parsed so far is recognizably inconsistent.
    """


class ParseSyntaxException(ParseFatalException):
    """
    Identical to :class:`ParseFatalException`, but raised internally when an
    :class:`ErrorStop` ('-' operator) signals that parsing must stop at once
    because an unbacktrackable syntax error has been found.
    """
class RecursiveGrammarException(Exception):
    """
    Raised by :class:`ParserElement.validate` when the grammar could be
    left-recursive; the parser may need to enable left recursion via
    :class:`ParserElement.enable_left_recursion`.

    Deprecated: only used by deprecated method ParserElement.validate.
    """

    def __init__(self, parseElementList):
        # keep the trace of elements forming the suspected recursion cycle
        self.parseElementTrace = parseElementList

    def __str__(self) -> str:
        return f"RecursiveGrammarException: {self.parseElementTrace}"
def counted_array(
    expr: ParserElement,
    int_expr: typing.Optional[ParserElement] = None,
    *,
    intExpr: typing.Optional[ParserElement] = None,
) -> ParserElement:
    """Define a counted list of expressions of the form::

        integer expr expr expr...

    where the leading integer tells how many following ``expr`` terms to
    match. The matched tokens are returned as a list; the leading count
    token is suppressed (though any results names it sets are preserved).

    If ``int_expr`` is given, it must be a pyparsing expression producing
    an integer value; ``intExpr`` is its pre-PEP8 synonym.

    Example::

        counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']

        # the leading count given in binary - '10' means 2 items follow
        binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
        counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']

        # fields parsed between the count and the items keep their results
        # names in the returned ParseResults:
        count_with_metadata = integer + Word(alphas)("type")
        typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
        result = typed_array.parse_string("3 bool True True False")
        print(result.dump())

        # prints
        # ['True', 'True', 'False']
        # - items: ['True', 'True', 'False']
        # - type: 'bool'
    """
    count_expr = intExpr or int_expr
    array_expr = Forward()

    def count_field_parse_action(s, l, t):
        # rebuild the array body on the fly to match the parsed count
        nonlocal array_expr
        n = t[0]
        array_expr <<= (expr * n) if n else Empty()
        # drop the count token itself, but keep any named results
        del t[:]

    if count_expr is None:
        count_expr = Word(nums).set_parse_action(lambda t: int(t[0]))
    else:
        count_expr = count_expr.copy()

    count_expr.set_name("arrayLen")
    count_expr.add_parse_action(count_field_parse_action, call_during_try=True)
    return (count_expr + array_expr).set_name(f"(len) {expr}...")
Do *not* use with packrat parsing + enabled. + """ + rep = Forward() + + def copy_token_to_repeater(s, l, t): + if not t: + rep << Empty() + return + + if len(t) == 1: + rep << t[0] + return + + # flatten t tokens + tflat = _flatten(t.as_list()) + rep << And(Literal(tt) for tt in tflat) + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def match_previous_expr(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = match_previous_expr(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches by expressions, will *not* match the leading ``"1:1"`` + in ``"1:10"``; the expressions are evaluated first, and then + compared, so ``"1"`` is compared with ``"10"``. Do *not* use + with packrat parsing enabled. 
+ """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + + def copy_token_to_repeater(s, l, t): + matchTokens = _flatten(t.as_list()) + + def must_match_these_tokens(s, l, t): + theseTokens = _flatten(t.as_list()) + if theseTokens != matchTokens: + raise ParseException( + s, l, f"Expected {matchTokens}, found{theseTokens}" + ) + + rep.set_parse_action(must_match_these_tokens, callDuringTry=True) + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def one_of( + strs: Union[typing.Iterable[str], str], + caseless: bool = False, + use_regex: bool = True, + as_keyword: bool = False, + *, + useRegex: bool = True, + asKeyword: bool = False, +) -> ParserElement: + """Helper to quickly define a set of alternative :class:`Literal` s, + and makes sure to do longest-first testing when there is a conflict, + regardless of the input order, but returns + a :class:`MatchFirst` for best performance. + + Parameters: + + - ``strs`` - a string of space-delimited literals, or a collection of + string literals + - ``caseless`` - treat all literals as caseless - (default= ``False``) + - ``use_regex`` - as an optimization, will + generate a :class:`Regex` object; otherwise, will generate + a :class:`MatchFirst` object (if ``caseless=True`` or ``as_keyword=True``, or if + creating a :class:`Regex` raises an exception) - (default= ``True``) + - ``as_keyword`` - enforce :class:`Keyword`-style matching on the + generated expressions - (default= ``False``) + - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, + but will be removed in a future release + + Example:: + + comp_oper = one_of("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) + + prints:: + + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + asKeyword = asKeyword 
or as_keyword + useRegex = useRegex and use_regex + + if ( + isinstance(caseless, str_type) + and __diag__.warn_on_multiple_string_args_to_oneof + ): + warnings.warn( + "warn_on_multiple_string_args_to_oneof:" + " More than one string argument passed to one_of, pass" + " choices as a list or space-delimited string", + stacklevel=2, + ) + + if caseless: + is_equal = lambda a, b: a.upper() == b.upper() + masks = lambda a, b: b.upper().startswith(a.upper()) + parse_element_class = CaselessKeyword if asKeyword else CaselessLiteral + else: + is_equal = operator.eq + masks = lambda a, b: b.startswith(a) + parse_element_class = Keyword if asKeyword else Literal + + symbols: list[str] + if isinstance(strs, str_type): + strs = typing.cast(str, strs) + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + raise TypeError("Invalid argument to one_of, expected string or iterable") + if not symbols: + return NoMatch() + + # reorder given symbols to take care to avoid masking longer choices with shorter ones + # (but only if the given symbols are not just single characters) + i = 0 + while i < len(symbols) - 1: + cur = symbols[i] + for j, other in enumerate(symbols[i + 1 :]): + if is_equal(other, cur): + del symbols[i + j + 1] + break + if len(other) > len(cur) and masks(cur, other): + del symbols[i + j + 1] + symbols.insert(i, other) + break + else: + i += 1 + + if useRegex: + re_flags: int = re.IGNORECASE if caseless else 0 + + try: + if all(len(sym) == 1 for sym in symbols): + # symbols are just single characters, create range regex pattern + patt = f"[{''.join(_escape_regex_range_chars(sym) for sym in symbols)}]" + else: + patt = "|".join(re.escape(sym) for sym in symbols) + + # wrap with \b word break markers if defining as keywords + if asKeyword: + patt = rf"\b(?:{patt})\b" + + ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) + + if caseless: + # add parse action to return symbols as specified, not in random + # casing 
as found in input string + symbol_map = {sym.lower(): sym for sym in symbols} + ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) + + return ret + + except re.error: + warnings.warn( + "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 + ) + + # last resort, just use MatchFirst + return MatchFirst(parse_element_class(sym) for sym in symbols).set_name( + " | ".join(symbols) + ) + + +def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: + """Helper to easily and clearly define a dictionary by specifying + the respective patterns for the key and value. Takes care of + defining the :class:`Dict`, :class:`ZeroOrMore`, and + :class:`Group` tokens in the proper order. The key pattern + can include delimiting markers or punctuation, as long as they are + suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the :class:`Dict` results + can include named token fields. + + Example:: + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + print(attr_expr[1, ...].parse_string(text).dump()) + + attr_label = label + attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) + + # similar to Dict, but simpler call format + result = dict_of(attr_label, attr_value).parse_string(text) + print(result.dump()) + print(result['shape']) + print(result.shape) # object attribute access works too + print(result.as_dict()) + + prints:: + + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: 'light blue' + - posn: 'upper left' + - shape: 'SQUARE' + - texture: 'burlap' + SQUARE + SQUARE + {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} + """ + return Dict(OneOrMore(Group(key + value))) + + +def original_text_for( + expr: ParserElement, 
as_string: bool = True, *, asString: bool = True +) -> ParserElement: + """Helper to return the original, untokenized text for a given + expression. Useful to restore the parsed fields of an HTML start + tag into the raw tag text itself, or to revert separate tokens with + intervening whitespace back to the original matching input text. By + default, returns a string containing the original parsed text. + + If the optional ``as_string`` argument is passed as + ``False``, then the return value is + a :class:`ParseResults` containing any results names that + were originally matched, and a single token containing the original + matched text from the input string. So if the expression passed to + :class:`original_text_for` contains expressions with defined + results names, you must set ``as_string`` to ``False`` if you + want to preserve those results name values. + + The ``asString`` pre-PEP8 argument is retained for compatibility, + but will be removed in a future release. + + Example:: + + src = "this is test bold text normal text " + for tag in ("b", "i"): + opener, closer = make_html_tags(tag) + patt = original_text_for(opener + ... 
+ closer) + print(patt.search_string(src)[0]) + + prints:: + + [' bold text '] + ['text'] + """ + asString = asString and as_string + + locMarker = Empty().set_parse_action(lambda s, loc, t: loc) + endlocMarker = locMarker.copy() + endlocMarker.callPreparse = False + matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") + if asString: + extractText = lambda s, l, t: s[t._original_start : t._original_end] + else: + + def extractText(s, l, t): + t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] + + matchExpr.set_parse_action(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs + matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) + return matchExpr + + +def ungroup(expr: ParserElement) -> ParserElement: + """Helper to undo pyparsing's default grouping of And expressions, + even if all but one are non-empty. + """ + return TokenConverter(expr).add_parse_action(lambda t: t[0]) + + +def locatedExpr(expr: ParserElement) -> ParserElement: + """ + (DEPRECATED - future code should use the :class:`Located` class) + Helper to decorate a returned token with its starting and ending + locations in the input string. 
+ + This helper adds the following results names: + + - ``locn_start`` - location where matched expression begins + - ``locn_end`` - location where matched expression ends + - ``value`` - the actual parsed results + + Be careful if the input text contains ```` characters, you + may want to call :class:`ParserElement.parse_with_tabs` + + Example:: + + wd = Word(alphas) + for match in locatedExpr(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): + print(match) + + prints:: + + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] + """ + locator = Empty().set_parse_action(lambda ss, ll, tt: ll) + return Group( + locator("locn_start") + + expr("value") + + locator.copy().leaveWhitespace()("locn_end") + ) + + +def nested_expr( + opener: Union[str, ParserElement] = "(", + closer: Union[str, ParserElement] = ")", + content: typing.Optional[ParserElement] = None, + ignore_expr: ParserElement = quoted_string(), + *, + ignoreExpr: ParserElement = quoted_string(), +) -> ParserElement: + """Helper method for defining nested lists enclosed in opening and + closing delimiters (``"("`` and ``")"`` are the default). + + Parameters: + + - ``opener`` - opening character for a nested list + (default= ``"("``); can also be a pyparsing expression + - ``closer`` - closing character for a nested list + (default= ``")"``); can also be a pyparsing expression + - ``content`` - expression for items within the nested lists + (default= ``None``) + - ``ignore_expr`` - expression for ignoring opening and closing delimiters + (default= :class:`quoted_string`) + - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility + but will be removed in a future release + + If an expression is not provided for the content argument, the + nested expression will capture all whitespace-delimited content + between delimiters as a list of separate values. 
+ + Use the ``ignore_expr`` argument to define expressions that may + contain opening or closing characters that should not be treated as + opening or closing characters for nesting, such as quoted_string or + a comment expression. Specify multiple expressions using an + :class:`Or` or :class:`MatchFirst`. The default is + :class:`quoted_string`, but if no expressions are to be ignored, then + pass ``None`` for this argument. + + Example:: + + data_type = one_of("void int short long char float double") + decl_data_type = Combine(data_type + Opt(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR, RPAR = map(Suppress, "()") + + code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Opt(DelimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(c_style_comment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.search_string(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + + prints:: + + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if ignoreExpr != ignore_expr: + ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener, str_type) and isinstance(closer, str_type): + opener = typing.cast(str, opener) + closer = typing.cast(str, closer) + if len(opener) == 1 and len(closer) == 1: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS, + exact=1, + ) + ) + 
).set_parse_action(lambda t: t[0].strip()) + else: + content = empty.copy() + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS + ).set_parse_action(lambda t: t[0].strip()) + else: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + content = Combine( + OneOrMore( + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + raise ValueError( + "opening and closing arguments must be strings if no content expression is given" + ) + ret = Forward() + if ignoreExpr is not None: + ret <<= Group( + Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) + ) + else: + ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) + ret.set_name(f"nested {opener}{closer} expression") + # don't override error message from content expressions + ret.errmsg = None + return ret + + +def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): + """Internal helper to construct opening and closing tag expressions, given a tag name""" + if isinstance(tagStr, str_type): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas, alphanums + "_-:") + if xml: + tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + + Opt("/", default=[False])("empty").set_parse_action( + lambda s, l, t: t[0] == "/" + ) + + suppress_GT + ) + else: + tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( + printables, exclude_chars=">" + ) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict( + ZeroOrMore( + Group( + 
tagAttrName.set_parse_action(lambda t: t[0].lower()) + + Opt(Suppress("=") + tagAttrValue) + ) + ) + ) + + Opt("/", default=[False])("empty").set_parse_action( + lambda s, l, t: t[0] == "/" + ) + + suppress_GT + ) + closeTag = Combine(Literal("", adjacent=False) + + openTag.set_name(f"<{resname}>") + # add start results name in parse action now that ungrouped names are not reported at two levels + openTag.add_parse_action( + lambda t: t.__setitem__( + "start" + "".join(resname.replace(":", " ").title().split()), t.copy() + ) + ) + closeTag = closeTag( + "end" + "".join(resname.replace(":", " ").title().split()) + ).set_name(f"") + openTag.tag = resname + closeTag.tag = resname + openTag.tag_body = SkipTo(closeTag()) + return openTag, closeTag + + +def make_html_tags( + tag_str: Union[str, ParserElement] +) -> tuple[ParserElement, ParserElement]: + """Helper to construct opening and closing tag expressions for HTML, + given a tag name. Matches tags in either upper or lower case, + attributes with namespaces and with quoted or unquoted values. + + Example:: + + text = 'More info at the pyparsing wiki page' + # make_html_tags returns pyparsing expressions for the opening and + # closing tags as a 2-tuple + a, a_end = make_html_tags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.search_string(text): + # attributes in the tag (like "href" shown here) are + # also accessible as named results + print(link.link_text, '->', link.href) + + prints:: + + pyparsing -> https://github.com/pyparsing/pyparsing/wiki + """ + return _makeTags(tag_str, False) + + +def make_xml_tags( + tag_str: Union[str, ParserElement] +) -> tuple[ParserElement, ParserElement]: + """Helper to construct opening and closing tag expressions for XML, + given a tag name. Matches tags only in the given upper/lower case. 
+ + Example: similar to :class:`make_html_tags` + """ + return _makeTags(tag_str, True) + + +any_open_tag: ParserElement +any_close_tag: ParserElement +any_open_tag, any_close_tag = make_html_tags( + Word(alphas, alphanums + "_:").set_name("any tag") +) + +_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} +_most_common_entities = "nbsp lt gt amp quot apos cent pound euro copy".replace( + " ", "|" +) +common_html_entity = Regex( + lambda: f"&(?P{_most_common_entities}|{make_compressed_re(_htmlEntityMap)});" +).set_name("common HTML entity") + + +def replace_html_entity(s, l, t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + + +class OpAssoc(Enum): + """Enumeration of operator associativity + - used in constructing InfixNotationOperatorSpec for :class:`infix_notation`""" + + LEFT = 1 + RIGHT = 2 + + +InfixNotationOperatorArgType = Union[ + ParserElement, str, tuple[Union[ParserElement, str], Union[ParserElement, str]] +] +InfixNotationOperatorSpec = Union[ + tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + typing.Optional[ParseAction], + ], + tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + ], +] + + +def infix_notation( + base_expr: ParserElement, + op_list: list[InfixNotationOperatorSpec], + lpar: Union[str, ParserElement] = Suppress("("), + rpar: Union[str, ParserElement] = Suppress(")"), +) -> ParserElement: + """Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary + or binary, left- or right-associative. Parse actions can also be + attached to operator expressions. The generated parser will also + recognize the use of parentheses to override operator precedences + (see example below). + + Note: if you define a deep operator list, you may see performance + issues when using infix_notation. 
See + :class:`ParserElement.enable_packrat` for a mechanism to potentially + improve your parser performance. + + Parameters: + + - ``base_expr`` - expression representing the most basic operand to + be used in the expression + - ``op_list`` - list of tuples, one for each operator precedence level + in the expression grammar; each tuple is of the form ``(op_expr, + num_operands, right_left_assoc, (optional)parse_action)``, where: + + - ``op_expr`` is the pyparsing expression for the operator; may also + be a string, which will be converted to a Literal; if ``num_operands`` + is 3, ``op_expr`` is a tuple of two expressions, for the two + operators separating the 3 terms + - ``num_operands`` is the number of terms for this operator (must be 1, + 2, or 3) + - ``right_left_assoc`` is the indicator whether the operator is right + or left associative, using the pyparsing-defined constants + ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. + - ``parse_action`` is the parse action to be associated with + expressions matching this operator expression (the parse action + tuple member may be omitted); if the parse action is passed + a tuple or list of functions, this is equivalent to calling + ``set_parse_action(*fn)`` + (:class:`ParserElement.set_parse_action`) + - ``lpar`` - expression for matching left-parentheses; if passed as a + str, then will be parsed as ``Suppress(lpar)``. If lpar is passed as + an expression (such as ``Literal('(')``), then it will be kept in + the parsed results, and grouped with them. (default= ``Suppress('(')``) + - ``rpar`` - expression for matching right-parentheses; if passed as a + str, then will be parsed as ``Suppress(rpar)``. If rpar is passed as + an expression (such as ``Literal(')')``), then it will be kept in + the parsed results, and grouped with them. 
(default= ``Suppress(')')``) + + Example:: + + # simple example of four-function arithmetic with ints and + # variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infix_notation(integer | varname, + [ + ('-', 1, OpAssoc.RIGHT), + (one_of('* /'), 2, OpAssoc.LEFT), + (one_of('+ -'), 2, OpAssoc.LEFT), + ]) + + arith_expr.run_tests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', full_dump=False) + + prints:: + + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + (5+x)*y + [[[5, '+', 'x'], '*', 'y']] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + + # captive version of FollowedBy that does not do parse actions or capture results names + class _FB(FollowedBy): + def parseImpl(self, instring, loc, doActions=True): + self.expr.try_parse(instring, loc) + return loc, [] + + _FB.__name__ = "FollowedBy>" + + ret = Forward() + ret.set_name(f"{base_expr.name}_expression") + if isinstance(lpar, str): + lpar = Suppress(lpar) + if isinstance(rpar, str): + rpar = Suppress(rpar) + + nested_expr = (lpar + ret + rpar).set_name(f"nested_{base_expr.name}") + + # if lpar and rpar are not suppressed, wrap in group + if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)): + lastExpr = base_expr | Group(nested_expr) + else: + lastExpr = base_expr | nested_expr + + arity: int + rightLeftAssoc: opAssoc + pa: typing.Optional[ParseAction] + opExpr1: ParserElement + opExpr2: ParserElement + matchExpr: ParserElement + match_lookahead: ParserElement + for operDef in op_list: + opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] # type: ignore[assignment] + if isinstance(opExpr, str_type): + opExpr = ParserElement._literalStringClass(opExpr) + opExpr = typing.cast(ParserElement, opExpr) + if arity == 3: + if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: + raise ValueError( + "if numterms=3, opExpr must be a tuple or list of two expressions" + ) + opExpr1, opExpr2 = opExpr + term_name = 
f"{opExpr1}{opExpr2} operations" + else: + term_name = f"{opExpr} operations" + + if not 1 <= arity <= 3: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + + if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): + raise ValueError("operator must indicate right or left associativity") + + thisExpr: ParserElement = Forward().set_name(term_name) + thisExpr = typing.cast(Forward, thisExpr) + match_lookahead = And([]) + if rightLeftAssoc is OpAssoc.LEFT: + if arity == 1: + match_lookahead = _FB(lastExpr + opExpr) + matchExpr = Group(lastExpr + opExpr[1, ...]) + elif arity == 2: + if opExpr is not None: + match_lookahead = _FB(lastExpr + opExpr + lastExpr) + matchExpr = Group(lastExpr + (opExpr + lastExpr)[1, ...]) + else: + match_lookahead = _FB(lastExpr + lastExpr) + matchExpr = Group(lastExpr[2, ...]) + elif arity == 3: + match_lookahead = _FB( + lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr + ) + matchExpr = Group( + lastExpr + (opExpr1 + lastExpr + opExpr2 + lastExpr)[1, ...] 
+ ) + elif rightLeftAssoc is OpAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Opt): + opExpr = Opt(opExpr) + match_lookahead = _FB(opExpr.expr + thisExpr) + matchExpr = Group(opExpr + thisExpr) + elif arity == 2: + if opExpr is not None: + match_lookahead = _FB(lastExpr + opExpr + thisExpr) + matchExpr = Group(lastExpr + (opExpr + thisExpr)[1, ...]) + else: + match_lookahead = _FB(lastExpr + thisExpr) + matchExpr = Group(lastExpr + thisExpr[1, ...]) + elif arity == 3: + match_lookahead = _FB( + lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr + ) + matchExpr = Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + + # suppress lookahead expr from railroad diagrams + match_lookahead.show_in_diagram = False + + # TODO - determine why this statement can't be included in the following + # if pa block + matchExpr = match_lookahead + matchExpr + + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.set_parse_action(*pa) + else: + matchExpr.set_parse_action(pa) + + thisExpr <<= (matchExpr | lastExpr).setName(term_name) + lastExpr = thisExpr + + ret <<= lastExpr + return ret + + +def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): + """ + (DEPRECATED - use :class:`IndentedBlock` class instead) + Helper method for defining space-delimited indentation blocks, + such as those used to define block statements in Python source code. + + Parameters: + + - ``blockStatementExpr`` - expression defining syntax of statement that + is repeated within the indented block + - ``indentStack`` - list created by caller to manage indentation stack + (multiple ``statementWithIndentedBlock`` expressions within a single + grammar should share a common ``indentStack``) + - ``indent`` - boolean indicating whether block must be indented beyond + the current level; set to ``False`` for block of left-most statements + (default= ``True``) + + A valid block must contain at least one ``blockStatement``. 
+ + (Note that indentedBlock uses internal parse actions which make it + incompatible with packrat parsing.) + + Example:: + + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group(funcDecl + func_body) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << (funcDef | assignment | identifier) + + module_body = stmt[1, ...] + + parseTree = module_body.parseString(data) + parseTree.pprint() + + prints:: + + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + backup_stacks.append(indentStack[:]) + + def reset_stack(): + indentStack[:] = backup_stacks[-1] + + def checkPeerIndent(s, l, t): + if l >= len(s): + return + curCol = col(l, s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseException(s, l, "illegal nesting") + raise ParseException(s, l, "not a peer entry") + + def checkSubIndent(s, l, t): + curCol = col(l, s) + if curCol > indentStack[-1]: + indentStack.append(curCol) + else: + raise ParseException(s, l, "not a subentry") + + def checkUnindent(s, l, t): + if l >= len(s): + return + curCol = col(l, s) + if not (indentStack and curCol in indentStack): + raise ParseException(s, l, "not an unindent") + if curCol < 
indentStack[-1]: + indentStack.pop() + + NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress()) + INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT") + PEER = Empty().set_parse_action(checkPeerIndent).set_name("") + UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT") + if indent: + smExpr = Group( + Opt(NL) + + INDENT + + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) + + UNDENT + ) + else: + smExpr = Group( + Opt(NL) + + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) + + Opt(UNDENT) + ) + + # add a parse action to remove backup_stack from list of backups + smExpr.add_parse_action( + lambda: backup_stacks.pop(-1) and None if backup_stacks else None + ) + smExpr.set_fail_action(lambda a, b, c, d: reset_stack()) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.set_name("indented block") + + +# it's easy to get these comment structures wrong - they're very common, +# so may as well make them available +c_style_comment = Regex(r"/\*(?:[^*]|\*(?!/))*\*\/").set_name("C style comment") +"Comment of the form ``/* ... */``" + +html_comment = Regex(r"").set_name("HTML comment") +"Comment of the form ````" + +rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line") +dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment") +"Comment of the form ``// ... (to end of line)``" + +cpp_style_comment = Regex( + r"(?:/\*(?:[^*]|\*(?!/))*\*\/)|(?://(?:\\\n|[^\n])*)" +).set_name("C++ style comment") +"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`" + +java_style_comment = cpp_style_comment +"Same as :class:`cpp_style_comment`" + +python_style_comment = Regex(r"#.*").set_name("Python style comment") +"Comment of the form ``# ... 
(to end of line)``" + + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs: list[ParserElement] = [ + v for v in vars().values() if isinstance(v, ParserElement) +] + + +# compatibility function, superseded by DelimitedList class +def delimited_list( + expr: Union[str, ParserElement], + delim: Union[str, ParserElement] = ",", + combine: bool = False, + min: typing.Optional[int] = None, + max: typing.Optional[int] = None, + *, + allow_trailing_delim: bool = False, +) -> ParserElement: + """(DEPRECATED - use :class:`DelimitedList` class)""" + return DelimitedList( + expr, delim, combine, min, max, allow_trailing_delim=allow_trailing_delim + ) + + +# Compatibility synonyms +# fmt: off +opAssoc = OpAssoc +anyOpenTag = any_open_tag +anyCloseTag = any_close_tag +commonHTMLEntity = common_html_entity +cStyleComment = c_style_comment +htmlComment = html_comment +restOfLine = rest_of_line +dblSlashComment = dbl_slash_comment +cppStyleComment = cpp_style_comment +javaStyleComment = java_style_comment +pythonStyleComment = python_style_comment +delimitedList = replaced_by_pep8("delimitedList", DelimitedList) +delimited_list = replaced_by_pep8("delimited_list", DelimitedList) +countedArray = replaced_by_pep8("countedArray", counted_array) +matchPreviousLiteral = replaced_by_pep8("matchPreviousLiteral", match_previous_literal) +matchPreviousExpr = replaced_by_pep8("matchPreviousExpr", match_previous_expr) +oneOf = replaced_by_pep8("oneOf", one_of) +dictOf = replaced_by_pep8("dictOf", dict_of) +originalTextFor = replaced_by_pep8("originalTextFor", original_text_for) +nestedExpr = replaced_by_pep8("nestedExpr", nested_expr) +makeHTMLTags = replaced_by_pep8("makeHTMLTags", make_html_tags) +makeXMLTags = replaced_by_pep8("makeXMLTags", make_xml_tags) +replaceHTMLEntity = replaced_by_pep8("replaceHTMLEntity", replace_html_entity) +infixNotation = replaced_by_pep8("infixNotation", infix_notation) +# fmt: on diff --git 
# unicode.py

import sys
from typing import Union


class _lazyclassproperty:
    """Descriptor for a class-level property that is computed once per class
    and memoized in a per-class ``_intern`` dict."""

    def __init__(self, fn):
        self.fn = fn
        self.__doc__ = fn.__doc__
        self.__name__ = fn.__name__

    def __get__(self, obj, cls):
        if cls is None:
            cls = type(obj)
        # Give each class its own cache; never reuse one inherited from a base.
        if not hasattr(cls, "_intern") or any(
            cls._intern is getattr(parent, "_intern", [])
            for parent in cls.__mro__[1:]
        ):
            cls._intern = {}
        key = self.fn.__name__
        if key not in cls._intern:
            cls._intern[key] = self.fn(cls)
        return cls._intern[key]


UnicodeRangeList = list[Union[tuple[int, int], tuple[int]]]


class unicode_set:
    """
    A set of Unicode characters, used to build language-specific values for
    ``alphas``, ``nums``, ``alphanums``, and ``printables``.

    The character set is described by ``_ranges``, a class attribute listing
    inclusive codepoint ranges as 2-tuples, or single codepoints as 1-tuples::

        _ranges = [
            (0x0020, 0x007e),
            (0x00a0, 0x00ff),
            (0x0100,),
        ]

    A 1-tuple of (x,) is treated as (x, x).

    Sets may also be combined through multiple inheritance::

        class CJK(Chinese, Japanese, Korean):
            pass
    """

    _ranges: UnicodeRangeList = []

    @_lazyclassproperty
    def _chars_for_ranges(cls) -> list[str]:
        # Walk the MRO so that multiple inheritance unions the ranges of
        # every unicode_set base class.
        codepoints: set[int] = set()
        for klass in cls.__mro__:  # type: ignore[attr-defined]
            if klass is unicode_set:
                break
            for span in getattr(klass, "_ranges", ()):
                # span[-1] handles both 1-tuples and 2-tuples.
                codepoints.update(range(span[0], span[-1] + 1))
        return sorted(chr(cp) for cp in codepoints)

    @_lazyclassproperty
    def printables(cls) -> str:
        """all non-whitespace characters in this range"""
        return "".join(c for c in cls._chars_for_ranges if not c.isspace())

    @_lazyclassproperty
    def alphas(cls) -> str:
        """all alphabetic characters in this range"""
        return "".join(c for c in cls._chars_for_ranges if c.isalpha())

    @_lazyclassproperty
    def nums(cls) -> str:
        """all numeric digit characters in this range"""
        return "".join(c for c in cls._chars_for_ranges if c.isdigit())

    @_lazyclassproperty
    def alphanums(cls) -> str:
        """all alphanumeric characters in this range"""
        return cls.alphas + cls.nums

    @_lazyclassproperty
    def identchars(cls) -> str:
        """all characters in this range that are valid identifier characters, plus underscore '_'"""
        chars = set(filter(str.isidentifier, cls._chars_for_ranges))
        chars |= set(
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
            "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
            "_"
        )
        return "".join(sorted(chars))

    @_lazyclassproperty
    def identbodychars(cls) -> str:
        """
        all characters in this range that are valid identifier body characters,
        plus the digits 0-9, and · (Unicode MIDDLE DOT)
        """
        # A character is a valid identifier *body* char iff "_" + char is
        # itself a valid identifier.
        body = {c for c in cls._chars_for_ranges if ("_" + c).isidentifier()}
        return "".join(sorted(body | set(cls.identchars) | set("0123456789·")))

    @_lazyclassproperty
    def identifier(cls):
        """
        a pyparsing Word expression for an identifier using this range's definitions for
        identchars and identbodychars
        """
        # Imported lazily to avoid a circular import at module load time.
        from pyparsing import Word

        return Word(cls.identchars, cls.identbodychars)


class pyparsing_unicode(unicode_set):
    """
    A namespace class for defining common language unicode_sets.
    """

    # fmt: off

    # default range: everything from SPACE to the top of the Unicode space
    _ranges: UnicodeRangeList = [
        (0x0020, sys.maxunicode),
    ]

    class BasicMultilingualPlane(unicode_set):
        """Unicode set for the Basic Multilingual Plane"""
        _ranges: UnicodeRangeList = [
            (0x0020, 0xFFFF),
        ]

    class Latin1(unicode_set):
        """Unicode set for Latin-1 Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0020, 0x007E),
            (0x00A0, 0x00FF),
        ]

    class LatinA(unicode_set):
        """Unicode set for Latin-A Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0100, 0x017F),
        ]

    class LatinB(unicode_set):
        """Unicode set for Latin-B Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0180, 0x024F),
        ]

    class Greek(unicode_set):
        """Unicode set for Greek Unicode Character Ranges"""
        _ranges: UnicodeRangeList = [
            (0x0342, 0x0345),
            (0x0370, 0x0377),
            (0x037A, 0x037F),
            (0x0384, 0x038A),
            (0x038C,),
            (0x038E, 0x03A1),
            (0x03A3, 0x03E1),
            (0x03F0, 0x03FF),
            (0x1D26, 0x1D2A),
            (0x1D5E,),
            (0x1D60,),
            (0x1D66, 0x1D6A),
            (0x1F00, 0x1F15),
            (0x1F18, 0x1F1D),
            (0x1F20, 0x1F45),
            (0x1F48, 0x1F4D),
            (0x1F50, 0x1F57),
            (0x1F59,),
            (0x1F5B,),
            (0x1F5D,),
            (0x1F5F, 0x1F7D),
            (0x1F80, 0x1FB4),
            (0x1FB6, 0x1FC4),
            (0x1FC6, 0x1FD3),
            (0x1FD6, 0x1FDB),
            (0x1FDD, 0x1FEF),
            (0x1FF2, 0x1FF4),
            (0x1FF6, 0x1FFE),
            (0x2129,),
            (0x2719, 0x271A),
            (0xAB65,),
            (0x10140, 0x1018D),
            (0x101A0,),
            (0x1D200, 0x1D245),
            (0x1F7A1, 0x1F7A7),
        ]

    class Cyrillic(unicode_set):
        """Unicode set for Cyrillic Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0400, 0x052F),
            (0x1C80, 0x1C88),
            (0x1D2B,),
            (0x1D78,),
            (0x2DE0, 0x2DFF),
            (0xA640, 0xA672),
            (0xA674, 0xA69F),
            (0xFE2E, 0xFE2F),
        ]

    class Chinese(unicode_set):
        """Unicode set for Chinese Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x2E80, 0x2E99),
            (0x2E9B, 0x2EF3),
            (0x31C0, 0x31E3),
            (0x3400, 0x4DB5),
            (0x4E00, 0x9FEF),
            (0xA700, 0xA707),
            (0xF900, 0xFA6D),
            (0xFA70, 0xFAD9),
            (0x16FE2, 0x16FE3),
            (0x1F210, 0x1F212),
            (0x1F214, 0x1F23B),
            (0x1F240, 0x1F248),
            (0x20000, 0x2A6D6),
            (0x2A700, 0x2B734),
            (0x2B740, 0x2B81D),
            (0x2B820, 0x2CEA1),
            (0x2CEB0, 0x2EBE0),
            (0x2F800, 0x2FA1D),
        ]

    class Japanese(unicode_set):
        """Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"""

        class Kanji(unicode_set):
            "Unicode set for Kanji Unicode Character Range"
            _ranges: UnicodeRangeList = [
                (0x4E00, 0x9FBF),
                (0x3000, 0x303F),
            ]

        class Hiragana(unicode_set):
            """Unicode set for Hiragana Unicode Character Range"""
            _ranges: UnicodeRangeList = [
                (0x3041, 0x3096),
                (0x3099, 0x30A0),
                (0x30FC,),
                (0xFF70,),
                (0x1B001,),
                (0x1B150, 0x1B152),
                (0x1F200,),
            ]

        class Katakana(unicode_set):
            """Unicode set for Katakana Unicode Character Range"""
            _ranges: UnicodeRangeList = [
                (0x3099, 0x309C),
                (0x30A0, 0x30FF),
                (0x31F0, 0x31FF),
                (0x32D0, 0x32FE),
                (0xFF65, 0xFF9F),
                (0x1B000,),
                (0x1B164, 0x1B167),
                (0x1F201, 0x1F202),
                (0x1F213,),
            ]

        # native-language aliases for the sub-ranges
        漢字 = Kanji
        カタカナ = Katakana
        ひらがな = Hiragana

        _ranges = (
            Kanji._ranges
            + Hiragana._ranges
            + Katakana._ranges
        )

    class Hangul(unicode_set):
        """Unicode set for Hangul (Korean) Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x1100, 0x11FF),
            (0x302E, 0x302F),
            (0x3131, 0x318E),
            (0x3200, 0x321C),
            (0x3260, 0x327B),
            (0x327E,),
            (0xA960, 0xA97C),
            (0xAC00, 0xD7A3),
            (0xD7B0, 0xD7C6),
            (0xD7CB, 0xD7FB),
            (0xFFA0, 0xFFBE),
            (0xFFC2, 0xFFC7),
            (0xFFCA, 0xFFCF),
            (0xFFD2, 0xFFD7),
            (0xFFDA, 0xFFDC),
        ]

    Korean = Hangul

    class CJK(Chinese, Japanese, Hangul):
        """Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"""

    class Thai(unicode_set):
        """Unicode set for Thai Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0E01, 0x0E3A),
            (0x0E3F, 0x0E5B)
        ]

    class Arabic(unicode_set):
        """Unicode set for Arabic Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0600, 0x061B),
            (0x061E, 0x06FF),
            (0x0700, 0x077F),
        ]

    class Hebrew(unicode_set):
        """Unicode set for Hebrew Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0591, 0x05C7),
            (0x05D0, 0x05EA),
            (0x05EF, 0x05F4),
            (0xFB1D, 0xFB36),
            (0xFB38, 0xFB3C),
            (0xFB3E,),
            (0xFB40, 0xFB41),
            (0xFB43, 0xFB44),
            (0xFB46, 0xFB4F),
        ]

    class Devanagari(unicode_set):
        """Unicode set for Devanagari Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0900, 0x097F),
            (0xA8E0, 0xA8FF)
        ]

    BMP = BasicMultilingualPlane

    # add language identifiers using language Unicode
    العربية = Arabic
    中文 = Chinese
    кириллица = Cyrillic
    Ελληνικά = Greek
    עִברִית = Hebrew
    日本語 = Japanese
    한국어 = Korean
    ไทย = Thai
    देवनागरी = Devanagari

    # fmt: on
from contextvars import ContextVar
from typing import Optional
import sys
import threading

# Context-local override for the detected library name (used e.g. by
# trio-asyncio to switch modes).
current_async_library_cvar = ContextVar(
    "current_async_library_cvar", default=None
)  # type: ContextVar[Optional[str]]


class _ThreadLocal(threading.local):
    # threading.local provides no explicit mechanism for declaring a
    # default value, so a class attribute serves as the fallback.
    name = None  # type: Optional[str]


thread_local = _ThreadLocal()


class AsyncLibraryNotFoundError(RuntimeError):
    pass


def current_async_library() -> str:
    """Detect which async library is currently running.

    Supported libraries: Trio v0.6+ (``"trio"``), Curio (``"curio"``),
    asyncio (``"asyncio"``), and trio-asyncio v0.8.2+ (``"trio"`` or
    ``"asyncio"`` depending on its current mode).

    Returns:
        A string like ``"trio"``.

    Raises:
        AsyncLibraryNotFoundError: if called from synchronous context,
        or if the current async library was not recognized.

    Example:

    .. code-block:: python3

        from sniffio import current_async_library

        async def generic_sleep(seconds):
            library = current_async_library()
            if library == "trio":
                import trio
                await trio.sleep(seconds)
            elif library == "asyncio":
                import asyncio
                await asyncio.sleep(seconds)
            else:
                raise RuntimeError(f"Unsupported library {library!r}")
    """
    # Explicit announcements win: the per-thread slot first, then the
    # contextvar.
    announced = thread_local.name
    if announced is None:
        announced = current_async_library_cvar.get()
    if announced is not None:
        return announced

    # asyncio never announces itself, so sniff for a running task -- but
    # only if someone has actually imported asyncio.
    if "asyncio" in sys.modules:
        import asyncio

        task_getter = getattr(asyncio, "current_task", None)  # 3.7+
        if task_getter is None:
            task_getter = asyncio.Task.current_task  # pre-3.7 fallback
        try:
            inside_task = task_getter() is not None
        except RuntimeError:
            # No running event loop in this thread.
            inside_task = False
        if inside_task:
            return "asyncio"

    # Sniff for curio (for now) via its explicit runtime check.
    if 'curio' in sys.modules:
        from curio.meta import curio_running

        if curio_running():
            return 'curio'

    raise AsyncLibraryNotFoundError(
        "unknown async library, or not in async context"
    )
import os
import sys

import pytest

from .. import (
    current_async_library, AsyncLibraryNotFoundError,
    current_async_library_cvar, thread_local
)


def test_basics_cvar():
    # Outside any async context, detection must fail.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    # A contextvar announcement is honored while set...
    tok = current_async_library_cvar.set("generic-lib")
    try:
        assert current_async_library() == "generic-lib"
    finally:
        current_async_library_cvar.reset(tok)

    # ...and forgotten once reset.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()


def test_basics_tlocal():
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    # A thread-local announcement is honored while set...
    saved, thread_local.name = thread_local.name, "generic-lib"
    try:
        assert current_async_library() == "generic-lib"
    finally:
        thread_local.name = saved

    # ...and forgotten once restored.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()


def test_asyncio():
    import asyncio

    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    observed = []

    async def inside_asyncio():
        assert current_async_library() == "asyncio"
        # Call it a second time to exercise the caching logic
        assert current_async_library() == "asyncio"
        observed.append(True)

    asyncio.run(inside_asyncio())
    assert observed == [True]

    # Detection must not stick after the loop exits.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()


@pytest.mark.skipif(
    sys.version_info >= (3, 12),
    reason=
    "curio broken on 3.12 (https://github.com/python-trio/sniffio/pull/42)",
)
def test_curio():
    import curio

    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    observed = []

    async def inside_curio():
        assert current_async_library() == "curio"
        # Call it a second time to exercise the caching logic
        assert current_async_library() == "curio"
        observed.append(True)

    curio.run(inside_curio)
    assert observed == [True]

    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()