Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- evalkit_tf437/lib/python3.10/site-packages/antlr4/CommonTokenFactory.py +61 -0
- evalkit_tf437/lib/python3.10/site-packages/coloredlogs/__pycache__/cli.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/coloredlogs/__pycache__/tests.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/coloredlogs/converter/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA +233 -0
- evalkit_tf437/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD +181 -0
- evalkit_tf437/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL +5 -0
- evalkit_tf437/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt +1 -0
- evalkit_tf437/lib/python3.10/site-packages/ninja/__init__.py +55 -0
- evalkit_tf437/lib/python3.10/site-packages/ninja/_version.py +16 -0
- evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libXau-00ec42fe.so.6.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-image-e82a276d.so.0.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-randr-a96a5a87.so.0.1.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-render-637b984a.so.0.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-shape-25c2b258.so.0.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-shm-7a199f70.so.0.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-sync-89374f40.so.1.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-util-4d666913.so.1.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-xinerama-ae147f87.so.0.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/__pycache__/_misc.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/boxplot.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/core.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/hist.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/tools.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/__init__.py +326 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/actions.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/exceptions.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/helpers.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/results.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/unicode.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/actions.py +206 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/common.py +434 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/exceptions.py +314 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/helpers.py +1086 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/py.typed +0 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/results.py +815 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/testing.py +362 -0
- evalkit_tf437/lib/python3.10/site-packages/pyparsing/unicode.py +356 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio/__init__.py +17 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_impl.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_version.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio/_impl.py +95 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/__init__.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/test_sniffio.py +84 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio/_version.py +3 -0
evalkit_tf437/lib/python3.10/site-packages/antlr4/CommonTokenFactory.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
| 3 |
+
# Use of this file is governed by the BSD 3-clause license that
|
| 4 |
+
# can be found in the LICENSE.txt file in the project root.
|
| 5 |
+
#
|
| 6 |
+
|
| 7 |
+
#
|
| 8 |
+
# This default implementation of {@link TokenFactory} creates
|
| 9 |
+
# {@link CommonToken} objects.
|
| 10 |
+
#
|
| 11 |
+
from antlr4.Token import CommonToken
|
| 12 |
+
|
| 13 |
+
class TokenFactory(object):
|
| 14 |
+
|
| 15 |
+
pass
|
| 16 |
+
|
| 17 |
+
class CommonTokenFactory(TokenFactory):
|
| 18 |
+
__slots__ = 'copyText'
|
| 19 |
+
|
| 20 |
+
#
|
| 21 |
+
# The default {@link CommonTokenFactory} instance.
|
| 22 |
+
#
|
| 23 |
+
# <p>
|
| 24 |
+
# This token factory does not explicitly copy token text when constructing
|
| 25 |
+
# tokens.</p>
|
| 26 |
+
#
|
| 27 |
+
DEFAULT = None
|
| 28 |
+
|
| 29 |
+
def __init__(self, copyText:bool=False):
|
| 30 |
+
# Indicates whether {@link CommonToken#setText} should be called after
|
| 31 |
+
# constructing tokens to explicitly set the text. This is useful for cases
|
| 32 |
+
# where the input stream might not be able to provide arbitrary substrings
|
| 33 |
+
# of text from the input after the lexer creates a token (e.g. the
|
| 34 |
+
# implementation of {@link CharStream#getText} in
|
| 35 |
+
# {@link UnbufferedCharStream} throws an
|
| 36 |
+
# {@link UnsupportedOperationException}). Explicitly setting the token text
|
| 37 |
+
# allows {@link Token#getText} to be called at any time regardless of the
|
| 38 |
+
# input stream implementation.
|
| 39 |
+
#
|
| 40 |
+
# <p>
|
| 41 |
+
# The default value is {@code false} to avoid the performance and memory
|
| 42 |
+
# overhead of copying text for every token unless explicitly requested.</p>
|
| 43 |
+
#
|
| 44 |
+
self.copyText = copyText
|
| 45 |
+
|
| 46 |
+
def create(self, source, type:int, text:str, channel:int, start:int, stop:int, line:int, column:int):
|
| 47 |
+
t = CommonToken(source, type, channel, start, stop)
|
| 48 |
+
t.line = line
|
| 49 |
+
t.column = column
|
| 50 |
+
if text is not None:
|
| 51 |
+
t.text = text
|
| 52 |
+
elif self.copyText and source[1] is not None:
|
| 53 |
+
t.text = source[1].getText(start,stop)
|
| 54 |
+
return t
|
| 55 |
+
|
| 56 |
+
def createThin(self, type:int, text:str):
|
| 57 |
+
t = CommonToken(type=type)
|
| 58 |
+
t.text = text
|
| 59 |
+
return t
|
| 60 |
+
|
| 61 |
+
CommonTokenFactory.DEFAULT = CommonTokenFactory()
|
evalkit_tf437/lib/python3.10/site-packages/coloredlogs/__pycache__/cli.cpython-310.pyc
ADDED
|
Binary file (3.11 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/coloredlogs/__pycache__/tests.cpython-310.pyc
ADDED
|
Binary file (25.1 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/coloredlogs/converter/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: mpmath
|
| 3 |
+
Version: 1.3.0
|
| 4 |
+
Summary: Python library for arbitrary-precision floating-point arithmetic
|
| 5 |
+
Home-page: http://mpmath.org/
|
| 6 |
+
Author: Fredrik Johansson
|
| 7 |
+
Author-email: fredrik.johansson@gmail.com
|
| 8 |
+
License: BSD
|
| 9 |
+
Project-URL: Source, https://github.com/fredrik-johansson/mpmath
|
| 10 |
+
Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues
|
| 11 |
+
Project-URL: Documentation, http://mpmath.org/doc/current/
|
| 12 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 13 |
+
Classifier: Topic :: Scientific/Engineering :: Mathematics
|
| 14 |
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
| 15 |
+
Classifier: Programming Language :: Python
|
| 16 |
+
Classifier: Programming Language :: Python :: 2
|
| 17 |
+
Classifier: Programming Language :: Python :: 2.7
|
| 18 |
+
Classifier: Programming Language :: Python :: 3
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.5
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.6
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.7
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 24 |
+
Classifier: Programming Language :: Python :: Implementation :: CPython
|
| 25 |
+
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
| 26 |
+
License-File: LICENSE
|
| 27 |
+
Provides-Extra: develop
|
| 28 |
+
Requires-Dist: pytest (>=4.6) ; extra == 'develop'
|
| 29 |
+
Requires-Dist: pycodestyle ; extra == 'develop'
|
| 30 |
+
Requires-Dist: pytest-cov ; extra == 'develop'
|
| 31 |
+
Requires-Dist: codecov ; extra == 'develop'
|
| 32 |
+
Requires-Dist: wheel ; extra == 'develop'
|
| 33 |
+
Provides-Extra: docs
|
| 34 |
+
Requires-Dist: sphinx ; extra == 'docs'
|
| 35 |
+
Provides-Extra: gmpy
|
| 36 |
+
Requires-Dist: gmpy2 (>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy'
|
| 37 |
+
Provides-Extra: tests
|
| 38 |
+
Requires-Dist: pytest (>=4.6) ; extra == 'tests'
|
| 39 |
+
|
| 40 |
+
mpmath
|
| 41 |
+
======
|
| 42 |
+
|
| 43 |
+
|pypi version| |Build status| |Code coverage status| |Zenodo Badge|
|
| 44 |
+
|
| 45 |
+
.. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg
|
| 46 |
+
:target: https://pypi.python.org/pypi/mpmath
|
| 47 |
+
.. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg
|
| 48 |
+
:target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test
|
| 49 |
+
.. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg
|
| 50 |
+
:target: https://codecov.io/gh/fredrik-johansson/mpmath
|
| 51 |
+
.. |Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg
|
| 52 |
+
:target: https://zenodo.org/badge/latestdoi/2934512
|
| 53 |
+
|
| 54 |
+
A Python library for arbitrary-precision floating-point arithmetic.
|
| 55 |
+
|
| 56 |
+
Website: http://mpmath.org/
|
| 57 |
+
Main author: Fredrik Johansson <fredrik.johansson@gmail.com>
|
| 58 |
+
|
| 59 |
+
Mpmath is free software released under the New BSD License (see the
|
| 60 |
+
LICENSE file for details)
|
| 61 |
+
|
| 62 |
+
0. History and credits
|
| 63 |
+
----------------------
|
| 64 |
+
|
| 65 |
+
The following people (among others) have contributed major patches
|
| 66 |
+
or new features to mpmath:
|
| 67 |
+
|
| 68 |
+
* Pearu Peterson <pearu.peterson@gmail.com>
|
| 69 |
+
* Mario Pernici <mario.pernici@mi.infn.it>
|
| 70 |
+
* Ondrej Certik <ondrej@certik.cz>
|
| 71 |
+
* Vinzent Steinberg <vinzent.steinberg@gmail.cm>
|
| 72 |
+
* Nimish Telang <ntelang@gmail.com>
|
| 73 |
+
* Mike Taschuk <mtaschuk@ece.ualberta.ca>
|
| 74 |
+
* Case Van Horsen <casevh@gmail.com>
|
| 75 |
+
* Jorn Baayen <jorn.baayen@gmail.com>
|
| 76 |
+
* Chris Smith <smichr@gmail.com>
|
| 77 |
+
* Juan Arias de Reyna <arias@us.es>
|
| 78 |
+
* Ioannis Tziakos <itziakos@gmail.com>
|
| 79 |
+
* Aaron Meurer <asmeurer@gmail.com>
|
| 80 |
+
* Stefan Krastanov <krastanov.stefan@gmail.com>
|
| 81 |
+
* Ken Allen <ken.allen@sbcglobal.net>
|
| 82 |
+
* Timo Hartmann <thartmann15@gmail.com>
|
| 83 |
+
* Sergey B Kirpichev <skirpichev@gmail.com>
|
| 84 |
+
* Kris Kuhlman <kristopher.kuhlman@gmail.com>
|
| 85 |
+
* Paul Masson <paulmasson@analyticphysics.com>
|
| 86 |
+
* Michael Kagalenko <michael.kagalenko@gmail.com>
|
| 87 |
+
* Jonathan Warner <warnerjon12@gmail.com>
|
| 88 |
+
* Max Gaukler <max.gaukler@fau.de>
|
| 89 |
+
* Guillermo Navas-Palencia <g.navas.palencia@gmail.com>
|
| 90 |
+
* Nike Dattani <nike@hpqc.org>
|
| 91 |
+
|
| 92 |
+
Numerous other people have contributed by reporting bugs,
|
| 93 |
+
requesting new features, or suggesting improvements to the
|
| 94 |
+
documentation.
|
| 95 |
+
|
| 96 |
+
For a detailed changelog, including individual contributions,
|
| 97 |
+
see the CHANGES file.
|
| 98 |
+
|
| 99 |
+
Fredrik's work on mpmath during summer 2008 was sponsored by Google
|
| 100 |
+
as part of the Google Summer of Code program.
|
| 101 |
+
|
| 102 |
+
Fredrik's work on mpmath during summer 2009 was sponsored by the
|
| 103 |
+
American Institute of Mathematics under the support of the National Science
|
| 104 |
+
Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms).
|
| 105 |
+
|
| 106 |
+
Any opinions, findings, and conclusions or recommendations expressed in this
|
| 107 |
+
material are those of the author(s) and do not necessarily reflect the
|
| 108 |
+
views of the sponsors.
|
| 109 |
+
|
| 110 |
+
Credit also goes to:
|
| 111 |
+
|
| 112 |
+
* The authors of the GMP library and the Python wrapper
|
| 113 |
+
gmpy, enabling mpmath to become much faster at
|
| 114 |
+
high precision
|
| 115 |
+
* The authors of MPFR, pari/gp, MPFUN, and other arbitrary-
|
| 116 |
+
precision libraries, whose documentation has been helpful
|
| 117 |
+
for implementing many of the algorithms in mpmath
|
| 118 |
+
* Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik;
|
| 119 |
+
Wolfram Research for MathWorld and the Wolfram Functions site.
|
| 120 |
+
These are the main references used for special functions
|
| 121 |
+
implementations.
|
| 122 |
+
* George Brandl for developing the Sphinx documentation tool
|
| 123 |
+
used to build mpmath's documentation
|
| 124 |
+
|
| 125 |
+
Release history:
|
| 126 |
+
|
| 127 |
+
* Version 1.3.0 released on March 7, 2023
|
| 128 |
+
* Version 1.2.0 released on February 1, 2021
|
| 129 |
+
* Version 1.1.0 released on December 11, 2018
|
| 130 |
+
* Version 1.0.0 released on September 27, 2017
|
| 131 |
+
* Version 0.19 released on June 10, 2014
|
| 132 |
+
* Version 0.18 released on December 31, 2013
|
| 133 |
+
* Version 0.17 released on February 1, 2011
|
| 134 |
+
* Version 0.16 released on September 24, 2010
|
| 135 |
+
* Version 0.15 released on June 6, 2010
|
| 136 |
+
* Version 0.14 released on February 5, 2010
|
| 137 |
+
* Version 0.13 released on August 13, 2009
|
| 138 |
+
* Version 0.12 released on June 9, 2009
|
| 139 |
+
* Version 0.11 released on January 26, 2009
|
| 140 |
+
* Version 0.10 released on October 15, 2008
|
| 141 |
+
* Version 0.9 released on August 23, 2008
|
| 142 |
+
* Version 0.8 released on April 20, 2008
|
| 143 |
+
* Version 0.7 released on March 12, 2008
|
| 144 |
+
* Version 0.6 released on January 13, 2008
|
| 145 |
+
* Version 0.5 released on November 24, 2007
|
| 146 |
+
* Version 0.4 released on November 3, 2007
|
| 147 |
+
* Version 0.3 released on October 5, 2007
|
| 148 |
+
* Version 0.2 released on October 2, 2007
|
| 149 |
+
* Version 0.1 released on September 27, 2007
|
| 150 |
+
|
| 151 |
+
1. Download & installation
|
| 152 |
+
--------------------------
|
| 153 |
+
|
| 154 |
+
Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested
|
| 155 |
+
with CPython 2.7, 3.5 through 3.7 and for PyPy.
|
| 156 |
+
|
| 157 |
+
The latest release of mpmath can be downloaded from the mpmath
|
| 158 |
+
website and from https://github.com/fredrik-johansson/mpmath/releases
|
| 159 |
+
|
| 160 |
+
It should also be available in the Python Package Index at
|
| 161 |
+
https://pypi.python.org/pypi/mpmath
|
| 162 |
+
|
| 163 |
+
To install latest release of Mpmath with pip, simply run
|
| 164 |
+
|
| 165 |
+
``pip install mpmath``
|
| 166 |
+
|
| 167 |
+
Or unpack the mpmath archive and run
|
| 168 |
+
|
| 169 |
+
``python setup.py install``
|
| 170 |
+
|
| 171 |
+
Mpmath can also be installed using
|
| 172 |
+
|
| 173 |
+
``python -m easy_install mpmath``
|
| 174 |
+
|
| 175 |
+
The latest development code is available from
|
| 176 |
+
https://github.com/fredrik-johansson/mpmath
|
| 177 |
+
|
| 178 |
+
See the main documentation for more detailed instructions.
|
| 179 |
+
|
| 180 |
+
2. Running tests
|
| 181 |
+
----------------
|
| 182 |
+
|
| 183 |
+
The unit tests in mpmath/tests/ can be run via the script
|
| 184 |
+
runtests.py, but it is recommended to run them with py.test
|
| 185 |
+
(https://pytest.org/), especially
|
| 186 |
+
to generate more useful reports in case there are failures.
|
| 187 |
+
|
| 188 |
+
You may also want to check out the demo scripts in the demo
|
| 189 |
+
directory.
|
| 190 |
+
|
| 191 |
+
The master branch is automatically tested by Travis CI.
|
| 192 |
+
|
| 193 |
+
3. Documentation
|
| 194 |
+
----------------
|
| 195 |
+
|
| 196 |
+
Documentation in reStructuredText format is available in the
|
| 197 |
+
doc directory included with the source package. These files
|
| 198 |
+
are human-readable, but can be compiled to prettier HTML using
|
| 199 |
+
the build.py script (requires Sphinx, http://sphinx.pocoo.org/).
|
| 200 |
+
|
| 201 |
+
See setup.txt in the documentation for more information.
|
| 202 |
+
|
| 203 |
+
The most recent documentation is also available in HTML format:
|
| 204 |
+
|
| 205 |
+
http://mpmath.org/doc/current/
|
| 206 |
+
|
| 207 |
+
4. Known problems
|
| 208 |
+
-----------------
|
| 209 |
+
|
| 210 |
+
Mpmath is a work in progress. Major issues include:
|
| 211 |
+
|
| 212 |
+
* Some functions may return incorrect values when given extremely
|
| 213 |
+
large arguments or arguments very close to singularities.
|
| 214 |
+
|
| 215 |
+
* Directed rounding works for arithmetic operations. It is implemented
|
| 216 |
+
heuristically for other operations, and their results may be off by one
|
| 217 |
+
or two units in the last place (even if otherwise accurate).
|
| 218 |
+
|
| 219 |
+
* Some IEEE 754 features are not available. Inifinities and NaN are
|
| 220 |
+
partially supported; denormal rounding is currently not available
|
| 221 |
+
at all.
|
| 222 |
+
|
| 223 |
+
* The interface for switching precision and rounding is not finalized.
|
| 224 |
+
The current method is not threadsafe.
|
| 225 |
+
|
| 226 |
+
5. Help and bug reports
|
| 227 |
+
-----------------------
|
| 228 |
+
|
| 229 |
+
General questions and comments can be sent to the mpmath mailinglist,
|
| 230 |
+
mpmath@googlegroups.com
|
| 231 |
+
|
| 232 |
+
You can also report bugs and send patches to the mpmath issue tracker,
|
| 233 |
+
https://github.com/fredrik-johansson/mpmath/issues
|
evalkit_tf437/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mpmath-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537
|
| 3 |
+
mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630
|
| 4 |
+
mpmath-1.3.0.dist-info/RECORD,,
|
| 5 |
+
mpmath-1.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
|
| 7 |
+
mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7
|
| 8 |
+
mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765
|
| 9 |
+
mpmath/__pycache__/__init__.cpython-310.pyc,,
|
| 10 |
+
mpmath/__pycache__/ctx_base.cpython-310.pyc,,
|
| 11 |
+
mpmath/__pycache__/ctx_fp.cpython-310.pyc,,
|
| 12 |
+
mpmath/__pycache__/ctx_iv.cpython-310.pyc,,
|
| 13 |
+
mpmath/__pycache__/ctx_mp.cpython-310.pyc,,
|
| 14 |
+
mpmath/__pycache__/ctx_mp_python.cpython-310.pyc,,
|
| 15 |
+
mpmath/__pycache__/function_docs.cpython-310.pyc,,
|
| 16 |
+
mpmath/__pycache__/identification.cpython-310.pyc,,
|
| 17 |
+
mpmath/__pycache__/math2.cpython-310.pyc,,
|
| 18 |
+
mpmath/__pycache__/rational.cpython-310.pyc,,
|
| 19 |
+
mpmath/__pycache__/usertools.cpython-310.pyc,,
|
| 20 |
+
mpmath/__pycache__/visualization.cpython-310.pyc,,
|
| 21 |
+
mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162
|
| 22 |
+
mpmath/calculus/__pycache__/__init__.cpython-310.pyc,,
|
| 23 |
+
mpmath/calculus/__pycache__/approximation.cpython-310.pyc,,
|
| 24 |
+
mpmath/calculus/__pycache__/calculus.cpython-310.pyc,,
|
| 25 |
+
mpmath/calculus/__pycache__/differentiation.cpython-310.pyc,,
|
| 26 |
+
mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc,,
|
| 27 |
+
mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc,,
|
| 28 |
+
mpmath/calculus/__pycache__/odes.cpython-310.pyc,,
|
| 29 |
+
mpmath/calculus/__pycache__/optimization.cpython-310.pyc,,
|
| 30 |
+
mpmath/calculus/__pycache__/polynomials.cpython-310.pyc,,
|
| 31 |
+
mpmath/calculus/__pycache__/quadrature.cpython-310.pyc,,
|
| 32 |
+
mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817
|
| 33 |
+
mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112
|
| 34 |
+
mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226
|
| 35 |
+
mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306
|
| 36 |
+
mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056
|
| 37 |
+
mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908
|
| 38 |
+
mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856
|
| 39 |
+
mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877
|
| 40 |
+
mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432
|
| 41 |
+
mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985
|
| 42 |
+
mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572
|
| 43 |
+
mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211
|
| 44 |
+
mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452
|
| 45 |
+
mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815
|
| 46 |
+
mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512
|
| 47 |
+
mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330
|
| 48 |
+
mpmath/functions/__pycache__/__init__.cpython-310.pyc,,
|
| 49 |
+
mpmath/functions/__pycache__/bessel.cpython-310.pyc,,
|
| 50 |
+
mpmath/functions/__pycache__/elliptic.cpython-310.pyc,,
|
| 51 |
+
mpmath/functions/__pycache__/expintegrals.cpython-310.pyc,,
|
| 52 |
+
mpmath/functions/__pycache__/factorials.cpython-310.pyc,,
|
| 53 |
+
mpmath/functions/__pycache__/functions.cpython-310.pyc,,
|
| 54 |
+
mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc,,
|
| 55 |
+
mpmath/functions/__pycache__/orthogonal.cpython-310.pyc,,
|
| 56 |
+
mpmath/functions/__pycache__/qfunctions.cpython-310.pyc,,
|
| 57 |
+
mpmath/functions/__pycache__/rszeta.cpython-310.pyc,,
|
| 58 |
+
mpmath/functions/__pycache__/signals.cpython-310.pyc,,
|
| 59 |
+
mpmath/functions/__pycache__/theta.cpython-310.pyc,,
|
| 60 |
+
mpmath/functions/__pycache__/zeta.cpython-310.pyc,,
|
| 61 |
+
mpmath/functions/__pycache__/zetazeros.cpython-310.pyc,,
|
| 62 |
+
mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938
|
| 63 |
+
mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237
|
| 64 |
+
mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644
|
| 65 |
+
mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273
|
| 66 |
+
mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100
|
| 67 |
+
mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570
|
| 68 |
+
mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097
|
| 69 |
+
mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633
|
| 70 |
+
mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184
|
| 71 |
+
mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703
|
| 72 |
+
mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320
|
| 73 |
+
mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410
|
| 74 |
+
mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858
|
| 75 |
+
mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253
|
| 76 |
+
mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790
|
| 77 |
+
mpmath/libmp/__pycache__/__init__.cpython-310.pyc,,
|
| 78 |
+
mpmath/libmp/__pycache__/backend.cpython-310.pyc,,
|
| 79 |
+
mpmath/libmp/__pycache__/gammazeta.cpython-310.pyc,,
|
| 80 |
+
mpmath/libmp/__pycache__/libelefun.cpython-310.pyc,,
|
| 81 |
+
mpmath/libmp/__pycache__/libhyper.cpython-310.pyc,,
|
| 82 |
+
mpmath/libmp/__pycache__/libintmath.cpython-310.pyc,,
|
| 83 |
+
mpmath/libmp/__pycache__/libmpc.cpython-310.pyc,,
|
| 84 |
+
mpmath/libmp/__pycache__/libmpf.cpython-310.pyc,,
|
| 85 |
+
mpmath/libmp/__pycache__/libmpi.cpython-310.pyc,,
|
| 86 |
+
mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360
|
| 87 |
+
mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469
|
| 88 |
+
mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861
|
| 89 |
+
mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624
|
| 90 |
+
mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688
|
| 91 |
+
mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875
|
| 92 |
+
mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021
|
| 93 |
+
mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622
|
| 94 |
+
mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561
|
| 95 |
+
mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94
|
| 96 |
+
mpmath/matrices/__pycache__/__init__.cpython-310.pyc,,
|
| 97 |
+
mpmath/matrices/__pycache__/calculus.cpython-310.pyc,,
|
| 98 |
+
mpmath/matrices/__pycache__/eigen.cpython-310.pyc,,
|
| 99 |
+
mpmath/matrices/__pycache__/eigen_symmetric.cpython-310.pyc,,
|
| 100 |
+
mpmath/matrices/__pycache__/linalg.cpython-310.pyc,,
|
| 101 |
+
mpmath/matrices/__pycache__/matrices.cpython-310.pyc,,
|
| 102 |
+
mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609
|
| 103 |
+
mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394
|
| 104 |
+
mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534
|
| 105 |
+
mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958
|
| 106 |
+
mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331
|
| 107 |
+
mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976
|
| 108 |
+
mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 109 |
+
mpmath/tests/__pycache__/__init__.cpython-310.pyc,,
|
| 110 |
+
mpmath/tests/__pycache__/extratest_gamma.cpython-310.pyc,,
|
| 111 |
+
mpmath/tests/__pycache__/extratest_zeta.cpython-310.pyc,,
|
| 112 |
+
mpmath/tests/__pycache__/runtests.cpython-310.pyc,,
|
| 113 |
+
mpmath/tests/__pycache__/test_basic_ops.cpython-310.pyc,,
|
| 114 |
+
mpmath/tests/__pycache__/test_bitwise.cpython-310.pyc,,
|
| 115 |
+
mpmath/tests/__pycache__/test_calculus.cpython-310.pyc,,
|
| 116 |
+
mpmath/tests/__pycache__/test_compatibility.cpython-310.pyc,,
|
| 117 |
+
mpmath/tests/__pycache__/test_convert.cpython-310.pyc,,
|
| 118 |
+
mpmath/tests/__pycache__/test_diff.cpython-310.pyc,,
|
| 119 |
+
mpmath/tests/__pycache__/test_division.cpython-310.pyc,,
|
| 120 |
+
mpmath/tests/__pycache__/test_eigen.cpython-310.pyc,,
|
| 121 |
+
mpmath/tests/__pycache__/test_eigen_symmetric.cpython-310.pyc,,
|
| 122 |
+
mpmath/tests/__pycache__/test_elliptic.cpython-310.pyc,,
|
| 123 |
+
mpmath/tests/__pycache__/test_fp.cpython-310.pyc,,
|
| 124 |
+
mpmath/tests/__pycache__/test_functions.cpython-310.pyc,,
|
| 125 |
+
mpmath/tests/__pycache__/test_functions2.cpython-310.pyc,,
|
| 126 |
+
mpmath/tests/__pycache__/test_gammazeta.cpython-310.pyc,,
|
| 127 |
+
mpmath/tests/__pycache__/test_hp.cpython-310.pyc,,
|
| 128 |
+
mpmath/tests/__pycache__/test_identify.cpython-310.pyc,,
|
| 129 |
+
mpmath/tests/__pycache__/test_interval.cpython-310.pyc,,
|
| 130 |
+
mpmath/tests/__pycache__/test_levin.cpython-310.pyc,,
|
| 131 |
+
mpmath/tests/__pycache__/test_linalg.cpython-310.pyc,,
|
| 132 |
+
mpmath/tests/__pycache__/test_matrices.cpython-310.pyc,,
|
| 133 |
+
mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc,,
|
| 134 |
+
mpmath/tests/__pycache__/test_ode.cpython-310.pyc,,
|
| 135 |
+
mpmath/tests/__pycache__/test_pickle.cpython-310.pyc,,
|
| 136 |
+
mpmath/tests/__pycache__/test_power.cpython-310.pyc,,
|
| 137 |
+
mpmath/tests/__pycache__/test_quad.cpython-310.pyc,,
|
| 138 |
+
mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc,,
|
| 139 |
+
mpmath/tests/__pycache__/test_special.cpython-310.pyc,,
|
| 140 |
+
mpmath/tests/__pycache__/test_str.cpython-310.pyc,,
|
| 141 |
+
mpmath/tests/__pycache__/test_summation.cpython-310.pyc,,
|
| 142 |
+
mpmath/tests/__pycache__/test_trig.cpython-310.pyc,,
|
| 143 |
+
mpmath/tests/__pycache__/test_visualization.cpython-310.pyc,,
|
| 144 |
+
mpmath/tests/__pycache__/torture.cpython-310.pyc,,
|
| 145 |
+
mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228
|
| 146 |
+
mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003
|
| 147 |
+
mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189
|
| 148 |
+
mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348
|
| 149 |
+
mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686
|
| 150 |
+
mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187
|
| 151 |
+
mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306
|
| 152 |
+
mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834
|
| 153 |
+
mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466
|
| 154 |
+
mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340
|
| 155 |
+
mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905
|
| 156 |
+
mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778
|
| 157 |
+
mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225
|
| 158 |
+
mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997
|
| 159 |
+
mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955
|
| 160 |
+
mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990
|
| 161 |
+
mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917
|
| 162 |
+
mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461
|
| 163 |
+
mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692
|
| 164 |
+
mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527
|
| 165 |
+
mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090
|
| 166 |
+
mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440
|
| 167 |
+
mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944
|
| 168 |
+
mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196
|
| 169 |
+
mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822
|
| 170 |
+
mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401
|
| 171 |
+
mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227
|
| 172 |
+
mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893
|
| 173 |
+
mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132
|
| 174 |
+
mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848
|
| 175 |
+
mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544
|
| 176 |
+
mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035
|
| 177 |
+
mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799
|
| 178 |
+
mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944
|
| 179 |
+
mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868
|
| 180 |
+
mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029
|
| 181 |
+
mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627
|
evalkit_tf437/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.38.4)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
evalkit_tf437/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
mpmath
|
evalkit_tf437/lib/python3.10/site-packages/ninja/__init__.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
import os
|
| 3 |
+
import platform
|
| 4 |
+
import subprocess
|
| 5 |
+
import sys
|
| 6 |
+
|
| 7 |
+
from ._version import version as __version__
|
| 8 |
+
|
| 9 |
+
__all__ = ["__version__", "DATA", "BIN_DIR", "ninja"]
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def __dir__():
|
| 13 |
+
return __all__
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
try:
|
| 17 |
+
from .ninja_syntax import Writer, escape, expand
|
| 18 |
+
except ImportError:
|
| 19 |
+
# Support importing `ninja_syntax` from the source tree
|
| 20 |
+
if not os.path.exists(
|
| 21 |
+
os.path.join(os.path.dirname(__file__), 'ninja_syntax.py')):
|
| 22 |
+
sys.path.insert(0, os.path.abspath(os.path.join(
|
| 23 |
+
os.path.dirname(__file__), '../../Ninja-src/misc')))
|
| 24 |
+
from ninja_syntax import Writer, escape, expand # noqa: F401
|
| 25 |
+
|
| 26 |
+
DATA = os.path.join(os.path.dirname(__file__), 'data')
|
| 27 |
+
|
| 28 |
+
# Support running tests from the source tree
|
| 29 |
+
if not os.path.exists(DATA):
|
| 30 |
+
from skbuild.constants import CMAKE_INSTALL_DIR as SKBUILD_CMAKE_INSTALL_DIR
|
| 31 |
+
from skbuild.constants import set_skbuild_plat_name
|
| 32 |
+
|
| 33 |
+
if platform.system().lower() == "darwin":
|
| 34 |
+
# Since building the project specifying --plat-name or CMAKE_OSX_* variables
|
| 35 |
+
# leads to different SKBUILD_DIR, the code below attempt to guess the most
|
| 36 |
+
# likely plat-name.
|
| 37 |
+
_skbuild_dirs = os.listdir(os.path.join(os.path.dirname(__file__), '..', '..', '_skbuild'))
|
| 38 |
+
if _skbuild_dirs:
|
| 39 |
+
_likely_plat_name = '-'.join(_skbuild_dirs[0].split('-')[:3])
|
| 40 |
+
set_skbuild_plat_name(_likely_plat_name)
|
| 41 |
+
|
| 42 |
+
_data = os.path.abspath(os.path.join(
|
| 43 |
+
os.path.dirname(__file__), '..', '..', SKBUILD_CMAKE_INSTALL_DIR(), 'src/ninja/data'))
|
| 44 |
+
if os.path.exists(_data):
|
| 45 |
+
DATA = _data
|
| 46 |
+
|
| 47 |
+
BIN_DIR = os.path.join(DATA, 'bin')
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _program(name, args):
|
| 51 |
+
return subprocess.call([os.path.join(BIN_DIR, name)] + args, close_fds=False)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def ninja():
|
| 55 |
+
raise SystemExit(_program('ninja', sys.argv[1:]))
|
evalkit_tf437/lib/python3.10/site-packages/ninja/_version.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# file generated by setuptools_scm
|
| 2 |
+
# don't change, don't track in version control
|
| 3 |
+
TYPE_CHECKING = False
|
| 4 |
+
if TYPE_CHECKING:
|
| 5 |
+
from typing import Tuple, Union
|
| 6 |
+
VERSION_TUPLE = Tuple[Union[int, str], ...]
|
| 7 |
+
else:
|
| 8 |
+
VERSION_TUPLE = object
|
| 9 |
+
|
| 10 |
+
version: str
|
| 11 |
+
__version__: str
|
| 12 |
+
__version_tuple__: VERSION_TUPLE
|
| 13 |
+
version_tuple: VERSION_TUPLE
|
| 14 |
+
|
| 15 |
+
__version__ = version = '1.11.1.1'
|
| 16 |
+
__version_tuple__ = version_tuple = (1, 11, 1, 1)
|
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libXau-00ec42fe.so.6.0.0
ADDED
|
Binary file (17 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-image-e82a276d.so.0.0.0
ADDED
|
Binary file (25.6 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-randr-a96a5a87.so.0.1.0
ADDED
|
Binary file (93.9 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-render-637b984a.so.0.0.0
ADDED
|
Binary file (78.1 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-shape-25c2b258.so.0.0.0
ADDED
|
Binary file (21.8 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-shm-7a199f70.so.0.0.0
ADDED
|
Binary file (21.4 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-sync-89374f40.so.1.0.0
ADDED
|
Binary file (35.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-util-4d666913.so.1.0.0
ADDED
|
Binary file (26.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-xinerama-ae147f87.so.0.0.0
ADDED
|
Binary file (17.5 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/__pycache__/_misc.cpython-310.pyc
ADDED
|
Binary file (21.2 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.87 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/boxplot.cpython-310.pyc
ADDED
|
Binary file (13.4 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/core.cpython-310.pyc
ADDED
|
Binary file (50.1 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/hist.cpython-310.pyc
ADDED
|
Binary file (12.8 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/tools.cpython-310.pyc
ADDED
|
Binary file (11.8 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/__init__.py
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# module pyparsing.py
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2003-2022 Paul T. McGuire
|
| 4 |
+
#
|
| 5 |
+
# Permission is hereby granted, free of charge, to any person obtaining
|
| 6 |
+
# a copy of this software and associated documentation files (the
|
| 7 |
+
# "Software"), to deal in the Software without restriction, including
|
| 8 |
+
# without limitation the rights to use, copy, modify, merge, publish,
|
| 9 |
+
# distribute, sublicense, and/or sell copies of the Software, and to
|
| 10 |
+
# permit persons to whom the Software is furnished to do so, subject to
|
| 11 |
+
# the following conditions:
|
| 12 |
+
#
|
| 13 |
+
# The above copyright notice and this permission notice shall be
|
| 14 |
+
# included in all copies or substantial portions of the Software.
|
| 15 |
+
#
|
| 16 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 17 |
+
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 18 |
+
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
| 19 |
+
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
| 20 |
+
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
| 21 |
+
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
| 22 |
+
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 23 |
+
#
|
| 24 |
+
|
| 25 |
+
__doc__ = """
|
| 26 |
+
pyparsing module - Classes and methods to define and execute parsing grammars
|
| 27 |
+
=============================================================================
|
| 28 |
+
|
| 29 |
+
The pyparsing module is an alternative approach to creating and
|
| 30 |
+
executing simple grammars, vs. the traditional lex/yacc approach, or the
|
| 31 |
+
use of regular expressions. With pyparsing, you don't need to learn
|
| 32 |
+
a new syntax for defining grammars or matching expressions - the parsing
|
| 33 |
+
module provides a library of classes that you use to construct the
|
| 34 |
+
grammar directly in Python.
|
| 35 |
+
|
| 36 |
+
Here is a program to parse "Hello, World!" (or any greeting of the form
|
| 37 |
+
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
|
| 38 |
+
:class:`Literal`, and :class:`And` elements
|
| 39 |
+
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
|
| 40 |
+
and the strings are auto-converted to :class:`Literal` expressions)::
|
| 41 |
+
|
| 42 |
+
from pyparsing import Word, alphas
|
| 43 |
+
|
| 44 |
+
# define grammar of a greeting
|
| 45 |
+
greet = Word(alphas) + "," + Word(alphas) + "!"
|
| 46 |
+
|
| 47 |
+
hello = "Hello, World!"
|
| 48 |
+
print(hello, "->", greet.parse_string(hello))
|
| 49 |
+
|
| 50 |
+
The program outputs the following::
|
| 51 |
+
|
| 52 |
+
Hello, World! -> ['Hello', ',', 'World', '!']
|
| 53 |
+
|
| 54 |
+
The Python representation of the grammar is quite readable, owing to the
|
| 55 |
+
self-explanatory class names, and the use of :class:`'+'<And>`,
|
| 56 |
+
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
|
| 57 |
+
|
| 58 |
+
The :class:`ParseResults` object returned from
|
| 59 |
+
:class:`ParserElement.parse_string` can be
|
| 60 |
+
accessed as a nested list, a dictionary, or an object with named
|
| 61 |
+
attributes.
|
| 62 |
+
|
| 63 |
+
The pyparsing module handles some of the problems that are typically
|
| 64 |
+
vexing when writing text parsers:
|
| 65 |
+
|
| 66 |
+
- extra or missing whitespace (the above program will also handle
|
| 67 |
+
"Hello,World!", "Hello , World !", etc.)
|
| 68 |
+
- quoted strings
|
| 69 |
+
- embedded comments
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
Getting Started -
|
| 73 |
+
-----------------
|
| 74 |
+
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
|
| 75 |
+
see the base classes that most other pyparsing
|
| 76 |
+
classes inherit from. Use the docstrings for examples of how to:
|
| 77 |
+
|
| 78 |
+
- construct literal match expressions from :class:`Literal` and
|
| 79 |
+
:class:`CaselessLiteral` classes
|
| 80 |
+
- construct character word-group expressions using the :class:`Word`
|
| 81 |
+
class
|
| 82 |
+
- see how to create repetitive expressions using :class:`ZeroOrMore`
|
| 83 |
+
and :class:`OneOrMore` classes
|
| 84 |
+
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
|
| 85 |
+
and :class:`'&'<Each>` operators to combine simple expressions into
|
| 86 |
+
more complex ones
|
| 87 |
+
- associate names with your parsed results using
|
| 88 |
+
:class:`ParserElement.set_results_name`
|
| 89 |
+
- access the parsed data, which is returned as a :class:`ParseResults`
|
| 90 |
+
object
|
| 91 |
+
- find some helpful expression short-cuts like :class:`DelimitedList`
|
| 92 |
+
and :class:`one_of`
|
| 93 |
+
- find more useful common expressions in the :class:`pyparsing_common`
|
| 94 |
+
namespace class
|
| 95 |
+
"""
|
| 96 |
+
from typing import NamedTuple
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class version_info(NamedTuple):
|
| 100 |
+
major: int
|
| 101 |
+
minor: int
|
| 102 |
+
micro: int
|
| 103 |
+
releaselevel: str
|
| 104 |
+
serial: int
|
| 105 |
+
|
| 106 |
+
@property
|
| 107 |
+
def __version__(self):
|
| 108 |
+
return (
|
| 109 |
+
f"{self.major}.{self.minor}.{self.micro}"
|
| 110 |
+
+ (
|
| 111 |
+
f"{'r' if self.releaselevel[0] == 'c' else ''}{self.releaselevel[0]}{self.serial}",
|
| 112 |
+
"",
|
| 113 |
+
)[self.releaselevel == "final"]
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
def __str__(self):
|
| 117 |
+
return f"{__name__} {self.__version__} / {__version_time__}"
|
| 118 |
+
|
| 119 |
+
def __repr__(self):
|
| 120 |
+
return f"{__name__}.{type(self).__name__}({', '.join('{}={!r}'.format(*nv) for nv in zip(self._fields, self))})"
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
__version_info__ = version_info(3, 2, 0, "final", 1)
|
| 124 |
+
__version_time__ = "13 Oct 2024 09:46 UTC"
|
| 125 |
+
__version__ = __version_info__.__version__
|
| 126 |
+
__versionTime__ = __version_time__
|
| 127 |
+
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
|
| 128 |
+
|
| 129 |
+
from .util import *
|
| 130 |
+
from .exceptions import *
|
| 131 |
+
from .actions import *
|
| 132 |
+
from .core import __diag__, __compat__
|
| 133 |
+
from .results import *
|
| 134 |
+
from .core import *
|
| 135 |
+
from .core import _builtin_exprs as core_builtin_exprs
|
| 136 |
+
from .helpers import *
|
| 137 |
+
from .helpers import _builtin_exprs as helper_builtin_exprs
|
| 138 |
+
|
| 139 |
+
from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
|
| 140 |
+
from .testing import pyparsing_test as testing
|
| 141 |
+
from .common import (
|
| 142 |
+
pyparsing_common as common,
|
| 143 |
+
_builtin_exprs as common_builtin_exprs,
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
# Compatibility synonyms
|
| 147 |
+
if "pyparsing_unicode" not in globals():
|
| 148 |
+
pyparsing_unicode = unicode # type: ignore[misc]
|
| 149 |
+
if "pyparsing_common" not in globals():
|
| 150 |
+
pyparsing_common = common
|
| 151 |
+
if "pyparsing_test" not in globals():
|
| 152 |
+
pyparsing_test = testing
|
| 153 |
+
|
| 154 |
+
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
__all__ = [
|
| 158 |
+
"__version__",
|
| 159 |
+
"__version_time__",
|
| 160 |
+
"__author__",
|
| 161 |
+
"__compat__",
|
| 162 |
+
"__diag__",
|
| 163 |
+
"And",
|
| 164 |
+
"AtLineStart",
|
| 165 |
+
"AtStringStart",
|
| 166 |
+
"CaselessKeyword",
|
| 167 |
+
"CaselessLiteral",
|
| 168 |
+
"CharsNotIn",
|
| 169 |
+
"CloseMatch",
|
| 170 |
+
"Combine",
|
| 171 |
+
"DelimitedList",
|
| 172 |
+
"Dict",
|
| 173 |
+
"Each",
|
| 174 |
+
"Empty",
|
| 175 |
+
"FollowedBy",
|
| 176 |
+
"Forward",
|
| 177 |
+
"GoToColumn",
|
| 178 |
+
"Group",
|
| 179 |
+
"IndentedBlock",
|
| 180 |
+
"Keyword",
|
| 181 |
+
"LineEnd",
|
| 182 |
+
"LineStart",
|
| 183 |
+
"Literal",
|
| 184 |
+
"Located",
|
| 185 |
+
"PrecededBy",
|
| 186 |
+
"MatchFirst",
|
| 187 |
+
"NoMatch",
|
| 188 |
+
"NotAny",
|
| 189 |
+
"OneOrMore",
|
| 190 |
+
"OnlyOnce",
|
| 191 |
+
"OpAssoc",
|
| 192 |
+
"Opt",
|
| 193 |
+
"Optional",
|
| 194 |
+
"Or",
|
| 195 |
+
"ParseBaseException",
|
| 196 |
+
"ParseElementEnhance",
|
| 197 |
+
"ParseException",
|
| 198 |
+
"ParseExpression",
|
| 199 |
+
"ParseFatalException",
|
| 200 |
+
"ParseResults",
|
| 201 |
+
"ParseSyntaxException",
|
| 202 |
+
"ParserElement",
|
| 203 |
+
"PositionToken",
|
| 204 |
+
"QuotedString",
|
| 205 |
+
"RecursiveGrammarException",
|
| 206 |
+
"Regex",
|
| 207 |
+
"SkipTo",
|
| 208 |
+
"StringEnd",
|
| 209 |
+
"StringStart",
|
| 210 |
+
"Suppress",
|
| 211 |
+
"Tag",
|
| 212 |
+
"Token",
|
| 213 |
+
"TokenConverter",
|
| 214 |
+
"White",
|
| 215 |
+
"Word",
|
| 216 |
+
"WordEnd",
|
| 217 |
+
"WordStart",
|
| 218 |
+
"ZeroOrMore",
|
| 219 |
+
"Char",
|
| 220 |
+
"alphanums",
|
| 221 |
+
"alphas",
|
| 222 |
+
"alphas8bit",
|
| 223 |
+
"any_close_tag",
|
| 224 |
+
"any_open_tag",
|
| 225 |
+
"autoname_elements",
|
| 226 |
+
"c_style_comment",
|
| 227 |
+
"col",
|
| 228 |
+
"common_html_entity",
|
| 229 |
+
"condition_as_parse_action",
|
| 230 |
+
"counted_array",
|
| 231 |
+
"cpp_style_comment",
|
| 232 |
+
"dbl_quoted_string",
|
| 233 |
+
"dbl_slash_comment",
|
| 234 |
+
"delimited_list",
|
| 235 |
+
"dict_of",
|
| 236 |
+
"empty",
|
| 237 |
+
"hexnums",
|
| 238 |
+
"html_comment",
|
| 239 |
+
"identchars",
|
| 240 |
+
"identbodychars",
|
| 241 |
+
"infix_notation",
|
| 242 |
+
"java_style_comment",
|
| 243 |
+
"line",
|
| 244 |
+
"line_end",
|
| 245 |
+
"line_start",
|
| 246 |
+
"lineno",
|
| 247 |
+
"make_html_tags",
|
| 248 |
+
"make_xml_tags",
|
| 249 |
+
"match_only_at_col",
|
| 250 |
+
"match_previous_expr",
|
| 251 |
+
"match_previous_literal",
|
| 252 |
+
"nested_expr",
|
| 253 |
+
"null_debug_action",
|
| 254 |
+
"nums",
|
| 255 |
+
"one_of",
|
| 256 |
+
"original_text_for",
|
| 257 |
+
"printables",
|
| 258 |
+
"punc8bit",
|
| 259 |
+
"pyparsing_common",
|
| 260 |
+
"pyparsing_test",
|
| 261 |
+
"pyparsing_unicode",
|
| 262 |
+
"python_style_comment",
|
| 263 |
+
"quoted_string",
|
| 264 |
+
"remove_quotes",
|
| 265 |
+
"replace_with",
|
| 266 |
+
"replace_html_entity",
|
| 267 |
+
"rest_of_line",
|
| 268 |
+
"sgl_quoted_string",
|
| 269 |
+
"srange",
|
| 270 |
+
"string_end",
|
| 271 |
+
"string_start",
|
| 272 |
+
"token_map",
|
| 273 |
+
"trace_parse_action",
|
| 274 |
+
"ungroup",
|
| 275 |
+
"unicode_set",
|
| 276 |
+
"unicode_string",
|
| 277 |
+
"with_attribute",
|
| 278 |
+
"with_class",
|
| 279 |
+
# pre-PEP8 compatibility names
|
| 280 |
+
"__versionTime__",
|
| 281 |
+
"anyCloseTag",
|
| 282 |
+
"anyOpenTag",
|
| 283 |
+
"cStyleComment",
|
| 284 |
+
"commonHTMLEntity",
|
| 285 |
+
"conditionAsParseAction",
|
| 286 |
+
"countedArray",
|
| 287 |
+
"cppStyleComment",
|
| 288 |
+
"dblQuotedString",
|
| 289 |
+
"dblSlashComment",
|
| 290 |
+
"delimitedList",
|
| 291 |
+
"dictOf",
|
| 292 |
+
"htmlComment",
|
| 293 |
+
"indentedBlock",
|
| 294 |
+
"infixNotation",
|
| 295 |
+
"javaStyleComment",
|
| 296 |
+
"lineEnd",
|
| 297 |
+
"lineStart",
|
| 298 |
+
"locatedExpr",
|
| 299 |
+
"makeHTMLTags",
|
| 300 |
+
"makeXMLTags",
|
| 301 |
+
"matchOnlyAtCol",
|
| 302 |
+
"matchPreviousExpr",
|
| 303 |
+
"matchPreviousLiteral",
|
| 304 |
+
"nestedExpr",
|
| 305 |
+
"nullDebugAction",
|
| 306 |
+
"oneOf",
|
| 307 |
+
"opAssoc",
|
| 308 |
+
"originalTextFor",
|
| 309 |
+
"pythonStyleComment",
|
| 310 |
+
"quotedString",
|
| 311 |
+
"removeQuotes",
|
| 312 |
+
"replaceHTMLEntity",
|
| 313 |
+
"replaceWith",
|
| 314 |
+
"restOfLine",
|
| 315 |
+
"sglQuotedString",
|
| 316 |
+
"stringEnd",
|
| 317 |
+
"stringStart",
|
| 318 |
+
"tokenMap",
|
| 319 |
+
"traceParseAction",
|
| 320 |
+
"unicodeString",
|
| 321 |
+
"withAttribute",
|
| 322 |
+
"withClass",
|
| 323 |
+
"common",
|
| 324 |
+
"unicode",
|
| 325 |
+
"testing",
|
| 326 |
+
]
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (7.09 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/actions.cpython-310.pyc
ADDED
|
Binary file (7.21 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/exceptions.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/helpers.cpython-310.pyc
ADDED
|
Binary file (34.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/results.cpython-310.pyc
ADDED
|
Binary file (26.2 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/__pycache__/unicode.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/actions.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# actions.py
|
| 2 |
+
|
| 3 |
+
from .exceptions import ParseException
|
| 4 |
+
from .util import col, replaced_by_pep8
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class OnlyOnce:
|
| 8 |
+
"""
|
| 9 |
+
Wrapper for parse actions, to ensure they are only called once.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
def __init__(self, method_call):
|
| 13 |
+
from .core import _trim_arity
|
| 14 |
+
|
| 15 |
+
self.callable = _trim_arity(method_call)
|
| 16 |
+
self.called = False
|
| 17 |
+
|
| 18 |
+
def __call__(self, s, l, t):
|
| 19 |
+
if not self.called:
|
| 20 |
+
results = self.callable(s, l, t)
|
| 21 |
+
self.called = True
|
| 22 |
+
return results
|
| 23 |
+
raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
|
| 24 |
+
|
| 25 |
+
def reset(self):
|
| 26 |
+
"""
|
| 27 |
+
Allow the associated parse action to be called once more.
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
self.called = False
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def match_only_at_col(n):
|
| 34 |
+
"""
|
| 35 |
+
Helper method for defining parse actions that require matching at
|
| 36 |
+
a specific column in the input text.
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
def verify_col(strg, locn, toks):
|
| 40 |
+
if col(locn, strg) != n:
|
| 41 |
+
raise ParseException(strg, locn, f"matched token not at column {n}")
|
| 42 |
+
|
| 43 |
+
return verify_col
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def replace_with(repl_str):
|
| 47 |
+
"""
|
| 48 |
+
Helper method for common parse actions that simply return
|
| 49 |
+
a literal value. Especially useful when used with
|
| 50 |
+
:class:`transform_string<ParserElement.transform_string>` ().
|
| 51 |
+
|
| 52 |
+
Example::
|
| 53 |
+
|
| 54 |
+
num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
|
| 55 |
+
na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
|
| 56 |
+
term = na | num
|
| 57 |
+
|
| 58 |
+
term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
|
| 59 |
+
"""
|
| 60 |
+
return lambda s, l, t: [repl_str]
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def remove_quotes(s, l, t):
|
| 64 |
+
"""
|
| 65 |
+
Helper parse action for removing quotation marks from parsed
|
| 66 |
+
quoted strings.
|
| 67 |
+
|
| 68 |
+
Example::
|
| 69 |
+
|
| 70 |
+
# by default, quotation marks are included in parsed results
|
| 71 |
+
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
|
| 72 |
+
|
| 73 |
+
# use remove_quotes to strip quotation marks from parsed results
|
| 74 |
+
quoted_string.set_parse_action(remove_quotes)
|
| 75 |
+
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
|
| 76 |
+
"""
|
| 77 |
+
return t[0][1:-1]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def with_attribute(*args, **attr_dict):
|
| 81 |
+
"""
|
| 82 |
+
Helper to create a validating parse action to be used with start
|
| 83 |
+
tags created with :class:`make_xml_tags` or
|
| 84 |
+
:class:`make_html_tags`. Use ``with_attribute`` to qualify
|
| 85 |
+
a starting tag with a required attribute value, to avoid false
|
| 86 |
+
matches on common tags such as ``<TD>`` or ``<DIV>``.
|
| 87 |
+
|
| 88 |
+
Call ``with_attribute`` with a series of attribute names and
|
| 89 |
+
values. Specify the list of filter attributes names and values as:
|
| 90 |
+
|
| 91 |
+
- keyword arguments, as in ``(align="right")``, or
|
| 92 |
+
- as an explicit dict with ``**`` operator, when an attribute
|
| 93 |
+
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
|
| 94 |
+
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
|
| 95 |
+
|
| 96 |
+
For attribute names with a namespace prefix, you must use the second
|
| 97 |
+
form. Attribute names are matched insensitive to upper/lower case.
|
| 98 |
+
|
| 99 |
+
If just testing for ``class`` (with or without a namespace), use
|
| 100 |
+
:class:`with_class`.
|
| 101 |
+
|
| 102 |
+
To verify that the attribute exists, but without specifying a value,
|
| 103 |
+
pass ``with_attribute.ANY_VALUE`` as the value.
|
| 104 |
+
|
| 105 |
+
Example::
|
| 106 |
+
|
| 107 |
+
html = '''
|
| 108 |
+
<div>
|
| 109 |
+
Some text
|
| 110 |
+
<div type="grid">1 4 0 1 0</div>
|
| 111 |
+
<div type="graph">1,3 2,3 1,1</div>
|
| 112 |
+
<div>this has no type</div>
|
| 113 |
+
</div>
|
| 114 |
+
'''
|
| 115 |
+
div,div_end = make_html_tags("div")
|
| 116 |
+
|
| 117 |
+
# only match div tag having a type attribute with value "grid"
|
| 118 |
+
div_grid = div().set_parse_action(with_attribute(type="grid"))
|
| 119 |
+
grid_expr = div_grid + SkipTo(div | div_end)("body")
|
| 120 |
+
for grid_header in grid_expr.search_string(html):
|
| 121 |
+
print(grid_header.body)
|
| 122 |
+
|
| 123 |
+
# construct a match with any div tag having a type attribute, regardless of the value
|
| 124 |
+
div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
|
| 125 |
+
div_expr = div_any_type + SkipTo(div | div_end)("body")
|
| 126 |
+
for div_header in div_expr.search_string(html):
|
| 127 |
+
print(div_header.body)
|
| 128 |
+
|
| 129 |
+
prints::
|
| 130 |
+
|
| 131 |
+
1 4 0 1 0
|
| 132 |
+
|
| 133 |
+
1 4 0 1 0
|
| 134 |
+
1,3 2,3 1,1
|
| 135 |
+
"""
|
| 136 |
+
if args:
|
| 137 |
+
attrs = args[:]
|
| 138 |
+
else:
|
| 139 |
+
attrs = attr_dict.items()
|
| 140 |
+
attrs = [(k, v) for k, v in attrs]
|
| 141 |
+
|
| 142 |
+
def pa(s, l, tokens):
|
| 143 |
+
for attrName, attrValue in attrs:
|
| 144 |
+
if attrName not in tokens:
|
| 145 |
+
raise ParseException(s, l, "no matching attribute " + attrName)
|
| 146 |
+
if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
|
| 147 |
+
raise ParseException(
|
| 148 |
+
s,
|
| 149 |
+
l,
|
| 150 |
+
f"attribute {attrName!r} has value {tokens[attrName]!r}, must be {attrValue!r}",
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
return pa
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
with_attribute.ANY_VALUE = object() # type: ignore [attr-defined]
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def with_class(classname, namespace=""):
|
| 160 |
+
"""
|
| 161 |
+
Simplified version of :class:`with_attribute` when
|
| 162 |
+
matching on a div class - made difficult because ``class`` is
|
| 163 |
+
a reserved word in Python.
|
| 164 |
+
|
| 165 |
+
Example::
|
| 166 |
+
|
| 167 |
+
html = '''
|
| 168 |
+
<div>
|
| 169 |
+
Some text
|
| 170 |
+
<div class="grid">1 4 0 1 0</div>
|
| 171 |
+
<div class="graph">1,3 2,3 1,1</div>
|
| 172 |
+
<div>this <div> has no class</div>
|
| 173 |
+
</div>
|
| 174 |
+
|
| 175 |
+
'''
|
| 176 |
+
div,div_end = make_html_tags("div")
|
| 177 |
+
div_grid = div().set_parse_action(with_class("grid"))
|
| 178 |
+
|
| 179 |
+
grid_expr = div_grid + SkipTo(div | div_end)("body")
|
| 180 |
+
for grid_header in grid_expr.search_string(html):
|
| 181 |
+
print(grid_header.body)
|
| 182 |
+
|
| 183 |
+
div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE))
|
| 184 |
+
div_expr = div_any_type + SkipTo(div | div_end)("body")
|
| 185 |
+
for div_header in div_expr.search_string(html):
|
| 186 |
+
print(div_header.body)
|
| 187 |
+
|
| 188 |
+
prints::
|
| 189 |
+
|
| 190 |
+
1 4 0 1 0
|
| 191 |
+
|
| 192 |
+
1 4 0 1 0
|
| 193 |
+
1,3 2,3 1,1
|
| 194 |
+
"""
|
| 195 |
+
classattr = f"{namespace}:class" if namespace else "class"
|
| 196 |
+
return with_attribute(**{classattr: classname})
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
# Compatibility synonyms
|
| 200 |
+
# fmt: off
|
| 201 |
+
replaceWith = replaced_by_pep8("replaceWith", replace_with)
|
| 202 |
+
removeQuotes = replaced_by_pep8("removeQuotes", remove_quotes)
|
| 203 |
+
withAttribute = replaced_by_pep8("withAttribute", with_attribute)
|
| 204 |
+
withClass = replaced_by_pep8("withClass", with_class)
|
| 205 |
+
matchOnlyAtCol = replaced_by_pep8("matchOnlyAtCol", match_only_at_col)
|
| 206 |
+
# fmt: on
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/common.py
ADDED
|
@@ -0,0 +1,434 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# common.py
|
| 2 |
+
from .core import *
|
| 3 |
+
from .helpers import DelimitedList, any_open_tag, any_close_tag
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# some other useful expressions - using lower-case class name since we are really using this as a namespace
|
| 8 |
+
class pyparsing_common:
|
| 9 |
+
"""Here are some common low-level expressions that may be useful in
|
| 10 |
+
jump-starting parser development:
|
| 11 |
+
|
| 12 |
+
- numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
|
| 13 |
+
:class:`scientific notation<sci_real>`)
|
| 14 |
+
- common :class:`programming identifiers<identifier>`
|
| 15 |
+
- network addresses (:class:`MAC<mac_address>`,
|
| 16 |
+
:class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
|
| 17 |
+
- ISO8601 :class:`dates<iso8601_date>` and
|
| 18 |
+
:class:`datetime<iso8601_datetime>`
|
| 19 |
+
- :class:`UUID<uuid>`
|
| 20 |
+
- :class:`comma-separated list<comma_separated_list>`
|
| 21 |
+
- :class:`url`
|
| 22 |
+
|
| 23 |
+
Parse actions:
|
| 24 |
+
|
| 25 |
+
- :class:`convert_to_integer`
|
| 26 |
+
- :class:`convert_to_float`
|
| 27 |
+
- :class:`convert_to_date`
|
| 28 |
+
- :class:`convert_to_datetime`
|
| 29 |
+
- :class:`strip_html_tags`
|
| 30 |
+
- :class:`upcase_tokens`
|
| 31 |
+
- :class:`downcase_tokens`
|
| 32 |
+
|
| 33 |
+
Example::
|
| 34 |
+
|
| 35 |
+
pyparsing_common.number.run_tests('''
|
| 36 |
+
# any int or real number, returned as the appropriate type
|
| 37 |
+
100
|
| 38 |
+
-100
|
| 39 |
+
+100
|
| 40 |
+
3.14159
|
| 41 |
+
6.02e23
|
| 42 |
+
1e-12
|
| 43 |
+
''')
|
| 44 |
+
|
| 45 |
+
pyparsing_common.fnumber.run_tests('''
|
| 46 |
+
# any int or real number, returned as float
|
| 47 |
+
100
|
| 48 |
+
-100
|
| 49 |
+
+100
|
| 50 |
+
3.14159
|
| 51 |
+
6.02e23
|
| 52 |
+
1e-12
|
| 53 |
+
''')
|
| 54 |
+
|
| 55 |
+
pyparsing_common.hex_integer.run_tests('''
|
| 56 |
+
# hex numbers
|
| 57 |
+
100
|
| 58 |
+
FF
|
| 59 |
+
''')
|
| 60 |
+
|
| 61 |
+
pyparsing_common.fraction.run_tests('''
|
| 62 |
+
# fractions
|
| 63 |
+
1/2
|
| 64 |
+
-3/4
|
| 65 |
+
''')
|
| 66 |
+
|
| 67 |
+
pyparsing_common.mixed_integer.run_tests('''
|
| 68 |
+
# mixed fractions
|
| 69 |
+
1
|
| 70 |
+
1/2
|
| 71 |
+
-3/4
|
| 72 |
+
1-3/4
|
| 73 |
+
''')
|
| 74 |
+
|
| 75 |
+
import uuid
|
| 76 |
+
pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID))
|
| 77 |
+
pyparsing_common.uuid.run_tests('''
|
| 78 |
+
# uuid
|
| 79 |
+
12345678-1234-5678-1234-567812345678
|
| 80 |
+
''')
|
| 81 |
+
|
| 82 |
+
prints::
|
| 83 |
+
|
| 84 |
+
# any int or real number, returned as the appropriate type
|
| 85 |
+
100
|
| 86 |
+
[100]
|
| 87 |
+
|
| 88 |
+
-100
|
| 89 |
+
[-100]
|
| 90 |
+
|
| 91 |
+
+100
|
| 92 |
+
[100]
|
| 93 |
+
|
| 94 |
+
3.14159
|
| 95 |
+
[3.14159]
|
| 96 |
+
|
| 97 |
+
6.02e23
|
| 98 |
+
[6.02e+23]
|
| 99 |
+
|
| 100 |
+
1e-12
|
| 101 |
+
[1e-12]
|
| 102 |
+
|
| 103 |
+
# any int or real number, returned as float
|
| 104 |
+
100
|
| 105 |
+
[100.0]
|
| 106 |
+
|
| 107 |
+
-100
|
| 108 |
+
[-100.0]
|
| 109 |
+
|
| 110 |
+
+100
|
| 111 |
+
[100.0]
|
| 112 |
+
|
| 113 |
+
3.14159
|
| 114 |
+
[3.14159]
|
| 115 |
+
|
| 116 |
+
6.02e23
|
| 117 |
+
[6.02e+23]
|
| 118 |
+
|
| 119 |
+
1e-12
|
| 120 |
+
[1e-12]
|
| 121 |
+
|
| 122 |
+
# hex numbers
|
| 123 |
+
100
|
| 124 |
+
[256]
|
| 125 |
+
|
| 126 |
+
FF
|
| 127 |
+
[255]
|
| 128 |
+
|
| 129 |
+
# fractions
|
| 130 |
+
1/2
|
| 131 |
+
[0.5]
|
| 132 |
+
|
| 133 |
+
-3/4
|
| 134 |
+
[-0.75]
|
| 135 |
+
|
| 136 |
+
# mixed fractions
|
| 137 |
+
1
|
| 138 |
+
[1]
|
| 139 |
+
|
| 140 |
+
1/2
|
| 141 |
+
[0.5]
|
| 142 |
+
|
| 143 |
+
-3/4
|
| 144 |
+
[-0.75]
|
| 145 |
+
|
| 146 |
+
1-3/4
|
| 147 |
+
[1.75]
|
| 148 |
+
|
| 149 |
+
# uuid
|
| 150 |
+
12345678-1234-5678-1234-567812345678
|
| 151 |
+
[UUID('12345678-1234-5678-1234-567812345678')]
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
convert_to_integer = token_map(int)
|
| 155 |
+
"""
|
| 156 |
+
Parse action for converting parsed integers to Python int
|
| 157 |
+
"""
|
| 158 |
+
|
| 159 |
+
convert_to_float = token_map(float)
|
| 160 |
+
"""
|
| 161 |
+
Parse action for converting parsed numbers to Python float
|
| 162 |
+
"""
|
| 163 |
+
|
| 164 |
+
integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
|
| 165 |
+
"""expression that parses an unsigned integer, returns an int"""
|
| 166 |
+
|
| 167 |
+
hex_integer = (
|
| 168 |
+
Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
|
| 169 |
+
)
|
| 170 |
+
"""expression that parses a hexadecimal integer, returns an int"""
|
| 171 |
+
|
| 172 |
+
signed_integer = (
|
| 173 |
+
Regex(r"[+-]?\d+")
|
| 174 |
+
.set_name("signed integer")
|
| 175 |
+
.set_parse_action(convert_to_integer)
|
| 176 |
+
)
|
| 177 |
+
"""expression that parses an integer with optional leading sign, returns an int"""
|
| 178 |
+
|
| 179 |
+
fraction = (
|
| 180 |
+
signed_integer().set_parse_action(convert_to_float)
|
| 181 |
+
+ "/"
|
| 182 |
+
+ signed_integer().set_parse_action(convert_to_float)
|
| 183 |
+
).set_name("fraction")
|
| 184 |
+
"""fractional expression of an integer divided by an integer, returns a float"""
|
| 185 |
+
fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
|
| 186 |
+
|
| 187 |
+
mixed_integer = (
|
| 188 |
+
fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
|
| 189 |
+
).set_name("fraction or mixed integer-fraction")
|
| 190 |
+
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
|
| 191 |
+
mixed_integer.add_parse_action(sum)
|
| 192 |
+
|
| 193 |
+
real = (
|
| 194 |
+
Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
|
| 195 |
+
.set_name("real number")
|
| 196 |
+
.set_parse_action(convert_to_float)
|
| 197 |
+
)
|
| 198 |
+
"""expression that parses a floating point number and returns a float"""
|
| 199 |
+
|
| 200 |
+
sci_real = (
|
| 201 |
+
Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
|
| 202 |
+
.set_name("real number with scientific notation")
|
| 203 |
+
.set_parse_action(convert_to_float)
|
| 204 |
+
)
|
| 205 |
+
"""expression that parses a floating point number with optional
|
| 206 |
+
scientific notation and returns a float"""
|
| 207 |
+
|
| 208 |
+
# streamlining this expression makes the docs nicer-looking
|
| 209 |
+
number = (sci_real | real | signed_integer).set_name("number").streamline()
|
| 210 |
+
"""any numeric expression, returns the corresponding Python type"""
|
| 211 |
+
|
| 212 |
+
fnumber = (
|
| 213 |
+
Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
|
| 214 |
+
.set_name("fnumber")
|
| 215 |
+
.set_parse_action(convert_to_float)
|
| 216 |
+
)
|
| 217 |
+
"""any int or real number, returned as float"""
|
| 218 |
+
|
| 219 |
+
ieee_float = (
|
| 220 |
+
Regex(r"(?i)[+-]?((\d+\.?\d*(e[+-]?\d+)?)|nan|inf(inity)?)")
|
| 221 |
+
.set_name("ieee_float")
|
| 222 |
+
.set_parse_action(convert_to_float)
|
| 223 |
+
)
|
| 224 |
+
"""any floating-point literal (int, real number, infinity, or NaN), returned as float"""
|
| 225 |
+
|
| 226 |
+
identifier = Word(identchars, identbodychars).set_name("identifier")
|
| 227 |
+
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
|
| 228 |
+
|
| 229 |
+
ipv4_address = Regex(
|
| 230 |
+
r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
|
| 231 |
+
).set_name("IPv4 address")
|
| 232 |
+
"IPv4 address (``0.0.0.0 - 255.255.255.255``)"
|
| 233 |
+
|
| 234 |
+
_ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
|
| 235 |
+
_full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
|
| 236 |
+
"full IPv6 address"
|
| 237 |
+
)
|
| 238 |
+
_short_ipv6_address = (
|
| 239 |
+
Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
|
| 240 |
+
+ "::"
|
| 241 |
+
+ Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
|
| 242 |
+
).set_name("short IPv6 address")
|
| 243 |
+
_short_ipv6_address.add_condition(
|
| 244 |
+
lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
|
| 245 |
+
)
|
| 246 |
+
_mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
|
| 247 |
+
ipv6_address = Combine(
|
| 248 |
+
(_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
|
| 249 |
+
"IPv6 address"
|
| 250 |
+
)
|
| 251 |
+
).set_name("IPv6 address")
|
| 252 |
+
"IPv6 address (long, short, or mixed form)"
|
| 253 |
+
|
| 254 |
+
mac_address = Regex(
|
| 255 |
+
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
|
| 256 |
+
).set_name("MAC address")
|
| 257 |
+
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
|
| 258 |
+
|
| 259 |
+
@staticmethod
|
| 260 |
+
def convert_to_date(fmt: str = "%Y-%m-%d"):
|
| 261 |
+
"""
|
| 262 |
+
Helper to create a parse action for converting parsed date string to Python datetime.date
|
| 263 |
+
|
| 264 |
+
Params -
|
| 265 |
+
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
|
| 266 |
+
|
| 267 |
+
Example::
|
| 268 |
+
|
| 269 |
+
date_expr = pyparsing_common.iso8601_date.copy()
|
| 270 |
+
date_expr.set_parse_action(pyparsing_common.convert_to_date())
|
| 271 |
+
print(date_expr.parse_string("1999-12-31"))
|
| 272 |
+
|
| 273 |
+
prints::
|
| 274 |
+
|
| 275 |
+
[datetime.date(1999, 12, 31)]
|
| 276 |
+
"""
|
| 277 |
+
|
| 278 |
+
def cvt_fn(ss, ll, tt):
|
| 279 |
+
try:
|
| 280 |
+
return datetime.strptime(tt[0], fmt).date()
|
| 281 |
+
except ValueError as ve:
|
| 282 |
+
raise ParseException(ss, ll, str(ve))
|
| 283 |
+
|
| 284 |
+
return cvt_fn
|
| 285 |
+
|
| 286 |
+
@staticmethod
|
| 287 |
+
def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
|
| 288 |
+
"""Helper to create a parse action for converting parsed
|
| 289 |
+
datetime string to Python datetime.datetime
|
| 290 |
+
|
| 291 |
+
Params -
|
| 292 |
+
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
|
| 293 |
+
|
| 294 |
+
Example::
|
| 295 |
+
|
| 296 |
+
dt_expr = pyparsing_common.iso8601_datetime.copy()
|
| 297 |
+
dt_expr.set_parse_action(pyparsing_common.convert_to_datetime())
|
| 298 |
+
print(dt_expr.parse_string("1999-12-31T23:59:59.999"))
|
| 299 |
+
|
| 300 |
+
prints::
|
| 301 |
+
|
| 302 |
+
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
|
| 303 |
+
"""
|
| 304 |
+
|
| 305 |
+
def cvt_fn(s, l, t):
|
| 306 |
+
try:
|
| 307 |
+
return datetime.strptime(t[0], fmt)
|
| 308 |
+
except ValueError as ve:
|
| 309 |
+
raise ParseException(s, l, str(ve))
|
| 310 |
+
|
| 311 |
+
return cvt_fn
|
| 312 |
+
|
| 313 |
+
iso8601_date = Regex(
|
| 314 |
+
r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
|
| 315 |
+
).set_name("ISO8601 date")
|
| 316 |
+
"ISO8601 date (``yyyy-mm-dd``)"
|
| 317 |
+
|
| 318 |
+
iso8601_datetime = Regex(
|
| 319 |
+
r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
|
| 320 |
+
).set_name("ISO8601 datetime")
|
| 321 |
+
"ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
|
| 322 |
+
|
| 323 |
+
uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
|
| 324 |
+
"UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
|
| 325 |
+
|
| 326 |
+
_html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
|
| 327 |
+
|
| 328 |
+
@staticmethod
|
| 329 |
+
def strip_html_tags(s: str, l: int, tokens: ParseResults):
|
| 330 |
+
"""Parse action to remove HTML tags from web page HTML source
|
| 331 |
+
|
| 332 |
+
Example::
|
| 333 |
+
|
| 334 |
+
# strip HTML links from normal text
|
| 335 |
+
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
|
| 336 |
+
td, td_end = make_html_tags("TD")
|
| 337 |
+
table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end
|
| 338 |
+
print(table_text.parse_string(text).body)
|
| 339 |
+
|
| 340 |
+
Prints::
|
| 341 |
+
|
| 342 |
+
More info at the pyparsing wiki page
|
| 343 |
+
"""
|
| 344 |
+
return pyparsing_common._html_stripper.transform_string(tokens[0])
|
| 345 |
+
|
| 346 |
+
_commasepitem = (
|
| 347 |
+
Combine(
|
| 348 |
+
OneOrMore(
|
| 349 |
+
~Literal(",")
|
| 350 |
+
+ ~LineEnd()
|
| 351 |
+
+ Word(printables, exclude_chars=",")
|
| 352 |
+
+ Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
|
| 353 |
+
)
|
| 354 |
+
)
|
| 355 |
+
.streamline()
|
| 356 |
+
.set_name("commaItem")
|
| 357 |
+
)
|
| 358 |
+
comma_separated_list = DelimitedList(
|
| 359 |
+
Opt(quoted_string.copy() | _commasepitem, default="")
|
| 360 |
+
).set_name("comma separated list")
|
| 361 |
+
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
|
| 362 |
+
|
| 363 |
+
upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
|
| 364 |
+
"""Parse action to convert tokens to upper case."""
|
| 365 |
+
|
| 366 |
+
downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
|
| 367 |
+
"""Parse action to convert tokens to lower case."""
|
| 368 |
+
|
| 369 |
+
# fmt: off
|
| 370 |
+
url = Regex(
|
| 371 |
+
# https://mathiasbynens.be/demo/url-regex
|
| 372 |
+
# https://gist.github.com/dperini/729294
|
| 373 |
+
r"(?P<url>" +
|
| 374 |
+
# protocol identifier (optional)
|
| 375 |
+
# short syntax // still required
|
| 376 |
+
r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
|
| 377 |
+
# user:pass BasicAuth (optional)
|
| 378 |
+
r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
|
| 379 |
+
r"(?P<host>" +
|
| 380 |
+
# IP address exclusion
|
| 381 |
+
# private & local networks
|
| 382 |
+
r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
|
| 383 |
+
r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
|
| 384 |
+
r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
|
| 385 |
+
# IP address dotted notation octets
|
| 386 |
+
# excludes loopback network 0.0.0.0
|
| 387 |
+
# excludes reserved space >= 224.0.0.0
|
| 388 |
+
# excludes network & broadcast addresses
|
| 389 |
+
# (first & last IP address of each class)
|
| 390 |
+
r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
|
| 391 |
+
r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
|
| 392 |
+
r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
|
| 393 |
+
r"|" +
|
| 394 |
+
# host & domain names, may end with dot
|
| 395 |
+
# can be replaced by a shortest alternative
|
| 396 |
+
# (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
|
| 397 |
+
r"(?:" +
|
| 398 |
+
r"(?:" +
|
| 399 |
+
r"[a-z0-9\u00a1-\uffff]" +
|
| 400 |
+
r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
|
| 401 |
+
r")?" +
|
| 402 |
+
r"[a-z0-9\u00a1-\uffff]\." +
|
| 403 |
+
r")+" +
|
| 404 |
+
# TLD identifier name, may end with dot
|
| 405 |
+
r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
|
| 406 |
+
r")" +
|
| 407 |
+
# port number (optional)
|
| 408 |
+
r"(:(?P<port>\d{2,5}))?" +
|
| 409 |
+
# resource path (optional)
|
| 410 |
+
r"(?P<path>\/[^?# ]*)?" +
|
| 411 |
+
# query string (optional)
|
| 412 |
+
r"(\?(?P<query>[^#]*))?" +
|
| 413 |
+
# fragment (optional)
|
| 414 |
+
r"(#(?P<fragment>\S*))?" +
|
| 415 |
+
r")"
|
| 416 |
+
).set_name("url")
|
| 417 |
+
"""URL (http/https/ftp scheme)"""
|
| 418 |
+
# fmt: on
|
| 419 |
+
|
| 420 |
+
# pre-PEP8 compatibility names
|
| 421 |
+
# fmt: off
|
| 422 |
+
convertToInteger = staticmethod(replaced_by_pep8("convertToInteger", convert_to_integer))
|
| 423 |
+
convertToFloat = staticmethod(replaced_by_pep8("convertToFloat", convert_to_float))
|
| 424 |
+
convertToDate = staticmethod(replaced_by_pep8("convertToDate", convert_to_date))
|
| 425 |
+
convertToDatetime = staticmethod(replaced_by_pep8("convertToDatetime", convert_to_datetime))
|
| 426 |
+
stripHTMLTags = staticmethod(replaced_by_pep8("stripHTMLTags", strip_html_tags))
|
| 427 |
+
upcaseTokens = staticmethod(replaced_by_pep8("upcaseTokens", upcase_tokens))
|
| 428 |
+
downcaseTokens = staticmethod(replaced_by_pep8("downcaseTokens", downcase_tokens))
|
| 429 |
+
# fmt: on
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
_builtin_exprs = [
|
| 433 |
+
v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
|
| 434 |
+
]
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (17.8 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/exceptions.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# exceptions.py
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import copy
|
| 5 |
+
import re
|
| 6 |
+
import sys
|
| 7 |
+
import typing
|
| 8 |
+
from functools import cached_property
|
| 9 |
+
|
| 10 |
+
from .unicode import pyparsing_unicode as ppu
|
| 11 |
+
from .util import (
|
| 12 |
+
_collapse_string_to_ranges,
|
| 13 |
+
col,
|
| 14 |
+
line,
|
| 15 |
+
lineno,
|
| 16 |
+
replaced_by_pep8,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class _ExceptionWordUnicodeSet(
|
| 21 |
+
ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic
|
| 22 |
+
):
|
| 23 |
+
pass
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
_extract_alphanums = _collapse_string_to_ranges(_ExceptionWordUnicodeSet.alphanums)
|
| 27 |
+
_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class ParseBaseException(Exception):
|
| 31 |
+
"""base exception class for all parsing runtime exceptions"""
|
| 32 |
+
|
| 33 |
+
loc: int
|
| 34 |
+
msg: str
|
| 35 |
+
pstr: str
|
| 36 |
+
parser_element: typing.Any # "ParserElement"
|
| 37 |
+
args: tuple[str, int, typing.Optional[str]]
|
| 38 |
+
|
| 39 |
+
__slots__ = (
|
| 40 |
+
"loc",
|
| 41 |
+
"msg",
|
| 42 |
+
"pstr",
|
| 43 |
+
"parser_element",
|
| 44 |
+
"args",
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
# Performance tuning: we construct a *lot* of these, so keep this
|
| 48 |
+
# constructor as small and fast as possible
|
| 49 |
+
def __init__(
|
| 50 |
+
self,
|
| 51 |
+
pstr: str,
|
| 52 |
+
loc: int = 0,
|
| 53 |
+
msg: typing.Optional[str] = None,
|
| 54 |
+
elem=None,
|
| 55 |
+
):
|
| 56 |
+
if msg is None:
|
| 57 |
+
msg, pstr = pstr, ""
|
| 58 |
+
|
| 59 |
+
self.loc = loc
|
| 60 |
+
self.msg = msg
|
| 61 |
+
self.pstr = pstr
|
| 62 |
+
self.parser_element = elem
|
| 63 |
+
self.args = (pstr, loc, msg)
|
| 64 |
+
|
| 65 |
+
@staticmethod
|
| 66 |
+
def explain_exception(exc: Exception, depth: int = 16) -> str:
|
| 67 |
+
"""
|
| 68 |
+
Method to take an exception and translate the Python internal traceback into a list
|
| 69 |
+
of the pyparsing expressions that caused the exception to be raised.
|
| 70 |
+
|
| 71 |
+
Parameters:
|
| 72 |
+
|
| 73 |
+
- exc - exception raised during parsing (need not be a ParseException, in support
|
| 74 |
+
of Python exceptions that might be raised in a parse action)
|
| 75 |
+
- depth (default=16) - number of levels back in the stack trace to list expression
|
| 76 |
+
and function names; if None, the full stack trace names will be listed; if 0, only
|
| 77 |
+
the failing input line, marker, and exception string will be shown
|
| 78 |
+
|
| 79 |
+
Returns a multi-line string listing the ParserElements and/or function names in the
|
| 80 |
+
exception's stack trace.
|
| 81 |
+
"""
|
| 82 |
+
import inspect
|
| 83 |
+
from .core import ParserElement
|
| 84 |
+
|
| 85 |
+
if depth is None:
|
| 86 |
+
depth = sys.getrecursionlimit()
|
| 87 |
+
ret: list[str] = []
|
| 88 |
+
if isinstance(exc, ParseBaseException):
|
| 89 |
+
ret.append(exc.line)
|
| 90 |
+
ret.append(f"{' ' * (exc.column - 1)}^")
|
| 91 |
+
ret.append(f"{type(exc).__name__}: {exc}")
|
| 92 |
+
|
| 93 |
+
if depth <= 0 or exc.__traceback__ is None:
|
| 94 |
+
return "\n".join(ret)
|
| 95 |
+
|
| 96 |
+
callers = inspect.getinnerframes(exc.__traceback__, context=depth)
|
| 97 |
+
seen: set[int] = set()
|
| 98 |
+
for ff in callers[-depth:]:
|
| 99 |
+
frm = ff[0]
|
| 100 |
+
|
| 101 |
+
f_self = frm.f_locals.get("self", None)
|
| 102 |
+
if isinstance(f_self, ParserElement):
|
| 103 |
+
if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")):
|
| 104 |
+
continue
|
| 105 |
+
if id(f_self) in seen:
|
| 106 |
+
continue
|
| 107 |
+
seen.add(id(f_self))
|
| 108 |
+
|
| 109 |
+
self_type = type(f_self)
|
| 110 |
+
ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}")
|
| 111 |
+
|
| 112 |
+
elif f_self is not None:
|
| 113 |
+
self_type = type(f_self)
|
| 114 |
+
ret.append(f"{self_type.__module__}.{self_type.__name__}")
|
| 115 |
+
|
| 116 |
+
else:
|
| 117 |
+
code = frm.f_code
|
| 118 |
+
if code.co_name in ("wrapper", "<module>"):
|
| 119 |
+
continue
|
| 120 |
+
|
| 121 |
+
ret.append(code.co_name)
|
| 122 |
+
|
| 123 |
+
depth -= 1
|
| 124 |
+
if not depth:
|
| 125 |
+
break
|
| 126 |
+
|
| 127 |
+
return "\n".join(ret)
|
| 128 |
+
|
| 129 |
+
@classmethod
|
| 130 |
+
def _from_exception(cls, pe) -> ParseBaseException:
|
| 131 |
+
"""
|
| 132 |
+
internal factory method to simplify creating one type of ParseException
|
| 133 |
+
from another - avoids having __init__ signature conflicts among subclasses
|
| 134 |
+
"""
|
| 135 |
+
return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element)
|
| 136 |
+
|
| 137 |
+
@cached_property
|
| 138 |
+
def line(self) -> str:
|
| 139 |
+
"""
|
| 140 |
+
Return the line of text where the exception occurred.
|
| 141 |
+
"""
|
| 142 |
+
return line(self.loc, self.pstr)
|
| 143 |
+
|
| 144 |
+
@cached_property
|
| 145 |
+
def lineno(self) -> int:
|
| 146 |
+
"""
|
| 147 |
+
Return the 1-based line number of text where the exception occurred.
|
| 148 |
+
"""
|
| 149 |
+
return lineno(self.loc, self.pstr)
|
| 150 |
+
|
| 151 |
+
@cached_property
|
| 152 |
+
def col(self) -> int:
|
| 153 |
+
"""
|
| 154 |
+
Return the 1-based column on the line of text where the exception occurred.
|
| 155 |
+
"""
|
| 156 |
+
return col(self.loc, self.pstr)
|
| 157 |
+
|
| 158 |
+
@cached_property
|
| 159 |
+
def column(self) -> int:
|
| 160 |
+
"""
|
| 161 |
+
Return the 1-based column on the line of text where the exception occurred.
|
| 162 |
+
"""
|
| 163 |
+
return col(self.loc, self.pstr)
|
| 164 |
+
|
| 165 |
+
@cached_property
|
| 166 |
+
def found(self) -> str:
|
| 167 |
+
if not self.pstr:
|
| 168 |
+
return ""
|
| 169 |
+
|
| 170 |
+
if self.loc >= len(self.pstr):
|
| 171 |
+
return "end of text"
|
| 172 |
+
|
| 173 |
+
# pull out next word at error location
|
| 174 |
+
found_match = _exception_word_extractor.match(self.pstr, self.loc)
|
| 175 |
+
if found_match is not None:
|
| 176 |
+
found_text = found_match.group(0)
|
| 177 |
+
else:
|
| 178 |
+
found_text = self.pstr[self.loc : self.loc + 1]
|
| 179 |
+
|
| 180 |
+
return repr(found_text).replace(r"\\", "\\")
|
| 181 |
+
|
| 182 |
+
# pre-PEP8 compatibility
|
| 183 |
+
@property
|
| 184 |
+
def parserElement(self):
|
| 185 |
+
return self.parser_element
|
| 186 |
+
|
| 187 |
+
@parserElement.setter
|
| 188 |
+
def parserElement(self, elem):
|
| 189 |
+
self.parser_element = elem
|
| 190 |
+
|
| 191 |
+
def copy(self):
|
| 192 |
+
return copy.copy(self)
|
| 193 |
+
|
| 194 |
+
def formatted_message(self) -> str:
|
| 195 |
+
found_phrase = f", found {self.found}" if self.found else ""
|
| 196 |
+
return f"{self.msg}{found_phrase} (at char {self.loc}), (line:{self.lineno}, col:{self.column})"
|
| 197 |
+
|
| 198 |
+
def __str__(self) -> str:
|
| 199 |
+
return self.formatted_message()
|
| 200 |
+
|
| 201 |
+
def __repr__(self):
|
| 202 |
+
return str(self)
|
| 203 |
+
|
| 204 |
+
def mark_input_line(
|
| 205 |
+
self, marker_string: typing.Optional[str] = None, *, markerString: str = ">!<"
|
| 206 |
+
) -> str:
|
| 207 |
+
"""
|
| 208 |
+
Extracts the exception line from the input string, and marks
|
| 209 |
+
the location of the exception with a special symbol.
|
| 210 |
+
"""
|
| 211 |
+
markerString = marker_string if marker_string is not None else markerString
|
| 212 |
+
line_str = self.line
|
| 213 |
+
line_column = self.column - 1
|
| 214 |
+
if markerString:
|
| 215 |
+
line_str = f"{line_str[:line_column]}{markerString}{line_str[line_column:]}"
|
| 216 |
+
return line_str.strip()
|
| 217 |
+
|
| 218 |
+
def explain(self, depth: int = 16) -> str:
    """
    Method to translate the Python internal traceback into a list
    of the pyparsing expressions that caused the exception to be raised.

    Parameters:

    - depth (default=16) - number of levels back in the stack trace to list expression
      and function names; if None, the full stack trace names will be listed; if 0, only
      the failing input line, marker, and exception string will be shown

    Returns a multi-line string listing the ParserElements and/or function names in the
    exception's stack trace.

    Example::

        # an expression to parse 3 integers
        expr = pp.Word(pp.nums) * 3
        try:
            # a failing parse - the third integer is prefixed with "A"
            expr.parse_string("123 456 A789")
        except pp.ParseException as pe:
            print(pe.explain(depth=0))

    prints::

        123 456 A789
                ^
        ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)

    Note: the diagnostic output will include string representations of the expressions
    that failed to parse. These representations will be more helpful if you use `set_name` to
    give identifiable names to your expressions. Otherwise they will use the default string
    forms, which may be cryptic to read.

    Note: pyparsing's default truncation of exception tracebacks may also truncate the
    stack of expressions that are displayed in the ``explain`` output. To get the full listing
    of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
    """
    # delegate to the class-level helper, passing this exception instance
    return self.explain_exception(self, depth)
|
| 258 |
+
|
| 259 |
+
# Compatibility synonyms
# NOTE(review): replaced_by_pep8 presumably wraps the new method in a
# deprecation shim for the old camelCase name - confirm in util.py
# fmt: off
markInputline = replaced_by_pep8("markInputline", mark_input_line)
# fmt: on
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
class ParseException(ParseBaseException):
    """
    Exception thrown when a parse expression doesn't match the input string

    Example::

        integer = Word(nums).set_name("integer")
        try:
            integer.parse_string("ABC")
        except ParseException as pe:
            print(pe)
            print(f"column: {pe.column}")

    prints::

       Expected integer (at char 0), (line:1, col:1) column: 1

    """
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
class ParseFatalException(ParseBaseException):
    """
    User-throwable exception thrown when inconsistent parse content
    is found; stops all parsing immediately
    """
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
class ParseSyntaxException(ParseFatalException):
    """
    Just like :class:`ParseFatalException`, but thrown internally
    when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
    that parsing is to stop immediately because an unbacktrackable
    syntax error has been found.
    """
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
class RecursiveGrammarException(Exception):
    """
    Exception thrown by :class:`ParserElement.validate` if the
    grammar could be left-recursive; parser may need to enable
    left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`

    Deprecated: only used by deprecated method ParserElement.validate.
    """

    def __init__(self, parseElementList):
        # retain the chain of elements that formed the recursive cycle,
        # for diagnostic display
        self.parseElementTrace = parseElementList

    def __str__(self) -> str:
        return "RecursiveGrammarException: {}".format(self.parseElementTrace)
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/helpers.py
ADDED
|
@@ -0,0 +1,1086 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# helpers.py
|
| 2 |
+
import html.entities
|
| 3 |
+
import operator
|
| 4 |
+
import re
|
| 5 |
+
import sys
|
| 6 |
+
import typing
|
| 7 |
+
|
| 8 |
+
from . import __diag__
|
| 9 |
+
from .core import *
|
| 10 |
+
from .util import (
|
| 11 |
+
_bslash,
|
| 12 |
+
_flatten,
|
| 13 |
+
_escape_regex_range_chars,
|
| 14 |
+
make_compressed_re,
|
| 15 |
+
replaced_by_pep8,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
#
|
| 20 |
+
# global helpers
|
| 21 |
+
#
|
| 22 |
+
def counted_array(
    expr: ParserElement,
    int_expr: typing.Optional[ParserElement] = None,
    *,
    intExpr: typing.Optional[ParserElement] = None,
) -> ParserElement:
    """Helper to define a counted list of expressions.

    This helper defines a pattern of the form::

        integer expr expr expr...

    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the
    leading count token is suppressed.

    If ``int_expr`` is specified, it should be a pyparsing expression
    that produces an integer value.

    Example::

        counted_array(Word(alphas)).parse_string('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
        counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef')  # -> ['ab', 'cd']

        # if other fields must be parsed after the count but before the
        # list items, give the fields results names and they will
        # be preserved in the returned ParseResults:
        count_with_metadata = integer + Word(alphas)("type")
        typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
        result = typed_array.parse_string("3 bool True True False")
        print(result.dump())

        # prints
        # ['True', 'True', 'False']
        # - items: ['True', 'True', 'False']
        # - type: 'bool'
    """
    # legacy ``intExpr`` keyword takes precedence when supplied
    intExpr = intExpr or int_expr
    array_expr = Forward()

    def count_field_parse_action(s, l, t):
        # rebind the deferred array expression to expect exactly the
        # number of items announced by the just-parsed count field
        nonlocal array_expr
        n = t[0]
        array_expr <<= (expr * n) if n else Empty()
        # clear list contents, but keep any named results
        del t[:]

    if intExpr is None:
        # default count field: a decimal integer
        intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
    else:
        # copy so the caller's expression is not mutated below
        intExpr = intExpr.copy()
    intExpr.set_name("arrayLen")
    intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
    return (intExpr + array_expr).set_name(f"(len) {expr}...")
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def match_previous_literal(expr: ParserElement) -> ParserElement:
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression. For example::

        first = Word(nums)
        second = match_previous_literal(first)
        match_expr = first + ":" + second

    will match ``"1:1"``, but not ``"1:2"``. Because this
    matches a previous literal, will also match the leading
    ``"1:1"`` in ``"1:10"``. If this is not desired, use
    :class:`match_previous_expr`. Do *not* use with packrat parsing
    enabled.
    """
    rep = Forward()

    def copy_token_to_repeater(s, l, t):
        # rebind the deferred ``rep`` expression to literally match
        # whatever tokens ``expr`` just produced
        if not t:
            rep << Empty()
            return

        if len(t) == 1:
            rep << t[0]
            return

        # flatten t tokens
        tflat = _flatten(t.as_list())
        rep << And(Literal(tt) for tt in tflat)

    expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
    rep.set_name("(prev) " + str(expr))
    return rep
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def match_previous_expr(expr: ParserElement) -> ParserElement:
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression. For example::

        first = Word(nums)
        second = match_previous_expr(first)
        match_expr = first + ":" + second

    will match ``"1:1"``, but not ``"1:2"``. Because this
    matches by expressions, will *not* match the leading ``"1:1"``
    in ``"1:10"``; the expressions are evaluated first, and then
    compared, so ``"1"`` is compared with ``"10"``. Do *not* use
    with packrat parsing enabled.
    """
    rep = Forward()
    e2 = expr.copy()
    rep <<= e2

    def copy_token_to_repeater(s, l, t):
        # capture the tokens matched by the first occurrence of expr
        matchTokens = _flatten(t.as_list())

        def must_match_these_tokens(s, l, t):
            # re-parse with the same expression, then compare token lists
            theseTokens = _flatten(t.as_list())
            if theseTokens != matchTokens:
                # fix: insert the missing space between "found" and the
                # token list in the user-facing error message
                raise ParseException(
                    s, l, f"Expected {matchTokens}, found {theseTokens}"
                )

        rep.set_parse_action(must_match_these_tokens, callDuringTry=True)

    expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
    rep.set_name("(prev) " + str(expr))
    return rep
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def one_of(
    strs: Union[typing.Iterable[str], str],
    caseless: bool = False,
    use_regex: bool = True,
    as_keyword: bool = False,
    *,
    useRegex: bool = True,
    asKeyword: bool = False,
) -> ParserElement:
    """Helper to quickly define a set of alternative :class:`Literal` s,
    and makes sure to do longest-first testing when there is a conflict,
    regardless of the input order, but returns
    a :class:`MatchFirst` for best performance.

    Parameters:

    - ``strs`` - a string of space-delimited literals, or a collection of
      string literals
    - ``caseless`` - treat all literals as caseless - (default= ``False``)
    - ``use_regex`` - as an optimization, will
      generate a :class:`Regex` object; otherwise, will generate
      a :class:`MatchFirst` object (if ``caseless=True`` or ``as_keyword=True``, or if
      creating a :class:`Regex` raises an exception) - (default= ``True``)
    - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
      generated expressions - (default= ``False``)
    - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
      but will be removed in a future release

    Example::

        comp_oper = one_of("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12"))

    prints::

        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    # reconcile pre-PEP8 and PEP8 argument spellings
    asKeyword = asKeyword or as_keyword
    useRegex = useRegex and use_regex

    # a string passed as the second positional arg usually means the caller
    # meant one_of("a b c") but wrote one_of("a", "b", "c")
    if (
        isinstance(caseless, str_type)
        and __diag__.warn_on_multiple_string_args_to_oneof
    ):
        warnings.warn(
            "More than one string argument passed to one_of, pass"
            " choices as a list or space-delimited string",
            stacklevel=2,
        )

    if caseless:
        is_equal = lambda a, b: a.upper() == b.upper()
        masks = lambda a, b: b.upper().startswith(a.upper())
        parse_element_class = CaselessKeyword if asKeyword else CaselessLiteral
    else:
        is_equal = operator.eq
        masks = lambda a, b: b.startswith(a)
        parse_element_class = Keyword if asKeyword else Literal

    symbols: list[str]
    if isinstance(strs, str_type):
        strs = typing.cast(str, strs)
        symbols = strs.split()
    elif isinstance(strs, Iterable):
        symbols = list(strs)
    else:
        raise TypeError("Invalid argument to one_of, expected string or iterable")
    if not symbols:
        return NoMatch()

    # reorder given symbols to take care to avoid masking longer choices with shorter ones
    # (but only if the given symbols are not just single characters)
    i = 0
    while i < len(symbols) - 1:
        cur = symbols[i]
        for j, other in enumerate(symbols[i + 1 :]):
            if is_equal(other, cur):
                # duplicate symbol - drop the later occurrence
                del symbols[i + j + 1]
                break
            if len(other) > len(cur) and masks(cur, other):
                # ``cur`` is a prefix of ``other`` - move ``other`` first
                del symbols[i + j + 1]
                symbols.insert(i, other)
                break
        else:
            i += 1

    if useRegex:
        re_flags: int = re.IGNORECASE if caseless else 0

        try:
            if all(len(sym) == 1 for sym in symbols):
                # symbols are just single characters, create range regex pattern
                patt = f"[{''.join(_escape_regex_range_chars(sym) for sym in symbols)}]"
            else:
                patt = "|".join(re.escape(sym) for sym in symbols)

            # wrap with \b word break markers if defining as keywords
            if asKeyword:
                patt = rf"\b(?:{patt})\b"

            ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))

            if caseless:
                # add parse action to return symbols as specified, not in random
                # casing as found in input string
                symbol_map = {sym.lower(): sym for sym in symbols}
                ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])

            return ret

        except re.error:
            warnings.warn(
                "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
            )

    # last resort, just use MatchFirst
    return MatchFirst(parse_element_class(sym) for sym in symbols).set_name(
        " | ".join(symbols)
    )
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
    """Helper to easily and clearly define a dictionary by specifying
    the respective patterns for the key and value. Takes care of
    defining the :class:`Dict`, :class:`ZeroOrMore`, and
    :class:`Group` tokens in the proper order. The key pattern
    can include delimiting markers or punctuation, as long as they are
    suppressed, thereby leaving the significant key text. The value
    pattern can include named results, so that the :class:`Dict` results
    can include named token fields.

    Example::

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
        print(attr_expr[1, ...].parse_string(text).dump())

        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)

        # similar to Dict, but simpler call format
        result = dict_of(attr_label, attr_value).parse_string(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.as_dict())

    prints::

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: 'light blue'
        - posn: 'upper left'
        - shape: 'SQUARE'
        - texture: 'burlap'
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    """
    # each key+value pair becomes one group; Dict turns groups into named results
    return Dict(OneOrMore(Group(key + value)))
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def original_text_for(
    expr: ParserElement, as_string: bool = True, *, asString: bool = True
) -> ParserElement:
    """Helper to return the original, untokenized text for a given
    expression. Useful to restore the parsed fields of an HTML start
    tag into the raw tag text itself, or to revert separate tokens with
    intervening whitespace back to the original matching input text. By
    default, returns a string containing the original parsed text.

    If the optional ``as_string`` argument is passed as
    ``False``, then the return value is
    a :class:`ParseResults` containing any results names that
    were originally matched, and a single token containing the original
    matched text from the input string. So if the expression passed to
    :class:`original_text_for` contains expressions with defined
    results names, you must set ``as_string`` to ``False`` if you
    want to preserve those results name values.

    The ``asString`` pre-PEP8 argument is retained for compatibility,
    but will be removed in a future release.

    Example::

        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b", "i"):
            opener, closer = make_html_tags(tag)
            patt = original_text_for(opener + ... + closer)
            print(patt.search_string(src)[0])

    prints::

        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    """
    asString = asString and as_string

    # zero-width markers that record the start and end offsets of the match
    locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
    endlocMarker = locMarker.copy()
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        # replace all tokens with the raw input slice between the markers
        extractText = lambda s, l, t: s[t._original_start : t._original_end]
    else:

        def extractText(s, l, t):
            # keep named results, but replace the token list with the raw
            # slice; pop the bookkeeping names so they don't leak out
            t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]

    matchExpr.set_parse_action(extractText)
    matchExpr.ignoreExprs = expr.ignoreExprs
    matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
    return matchExpr
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def ungroup(expr: ParserElement) -> ParserElement:
    """Helper to undo pyparsing's default grouping of And expressions,
    even if all but one are non-empty.
    """
    # the parse action replaces the wrapped results with their single inner group
    return TokenConverter(expr).add_parse_action(lambda t: t[0])
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
def locatedExpr(expr: ParserElement) -> ParserElement:
    """
    (DEPRECATED - future code should use the :class:`Located` class)
    Helper to decorate a returned token with its starting and ending
    locations in the input string.

    This helper adds the following results names:

    - ``locn_start`` - location where matched expression begins
    - ``locn_end`` - location where matched expression ends
    - ``value`` - the actual parsed results

    Be careful if the input text contains ``<TAB>`` characters, you
    may want to call :class:`ParserElement.parse_with_tabs`

    Example::

        wd = Word(alphas)
        for match in locatedExpr(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
            print(match)

    prints::

        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    # zero-width expression whose parse action reports the current location
    locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
    return Group(
        locator("locn_start")
        + expr("value")
        + locator.copy().leaveWhitespace()("locn_end")
    )
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def nested_expr(
    opener: Union[str, ParserElement] = "(",
    closer: Union[str, ParserElement] = ")",
    content: typing.Optional[ParserElement] = None,
    ignore_expr: ParserElement = quoted_string(),
    *,
    ignoreExpr: ParserElement = quoted_string(),
) -> ParserElement:
    """Helper method for defining nested lists enclosed in opening and
    closing delimiters (``"("`` and ``")"`` are the default).

    Parameters:

    - ``opener`` - opening character for a nested list
      (default= ``"("``); can also be a pyparsing expression
    - ``closer`` - closing character for a nested list
      (default= ``")"``); can also be a pyparsing expression
    - ``content`` - expression for items within the nested lists
      (default= ``None``)
    - ``ignore_expr`` - expression for ignoring opening and closing delimiters
      (default= :class:`quoted_string`)
    - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
      but will be removed in a future release

    If an expression is not provided for the content argument, the
    nested expression will capture all whitespace-delimited content
    between delimiters as a list of separate values.

    Use the ``ignore_expr`` argument to define expressions that may
    contain opening or closing characters that should not be treated as
    opening or closing characters for nesting, such as quoted_string or
    a comment expression. Specify multiple expressions using an
    :class:`Or` or :class:`MatchFirst`. The default is
    :class:`quoted_string`, but if no expressions are to be ignored, then
    pass ``None`` for this argument.

    Example::

        data_type = one_of("void int short long char float double")
        decl_data_type = Combine(data_type + Opt(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR, RPAR = map(Suppress, "()")

        code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))

        c_function = (decl_data_type("type")
                      + ident("name")
                      + LPAR + Opt(DelimitedList(arg), [])("args") + RPAR
                      + code_body("body"))
        c_function.ignore(c_style_comment)

        source_code = '''
            int is_odd(int x) {
                return (x%2);
            }

            int dec_to_hex(char hchar) {
                if (hchar >= '0' && hchar <= '9') {
                    return (ord(hchar)-ord('0'));
                } else {
                    return (10+ord(hchar)-ord('A'));
                }
            }
        '''
        for func in c_function.search_string(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)


    prints::

        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    # reconcile pre-PEP8 ``ignoreExpr`` with ``ignore_expr``: when the legacy
    # argument was left at its default, the PEP8 argument wins
    if ignoreExpr != ignore_expr:
        ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        if isinstance(opener, str_type) and isinstance(closer, str_type):
            opener = typing.cast(str, opener)
            closer = typing.cast(str, closer)
            if len(opener) == 1 and len(closer) == 1:
                # single-character delimiters: content is any run of
                # characters that are not delimiters or whitespace
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + CharsNotIn(
                                opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
                                exact=1,
                            )
                        )
                    ).set_parse_action(lambda t: t[0].strip())
                else:
                    content = empty.copy() + CharsNotIn(
                        opener + closer + ParserElement.DEFAULT_WHITE_CHARS
                    ).set_parse_action(lambda t: t[0].strip())
            else:
                # multi-character delimiters: must test for them as literals
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    ).set_parse_action(lambda t: t[0].strip())
                else:
                    content = Combine(
                        OneOrMore(
                            ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    ).set_parse_action(lambda t: t[0].strip())
        else:
            raise ValueError(
                "opening and closing arguments must be strings if no content expression is given"
            )
    # recursive definition: a nested expression may contain further
    # nested expressions between its delimiters
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group(
            Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
        )
    else:
        ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
    ret.set_name(f"nested {opener}{closer} expression")
    # don't override error message from content expressions
    ret.errmsg = None
    return ret
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
    """Internal helper to construct opening and closing tag expressions, given a tag name.

    :param tagStr: tag name as a str, or a ParserElement that matches the tag name
    :param xml: if True, build strict XML tags (case-sensitive name, double-quoted
        attribute values only); if False, build lenient HTML tags (caseless name,
        unquoted values allowed, attribute names lower-cased)
    :param suppress_LT: expression for the (suppressed) leading ``<``
    :param suppress_GT: expression for the (suppressed) trailing ``>``
    :return: ``(openTag, closeTag)`` tuple of ParserElements
    """
    if isinstance(tagStr, str_type):
        resname = tagStr
        # HTML tag names match caselessly; XML tag names are case-sensitive
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name

    tagAttrName = Word(alphas, alphanums + "_-:")
    if xml:
        # XML: attribute values must be double-quoted, and every attribute has a value
        tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
            # a trailing "/" marks an empty element (<tag/>); stored as a bool
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    else:
        # HTML: quoted or unquoted attribute values; an attribute may have no value
        tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
            printables, exclude_chars=">"
        )
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(
                ZeroOrMore(
                    Group(
                        # HTML attribute names are normalized to lower case
                        tagAttrName.set_parse_action(lambda t: t[0].lower())
                        + Opt(Suppress("=") + tagAttrValue)
                    )
                )
            )
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)

    openTag.set_name(f"<{resname}>")
    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
    openTag.add_parse_action(
        lambda t: t.__setitem__(
            "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
        )
    )
    # closing tag gets an end<tagname> results name (e.g. "endBody" for tag "body")
    closeTag = closeTag(
        "end" + "".join(resname.replace(":", " ").title().split())
    ).set_name(f"</{resname}>")
    openTag.tag = resname
    closeTag.tag = resname
    # convenience expression matching everything between the open and close tags
    openTag.tag_body = SkipTo(closeTag())
    return openTag, closeTag
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
def make_html_tags(
    tag_str: Union[str, ParserElement]
) -> tuple[ParserElement, ParserElement]:
    """Construct a pair of expressions matching the opening and closing
    forms of an HTML tag, given its name.

    Tag names are matched caselessly; attributes may use namespaces and
    quoted or unquoted values.  Returns an ``(open_tag, close_tag)`` pair.

    Example::

        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
        # make_html_tags returns pyparsing expressions for the opening and
        # closing tags as a 2-tuple
        a, a_end = make_html_tags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end

        for link in link_expr.search_string(text):
            # attributes in the <A> tag (like "href" shown here) are
            # also accessible as named results
            print(link.link_text, '->', link.href)

    prints::

        pyparsing -> https://github.com/pyparsing/pyparsing/wiki
    """
    open_tag, close_tag = _makeTags(tag_str, False)
    return open_tag, close_tag
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def make_xml_tags(
    tag_str: Union[str, ParserElement]
) -> tuple[ParserElement, ParserElement]:
    """Construct a pair of expressions matching the opening and closing
    forms of an XML tag, given its name.

    Unlike :class:`make_html_tags`, the tag name is matched only in the
    given upper/lower case.  Returns an ``(open_tag, close_tag)`` pair.

    Example: similar to :class:`make_html_tags`
    """
    open_tag, close_tag = _makeTags(tag_str, True)
    return open_tag, close_tag
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
any_open_tag: ParserElement
any_close_tag: ParserElement
# generic open/close tag expressions that match any HTML tag name
any_open_tag, any_close_tag = make_html_tags(
    Word(alphas, alphanums + "_:").set_name("any tag")
)

# map HTML5 entity names (without the trailing ";") to their replacement characters
_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
# most common entities are listed first in the regex alternation for speed
_most_common_entities = "nbsp lt gt amp quot apos cent pound euro copy".replace(
    " ", "|"
)
# regex pattern is supplied as a callable so it is built lazily on first use
common_html_entity = Regex(
    lambda: f"&(?P<entity>{_most_common_entities}|{make_compressed_re(_htmlEntityMap)});"
).set_name("common HTML entity")
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
def replace_html_entity(s, l, t):
    """Parse action that replaces a matched common HTML entity with its
    special character (returns None if the entity name is unknown)."""
    entity_name = t.entity
    return _htmlEntityMap.get(entity_name)
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
class OpAssoc(Enum):
    """Operator associativity values, used in the operator specification
    tuples (InfixNotationOperatorSpec) passed to :class:`infix_notation`."""

    LEFT = 1
    RIGHT = 2
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
# a single operator in an infix_notation spec: an expression, a string,
# or (for ternary operators) a pair of expressions/strings
InfixNotationOperatorArgType = Union[
    ParserElement, str, tuple[Union[ParserElement, str], Union[ParserElement, str]]
]
# one precedence level: (op_expr, num_operands, associativity[, parse_action])
# - the 4-element form includes an optional parse action; the 3-element form omits it
InfixNotationOperatorSpec = Union[
    tuple[
        InfixNotationOperatorArgType,
        int,
        OpAssoc,
        typing.Optional[ParseAction],
    ],
    tuple[
        InfixNotationOperatorArgType,
        int,
        OpAssoc,
    ],
]
|
| 686 |
+
|
| 687 |
+
|
| 688 |
+
def infix_notation(
    base_expr: ParserElement,
    op_list: list[InfixNotationOperatorSpec],
    lpar: Union[str, ParserElement] = Suppress("("),
    rpar: Union[str, ParserElement] = Suppress(")"),
) -> ParserElement:
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary
    or binary, left- or right-associative. Parse actions can also be
    attached to operator expressions. The generated parser will also
    recognize the use of parentheses to override operator precedences
    (see example below).

    Note: if you define a deep operator list, you may see performance
    issues when using infix_notation. See
    :class:`ParserElement.enable_packrat` for a mechanism to potentially
    improve your parser performance.

    Parameters:

    - ``base_expr`` - expression representing the most basic operand to
      be used in the expression
    - ``op_list`` - list of tuples, one for each operator precedence level
      in the expression grammar; each tuple is of the form ``(op_expr,
      num_operands, right_left_assoc, (optional)parse_action)``, where:

      - ``op_expr`` is the pyparsing expression for the operator; may also
        be a string, which will be converted to a Literal; if ``num_operands``
        is 3, ``op_expr`` is a tuple of two expressions, for the two
        operators separating the 3 terms
      - ``num_operands`` is the number of terms for this operator (must be 1,
        2, or 3)
      - ``right_left_assoc`` is the indicator whether the operator is right
        or left associative, using the pyparsing-defined constants
        ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
      - ``parse_action`` is the parse action to be associated with
        expressions matching this operator expression (the parse action
        tuple member may be omitted); if the parse action is passed
        a tuple or list of functions, this is equivalent to calling
        ``set_parse_action(*fn)``
        (:class:`ParserElement.set_parse_action`)
    - ``lpar`` - expression for matching left-parentheses; if passed as a
      str, then will be parsed as ``Suppress(lpar)``. If lpar is passed as
      an expression (such as ``Literal('(')``), then it will be kept in
      the parsed results, and grouped with them. (default= ``Suppress('(')``)
    - ``rpar`` - expression for matching right-parentheses; if passed as a
      str, then will be parsed as ``Suppress(rpar)``. If rpar is passed as
      an expression (such as ``Literal(')')``), then it will be kept in
      the parsed results, and grouped with them. (default= ``Suppress(')')``)

    Example::

        # simple example of four-function arithmetic with ints and
        # variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier

        arith_expr = infix_notation(integer | varname,
            [
            ('-', 1, OpAssoc.RIGHT),
            (one_of('* /'), 2, OpAssoc.LEFT),
            (one_of('+ -'), 2, OpAssoc.LEFT),
            ])

        arith_expr.run_tests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', full_dump=False)

    prints::

        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        (5+x)*y
        [[[5, '+', 'x'], '*', 'y']]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
    """

    # captive version of FollowedBy that does not do parse actions or capture results names
    class _FB(FollowedBy):
        def parseImpl(self, instring, loc, doActions=True):
            self.expr.try_parse(instring, loc)
            return loc, []

    _FB.__name__ = "FollowedBy>"

    ret = Forward()
    if isinstance(lpar, str):
        lpar = Suppress(lpar)
    if isinstance(rpar, str):
        rpar = Suppress(rpar)

    # if lpar and rpar are not suppressed, wrap in group
    if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
        lastExpr = base_expr | Group(lpar + ret + rpar).set_name(
            f"nested_{base_expr.name}"
        )
    else:
        lastExpr = base_expr | (lpar + ret + rpar).set_name(f"nested_{base_expr.name}")
    root_expr = lastExpr

    arity: int
    # annotate with OpAssoc directly (the opAssoc compatibility synonym is
    # only defined later in this module)
    rightLeftAssoc: OpAssoc
    pa: typing.Optional[ParseAction]
    opExpr1: ParserElement
    opExpr2: ParserElement
    for operDef in op_list:
        # pad 3-element specs with a trailing None parse action
        opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]  # type: ignore[assignment]
        if isinstance(opExpr, str_type):
            opExpr = ParserElement._literalStringClass(opExpr)
        opExpr = typing.cast(ParserElement, opExpr)
        if arity == 3:
            if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
                raise ValueError(
                    "if numterms=3, opExpr must be a tuple or list of two expressions"
                )
            opExpr1, opExpr2 = opExpr
            term_name = f"{opExpr1}{opExpr2} term"
        else:
            term_name = f"{opExpr} term"

        if not 1 <= arity <= 3:
            raise ValueError("operator must be unary (1), binary (2), or ternary (3)")

        if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
            raise ValueError("operator must indicate right or left associativity")

        thisExpr: ParserElement = Forward().set_name(term_name)
        thisExpr = typing.cast(Forward, thisExpr)
        if rightLeftAssoc is OpAssoc.LEFT:
            if arity == 1:
                matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
                        lastExpr + (opExpr + lastExpr)[1, ...]
                    )
                else:
                    matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
            elif arity == 3:
                matchExpr = _FB(
                    lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
                ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
        elif rightLeftAssoc is OpAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Opt):
                    opExpr = Opt(opExpr)
                matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
                        lastExpr + (opExpr + thisExpr)[1, ...]
                    )
                else:
                    matchExpr = _FB(lastExpr + thisExpr) + Group(
                        lastExpr + thisExpr[1, ...]
                    )
            elif arity == 3:
                matchExpr = _FB(
                    lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
                ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
        if pa:
            if isinstance(pa, (tuple, list)):
                matchExpr.set_parse_action(*pa)
            else:
                matchExpr.set_parse_action(pa)
        # use snake_case set_name for consistency with the rest of this module
        # (setName is the legacy camelCase synonym of the same method)
        thisExpr <<= (matchExpr | lastExpr).set_name(term_name)
        lastExpr = thisExpr
    ret <<= lastExpr
    root_expr.set_name("base_expr")
    return ret
|
| 867 |
+
|
| 868 |
+
|
| 869 |
+
def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
    """
    (DEPRECATED - use :class:`IndentedBlock` class instead)
    Helper method for defining space-delimited indentation blocks,
    such as those used to define block statements in Python source code.

    Parameters:

    - ``blockStatementExpr`` - expression defining syntax of statement that
      is repeated within the indented block
    - ``indentStack`` - list created by caller to manage indentation stack
      (multiple ``statementWithIndentedBlock`` expressions within a single
      grammar should share a common ``indentStack``)
    - ``indent`` - boolean indicating whether block must be indented beyond
      the current level; set to ``False`` for block of left-most statements
      (default= ``True``)

    A valid block must contain at least one ``blockStatement``.

    (Note that indentedBlock uses internal parse actions which make it
    incompatible with packrat parsing.)

    Example::

        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''


        indentStack = [1]
        stmt = Forward()

        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group(funcDecl + func_body)

        rvalue = Forward()
        funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << (funcDef | assignment | identifier)

        module_body = stmt[1, ...]

        parseTree = module_body.parseString(data)
        parseTree.pprint()

    prints::

        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
    """
    # NOTE: the mutable default for backup_stacks is intentional - it is shared
    # across calls so that nested indentedBlock expressions can restore the
    # caller's indentStack on parse failure
    backup_stacks.append(indentStack[:])

    def reset_stack():
        # restore the indentation stack to the most recent backup
        indentStack[:] = backup_stacks[-1]

    def checkPeerIndent(s, l, t):
        # parse action: succeed only if this line starts at the current level
        if l >= len(s):
            return
        curCol = col(l, s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseException(s, l, "illegal nesting")
            raise ParseException(s, l, "not a peer entry")

    def checkSubIndent(s, l, t):
        # parse action: succeed only if this line is indented deeper than
        # the current level; pushes the new level onto the stack
        curCol = col(l, s)
        if curCol > indentStack[-1]:
            indentStack.append(curCol)
        else:
            raise ParseException(s, l, "not a subentry")

    def checkUnindent(s, l, t):
        # parse action: succeed only if this line dedents back to a level
        # already on the stack; pops the current level
        if l >= len(s):
            return
        curCol = col(l, s)
        if not (indentStack and curCol in indentStack):
            raise ParseException(s, l, "not an unindent")
        if curCol < indentStack[-1]:
            indentStack.pop()

    # zero-width markers driven entirely by the parse actions above
    NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
    INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
    PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
    UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
    if indent:
        smExpr = Group(
            Opt(NL)
            + INDENT
            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
            + UNDENT
        )
    else:
        smExpr = Group(
            Opt(NL)
            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
            + Opt(UNDENT)
        )

    # add a parse action to remove backup_stack from list of backups
    smExpr.add_parse_action(
        lambda: backup_stacks.pop(-1) and None if backup_stacks else None
    )
    smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
    # allow backslash line-continuations inside block statements
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.set_name("indented block")
|
| 1010 |
+
|
| 1011 |
+
|
| 1012 |
+
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
# (the (?:[^*]|\*(?!/))* pattern matches comment text without consuming the closing */)
c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
    "C style comment"
)
"Comment of the form ``/* ... */``"

html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
"Comment of the form ``<!-- ... -->``"

rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
# the \\\n alternative lets a // comment continue across a backslash line-continuation
dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
"Comment of the form ``// ... (to end of line)``"

cpp_style_comment = Combine(
    Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
).set_name("C++ style comment")
"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"

java_style_comment = cpp_style_comment
"Same as :class:`cpp_style_comment`"

python_style_comment = Regex(r"#.*").set_name("Python style comment")
"Comment of the form ``# ... (to end of line)``"


# build list of built-in expressions, for future reference if a global default value
# gets updated
_builtin_exprs: list[ParserElement] = [
    v for v in vars().values() if isinstance(v, ParserElement)
]
|
| 1042 |
+
|
| 1043 |
+
|
| 1044 |
+
# compatibility function, superseded by DelimitedList class
|
| 1045 |
+
def delimited_list(
    expr: Union[str, ParserElement],
    delim: Union[str, ParserElement] = ",",
    combine: bool = False,
    min: typing.Optional[int] = None,
    max: typing.Optional[int] = None,
    *,
    allow_trailing_delim: bool = False,
) -> ParserElement:
    """(DEPRECATED - use :class:`DelimitedList` class)

    Compatibility wrapper that simply forwards all arguments to
    :class:`DelimitedList`.
    """
    return DelimitedList(
        expr,
        delim,
        combine,
        min,
        max,
        allow_trailing_delim=allow_trailing_delim,
    )
|
| 1058 |
+
|
| 1059 |
+
|
| 1060 |
+
# Compatibility synonyms
# legacy camelCase names; the replaced_by_pep8 wrappers forward to the
# snake_case implementations (and can emit deprecation guidance)
# fmt: off
opAssoc = OpAssoc
anyOpenTag = any_open_tag
anyCloseTag = any_close_tag
commonHTMLEntity = common_html_entity
cStyleComment = c_style_comment
htmlComment = html_comment
restOfLine = rest_of_line
dblSlashComment = dbl_slash_comment
cppStyleComment = cpp_style_comment
javaStyleComment = java_style_comment
pythonStyleComment = python_style_comment
delimitedList = replaced_by_pep8("delimitedList", DelimitedList)
delimited_list = replaced_by_pep8("delimited_list", DelimitedList)
countedArray = replaced_by_pep8("countedArray", counted_array)
matchPreviousLiteral = replaced_by_pep8("matchPreviousLiteral", match_previous_literal)
matchPreviousExpr = replaced_by_pep8("matchPreviousExpr", match_previous_expr)
oneOf = replaced_by_pep8("oneOf", one_of)
dictOf = replaced_by_pep8("dictOf", dict_of)
originalTextFor = replaced_by_pep8("originalTextFor", original_text_for)
nestedExpr = replaced_by_pep8("nestedExpr", nested_expr)
makeHTMLTags = replaced_by_pep8("makeHTMLTags", make_html_tags)
makeXMLTags = replaced_by_pep8("makeXMLTags", make_xml_tags)
replaceHTMLEntity = replaced_by_pep8("replaceHTMLEntity", replace_html_entity)
infixNotation = replaced_by_pep8("infixNotation", infix_notation)
# fmt: on
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/py.typed
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/results.py
ADDED
|
@@ -0,0 +1,815 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# results.py
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import collections
|
| 5 |
+
from collections.abc import (
|
| 6 |
+
MutableMapping,
|
| 7 |
+
Mapping,
|
| 8 |
+
MutableSequence,
|
| 9 |
+
Iterator,
|
| 10 |
+
Iterable,
|
| 11 |
+
)
|
| 12 |
+
import pprint
|
| 13 |
+
from typing import Any
|
| 14 |
+
|
| 15 |
+
from .util import replaced_by_pep8
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
str_type: tuple[type, ...] = (str, bytes)
|
| 19 |
+
_generator_type = type((_ for _ in ()))
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class _ParseResultsWithOffset:
    """Internal pairing of a ParseResults value with the integer offset
    at which it was matched; behaves like a 2-tuple."""

    tup: "tuple[ParseResults, int]"
    __slots__ = ["tup"]

    def __init__(self, p1, p2):
        # store as a single tuple so pickling and indexing are trivial
        self.tup = (p1, p2)

    def __getitem__(self, i):
        return self.tup[i]

    def __getstate__(self):
        return self.tup

    def __setstate__(self, *args):
        self.tup = args[0]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ParseResults:
|
| 40 |
+
"""Structured parse results, to provide multiple means of access to
|
| 41 |
+
the parsed data:
|
| 42 |
+
|
| 43 |
+
- as a list (``len(results)``)
|
| 44 |
+
- by list index (``results[0], results[1]``, etc.)
|
| 45 |
+
- by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
|
| 46 |
+
|
| 47 |
+
Example::
|
| 48 |
+
|
| 49 |
+
integer = Word(nums)
|
| 50 |
+
date_str = (integer.set_results_name("year") + '/'
|
| 51 |
+
+ integer.set_results_name("month") + '/'
|
| 52 |
+
+ integer.set_results_name("day"))
|
| 53 |
+
# equivalent form:
|
| 54 |
+
# date_str = (integer("year") + '/'
|
| 55 |
+
# + integer("month") + '/'
|
| 56 |
+
# + integer("day"))
|
| 57 |
+
|
| 58 |
+
# parse_string returns a ParseResults object
|
| 59 |
+
result = date_str.parse_string("1999/12/31")
|
| 60 |
+
|
| 61 |
+
def test(s, fn=repr):
|
| 62 |
+
print(f"{s} -> {fn(eval(s))}")
|
| 63 |
+
test("list(result)")
|
| 64 |
+
test("result[0]")
|
| 65 |
+
test("result['month']")
|
| 66 |
+
test("result.day")
|
| 67 |
+
test("'month' in result")
|
| 68 |
+
test("'minutes' in result")
|
| 69 |
+
test("result.dump()", str)
|
| 70 |
+
|
| 71 |
+
prints::
|
| 72 |
+
|
| 73 |
+
list(result) -> ['1999', '/', '12', '/', '31']
|
| 74 |
+
result[0] -> '1999'
|
| 75 |
+
result['month'] -> '12'
|
| 76 |
+
result.day -> '31'
|
| 77 |
+
'month' in result -> True
|
| 78 |
+
'minutes' in result -> False
|
| 79 |
+
result.dump() -> ['1999', '/', '12', '/', '31']
|
| 80 |
+
- day: '31'
|
| 81 |
+
- month: '12'
|
| 82 |
+
- year: '1999'
|
| 83 |
+
"""
|
| 84 |
+
|
| 85 |
+
_null_values: tuple[Any, ...] = (None, [], ())
|
| 86 |
+
|
| 87 |
+
_name: str
|
| 88 |
+
_parent: ParseResults
|
| 89 |
+
_all_names: set[str]
|
| 90 |
+
_modal: bool
|
| 91 |
+
_toklist: list[Any]
|
| 92 |
+
_tokdict: dict[str, Any]
|
| 93 |
+
|
| 94 |
+
__slots__ = (
|
| 95 |
+
"_name",
|
| 96 |
+
"_parent",
|
| 97 |
+
"_all_names",
|
| 98 |
+
"_modal",
|
| 99 |
+
"_toklist",
|
| 100 |
+
"_tokdict",
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
class List(list):
|
| 104 |
+
"""
|
| 105 |
+
Simple wrapper class to distinguish parsed list results that should be preserved
|
| 106 |
+
as actual Python lists, instead of being converted to :class:`ParseResults`::
|
| 107 |
+
|
| 108 |
+
LBRACK, RBRACK = map(pp.Suppress, "[]")
|
| 109 |
+
element = pp.Forward()
|
| 110 |
+
item = ppc.integer
|
| 111 |
+
element_list = LBRACK + pp.DelimitedList(element) + RBRACK
|
| 112 |
+
|
| 113 |
+
# add parse actions to convert from ParseResults to actual Python collection types
|
| 114 |
+
def as_python_list(t):
|
| 115 |
+
return pp.ParseResults.List(t.as_list())
|
| 116 |
+
element_list.add_parse_action(as_python_list)
|
| 117 |
+
|
| 118 |
+
element <<= item | element_list
|
| 119 |
+
|
| 120 |
+
element.run_tests('''
|
| 121 |
+
100
|
| 122 |
+
[2,3,4]
|
| 123 |
+
[[2, 1],3,4]
|
| 124 |
+
[(2, 1),3,4]
|
| 125 |
+
(2,3,4)
|
| 126 |
+
''', post_parse=lambda s, r: (r[0], type(r[0])))
|
| 127 |
+
|
| 128 |
+
prints::
|
| 129 |
+
|
| 130 |
+
100
|
| 131 |
+
(100, <class 'int'>)
|
| 132 |
+
|
| 133 |
+
[2,3,4]
|
| 134 |
+
([2, 3, 4], <class 'list'>)
|
| 135 |
+
|
| 136 |
+
[[2, 1],3,4]
|
| 137 |
+
([[2, 1], 3, 4], <class 'list'>)
|
| 138 |
+
|
| 139 |
+
(Used internally by :class:`Group` when `aslist=True`.)
|
| 140 |
+
"""
|
| 141 |
+
|
| 142 |
+
def __new__(cls, contained=None):
|
| 143 |
+
if contained is None:
|
| 144 |
+
contained = []
|
| 145 |
+
|
| 146 |
+
if not isinstance(contained, list):
|
| 147 |
+
raise TypeError(
|
| 148 |
+
f"{cls.__name__} may only be constructed with a list, not {type(contained).__name__}"
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
return list.__new__(cls)
|
| 152 |
+
|
| 153 |
+
def __new__(cls, toklist=None, name=None, **kwargs):
|
| 154 |
+
if isinstance(toklist, ParseResults):
|
| 155 |
+
return toklist
|
| 156 |
+
self = object.__new__(cls)
|
| 157 |
+
self._name = None
|
| 158 |
+
self._parent = None
|
| 159 |
+
self._all_names = set()
|
| 160 |
+
|
| 161 |
+
if toklist is None:
|
| 162 |
+
self._toklist = []
|
| 163 |
+
elif isinstance(toklist, (list, _generator_type)):
|
| 164 |
+
self._toklist = (
|
| 165 |
+
[toklist[:]]
|
| 166 |
+
if isinstance(toklist, ParseResults.List)
|
| 167 |
+
else list(toklist)
|
| 168 |
+
)
|
| 169 |
+
else:
|
| 170 |
+
self._toklist = [toklist]
|
| 171 |
+
self._tokdict = dict()
|
| 172 |
+
return self
|
| 173 |
+
|
| 174 |
+
# Performance tuning: we construct a *lot* of these, so keep this
|
| 175 |
+
# constructor as small and fast as possible
|
| 176 |
+
def __init__(
|
| 177 |
+
self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
|
| 178 |
+
) -> None:
|
| 179 |
+
self._tokdict: dict[str, _ParseResultsWithOffset]
|
| 180 |
+
self._modal = modal
|
| 181 |
+
|
| 182 |
+
if name is None or name == "":
|
| 183 |
+
return
|
| 184 |
+
|
| 185 |
+
if isinstance(name, int):
|
| 186 |
+
name = str(name)
|
| 187 |
+
|
| 188 |
+
if not modal:
|
| 189 |
+
self._all_names = {name}
|
| 190 |
+
|
| 191 |
+
self._name = name
|
| 192 |
+
|
| 193 |
+
if toklist in self._null_values:
|
| 194 |
+
return
|
| 195 |
+
|
| 196 |
+
if isinstance(toklist, (str_type, type)):
|
| 197 |
+
toklist = [toklist]
|
| 198 |
+
|
| 199 |
+
if asList:
|
| 200 |
+
if isinstance(toklist, ParseResults):
|
| 201 |
+
self[name] = _ParseResultsWithOffset(ParseResults(toklist._toklist), 0)
|
| 202 |
+
else:
|
| 203 |
+
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
|
| 204 |
+
self[name]._name = name
|
| 205 |
+
return
|
| 206 |
+
|
| 207 |
+
try:
|
| 208 |
+
self[name] = toklist[0]
|
| 209 |
+
except (KeyError, TypeError, IndexError):
|
| 210 |
+
if toklist is not self:
|
| 211 |
+
self[name] = toklist
|
| 212 |
+
else:
|
| 213 |
+
self._name = name
|
| 214 |
+
|
| 215 |
+
def __getitem__(self, i):
|
| 216 |
+
if isinstance(i, (int, slice)):
|
| 217 |
+
return self._toklist[i]
|
| 218 |
+
|
| 219 |
+
if i not in self._all_names:
|
| 220 |
+
return self._tokdict[i][-1][0]
|
| 221 |
+
|
| 222 |
+
return ParseResults([v[0] for v in self._tokdict[i]])
|
| 223 |
+
|
| 224 |
+
def __setitem__(self, k, v, isinstance=isinstance):
|
| 225 |
+
if isinstance(v, _ParseResultsWithOffset):
|
| 226 |
+
self._tokdict[k] = self._tokdict.get(k, list()) + [v]
|
| 227 |
+
sub = v[0]
|
| 228 |
+
elif isinstance(k, (int, slice)):
|
| 229 |
+
self._toklist[k] = v
|
| 230 |
+
sub = v
|
| 231 |
+
else:
|
| 232 |
+
self._tokdict[k] = self._tokdict.get(k, []) + [
|
| 233 |
+
_ParseResultsWithOffset(v, 0)
|
| 234 |
+
]
|
| 235 |
+
sub = v
|
| 236 |
+
if isinstance(sub, ParseResults):
|
| 237 |
+
sub._parent = self
|
| 238 |
+
|
| 239 |
+
def __delitem__(self, i):
|
| 240 |
+
if not isinstance(i, (int, slice)):
|
| 241 |
+
del self._tokdict[i]
|
| 242 |
+
return
|
| 243 |
+
|
| 244 |
+
mylen = len(self._toklist)
|
| 245 |
+
del self._toklist[i]
|
| 246 |
+
|
| 247 |
+
# convert int to slice
|
| 248 |
+
if isinstance(i, int):
|
| 249 |
+
if i < 0:
|
| 250 |
+
i += mylen
|
| 251 |
+
i = slice(i, i + 1)
|
| 252 |
+
# get removed indices
|
| 253 |
+
removed = list(range(*i.indices(mylen)))
|
| 254 |
+
removed.reverse()
|
| 255 |
+
# fixup indices in token dictionary
|
| 256 |
+
for occurrences in self._tokdict.values():
|
| 257 |
+
for j in removed:
|
| 258 |
+
for k, (value, position) in enumerate(occurrences):
|
| 259 |
+
occurrences[k] = _ParseResultsWithOffset(
|
| 260 |
+
value, position - (position > j)
|
| 261 |
+
)
|
| 262 |
+
|
| 263 |
+
def __contains__(self, k) -> bool:
|
| 264 |
+
return k in self._tokdict
|
| 265 |
+
|
| 266 |
+
def __len__(self) -> int:
|
| 267 |
+
return len(self._toklist)
|
| 268 |
+
|
| 269 |
+
def __bool__(self) -> bool:
|
| 270 |
+
return not not (self._toklist or self._tokdict)
|
| 271 |
+
|
| 272 |
+
def __iter__(self) -> Iterator:
|
| 273 |
+
return iter(self._toklist)
|
| 274 |
+
|
| 275 |
+
def __reversed__(self) -> Iterator:
|
| 276 |
+
return iter(self._toklist[::-1])
|
| 277 |
+
|
| 278 |
+
def keys(self):
|
| 279 |
+
return iter(self._tokdict)
|
| 280 |
+
|
| 281 |
+
def values(self):
|
| 282 |
+
return (self[k] for k in self.keys())
|
| 283 |
+
|
| 284 |
+
def items(self):
|
| 285 |
+
return ((k, self[k]) for k in self.keys())
|
| 286 |
+
|
| 287 |
+
def haskeys(self) -> bool:
|
| 288 |
+
"""
|
| 289 |
+
Since ``keys()`` returns an iterator, this method is helpful in bypassing
|
| 290 |
+
code that looks for the existence of any defined results names."""
|
| 291 |
+
return not not self._tokdict
|
| 292 |
+
|
| 293 |
+
def pop(self, *args, **kwargs):
|
| 294 |
+
"""
|
| 295 |
+
Removes and returns item at specified index (default= ``last``).
|
| 296 |
+
Supports both ``list`` and ``dict`` semantics for ``pop()``. If
|
| 297 |
+
passed no argument or an integer argument, it will use ``list``
|
| 298 |
+
semantics and pop tokens from the list of parsed tokens. If passed
|
| 299 |
+
a non-integer argument (most likely a string), it will use ``dict``
|
| 300 |
+
semantics and pop the corresponding value from any defined results
|
| 301 |
+
names. A second default return value argument is supported, just as in
|
| 302 |
+
``dict.pop()``.
|
| 303 |
+
|
| 304 |
+
Example::
|
| 305 |
+
|
| 306 |
+
numlist = Word(nums)[...]
|
| 307 |
+
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
|
| 308 |
+
|
| 309 |
+
def remove_first(tokens):
|
| 310 |
+
tokens.pop(0)
|
| 311 |
+
numlist.add_parse_action(remove_first)
|
| 312 |
+
print(numlist.parse_string("0 123 321")) # -> ['123', '321']
|
| 313 |
+
|
| 314 |
+
label = Word(alphas)
|
| 315 |
+
patt = label("LABEL") + Word(nums)[1, ...]
|
| 316 |
+
print(patt.parse_string("AAB 123 321").dump())
|
| 317 |
+
|
| 318 |
+
# Use pop() in a parse action to remove named result (note that corresponding value is not
|
| 319 |
+
# removed from list form of results)
|
| 320 |
+
def remove_LABEL(tokens):
|
| 321 |
+
tokens.pop("LABEL")
|
| 322 |
+
return tokens
|
| 323 |
+
patt.add_parse_action(remove_LABEL)
|
| 324 |
+
print(patt.parse_string("AAB 123 321").dump())
|
| 325 |
+
|
| 326 |
+
prints::
|
| 327 |
+
|
| 328 |
+
['AAB', '123', '321']
|
| 329 |
+
- LABEL: 'AAB'
|
| 330 |
+
|
| 331 |
+
['AAB', '123', '321']
|
| 332 |
+
"""
|
| 333 |
+
if not args:
|
| 334 |
+
args = [-1]
|
| 335 |
+
for k, v in kwargs.items():
|
| 336 |
+
if k == "default":
|
| 337 |
+
args = (args[0], v)
|
| 338 |
+
else:
|
| 339 |
+
raise TypeError(f"pop() got an unexpected keyword argument {k!r}")
|
| 340 |
+
if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
|
| 341 |
+
index = args[0]
|
| 342 |
+
ret = self[index]
|
| 343 |
+
del self[index]
|
| 344 |
+
return ret
|
| 345 |
+
else:
|
| 346 |
+
defaultvalue = args[1]
|
| 347 |
+
return defaultvalue
|
| 348 |
+
|
| 349 |
+
def get(self, key, default_value=None):
|
| 350 |
+
"""
|
| 351 |
+
Returns named result matching the given key, or if there is no
|
| 352 |
+
such name, then returns the given ``default_value`` or ``None`` if no
|
| 353 |
+
``default_value`` is specified.
|
| 354 |
+
|
| 355 |
+
Similar to ``dict.get()``.
|
| 356 |
+
|
| 357 |
+
Example::
|
| 358 |
+
|
| 359 |
+
integer = Word(nums)
|
| 360 |
+
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
|
| 361 |
+
|
| 362 |
+
result = date_str.parse_string("1999/12/31")
|
| 363 |
+
print(result.get("year")) # -> '1999'
|
| 364 |
+
print(result.get("hour", "not specified")) # -> 'not specified'
|
| 365 |
+
print(result.get("hour")) # -> None
|
| 366 |
+
"""
|
| 367 |
+
if key in self:
|
| 368 |
+
return self[key]
|
| 369 |
+
else:
|
| 370 |
+
return default_value
|
| 371 |
+
|
| 372 |
+
def insert(self, index, ins_string):
|
| 373 |
+
"""
|
| 374 |
+
Inserts new element at location index in the list of parsed tokens.
|
| 375 |
+
|
| 376 |
+
Similar to ``list.insert()``.
|
| 377 |
+
|
| 378 |
+
Example::
|
| 379 |
+
|
| 380 |
+
numlist = Word(nums)[...]
|
| 381 |
+
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
|
| 382 |
+
|
| 383 |
+
# use a parse action to insert the parse location in the front of the parsed results
|
| 384 |
+
def insert_locn(locn, tokens):
|
| 385 |
+
tokens.insert(0, locn)
|
| 386 |
+
numlist.add_parse_action(insert_locn)
|
| 387 |
+
print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
|
| 388 |
+
"""
|
| 389 |
+
self._toklist.insert(index, ins_string)
|
| 390 |
+
# fixup indices in token dictionary
|
| 391 |
+
for occurrences in self._tokdict.values():
|
| 392 |
+
for k, (value, position) in enumerate(occurrences):
|
| 393 |
+
occurrences[k] = _ParseResultsWithOffset(
|
| 394 |
+
value, position + (position > index)
|
| 395 |
+
)
|
| 396 |
+
|
| 397 |
+
def append(self, item):
|
| 398 |
+
"""
|
| 399 |
+
Add single element to end of ``ParseResults`` list of elements.
|
| 400 |
+
|
| 401 |
+
Example::
|
| 402 |
+
|
| 403 |
+
numlist = Word(nums)[...]
|
| 404 |
+
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
|
| 405 |
+
|
| 406 |
+
# use a parse action to compute the sum of the parsed integers, and add it to the end
|
| 407 |
+
def append_sum(tokens):
|
| 408 |
+
tokens.append(sum(map(int, tokens)))
|
| 409 |
+
numlist.add_parse_action(append_sum)
|
| 410 |
+
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
|
| 411 |
+
"""
|
| 412 |
+
self._toklist.append(item)
|
| 413 |
+
|
| 414 |
+
def extend(self, itemseq):
|
| 415 |
+
"""
|
| 416 |
+
Add sequence of elements to end of ``ParseResults`` list of elements.
|
| 417 |
+
|
| 418 |
+
Example::
|
| 419 |
+
|
| 420 |
+
patt = Word(alphas)[1, ...]
|
| 421 |
+
|
| 422 |
+
# use a parse action to append the reverse of the matched strings, to make a palindrome
|
| 423 |
+
def make_palindrome(tokens):
|
| 424 |
+
tokens.extend(reversed([t[::-1] for t in tokens]))
|
| 425 |
+
return ''.join(tokens)
|
| 426 |
+
patt.add_parse_action(make_palindrome)
|
| 427 |
+
print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
|
| 428 |
+
"""
|
| 429 |
+
if isinstance(itemseq, ParseResults):
|
| 430 |
+
self.__iadd__(itemseq)
|
| 431 |
+
else:
|
| 432 |
+
self._toklist.extend(itemseq)
|
| 433 |
+
|
| 434 |
+
def clear(self):
|
| 435 |
+
"""
|
| 436 |
+
Clear all elements and results names.
|
| 437 |
+
"""
|
| 438 |
+
del self._toklist[:]
|
| 439 |
+
self._tokdict.clear()
|
| 440 |
+
|
| 441 |
+
def __getattr__(self, name):
|
| 442 |
+
try:
|
| 443 |
+
return self[name]
|
| 444 |
+
except KeyError:
|
| 445 |
+
if name.startswith("__"):
|
| 446 |
+
raise AttributeError(name)
|
| 447 |
+
return ""
|
| 448 |
+
|
| 449 |
+
def __add__(self, other: ParseResults) -> ParseResults:
|
| 450 |
+
ret = self.copy()
|
| 451 |
+
ret += other
|
| 452 |
+
return ret
|
| 453 |
+
|
| 454 |
+
def __iadd__(self, other: ParseResults) -> ParseResults:
|
| 455 |
+
if not other:
|
| 456 |
+
return self
|
| 457 |
+
|
| 458 |
+
if other._tokdict:
|
| 459 |
+
offset = len(self._toklist)
|
| 460 |
+
addoffset = lambda a: offset if a < 0 else a + offset
|
| 461 |
+
otheritems = other._tokdict.items()
|
| 462 |
+
otherdictitems = [
|
| 463 |
+
(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
|
| 464 |
+
for k, vlist in otheritems
|
| 465 |
+
for v in vlist
|
| 466 |
+
]
|
| 467 |
+
for k, v in otherdictitems:
|
| 468 |
+
self[k] = v
|
| 469 |
+
if isinstance(v[0], ParseResults):
|
| 470 |
+
v[0]._parent = self
|
| 471 |
+
|
| 472 |
+
self._toklist += other._toklist
|
| 473 |
+
self._all_names |= other._all_names
|
| 474 |
+
return self
|
| 475 |
+
|
| 476 |
+
def __radd__(self, other) -> ParseResults:
|
| 477 |
+
if isinstance(other, int) and other == 0:
|
| 478 |
+
# useful for merging many ParseResults using sum() builtin
|
| 479 |
+
return self.copy()
|
| 480 |
+
else:
|
| 481 |
+
# this may raise a TypeError - so be it
|
| 482 |
+
return other + self
|
| 483 |
+
|
| 484 |
+
def __repr__(self) -> str:
|
| 485 |
+
return f"{type(self).__name__}({self._toklist!r}, {self.as_dict()})"
|
| 486 |
+
|
| 487 |
+
def __str__(self) -> str:
|
| 488 |
+
return (
|
| 489 |
+
"["
|
| 490 |
+
+ ", ".join(
|
| 491 |
+
[
|
| 492 |
+
str(i) if isinstance(i, ParseResults) else repr(i)
|
| 493 |
+
for i in self._toklist
|
| 494 |
+
]
|
| 495 |
+
)
|
| 496 |
+
+ "]"
|
| 497 |
+
)
|
| 498 |
+
|
| 499 |
+
def _asStringList(self, sep=""):
|
| 500 |
+
out = []
|
| 501 |
+
for item in self._toklist:
|
| 502 |
+
if out and sep:
|
| 503 |
+
out.append(sep)
|
| 504 |
+
if isinstance(item, ParseResults):
|
| 505 |
+
out += item._asStringList()
|
| 506 |
+
else:
|
| 507 |
+
out.append(str(item))
|
| 508 |
+
return out
|
| 509 |
+
|
| 510 |
+
def as_list(self, *, flatten: bool = False) -> list:
|
| 511 |
+
"""
|
| 512 |
+
Returns the parse results as a nested list of matching tokens, all converted to strings.
|
| 513 |
+
If flatten is True, all the nesting levels in the returned list are collapsed.
|
| 514 |
+
|
| 515 |
+
Example::
|
| 516 |
+
|
| 517 |
+
patt = Word(alphas)[1, ...]
|
| 518 |
+
result = patt.parse_string("sldkj lsdkj sldkj")
|
| 519 |
+
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
|
| 520 |
+
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
|
| 521 |
+
|
| 522 |
+
# Use as_list() to create an actual list
|
| 523 |
+
result_list = result.as_list()
|
| 524 |
+
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
|
| 525 |
+
"""
|
| 526 |
+
def flattened(pr):
|
| 527 |
+
to_visit = collections.deque([*self])
|
| 528 |
+
while to_visit:
|
| 529 |
+
to_do = to_visit.popleft()
|
| 530 |
+
if isinstance(to_do, ParseResults):
|
| 531 |
+
to_visit.extendleft(to_do[::-1])
|
| 532 |
+
else:
|
| 533 |
+
yield to_do
|
| 534 |
+
|
| 535 |
+
if flatten:
|
| 536 |
+
return [*flattened(self)]
|
| 537 |
+
else:
|
| 538 |
+
return [
|
| 539 |
+
res.as_list() if isinstance(res, ParseResults) else res
|
| 540 |
+
for res in self._toklist
|
| 541 |
+
]
|
| 542 |
+
|
| 543 |
+
def as_dict(self) -> dict:
|
| 544 |
+
"""
|
| 545 |
+
Returns the named parse results as a nested dictionary.
|
| 546 |
+
|
| 547 |
+
Example::
|
| 548 |
+
|
| 549 |
+
integer = Word(nums)
|
| 550 |
+
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
|
| 551 |
+
|
| 552 |
+
result = date_str.parse_string('12/31/1999')
|
| 553 |
+
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
|
| 554 |
+
|
| 555 |
+
result_dict = result.as_dict()
|
| 556 |
+
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
|
| 557 |
+
|
| 558 |
+
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
|
| 559 |
+
import json
|
| 560 |
+
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
|
| 561 |
+
print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
|
| 562 |
+
"""
|
| 563 |
+
|
| 564 |
+
def to_item(obj):
|
| 565 |
+
if isinstance(obj, ParseResults):
|
| 566 |
+
return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
|
| 567 |
+
else:
|
| 568 |
+
return obj
|
| 569 |
+
|
| 570 |
+
return dict((k, to_item(v)) for k, v in self.items())
|
| 571 |
+
|
| 572 |
+
def copy(self) -> ParseResults:
|
| 573 |
+
"""
|
| 574 |
+
Returns a new shallow copy of a :class:`ParseResults` object. `ParseResults`
|
| 575 |
+
items contained within the source are shared with the copy. Use
|
| 576 |
+
:class:`ParseResults.deepcopy()` to create a copy with its own separate
|
| 577 |
+
content values.
|
| 578 |
+
"""
|
| 579 |
+
ret = ParseResults(self._toklist)
|
| 580 |
+
ret._tokdict = self._tokdict.copy()
|
| 581 |
+
ret._parent = self._parent
|
| 582 |
+
ret._all_names |= self._all_names
|
| 583 |
+
ret._name = self._name
|
| 584 |
+
return ret
|
| 585 |
+
|
| 586 |
+
def deepcopy(self) -> ParseResults:
|
| 587 |
+
"""
|
| 588 |
+
Returns a new deep copy of a :class:`ParseResults` object.
|
| 589 |
+
"""
|
| 590 |
+
ret = self.copy()
|
| 591 |
+
# replace values with copies if they are of known mutable types
|
| 592 |
+
for i, obj in enumerate(self._toklist):
|
| 593 |
+
if isinstance(obj, ParseResults):
|
| 594 |
+
ret._toklist[i] = obj.deepcopy()
|
| 595 |
+
elif isinstance(obj, (str, bytes)):
|
| 596 |
+
pass
|
| 597 |
+
elif isinstance(obj, MutableMapping):
|
| 598 |
+
ret._toklist[i] = dest = type(obj)()
|
| 599 |
+
for k, v in obj.items():
|
| 600 |
+
dest[k] = v.deepcopy() if isinstance(v, ParseResults) else v
|
| 601 |
+
elif isinstance(obj, Iterable):
|
| 602 |
+
ret._toklist[i] = type(obj)(
|
| 603 |
+
v.deepcopy() if isinstance(v, ParseResults) else v for v in obj # type: ignore[call-arg]
|
| 604 |
+
)
|
| 605 |
+
return ret
|
| 606 |
+
|
| 607 |
+
def get_name(self) -> str | None:
|
| 608 |
+
r"""
|
| 609 |
+
Returns the results name for this token expression. Useful when several
|
| 610 |
+
different expressions might match at a particular location.
|
| 611 |
+
|
| 612 |
+
Example::
|
| 613 |
+
|
| 614 |
+
integer = Word(nums)
|
| 615 |
+
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
|
| 616 |
+
house_number_expr = Suppress('#') + Word(nums, alphanums)
|
| 617 |
+
user_data = (Group(house_number_expr)("house_number")
|
| 618 |
+
| Group(ssn_expr)("ssn")
|
| 619 |
+
| Group(integer)("age"))
|
| 620 |
+
user_info = user_data[1, ...]
|
| 621 |
+
|
| 622 |
+
result = user_info.parse_string("22 111-22-3333 #221B")
|
| 623 |
+
for item in result:
|
| 624 |
+
print(item.get_name(), ':', item[0])
|
| 625 |
+
|
| 626 |
+
prints::
|
| 627 |
+
|
| 628 |
+
age : 22
|
| 629 |
+
ssn : 111-22-3333
|
| 630 |
+
house_number : 221B
|
| 631 |
+
"""
|
| 632 |
+
if self._name:
|
| 633 |
+
return self._name
|
| 634 |
+
elif self._parent:
|
| 635 |
+
par: ParseResults = self._parent
|
| 636 |
+
parent_tokdict_items = par._tokdict.items()
|
| 637 |
+
return next(
|
| 638 |
+
(
|
| 639 |
+
k
|
| 640 |
+
for k, vlist in parent_tokdict_items
|
| 641 |
+
for v, loc in vlist
|
| 642 |
+
if v is self
|
| 643 |
+
),
|
| 644 |
+
None,
|
| 645 |
+
)
|
| 646 |
+
elif (
|
| 647 |
+
len(self) == 1
|
| 648 |
+
and len(self._tokdict) == 1
|
| 649 |
+
and next(iter(self._tokdict.values()))[0][1] in (0, -1)
|
| 650 |
+
):
|
| 651 |
+
return next(iter(self._tokdict.keys()))
|
| 652 |
+
else:
|
| 653 |
+
return None
|
| 654 |
+
|
| 655 |
+
def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
|
| 656 |
+
"""
|
| 657 |
+
Diagnostic method for listing out the contents of
|
| 658 |
+
a :class:`ParseResults`. Accepts an optional ``indent`` argument so
|
| 659 |
+
that this string can be embedded in a nested display of other data.
|
| 660 |
+
|
| 661 |
+
Example::
|
| 662 |
+
|
| 663 |
+
integer = Word(nums)
|
| 664 |
+
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
|
| 665 |
+
|
| 666 |
+
result = date_str.parse_string('1999/12/31')
|
| 667 |
+
print(result.dump())
|
| 668 |
+
|
| 669 |
+
prints::
|
| 670 |
+
|
| 671 |
+
['1999', '/', '12', '/', '31']
|
| 672 |
+
- day: '31'
|
| 673 |
+
- month: '12'
|
| 674 |
+
- year: '1999'
|
| 675 |
+
"""
|
| 676 |
+
out = []
|
| 677 |
+
NL = "\n"
|
| 678 |
+
out.append(indent + str(self.as_list()) if include_list else "")
|
| 679 |
+
|
| 680 |
+
if not full:
|
| 681 |
+
return "".join(out)
|
| 682 |
+
|
| 683 |
+
if self.haskeys():
|
| 684 |
+
items = sorted((str(k), v) for k, v in self.items())
|
| 685 |
+
for k, v in items:
|
| 686 |
+
if out:
|
| 687 |
+
out.append(NL)
|
| 688 |
+
out.append(f"{indent}{(' ' * _depth)}- {k}: ")
|
| 689 |
+
if not isinstance(v, ParseResults):
|
| 690 |
+
out.append(repr(v))
|
| 691 |
+
continue
|
| 692 |
+
|
| 693 |
+
if not v:
|
| 694 |
+
out.append(str(v))
|
| 695 |
+
continue
|
| 696 |
+
|
| 697 |
+
out.append(
|
| 698 |
+
v.dump(
|
| 699 |
+
indent=indent,
|
| 700 |
+
full=full,
|
| 701 |
+
include_list=include_list,
|
| 702 |
+
_depth=_depth + 1,
|
| 703 |
+
)
|
| 704 |
+
)
|
| 705 |
+
if not any(isinstance(vv, ParseResults) for vv in self):
|
| 706 |
+
return "".join(out)
|
| 707 |
+
|
| 708 |
+
v = self
|
| 709 |
+
incr = " "
|
| 710 |
+
nl = "\n"
|
| 711 |
+
for i, vv in enumerate(v):
|
| 712 |
+
if isinstance(vv, ParseResults):
|
| 713 |
+
vv_dump = vv.dump(
|
| 714 |
+
indent=indent,
|
| 715 |
+
full=full,
|
| 716 |
+
include_list=include_list,
|
| 717 |
+
_depth=_depth + 1,
|
| 718 |
+
)
|
| 719 |
+
out.append(
|
| 720 |
+
f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv_dump}"
|
| 721 |
+
)
|
| 722 |
+
else:
|
| 723 |
+
out.append(
|
| 724 |
+
f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv}"
|
| 725 |
+
)
|
| 726 |
+
|
| 727 |
+
return "".join(out)
|
| 728 |
+
|
| 729 |
+
def pprint(self, *args, **kwargs):
|
| 730 |
+
"""
|
| 731 |
+
Pretty-printer for parsed results as a list, using the
|
| 732 |
+
`pprint <https://docs.python.org/3/library/pprint.html>`_ module.
|
| 733 |
+
Accepts additional positional or keyword args as defined for
|
| 734 |
+
`pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
|
| 735 |
+
|
| 736 |
+
Example::
|
| 737 |
+
|
| 738 |
+
ident = Word(alphas, alphanums)
|
| 739 |
+
num = Word(nums)
|
| 740 |
+
func = Forward()
|
| 741 |
+
term = ident | num | Group('(' + func + ')')
|
| 742 |
+
func <<= ident + Group(Optional(DelimitedList(term)))
|
| 743 |
+
result = func.parse_string("fna a,b,(fnb c,d,200),100")
|
| 744 |
+
result.pprint(width=40)
|
| 745 |
+
|
| 746 |
+
prints::
|
| 747 |
+
|
| 748 |
+
['fna',
|
| 749 |
+
['a',
|
| 750 |
+
'b',
|
| 751 |
+
['(', 'fnb', ['c', 'd', '200'], ')'],
|
| 752 |
+
'100']]
|
| 753 |
+
"""
|
| 754 |
+
pprint.pprint(self.as_list(), *args, **kwargs)
|
| 755 |
+
|
| 756 |
+
# add support for pickle protocol
|
| 757 |
+
def __getstate__(self):
|
| 758 |
+
return (
|
| 759 |
+
self._toklist,
|
| 760 |
+
(
|
| 761 |
+
self._tokdict.copy(),
|
| 762 |
+
None,
|
| 763 |
+
self._all_names,
|
| 764 |
+
self._name,
|
| 765 |
+
),
|
| 766 |
+
)
|
| 767 |
+
|
| 768 |
+
def __setstate__(self, state):
|
| 769 |
+
self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
|
| 770 |
+
self._all_names = set(inAccumNames)
|
| 771 |
+
self._parent = None
|
| 772 |
+
|
| 773 |
+
def __getnewargs__(self):
|
| 774 |
+
return self._toklist, self._name
|
| 775 |
+
|
| 776 |
+
def __dir__(self):
|
| 777 |
+
return dir(type(self)) + list(self.keys())
|
| 778 |
+
|
| 779 |
+
@classmethod
|
| 780 |
+
def from_dict(cls, other, name=None) -> ParseResults:
|
| 781 |
+
"""
|
| 782 |
+
Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
|
| 783 |
+
name-value relations as results names. If an optional ``name`` argument is
|
| 784 |
+
given, a nested ``ParseResults`` will be returned.
|
| 785 |
+
"""
|
| 786 |
+
|
| 787 |
+
def is_iterable(obj):
|
| 788 |
+
try:
|
| 789 |
+
iter(obj)
|
| 790 |
+
except Exception:
|
| 791 |
+
return False
|
| 792 |
+
# str's are iterable, but in pyparsing, we don't want to iterate over them
|
| 793 |
+
else:
|
| 794 |
+
return not isinstance(obj, str_type)
|
| 795 |
+
|
| 796 |
+
ret = cls([])
|
| 797 |
+
for k, v in other.items():
|
| 798 |
+
if isinstance(v, Mapping):
|
| 799 |
+
ret += cls.from_dict(v, name=k)
|
| 800 |
+
else:
|
| 801 |
+
ret += cls([v], name=k, asList=is_iterable(v))
|
| 802 |
+
if name is not None:
|
| 803 |
+
ret = cls([ret], name=name)
|
| 804 |
+
return ret
|
| 805 |
+
|
| 806 |
+
asList = as_list
|
| 807 |
+
"""Deprecated - use :class:`as_list`"""
|
| 808 |
+
asDict = as_dict
|
| 809 |
+
"""Deprecated - use :class:`as_dict`"""
|
| 810 |
+
getName = get_name
|
| 811 |
+
"""Deprecated - use :class:`get_name`"""
|
| 812 |
+
|
| 813 |
+
|
| 814 |
+
MutableMapping.register(ParseResults)
|
| 815 |
+
MutableSequence.register(ParseResults)
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/testing.py
ADDED
|
@@ -0,0 +1,362 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# testing.py
|
| 2 |
+
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
import re
|
| 5 |
+
import typing
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
from .core import (
|
| 9 |
+
ParserElement,
|
| 10 |
+
ParseException,
|
| 11 |
+
Keyword,
|
| 12 |
+
__diag__,
|
| 13 |
+
__compat__,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class pyparsing_test:
|
| 18 |
+
"""
|
| 19 |
+
namespace class for classes useful in writing unit tests
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
class reset_pyparsing_context:
|
| 23 |
+
"""
|
| 24 |
+
Context manager to be used when writing unit tests that modify pyparsing config values:
|
| 25 |
+
- packrat parsing
|
| 26 |
+
- bounded recursion parsing
|
| 27 |
+
- default whitespace characters.
|
| 28 |
+
- default keyword characters
|
| 29 |
+
- literal string auto-conversion class
|
| 30 |
+
- __diag__ settings
|
| 31 |
+
|
| 32 |
+
Example::
|
| 33 |
+
|
| 34 |
+
with reset_pyparsing_context():
|
| 35 |
+
# test that literals used to construct a grammar are automatically suppressed
|
| 36 |
+
ParserElement.inlineLiteralsUsing(Suppress)
|
| 37 |
+
|
| 38 |
+
term = Word(alphas) | Word(nums)
|
| 39 |
+
group = Group('(' + term[...] + ')')
|
| 40 |
+
|
| 41 |
+
# assert that the '()' characters are not included in the parsed tokens
|
| 42 |
+
self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
|
| 43 |
+
|
| 44 |
+
# after exiting context manager, literals are converted to Literal expressions again
|
| 45 |
+
"""
|
| 46 |
+
|
| 47 |
+
def __init__(self):
|
| 48 |
+
self._save_context = {}
|
| 49 |
+
|
| 50 |
+
def save(self):
|
| 51 |
+
self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
|
| 52 |
+
self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
|
| 53 |
+
|
| 54 |
+
self._save_context["literal_string_class"] = (
|
| 55 |
+
ParserElement._literalStringClass
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
|
| 59 |
+
|
| 60 |
+
self._save_context["packrat_enabled"] = ParserElement._packratEnabled
|
| 61 |
+
if ParserElement._packratEnabled:
|
| 62 |
+
self._save_context["packrat_cache_size"] = (
|
| 63 |
+
ParserElement.packrat_cache.size
|
| 64 |
+
)
|
| 65 |
+
else:
|
| 66 |
+
self._save_context["packrat_cache_size"] = None
|
| 67 |
+
self._save_context["packrat_parse"] = ParserElement._parse
|
| 68 |
+
self._save_context["recursion_enabled"] = (
|
| 69 |
+
ParserElement._left_recursion_enabled
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
self._save_context["__diag__"] = {
|
| 73 |
+
name: getattr(__diag__, name) for name in __diag__._all_names
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
self._save_context["__compat__"] = {
|
| 77 |
+
"collect_all_And_tokens": __compat__.collect_all_And_tokens
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
return self
|
| 81 |
+
|
| 82 |
+
def restore(self):
|
| 83 |
+
# reset pyparsing global state
|
| 84 |
+
if (
|
| 85 |
+
ParserElement.DEFAULT_WHITE_CHARS
|
| 86 |
+
!= self._save_context["default_whitespace"]
|
| 87 |
+
):
|
| 88 |
+
ParserElement.set_default_whitespace_chars(
|
| 89 |
+
self._save_context["default_whitespace"]
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
|
| 93 |
+
|
| 94 |
+
Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
|
| 95 |
+
ParserElement.inlineLiteralsUsing(
|
| 96 |
+
self._save_context["literal_string_class"]
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
for name, value in self._save_context["__diag__"].items():
|
| 100 |
+
(__diag__.enable if value else __diag__.disable)(name)
|
| 101 |
+
|
| 102 |
+
ParserElement._packratEnabled = False
|
| 103 |
+
if self._save_context["packrat_enabled"]:
|
| 104 |
+
ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
|
| 105 |
+
else:
|
| 106 |
+
ParserElement._parse = self._save_context["packrat_parse"]
|
| 107 |
+
ParserElement._left_recursion_enabled = self._save_context[
|
| 108 |
+
"recursion_enabled"
|
| 109 |
+
]
|
| 110 |
+
|
| 111 |
+
__compat__.collect_all_And_tokens = self._save_context["__compat__"]
|
| 112 |
+
|
| 113 |
+
return self
|
| 114 |
+
|
| 115 |
+
def copy(self):
|
| 116 |
+
ret = type(self)()
|
| 117 |
+
ret._save_context.update(self._save_context)
|
| 118 |
+
return ret
|
| 119 |
+
|
| 120 |
+
def __enter__(self):
|
| 121 |
+
return self.save()
|
| 122 |
+
|
| 123 |
+
def __exit__(self, *args):
|
| 124 |
+
self.restore()
|
| 125 |
+
|
| 126 |
+
class TestParseResultsAsserts:
    """
    A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
    """

    def assertParseResultsEquals(
        self, result, expected_list=None, expected_dict=None, msg=None
    ):
        """
        Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
        and compare any defined results names with an optional ``expected_dict``.
        """
        if expected_list is not None:
            self.assertEqual(expected_list, result.as_list(), msg=msg)
        if expected_dict is not None:
            self.assertEqual(expected_dict, result.as_dict(), msg=msg)

    def assertParseAndCheckList(
        self, expr, test_string, expected_list, msg=None, verbose=True
    ):
        """
        Convenience wrapper assert to test a parser element and input string, and assert that
        the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
        """
        result = expr.parse_string(test_string, parse_all=True)
        if verbose:
            print(result.dump())
        else:
            print(result.as_list())
        self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)

    def assertParseAndCheckDict(
        self, expr, test_string, expected_dict, msg=None, verbose=True
    ):
        """
        Convenience wrapper assert to test a parser element and input string, and assert that
        the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
        """
        # Use the snake_case keyword for consistency with assertParseAndCheckList
        # (``parseAll`` is the legacy camelCase alias of ``parse_all``).
        result = expr.parse_string(test_string, parse_all=True)
        if verbose:
            print(result.dump())
        else:
            print(result.as_list())
        self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)

    def assertRunTestResults(
        self, run_tests_report, expected_parse_results=None, msg=None
    ):
        """
        Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
        list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
        with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
        Finally, asserts that the overall ``runTests()`` success value is ``True``.

        :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
        :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
        """
        run_test_success, run_test_results = run_tests_report

        if expected_parse_results is None:
            # no per-test expectations - just check the overall success flag
            self.assertTrue(
                run_test_success, msg=msg if msg is not None else "failed runTests"
            )
            return

        merged = [
            (*rpt, expected)
            for rpt, expected in zip(run_test_results, expected_parse_results)
        ]
        for test_string, result, expected in merged:
            # expected should be a tuple containing a list and/or a dict or an exception,
            # and optional failure message string
            # an empty tuple will skip any result validation
            fail_msg = next((exp for exp in expected if isinstance(exp, str)), None)
            expected_exception = next(
                (
                    exp
                    for exp in expected
                    if isinstance(exp, type) and issubclass(exp, Exception)
                ),
                None,
            )
            if expected_exception is not None:
                # the test was expected to fail with this exception type;
                # re-raise the captured result inside assertRaises
                with self.assertRaises(
                    expected_exception=expected_exception, msg=fail_msg or msg
                ):
                    if isinstance(result, Exception):
                        raise result
            else:
                expected_list = next(
                    (exp for exp in expected if isinstance(exp, list)), None
                )
                expected_dict = next(
                    (exp for exp in expected if isinstance(exp, dict)), None
                )
                if (expected_list, expected_dict) != (None, None):
                    self.assertParseResultsEquals(
                        result,
                        expected_list=expected_list,
                        expected_dict=expected_dict,
                        msg=fail_msg or msg,
                    )
                else:
                    # warning here maybe?
                    print(f"no validation for {test_string!r}")

        # do this last, in case some specific test results can be reported instead
        self.assertTrue(
            run_test_success, msg=msg if msg is not None else "failed runTests"
        )

    @contextmanager
    def assertRaisesParseException(
        self, exc_type=ParseException, expected_msg=None, msg=None
    ):
        """
        Context manager asserting that the enclosed code raises ``exc_type``
        (default :class:`ParseException`); if ``expected_msg`` is given, the
        exception message must also match it (plain strings are re.escape'd).
        """
        if expected_msg is not None:
            if isinstance(expected_msg, str):
                expected_msg = re.escape(expected_msg)
            with self.assertRaisesRegex(exc_type, expected_msg, msg=msg) as ctx:
                yield ctx

        else:
            with self.assertRaises(exc_type, msg=msg) as ctx:
                yield ctx
|
| 250 |
+
|
| 251 |
+
@staticmethod
def with_line_numbers(
    s: str,
    start_line: typing.Optional[int] = None,
    end_line: typing.Optional[int] = None,
    expand_tabs: bool = True,
    eol_mark: str = "|",
    mark_spaces: typing.Optional[str] = None,
    mark_control: typing.Optional[str] = None,
    *,
    indent: typing.Union[str, int] = "",
    base_1: bool = True,
) -> str:
    """
    Helpful method for debugging a parser - prints a string with line and column numbers.
    (Line and column numbers are 1-based by default - if debugging a parse action,
    pass base_1=False, to correspond to the loc value passed to the parse action.)

    :param s: str - string to be printed with line and column numbers
    :param start_line: int - (optional) starting line number in s to print (default=1)
    :param end_line: int - (optional) ending line number in s to print (default=len(s))
    :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
    :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
    :param mark_spaces: str - (optional) special character to display in place of spaces
    :param mark_control: str - (optional) convert non-printing control characters to a placeholding
        character; valid values:
        - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
        - any single character string - replace control characters with given string
        - None (default) - string is displayed as-is
    :param indent: str | int - (optional) string to indent with line and column numbers; if an int
        is passed, converted to " " * indent
    :param base_1: bool - (optional) whether to label string using base 1; if False, string will be
        labeled based at 0 (default=True)

    :return: str - input string with leading line numbers and column number headers
    """
    # Normalize the input text and the indent prefix.
    if expand_tabs:
        s = s.expandtabs()
    if isinstance(indent, int):
        indent = " " * indent
    indent = indent.expandtabs()
    if mark_control is not None:
        mark_control = typing.cast(str, mark_control)
        if mark_control == "unicode":
            # Map C0 controls (0x00-0x20) to the Unicode Control Pictures
            # block (0x2400-0x2420), and DEL (127) to its symbol.
            transtable_map = {
                c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))
            }
            transtable_map[127] = 0x2421
            tbl = str.maketrans(transtable_map)
            # newline symbols are visible, so the end-of-line marker is redundant
            eol_mark = ""
        else:
            # replace every control char with the single given character
            ord_mark_control = ord(mark_control)
            tbl = str.maketrans(
                {c: ord_mark_control for c in list(range(0, 32)) + [127]}
            )
        s = s.translate(tbl)
    if mark_spaces is not None and mark_spaces != " ":
        if mark_spaces == "unicode":
            # tab -> ␉ (0x2409), space -> ␣ (0x2423)
            tbl = str.maketrans({9: 0x2409, 32: 0x2423})
            s = s.translate(tbl)
        else:
            s = s.replace(" ", mark_spaces)
    # Clamp the requested line window to the available text.
    if start_line is None:
        start_line = 0
    if end_line is None:
        end_line = len(s)
    end_line = min(end_line, len(s))
    start_line = min(max(0, start_line), end_line)

    if mark_control != "unicode":
        s_lines = s.splitlines()[start_line - base_1 : end_line]
    else:
        # splitlines() would not split on the ␊ symbols, so split manually
        # and re-append the symbol to keep it visible
        s_lines = [
            line + "␊" for line in s.split("␊")[start_line - base_1 : end_line]
        ]
    if not s_lines:
        return ""

    lineno_width = len(str(end_line))
    max_line_len = max(len(line) for line in s_lines)
    # common prefix of the column-header rows: indent + line-number gutter
    lead = indent + " " * (lineno_width + 1)
    if max_line_len >= 99:
        # hundreds-digit header row for very long lines
        header0 = (
            lead
            + ("" if base_1 else " ")
            + "".join(
                f"{' ' * 99}{(i + 1) % 100}"
                for i in range(1 if base_1 else 0, max(max_line_len // 100, 1))
            )
            + "\n"
        )
    else:
        header0 = ""
    # tens-digit header row
    header1 = (
        ("" if base_1 else " ")
        + lead
        + "".join(f"         {(i + 1) % 10}" for i in range(-(-max_line_len // 10)))
        + "\n"
    )
    digits = "1234567890"
    # ones-digit header row (prefixed with "0" when labeling from base 0)
    header2 = (
        lead + ("" if base_1 else "0") + digits * (-(-max_line_len // 10)) + "\n"
    )
    # NOTE(review): header0 is computed above but not included in the value
    # returned here - confirm against upstream pyparsing whether the
    # hundreds header was intended to be prepended.
    return (
        header1
        + header2
        + "\n".join(
            f"{indent}{i:{lineno_width}d}:{line}{eol_mark}"
            for i, line in enumerate(s_lines, start=start_line + base_1)
        )
        + "\n"
    )
|
evalkit_tf437/lib/python3.10/site-packages/pyparsing/unicode.py
ADDED
|
@@ -0,0 +1,356 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# unicode.py
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
from itertools import filterfalse
|
| 5 |
+
from typing import Union
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class _lazyclassproperty:
|
| 9 |
+
def __init__(self, fn):
|
| 10 |
+
self.fn = fn
|
| 11 |
+
self.__doc__ = fn.__doc__
|
| 12 |
+
self.__name__ = fn.__name__
|
| 13 |
+
|
| 14 |
+
def __get__(self, obj, cls):
|
| 15 |
+
if cls is None:
|
| 16 |
+
cls = type(obj)
|
| 17 |
+
if not hasattr(cls, "_intern") or any(
|
| 18 |
+
cls._intern is getattr(superclass, "_intern", [])
|
| 19 |
+
for superclass in cls.__mro__[1:]
|
| 20 |
+
):
|
| 21 |
+
cls._intern = {}
|
| 22 |
+
attrname = self.fn.__name__
|
| 23 |
+
if attrname not in cls._intern:
|
| 24 |
+
cls._intern[attrname] = self.fn(cls)
|
| 25 |
+
return cls._intern[attrname]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Type alias for a list of Unicode code-point ranges: each entry is either an
# inclusive (first, last) 2-tuple, or a 1-tuple (cp,) meaning the single code
# point (cp, cp).
UnicodeRangeList = list[Union[tuple[int, int], tuple[int]]]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class unicode_set:
    """
    A set of Unicode characters, for language-specific strings for
    ``alphas``, ``nums``, ``alphanums``, and ``printables``.
    A unicode_set is defined by a list of ranges in the Unicode character
    set, in a class attribute ``_ranges``. Ranges can be specified using
    2-tuples or a 1-tuple, such as::

        _ranges = [
            (0x0020, 0x007e),
            (0x00a0, 0x00ff),
            (0x0100,),
        ]

    Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).

    A unicode set can also be defined using multiple inheritance of other unicode sets::

        class CJK(Chinese, Japanese, Korean):
            pass
    """

    _ranges: UnicodeRangeList = []

    @_lazyclassproperty
    def _chars_for_ranges(cls) -> list[str]:
        # Gather the code points contributed by this class and every
        # unicode_set ancestor, stopping at the unicode_set base itself.
        code_points: set[int] = set()
        for klass in cls.__mro__:  # type: ignore[attr-defined]
            if klass is unicode_set:
                break
            for span in getattr(klass, "_ranges", ()):
                # span is (first, last) or (only,); span[-1] handles both
                code_points.update(range(span[0], span[-1] + 1))
        return sorted(chr(cp) for cp in code_points)

    @_lazyclassproperty
    def printables(cls) -> str:
        """all non-whitespace characters in this range"""
        return "".join(ch for ch in cls._chars_for_ranges if not ch.isspace())

    @_lazyclassproperty
    def alphas(cls) -> str:
        """all alphabetic characters in this range"""
        return "".join(ch for ch in cls._chars_for_ranges if ch.isalpha())

    @_lazyclassproperty
    def nums(cls) -> str:
        """all numeric digit characters in this range"""
        return "".join(ch for ch in cls._chars_for_ranges if ch.isdigit())

    @_lazyclassproperty
    def alphanums(cls) -> str:
        """all alphanumeric characters in this range"""
        return cls.alphas + cls.nums

    @_lazyclassproperty
    def identchars(cls) -> str:
        """all characters in this range that are valid identifier characters, plus underscore '_'"""
        # Always include the basic Latin/Latin-1 identifier characters,
        # even when the range itself does not cover them.
        baseline_chars = set(
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
            "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
            "_"
        )
        in_range = {ch for ch in cls._chars_for_ranges if ch.isidentifier()}
        return "".join(sorted(in_range | baseline_chars))

    @_lazyclassproperty
    def identbodychars(cls) -> str:
        """
        all characters in this range that are valid identifier body characters,
        plus the digits 0-9, and · (Unicode MIDDLE DOT)
        """
        # A character is a valid identifier *body* character if appending it
        # to an underscore still forms a valid identifier.
        body_chars = {
            ch for ch in cls._chars_for_ranges if ("_" + ch).isidentifier()
        }
        return "".join(
            sorted(body_chars | set(cls.identchars) | set("0123456789·"))
        )

    @_lazyclassproperty
    def identifier(cls):
        """
        a pyparsing Word expression for an identifier using this range's definitions for
        identchars and identbodychars
        """
        from pyparsing import Word

        return Word(cls.identchars, cls.identbodychars)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class pyparsing_unicode(unicode_set):
    """
    A namespace class for defining common language unicode_sets.
    """

    # fmt: off

    # define ranges in language character sets
    _ranges: UnicodeRangeList = [
        (0x0020, sys.maxunicode),
    ]

    class BasicMultilingualPlane(unicode_set):
        """Unicode set for the Basic Multilingual Plane"""
        _ranges: UnicodeRangeList = [
            (0x0020, 0xFFFF),
        ]

    class Latin1(unicode_set):
        """Unicode set for Latin-1 Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0020, 0x007E),
            (0x00A0, 0x00FF),
        ]

    class LatinA(unicode_set):
        """Unicode set for Latin-A Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0100, 0x017F),
        ]

    class LatinB(unicode_set):
        """Unicode set for Latin-B Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0180, 0x024F),
        ]

    class Greek(unicode_set):
        """Unicode set for Greek Unicode Character Ranges"""
        _ranges: UnicodeRangeList = [
            (0x0342, 0x0345),
            (0x0370, 0x0377),
            (0x037A, 0x037F),
            (0x0384, 0x038A),
            (0x038C,),
            (0x038E, 0x03A1),
            (0x03A3, 0x03E1),
            (0x03F0, 0x03FF),
            (0x1D26, 0x1D2A),
            (0x1D5E,),
            (0x1D60,),
            (0x1D66, 0x1D6A),
            (0x1F00, 0x1F15),
            (0x1F18, 0x1F1D),
            (0x1F20, 0x1F45),
            (0x1F48, 0x1F4D),
            (0x1F50, 0x1F57),
            (0x1F59,),
            (0x1F5B,),
            (0x1F5D,),
            (0x1F5F, 0x1F7D),
            (0x1F80, 0x1FB4),
            (0x1FB6, 0x1FC4),
            (0x1FC6, 0x1FD3),
            (0x1FD6, 0x1FDB),
            (0x1FDD, 0x1FEF),
            (0x1FF2, 0x1FF4),
            (0x1FF6, 0x1FFE),
            (0x2129,),
            (0x2719, 0x271A),
            (0xAB65,),
            (0x10140, 0x1018D),
            (0x101A0,),
            (0x1D200, 0x1D245),
            (0x1F7A1, 0x1F7A7),
        ]

    class Cyrillic(unicode_set):
        """Unicode set for Cyrillic Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0400, 0x052F),
            (0x1C80, 0x1C88),
            (0x1D2B,),
            (0x1D78,),
            (0x2DE0, 0x2DFF),
            (0xA640, 0xA672),
            (0xA674, 0xA69F),
            (0xFE2E, 0xFE2F),
        ]

    class Chinese(unicode_set):
        """Unicode set for Chinese Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x2E80, 0x2E99),
            (0x2E9B, 0x2EF3),
            (0x31C0, 0x31E3),
            (0x3400, 0x4DB5),
            (0x4E00, 0x9FEF),
            (0xA700, 0xA707),
            (0xF900, 0xFA6D),
            (0xFA70, 0xFAD9),
            (0x16FE2, 0x16FE3),
            (0x1F210, 0x1F212),
            (0x1F214, 0x1F23B),
            (0x1F240, 0x1F248),
            (0x20000, 0x2A6D6),
            (0x2A700, 0x2B734),
            (0x2B740, 0x2B81D),
            (0x2B820, 0x2CEA1),
            (0x2CEB0, 0x2EBE0),
            (0x2F800, 0x2FA1D),
        ]

    class Japanese(unicode_set):
        """Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"""

        class Kanji(unicode_set):
            "Unicode set for Kanji Unicode Character Range"
            _ranges: UnicodeRangeList = [
                (0x4E00, 0x9FBF),
                (0x3000, 0x303F),
            ]

        class Hiragana(unicode_set):
            """Unicode set for Hiragana Unicode Character Range"""
            _ranges: UnicodeRangeList = [
                (0x3041, 0x3096),
                (0x3099, 0x30A0),
                (0x30FC,),
                (0xFF70,),
                (0x1B001,),
                (0x1B150, 0x1B152),
                (0x1F200,),
            ]

        class Katakana(unicode_set):
            """Unicode set for Katakana Unicode Character Range"""
            _ranges: UnicodeRangeList = [
                (0x3099, 0x309C),
                (0x30A0, 0x30FF),
                (0x31F0, 0x31FF),
                (0x32D0, 0x32FE),
                (0xFF65, 0xFF9F),
                (0x1B000,),
                (0x1B164, 0x1B167),
                (0x1F201, 0x1F202),
                (0x1F213,),
            ]

        # native-script aliases for the nested sets
        漢字 = Kanji
        カタカナ = Katakana
        ひらがな = Hiragana

        # combined Japanese range: concatenation of the three sub-scripts
        _ranges = (
            Kanji._ranges
            + Hiragana._ranges
            + Katakana._ranges
        )

    class Hangul(unicode_set):
        """Unicode set for Hangul (Korean) Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x1100, 0x11FF),
            (0x302E, 0x302F),
            (0x3131, 0x318E),
            (0x3200, 0x321C),
            (0x3260, 0x327B),
            (0x327E,),
            (0xA960, 0xA97C),
            (0xAC00, 0xD7A3),
            (0xD7B0, 0xD7C6),
            (0xD7CB, 0xD7FB),
            (0xFFA0, 0xFFBE),
            (0xFFC2, 0xFFC7),
            (0xFFCA, 0xFFCF),
            (0xFFD2, 0xFFD7),
            (0xFFDA, 0xFFDC),
        ]

    Korean = Hangul

    class CJK(Chinese, Japanese, Hangul):
        """Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"""

    class Thai(unicode_set):
        """Unicode set for Thai Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0E01, 0x0E3A),
            (0x0E3F, 0x0E5B)
        ]

    class Arabic(unicode_set):
        """Unicode set for Arabic Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0600, 0x061B),
            (0x061E, 0x06FF),
            (0x0700, 0x077F),
        ]

    class Hebrew(unicode_set):
        """Unicode set for Hebrew Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0591, 0x05C7),
            (0x05D0, 0x05EA),
            (0x05EF, 0x05F4),
            (0xFB1D, 0xFB36),
            (0xFB38, 0xFB3C),
            (0xFB3E,),
            (0xFB40, 0xFB41),
            (0xFB43, 0xFB44),
            (0xFB46, 0xFB4F),
        ]

    class Devanagari(unicode_set):
        """Unicode set for Devanagari Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0900, 0x097F),
            (0xA8E0, 0xA8FF)
        ]

    # short alias for the Basic Multilingual Plane
    BMP = BasicMultilingualPlane

    # add language identifiers using language Unicode
    العربية = Arabic
    中文 = Chinese
    кириллица = Cyrillic
    Ελληνικά = Greek
    עִברִית = Hebrew
    日本語 = Japanese
    한국어 = Korean
    ไทย = Thai
    देवनागरी = Devanagari

    # fmt: on
|
evalkit_tf437/lib/python3.10/site-packages/sniffio/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Top-level package for sniffio."""
|
| 2 |
+
|
| 3 |
+
__all__ = [
|
| 4 |
+
"current_async_library",
|
| 5 |
+
"AsyncLibraryNotFoundError",
|
| 6 |
+
"current_async_library_cvar",
|
| 7 |
+
"thread_local",
|
| 8 |
+
]
|
| 9 |
+
|
| 10 |
+
from ._version import __version__
|
| 11 |
+
|
| 12 |
+
from ._impl import (
|
| 13 |
+
current_async_library,
|
| 14 |
+
AsyncLibraryNotFoundError,
|
| 15 |
+
current_async_library_cvar,
|
| 16 |
+
thread_local,
|
| 17 |
+
)
|
evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (423 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_impl.cpython-310.pyc
ADDED
|
Binary file (2.67 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_version.cpython-310.pyc
ADDED
|
Binary file (187 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sniffio/_impl.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from contextvars import ContextVar
|
| 2 |
+
from typing import Optional
|
| 3 |
+
import sys
|
| 4 |
+
import threading
|
| 5 |
+
|
| 6 |
+
# Context variable an async library may set to advertise itself to sniffio;
# the default ``None`` means "no library has claimed the current context".
current_async_library_cvar = ContextVar(
    "current_async_library_cvar", default=None
)  # type: ContextVar[Optional[str]]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class _ThreadLocal(threading.local):
    # Since threading.local provides no explicit mechanism for setting
    # a default value, a custom subclass with a class attribute is used
    # instead.  Each thread sees ``name`` as None until it assigns its own.
    name = None  # type: Optional[str]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Shared per-thread holder; an async library sets ``thread_local.name`` while
# running so that sniffio can detect it without a context variable.
thread_local = _ThreadLocal()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class AsyncLibraryNotFoundError(RuntimeError):
    """Raised when no running async library can be identified for the
    current context (e.g. when called from plain synchronous code)."""
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def current_async_library() -> str:
    """Detect which async library is currently running.

    The following libraries are currently supported:

    ================ =========== ============================
    Library          Requires    Magic string
    ================ =========== ============================
    **Trio**         Trio v0.6+  ``"trio"``
    **Curio**        -           ``"curio"``
    **asyncio**                  ``"asyncio"``
    **Trio-asyncio** v0.8.2+     ``"trio"`` or ``"asyncio"``,
                                 depending on current mode
    ================ =========== ============================

    Returns:
      A string like ``"trio"``.

    Raises:
      AsyncLibraryNotFoundError: if called from synchronous context,
        or if the current async library was not recognized.

    Examples:

    .. code-block:: python3

       from sniffio import current_async_library

       async def generic_sleep(seconds):
           library = current_async_library()
           if library == "trio":
               import trio
               await trio.sleep(seconds)
           elif library == "asyncio":
               import asyncio
               await asyncio.sleep(seconds)
           # ... and so on ...
           else:
               raise RuntimeError(f"Unsupported library {library!r}")

    """
    # Fast paths: a library has explicitly identified itself, either in
    # thread-local state or in the context variable.
    for declared in (thread_local.name, current_async_library_cvar.get()):
        if declared is not None:
            return declared

    # Need to sniff for asyncio: a non-None current task means an asyncio
    # event loop is running in this thread.
    if "asyncio" in sys.modules:
        import asyncio
        try:
            current_task = asyncio.current_task  # type: ignore[attr-defined]
        except AttributeError:
            # fall back for very old asyncio versions
            current_task = asyncio.Task.current_task  # type: ignore[attr-defined]
        try:
            if current_task() is not None:
                return "asyncio"
        except RuntimeError:
            pass

    # Sniff for curio (for now)
    if "curio" in sys.modules:
        from curio.meta import curio_running
        if curio_running():
            return "curio"

    raise AsyncLibraryNotFoundError(
        "unknown async library, or not in async context"
    )
|
evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/__init__.py
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (173 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/test_sniffio.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from .. import (
|
| 7 |
+
current_async_library, AsyncLibraryNotFoundError,
|
| 8 |
+
current_async_library_cvar, thread_local
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def test_basics_cvar():
    # No library is detectable in a plain synchronous context.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    # Setting the context variable makes detection succeed...
    token = current_async_library_cvar.set("generic-lib")
    try:
        assert current_async_library() == "generic-lib"
    finally:
        current_async_library_cvar.reset(token)

    # ...and resetting it restores the "not found" behavior.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def test_basics_tlocal():
    # No library is detectable in a plain synchronous context.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    # Setting the thread-local name makes detection succeed...
    old_name, thread_local.name = thread_local.name, "generic-lib"
    try:
        assert current_async_library() == "generic-lib"
    finally:
        thread_local.name = old_name

    # ...and restoring the old name restores the "not found" behavior.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def test_asyncio():
    import asyncio

    # Merely importing asyncio must not trigger detection.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    ran = []

    async def this_is_asyncio():
        assert current_async_library() == "asyncio"
        # Call it a second time to exercise the caching logic
        assert current_async_library() == "asyncio"
        ran.append(True)

    asyncio.run(this_is_asyncio())
    # the coroutine body actually executed
    assert ran == [True]

    # Outside the event loop, detection fails again.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@pytest.mark.skipif(
    sys.version_info >= (3, 12),
    reason=
    "curio broken on 3.12 (https://github.com/python-trio/sniffio/pull/42)",
)
def test_curio():
    import curio

    # Merely importing curio must not trigger detection.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    ran = []

    async def this_is_curio():
        assert current_async_library() == "curio"
        # Call it a second time to exercise the caching logic
        assert current_async_library() == "curio"
        ran.append(True)

    curio.run(this_is_curio)
    # the coroutine body actually executed
    assert ran == [True]

    # Outside the curio kernel, detection fails again.
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()
|
evalkit_tf437/lib/python3.10/site-packages/sniffio/_version.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is imported from __init__.py and exec'd from setup.py

# Version string of the sniffio distribution (PEP 440 format).
__version__ = "1.3.1"
|