diff --git a/.gitattributes b/.gitattributes index c93f89eab5e178299f905c1f34386cf7b675d5dd..f8cae9ef62951b79df3ab9cb36654a4b8508b971 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2624,3 +2624,4 @@ evalkit_tf446/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl filt evalkit_tf446/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_tf446/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text evalkit_tf446/lib/python3.10/lib2to3/tests/__pycache__/test_fixers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +evalkit_tf446/lib/python3.10/site-packages/sentencepiece/_sentencepiece.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/INSTALLER b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/LICENSE.rst b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/LICENSE.rst new file mode 100644 index 0000000000000000000000000000000000000000..d12a849186982399c537c5b9a8fd77bf2edd5eab --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2014 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/METADATA b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..7a6bbb24b5f05575ac0263dd7fb24e0f0180d641 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/METADATA @@ -0,0 +1,103 @@ +Metadata-Version: 2.1 +Name: click +Version: 8.1.7 +Summary: Composable command line interface toolkit +Home-page: https://palletsprojects.com/p/click/ +Maintainer: Pallets +Maintainer-email: contact@palletsprojects.com +License: BSD-3-Clause +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Documentation, https://click.palletsprojects.com/ +Project-URL: Changes, https://click.palletsprojects.com/changes/ +Project-URL: Source Code, https://github.com/pallets/click/ +Project-URL: Issue Tracker, https://github.com/pallets/click/issues/ +Project-URL: Chat, https://discord.gg/pallets +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE.rst +Requires-Dist: colorama ; platform_system == "Windows" +Requires-Dist: importlib-metadata ; python_version < "3.8" + +\$ click\_ +========== + +Click is a Python package for creating beautiful command line interfaces +in a composable way with as little code as necessary. It's the "Command +Line Interface Creation Kit". It's highly configurable but comes with +sensible defaults out of the box. + +It aims to make the process of writing command line tools quick and fun +while also preventing any frustration caused by the inability to +implement an intended CLI API. 
+ +Click in three points: + +- Arbitrary nesting of commands +- Automatic help page generation +- Supports lazy loading of subcommands at runtime + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + $ pip install -U click + +.. _pip: https://pip.pypa.io/en/stable/getting-started/ + + +A Simple Example +---------------- + +.. code-block:: python + + import click + + @click.command() + @click.option("--count", default=1, help="Number of greetings.") + @click.option("--name", prompt="Your name", help="The person to greet.") + def hello(count, name): + """Simple program that greets NAME for a total of COUNT times.""" + for _ in range(count): + click.echo(f"Hello, {name}!") + + if __name__ == '__main__': + hello() + +.. code-block:: text + + $ python hello.py --count=3 + Your name: Click + Hello, Click! + Hello, Click! + Hello, Click! + + +Donate +------ + +The Pallets organization develops and supports Click and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, `please +donate today`_. + +.. 
_please donate today: https://palletsprojects.com/donate + + +Links +----- + +- Documentation: https://click.palletsprojects.com/ +- Changes: https://click.palletsprojects.com/changes/ +- PyPI Releases: https://pypi.org/project/click/ +- Source Code: https://github.com/pallets/click +- Issue Tracker: https://github.com/pallets/click/issues +- Chat: https://discord.gg/pallets diff --git a/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/RECORD b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..1c017ef744756904e45f505368e410ba35aff302 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/RECORD @@ -0,0 +1,40 @@ +click-8.1.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +click-8.1.7.dist-info/LICENSE.rst,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475 +click-8.1.7.dist-info/METADATA,sha256=qIMevCxGA9yEmJOM_4WHuUJCwWpsIEVbCPOhs45YPN4,3014 +click-8.1.7.dist-info/RECORD,, +click-8.1.7.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +click-8.1.7.dist-info/WHEEL,sha256=5sUXSg9e4bi7lTLOHcm6QEYwO5TIF1TNbTSVFVjcJcc,92 +click-8.1.7.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6 +click/__init__.py,sha256=YDDbjm406dTOA0V8bTtdGnhN7zj5j-_dFRewZF_pLvw,3138 +click/__pycache__/__init__.cpython-310.pyc,, +click/__pycache__/_compat.cpython-310.pyc,, +click/__pycache__/_termui_impl.cpython-310.pyc,, +click/__pycache__/_textwrap.cpython-310.pyc,, +click/__pycache__/_winconsole.cpython-310.pyc,, +click/__pycache__/core.cpython-310.pyc,, +click/__pycache__/decorators.cpython-310.pyc,, +click/__pycache__/exceptions.cpython-310.pyc,, +click/__pycache__/formatting.cpython-310.pyc,, +click/__pycache__/globals.cpython-310.pyc,, +click/__pycache__/parser.cpython-310.pyc,, +click/__pycache__/shell_completion.cpython-310.pyc,, +click/__pycache__/termui.cpython-310.pyc,, 
+click/__pycache__/testing.cpython-310.pyc,, +click/__pycache__/types.cpython-310.pyc,, +click/__pycache__/utils.cpython-310.pyc,, +click/_compat.py,sha256=5318agQpbt4kroKsbqDOYpTSWzL_YCZVUQiTT04yXmc,18744 +click/_termui_impl.py,sha256=3dFYv4445Nw-rFvZOTBMBPYwB1bxnmNk9Du6Dm_oBSU,24069 +click/_textwrap.py,sha256=10fQ64OcBUMuK7mFvh8363_uoOxPlRItZBmKzRJDgoY,1353 +click/_winconsole.py,sha256=5ju3jQkcZD0W27WEMGqmEP4y_crUVzPCqsX_FYb7BO0,7860 +click/core.py,sha256=j6oEWtGgGna8JarD6WxhXmNnxLnfRjwXglbBc-8jr7U,114086 +click/decorators.py,sha256=-ZlbGYgV-oI8jr_oH4RpuL1PFS-5QmeuEAsLDAYgxtw,18719 +click/exceptions.py,sha256=fyROO-47HWFDjt2qupo7A3J32VlpM-ovJnfowu92K3s,9273 +click/formatting.py,sha256=Frf0-5W33-loyY_i9qrwXR8-STnW3m5gvyxLVUdyxyk,9706 +click/globals.py,sha256=TP-qM88STzc7f127h35TD_v920FgfOD2EwzqA0oE8XU,1961 +click/parser.py,sha256=LKyYQE9ZLj5KgIDXkrcTHQRXIggfoivX14_UVIn56YA,19067 +click/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +click/shell_completion.py,sha256=Ty3VM_ts0sQhj6u7eFTiLwHPoTgcXTGEAUg2OpLqYKw,18460 +click/termui.py,sha256=H7Q8FpmPelhJ2ovOhfCRhjMtCpNyjFXryAMLZODqsdc,28324 +click/testing.py,sha256=1Qd4kS5bucn1hsNIRryd0WtTMuCpkA93grkWxT8POsU,16084 +click/types.py,sha256=TZvz3hKvBztf-Hpa2enOmP4eznSPLzijjig5b_0XMxE,36391 +click/utils.py,sha256=1476UduUNY6UePGU4m18uzVHLt1sKM2PP3yWsQhbItM,20298 diff --git a/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/REQUESTED b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/WHEEL b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..2c08da084599354e5b2dbccb3ab716165e63d1a0 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/WHEEL @@ -0,0 +1,5 @@ 
+Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/top_level.txt b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..dca9a909647e3b066931de2909c2d1e65c78c995 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/click-8.1.7.dist-info/top_level.txt @@ -0,0 +1 @@ +click diff --git a/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/METADATA b/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..0078392f3181a0cc92b39a4456890848d821c55e --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/METADATA @@ -0,0 +1,59 @@ +Metadata-Version: 2.3 +Name: filelock +Version: 3.16.1 +Summary: A platform independent file lock. +Project-URL: Documentation, https://py-filelock.readthedocs.io +Project-URL: Homepage, https://github.com/tox-dev/py-filelock +Project-URL: Source, https://github.com/tox-dev/py-filelock +Project-URL: Tracker, https://github.com/tox-dev/py-filelock/issues +Maintainer-email: Bernát Gábor +License-Expression: Unlicense +License-File: LICENSE +Keywords: application,cache,directory,log,user +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: The Unlicense (Unlicense) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: 
Python :: 3.13 +Classifier: Topic :: Internet +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: System +Requires-Python: >=3.8 +Provides-Extra: docs +Requires-Dist: furo>=2024.8.6; extra == 'docs' +Requires-Dist: sphinx-autodoc-typehints>=2.4.1; extra == 'docs' +Requires-Dist: sphinx>=8.0.2; extra == 'docs' +Provides-Extra: testing +Requires-Dist: covdefaults>=2.3; extra == 'testing' +Requires-Dist: coverage>=7.6.1; extra == 'testing' +Requires-Dist: diff-cover>=9.2; extra == 'testing' +Requires-Dist: pytest-asyncio>=0.24; extra == 'testing' +Requires-Dist: pytest-cov>=5; extra == 'testing' +Requires-Dist: pytest-mock>=3.14; extra == 'testing' +Requires-Dist: pytest-timeout>=2.3.1; extra == 'testing' +Requires-Dist: pytest>=8.3.3; extra == 'testing' +Requires-Dist: virtualenv>=20.26.4; extra == 'testing' +Provides-Extra: typing +Requires-Dist: typing-extensions>=4.12.2; (python_version < '3.11') and extra == 'typing' +Description-Content-Type: text/markdown + +# filelock + +[![PyPI](https://img.shields.io/pypi/v/filelock)](https://pypi.org/project/filelock/) +[![Supported Python +versions](https://img.shields.io/pypi/pyversions/filelock.svg)](https://pypi.org/project/filelock/) +[![Documentation +status](https://readthedocs.org/projects/py-filelock/badge/?version=latest)](https://py-filelock.readthedocs.io/en/latest/?badge=latest) +[![Code style: +black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +[![Downloads](https://static.pepy.tech/badge/filelock/month)](https://pepy.tech/project/filelock) +[![check](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml/badge.svg)](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml) + +For more information checkout the [official documentation](https://py-filelock.readthedocs.io/en/latest/index.html). 
diff --git a/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/RECORD b/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..a9c03ce620495d35cf7be0ffb1772cbe18ce13d0 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/RECORD @@ -0,0 +1,25 @@ +filelock-3.16.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +filelock-3.16.1.dist-info/METADATA,sha256=LXL5-XQe_eTKkdNs76A6jSicQ1DBSTXqkDcjsprWvIM,2944 +filelock-3.16.1.dist-info/RECORD,, +filelock-3.16.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +filelock-3.16.1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87 +filelock-3.16.1.dist-info/licenses/LICENSE,sha256=iNm062BXnBkew5HKBMFhMFctfu3EqG2qWL8oxuFMm80,1210 +filelock/__init__.py,sha256=_t_-OAGXo_qyPa9lNQ1YnzVYEvSW3I0onPqzpomsVVg,1769 +filelock/__pycache__/__init__.cpython-310.pyc,, +filelock/__pycache__/_api.cpython-310.pyc,, +filelock/__pycache__/_error.cpython-310.pyc,, +filelock/__pycache__/_soft.cpython-310.pyc,, +filelock/__pycache__/_unix.cpython-310.pyc,, +filelock/__pycache__/_util.cpython-310.pyc,, +filelock/__pycache__/_windows.cpython-310.pyc,, +filelock/__pycache__/asyncio.cpython-310.pyc,, +filelock/__pycache__/version.cpython-310.pyc,, +filelock/_api.py,sha256=GVeBEGjpDD8S1bYqG6_u0MZfbYHS6XrHs_n3PVKq-h0,14541 +filelock/_error.py,sha256=-5jMcjTu60YAvAO1UbqDD1GIEjVkwr8xCFwDBtMeYDg,787 +filelock/_soft.py,sha256=haqtc_TB_KJbYv2a8iuEAclKuM4fMG1vTcp28sK919c,1711 +filelock/_unix.py,sha256=-FXP0tjInBHUYygOlMpp4taUmD87QOkrD_4ybg_iT7Q,2259 +filelock/_util.py,sha256=QHBoNFIYfbAThhotH3Q8E2acFc84wpG49-T-uu017ZE,1715 +filelock/_windows.py,sha256=eMKL8dZKrgekf5VYVGR14an29JGEInRtUO8ui9ABywg,2177 +filelock/asyncio.py,sha256=3D4JP4Ms5IXTGib5eOekyr6uH6rZlieV_moVGY36juA,12463 +filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+filelock/version.py,sha256=KSOBzuLwiqiVWDPGfMj1ntr25YrY6JBDr8RvinQX_FM,413 diff --git a/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/REQUESTED b/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/WHEEL b/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..cdd68a497cdfa8d3f2b837225beacef711b85047 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.25.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/licenses/LICENSE b/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..cf1ab25da0349f84a3fdd40032f0ce99db813b8b --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/filelock-3.16.1.dist-info/licenses/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to <https://unlicense.org> diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/__init__.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..66cab7529ee2c722441f768d884e85bc4713e681 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/__init__.py @@ -0,0 +1,642 @@ +r"""Hjson, the Human JSON. A configuration file format that caters to +humans and helps reduce the errors they make. + +For details and syntax see <https://hjson.github.io>. + +Decoding Hjson:: + + >>> import hjson + >>> text = "{\n foo: a\n bar: 1\n}" + >>> hjson.loads(text) + OrderedDict([('foo', 'a'), ('bar', 1)]) + +Encoding Python object hierarchies:: + + >>> import hjson + >>> # hjson.dumps({'foo': 'text', 'bar': (1, 2)}) + >>> hjson.dumps(OrderedDict([('foo', 'text'), ('bar', (1, 2))])) + '{\n foo: text\n bar:\n [\n 1\n 2\n ]\n}' + +Encoding as JSON:: + + Note that this is probably not as performant as the simplejson version. + + >>> import hjson + >>> hjson.dumpsJSON(['foo', {'bar': ('baz', None, 1.0, 2)}]) + '["foo", {"bar": ["baz", null, 1.0, 2]}]' + +Using hjson.tool from the shell to validate and pretty-print:: + + $ echo '{"json":"obj"}' | python -m hjson.tool + { + json: obj + } + + Other formats are -c for compact or -j for formatted JSON. 
+ +""" +from __future__ import absolute_import +__version__ = '3.1.0' +__all__ = [ + 'dump', 'dumps', 'load', 'loads', + 'dumpJSON', 'dumpsJSON', + 'HjsonDecoder', 'HjsonDecodeError', 'HjsonEncoder', 'JSONEncoder', + 'OrderedDict', 'simple_first', +] + +# based on simplejson by +# __author__ = 'Bob Ippolito ' +__author__ = 'Christian Zangl ' + +from decimal import Decimal + +from .scanner import HjsonDecodeError +from .decoder import HjsonDecoder +from .encoderH import HjsonEncoder +from .encoder import JSONEncoder +def _import_OrderedDict(): + import collections + try: + return collections.OrderedDict + except AttributeError: + from . import ordered_dict + return ordered_dict.OrderedDict +OrderedDict = _import_OrderedDict() + + +_default_decoder = HjsonDecoder(encoding=None, object_hook=None, + object_pairs_hook=OrderedDict) + + +def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, + parse_int=None, object_pairs_hook=OrderedDict, + use_decimal=False, namedtuple_as_object=True, tuple_as_array=True, + **kw): + """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing + a JSON document) to a Python object. + + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. 
This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). + + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). + + If *use_decimal* is true (default: ``False``) then it implies + parse_float=decimal.Decimal for parity with ``dump``. + + To use a custom ``HjsonDecoder`` subclass, specify it with the ``cls`` + kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead + of subclassing whenever possible. + + """ + return loads(fp.read(), + encoding=encoding, cls=cls, object_hook=object_hook, + parse_float=parse_float, parse_int=parse_int, + object_pairs_hook=object_pairs_hook, + use_decimal=use_decimal, **kw) + + +def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, + parse_int=None, object_pairs_hook=None, + use_decimal=False, **kw): + """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON + document) to a Python object. + + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. 
+ + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). + + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). + + If *use_decimal* is true (default: ``False``) then it implies + parse_float=decimal.Decimal for parity with ``dump``. + + To use a custom ``HjsonDecoder`` subclass, specify it with the ``cls`` + kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead + of subclassing whenever possible. 
+ + """ + if (cls is None and encoding is None and object_hook is None and + parse_int is None and parse_float is None and + object_pairs_hook is None + and not use_decimal and not kw): + return _default_decoder.decode(s) + if cls is None: + cls = HjsonDecoder + if object_hook is not None: + kw['object_hook'] = object_hook + if object_pairs_hook is not None: + kw['object_pairs_hook'] = object_pairs_hook + if parse_float is not None: + kw['parse_float'] = parse_float + if parse_int is not None: + kw['parse_int'] = parse_int + if use_decimal: + if parse_float is not None: + raise TypeError("use_decimal=True implies parse_float=Decimal") + kw['parse_float'] = Decimal + return cls(encoding=encoding, **kw).decode(s) + + +_default_hjson_encoder = HjsonEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + indent=None, + encoding='utf-8', + default=None, + use_decimal=True, + namedtuple_as_object=True, + tuple_as_array=True, + bigint_as_string=False, + item_sort_key=None, + for_json=False, + int_as_string_bitcount=None, +) + +def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, + cls=None, indent=None, + encoding='utf-8', default=None, use_decimal=True, + namedtuple_as_object=True, tuple_as_array=True, + bigint_as_string=False, sort_keys=False, item_sort_key=None, + for_json=False, int_as_string_bitcount=None, **kw): + """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a + ``.write()``-supporting file-like object). + + If *skipkeys* is true then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If *ensure_ascii* is false, then the some chunks written to ``fp`` + may be ``unicode`` instances, subject to normal Python ``str`` to + ``unicode`` coercion rules. Unless ``fp.write()`` explicitly + understands ``unicode`` (as in ``codecs.getwriter()``) this is likely + to cause an error. 
+ + If *check_circular* is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + *indent* defines the amount of whitespace that the JSON array elements + and object members will be indented for each level of nesting. + The default is two spaces. + + *encoding* is the character encoding for str instances, default is UTF-8. + + *default(obj)* is a function that should return a serializable version + of obj or raise ``TypeError``. The default simply raises ``TypeError``. + + If *use_decimal* is true (default: ``True``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + If *namedtuple_as_object* is true (default: ``True``), + :class:`tuple` subclasses with ``_asdict()`` methods will be encoded + as JSON objects. + + If *tuple_as_array* is true (default: ``True``), + :class:`tuple` (and subclasses) will be encoded as JSON arrays. + + If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. Note that this is still a + lossy operation that will not round-trip correctly and should be used + sparingly. + + If *int_as_string_bitcount* is a positive number (n), then int of size + greater than or equal to 2**n or lower than or equal to -2**n will be + encoded as strings. + + If specified, *item_sort_key* is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. This option takes precedence over + *sort_keys*. + + If *sort_keys* is true (default: ``False``), the output of dictionaries + will be sorted by item. + + If *for_json* is true (default: ``False``), objects with a ``for_json()`` + method will use the return value of that method for encoding as JSON + instead of the object. 
+ + To use a custom ``HjsonEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead + of subclassing whenever possible. + + """ + # cached encoder + if (not skipkeys and ensure_ascii and + check_circular and + cls is None and indent is None and + encoding == 'utf-8' and default is None and use_decimal + and namedtuple_as_object and tuple_as_array + and not bigint_as_string and not sort_keys + and not item_sort_key and not for_json + and int_as_string_bitcount is None + and not kw + ): + iterable = _default_hjson_encoder.iterencode(obj) + else: + if cls is None: + cls = HjsonEncoder + iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, indent=indent, + encoding=encoding, + default=default, use_decimal=use_decimal, + namedtuple_as_object=namedtuple_as_object, + tuple_as_array=tuple_as_array, + bigint_as_string=bigint_as_string, + sort_keys=sort_keys, + item_sort_key=item_sort_key, + for_json=for_json, + int_as_string_bitcount=int_as_string_bitcount, + **kw).iterencode(obj) + # could accelerate with writelines in some versions of Python, at + # a debuggability cost + for chunk in iterable: + fp.write(chunk) + + +def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, + cls=None, indent=None, + encoding='utf-8', default=None, use_decimal=True, + namedtuple_as_object=True, tuple_as_array=True, + bigint_as_string=False, sort_keys=False, item_sort_key=None, + for_json=False, int_as_string_bitcount=None, **kw): + """Serialize ``obj`` to a JSON formatted ``str``. + + If ``skipkeys`` is false then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. 
+ + If ``ensure_ascii`` is false, then the return value will be a + ``unicode`` instance subject to normal Python ``str`` to ``unicode`` + coercion rules instead of being escaped to an ASCII ``str``. + + If ``check_circular`` is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + *indent* defines the amount of whitespace that the JSON array elements + and object members will be indented for each level of nesting. + The default is two spaces. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. + + If *use_decimal* is true (default: ``True``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + If *namedtuple_as_object* is true (default: ``True``), + :class:`tuple` subclasses with ``_asdict()`` methods will be encoded + as JSON objects. + + If *tuple_as_array* is true (default: ``True``), + :class:`tuple` (and subclasses) will be encoded as JSON arrays. + + If *bigint_as_string* is true (not the default), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. + + If *int_as_string_bitcount* is a positive number (n), then int of size + greater than or equal to 2**n or lower than or equal to -2**n will be + encoded as strings. + + If specified, *item_sort_key* is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. This option takes precedence over + *sort_keys*. + + If *sort_keys* is true (default: ``False``), the output of dictionaries + will be sorted by item. 
+ + If *for_json* is true (default: ``False``), objects with a ``for_json()`` + method will use the return value of that method for encoding as JSON + instead of the object. + + To use a custom ``HjsonEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing + whenever possible. + + """ + # cached encoder + if ( + not skipkeys and ensure_ascii and + check_circular and + cls is None and indent is None and + encoding == 'utf-8' and default is None and use_decimal + and namedtuple_as_object and tuple_as_array + and not bigint_as_string and not sort_keys + and not item_sort_key and not for_json + and int_as_string_bitcount is None + and not kw + ): + return _default_hjson_encoder.encode(obj) + if cls is None: + cls = HjsonEncoder + return cls( + skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, indent=indent, + encoding=encoding, default=default, + use_decimal=use_decimal, + namedtuple_as_object=namedtuple_as_object, + tuple_as_array=tuple_as_array, + bigint_as_string=bigint_as_string, + sort_keys=sort_keys, + item_sort_key=item_sort_key, + for_json=for_json, + int_as_string_bitcount=int_as_string_bitcount, + **kw).encode(obj) + + + +_default_json_encoder = JSONEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + indent=None, + separators=None, + encoding='utf-8', + default=None, + use_decimal=True, + namedtuple_as_object=True, + tuple_as_array=True, + bigint_as_string=False, + item_sort_key=None, + for_json=False, + int_as_string_bitcount=None, +) + +def dumpJSON(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, + cls=None, indent=None, separators=None, + encoding='utf-8', default=None, use_decimal=True, + namedtuple_as_object=True, tuple_as_array=True, + bigint_as_string=False, sort_keys=False, item_sort_key=None, + for_json=False, int_as_string_bitcount=None, **kw): + 
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a + ``.write()``-supporting file-like object). + + If *skipkeys* is true then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If *ensure_ascii* is false, then the some chunks written to ``fp`` + may be ``unicode`` instances, subject to normal Python ``str`` to + ``unicode`` coercion rules. Unless ``fp.write()`` explicitly + understands ``unicode`` (as in ``codecs.getwriter()``) this is likely + to cause an error. + + If *check_circular* is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If *indent* is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. An integer is also accepted + and is converted to a string with that many spaces. + + If specified, *separators* should be an + ``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')`` + if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most + compact JSON representation, you should specify ``(',', ':')`` to eliminate + whitespace. + + *encoding* is the character encoding for str instances, default is UTF-8. + + *default(obj)* is a function that should return a serializable version + of obj or raise ``TypeError``. The default simply raises ``TypeError``. + + If *use_decimal* is true (default: ``True``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + If *namedtuple_as_object* is true (default: ``True``), + :class:`tuple` subclasses with ``_asdict()`` methods will be encoded + as JSON objects. 
+ + If *tuple_as_array* is true (default: ``True``), + :class:`tuple` (and subclasses) will be encoded as JSON arrays. + + If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. Note that this is still a + lossy operation that will not round-trip correctly and should be used + sparingly. + + If *int_as_string_bitcount* is a positive number (n), then int of size + greater than or equal to 2**n or lower than or equal to -2**n will be + encoded as strings. + + If specified, *item_sort_key* is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. This option takes precedence over + *sort_keys*. + + If *sort_keys* is true (default: ``False``), the output of dictionaries + will be sorted by item. + + If *for_json* is true (default: ``False``), objects with a ``for_json()`` + method will use the return value of that method for encoding as JSON + instead of the object. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead + of subclassing whenever possible. 
+ + """ + # cached encoder + if (not skipkeys and ensure_ascii and + check_circular and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and default is None and use_decimal + and namedtuple_as_object and tuple_as_array + and not bigint_as_string and not sort_keys + and not item_sort_key and not for_json + and int_as_string_bitcount is None + and not kw + ): + iterable = _default_json_encoder.iterencode(obj) + else: + if cls is None: + cls = JSONEncoder + iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, indent=indent, + separators=separators, encoding=encoding, + default=default, use_decimal=use_decimal, + namedtuple_as_object=namedtuple_as_object, + tuple_as_array=tuple_as_array, + bigint_as_string=bigint_as_string, + sort_keys=sort_keys, + item_sort_key=item_sort_key, + for_json=for_json, + int_as_string_bitcount=int_as_string_bitcount, + **kw).iterencode(obj) + # could accelerate with writelines in some versions of Python, at + # a debuggability cost + for chunk in iterable: + fp.write(chunk) + + +def dumpsJSON(obj, skipkeys=False, ensure_ascii=True, check_circular=True, + cls=None, indent=None, separators=None, + encoding='utf-8', default=None, use_decimal=True, + namedtuple_as_object=True, tuple_as_array=True, + bigint_as_string=False, sort_keys=False, item_sort_key=None, + for_json=False, int_as_string_bitcount=None, **kw): + """Serialize ``obj`` to a JSON formatted ``str``. + + If ``skipkeys`` is false then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If ``ensure_ascii`` is false, then the return value will be a + ``unicode`` instance subject to normal Python ``str`` to ``unicode`` + coercion rules instead of being escaped to an ASCII ``str``. 
+ + If ``check_circular`` is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If ``indent`` is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. An integer is also accepted + and is converted to a string with that many spaces. + + If specified, ``separators`` should be an + ``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')`` + if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most + compact JSON representation, you should specify ``(',', ':')`` to eliminate + whitespace. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. + + If *use_decimal* is true (default: ``True``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + If *namedtuple_as_object* is true (default: ``True``), + :class:`tuple` subclasses with ``_asdict()`` methods will be encoded + as JSON objects. + + If *tuple_as_array* is true (default: ``True``), + :class:`tuple` (and subclasses) will be encoded as JSON arrays. + + If *bigint_as_string* is true (not the default), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. + + If *int_as_string_bitcount* is a positive number (n), then int of size + greater than or equal to 2**n or lower than or equal to -2**n will be + encoded as strings. + + If specified, *item_sort_key* is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. 
This option takes precedence over
+ """ + return (isinstance(kv[1], (list, dict, tuple)), kv[0]) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/compat.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..a0af4a1cb86e070ca42b45ac5c7cd1eca3619c79 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/compat.py @@ -0,0 +1,46 @@ +"""Python 3 compatibility shims +""" +import sys +if sys.version_info[0] < 3: + PY3 = False + def b(s): + return s + def u(s): + return unicode(s, 'unicode_escape') + import cStringIO as StringIO + StringIO = BytesIO = StringIO.StringIO + text_type = unicode + binary_type = str + string_types = (basestring,) + integer_types = (int, long) + unichr = unichr + reload_module = reload + def fromhex(s): + return s.decode('hex') + +else: + PY3 = True + if sys.version_info[:2] >= (3, 4): + from importlib import reload as reload_module + else: + from imp import reload as reload_module + import codecs + def b(s): + return codecs.latin_1_encode(s)[0] + def u(s): + return s + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + text_type = str + binary_type = bytes + string_types = (str,) + integer_types = (int,) + + def unichr(s): + return u(chr(s)) + + def fromhex(s): + return bytes.fromhex(s) + +long_type = integer_types[-1] diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/decoder.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..fbcc2a2db2314ff37774b9cae6b9e75e6bf5ddda --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/decoder.py @@ -0,0 +1,569 @@ +"""Implementation of HjsonDecoder +""" +from __future__ import absolute_import +import re +import sys +import struct +from .compat import fromhex, b, u, text_type, binary_type, PY3, unichr +from .scanner import HjsonDecodeError + +# NOTE (3.1.0): HjsonDecodeError may still be imported from this module for +# 
compatibility, but it was never in the __all__ +__all__ = ['HjsonDecoder'] + +FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL + +def _floatconstants(): + _BYTES = fromhex('7FF80000000000007FF0000000000000') + # The struct module in Python 2.4 would get frexp() out of range here + # when an endian is specified in the format string. Fixed in Python 2.5+ + if sys.byteorder != 'big': + _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1] + nan, inf = struct.unpack('dd', _BYTES) + return nan, inf, -inf + +NaN, PosInf, NegInf = _floatconstants() + +WHITESPACE = ' \t\n\r' +PUNCTUATOR = '{}[],:' + +NUMBER_RE = re.compile(r'[\t ]*(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?[\t ]*') +STRINGCHUNK = re.compile(r'(.*?)([\'"\\\x00-\x1f])', FLAGS) +BACKSLASH = { + '"': u('"'), '\'': u('\''), '\\': u('\u005c'), '/': u('/'), + 'b': u('\b'), 'f': u('\f'), 'n': u('\n'), 'r': u('\r'), 't': u('\t'), +} + +DEFAULT_ENCODING = "utf-8" + +def getNext(s, end): + while 1: + # Use a slice to prevent IndexError from being raised + ch = s[end:end + 1] + # Skip whitespace. + while ch in WHITESPACE: + if ch == '': return ch, end + end += 1 + ch = s[end:end + 1] + + # Hjson allows comments + ch2 = s[end + 1:end + 2] + if ch == '#' or ch == '/' and ch2 == '/': + end = getEol(s, end) + elif ch == '/' and ch2 == '*': + end += 2 + ch = s[end] + while ch != '' and not (ch == '*' and s[end + 1] == '/'): + end += 1 + ch = s[end] + if ch != '': + end += 2 + else: + break + + return ch, end + +def getEol(s, end): + # skip until eol + + while 1: + ch = s[end:end + 1] + if ch == '\r' or ch == '\n' or ch == '': + return end + end += 1 + +def skipIndent(s, end, n): + ch = s[end:end + 1] + while ch != '' and ch in " \t\r" and (n > 0 or n < 0): + end += 1 + n -= 1 + ch = s[end:end + 1] + return end + + +def scanstring(s, end, encoding=None, strict=True, + _b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join, + _PY3=PY3, _maxunicode=sys.maxunicode): + """Scan the string s for a JSON string. 
End is the index of the + character in s after the quote that started the JSON string. + Unescapes all valid JSON string escape sequences and raises ValueError + on attempt to decode an invalid string. If strict is False then literal + control characters are allowed in the string. + + Returns a tuple of the decoded string and the index of the character in s + after the end quote.""" + if encoding is None: + encoding = DEFAULT_ENCODING + chunks = [] + _append = chunks.append + begin = end - 1 + # callers make sure that string starts with " or ' + exitCh = s[begin] + while 1: + chunk = _m(s, end) + if chunk is None: + raise HjsonDecodeError( + "Unterminated string starting at", s, begin) + end = chunk.end() + content, terminator = chunk.groups() + # Content is contains zero or more unescaped string characters + if content: + if not _PY3 and not isinstance(content, text_type): + content = text_type(content, encoding) + _append(content) + # Terminator is the end of string, a literal control character, + # or a backslash denoting that an escape sequence follows + if terminator == exitCh: + break + elif terminator == '"' or terminator == '\'': + _append(terminator) + continue + elif terminator != '\\': + if strict: + msg = "Invalid control character %r at" + raise HjsonDecodeError(msg, s, end) + else: + _append(terminator) + continue + try: + esc = s[end] + except IndexError: + raise HjsonDecodeError( + "Unterminated string starting at", s, begin) + # If not a unicode escape sequence, must be in the lookup table + if esc != 'u': + try: + char = _b[esc] + except KeyError: + msg = "Invalid \\X escape sequence %r" + raise HjsonDecodeError(msg, s, end) + end += 1 + else: + # Unicode escape sequence + msg = "Invalid \\uXXXX escape sequence" + esc = s[end + 1:end + 5] + escX = esc[1:2] + if len(esc) != 4 or escX == 'x' or escX == 'X': + raise HjsonDecodeError(msg, s, end - 1) + try: + uni = int(esc, 16) + except ValueError: + raise HjsonDecodeError(msg, s, end - 1) + end += 5 
+ # Check for surrogate pair on UCS-4 systems + # Note that this will join high/low surrogate pairs + # but will also pass unpaired surrogates through + if (_maxunicode > 65535 and + uni & 0xfc00 == 0xd800 and + s[end:end + 2] == '\\u'): + esc2 = s[end + 2:end + 6] + escX = esc2[1:2] + if len(esc2) == 4 and not (escX == 'x' or escX == 'X'): + try: + uni2 = int(esc2, 16) + except ValueError: + raise HjsonDecodeError(msg, s, end) + if uni2 & 0xfc00 == 0xdc00: + uni = 0x10000 + (((uni - 0xd800) << 10) | + (uni2 - 0xdc00)) + end += 6 + char = unichr(uni) + # Append the unescaped character + _append(char) + return _join(chunks), end + +def mlscanstring(s, end): + """Scan a multiline string""" + + string = "" + triple = 0 + + # we are at ''' - get indent + indent = 0 + while 1: + ch = s[end-indent-1] + if ch == '\n': break + indent += 1 + + # skip white/to (newline) + end = skipIndent(s, end + 3, -1) + + ch = s[end] + if ch == '\n': end = skipIndent(s, end + 1, indent) + + # When parsing multiline string values, we must look for ' characters + while 1: + ch = s[end:end + 1] + if ch == '': + raise HjsonDecodeError("Bad multiline string", s, end); + elif ch == '\'': + triple += 1 + end += 1 + if triple == 3: + if string and string[-1] == '\n': + string = string[:-1] # remove last EOL + return string, end + else: + continue + else: + while triple > 0: + string += '\'' + triple -= 1 + + if ch == '\n': + string += ch + end = skipIndent(s, end + 1, indent) + else: + if ch != '\r': + string += ch + end += 1 + +def scantfnns(context, s, end): + """Scan s until eol. 
return string, True, False or None""" + + chf, begin = getNext(s, end) + end = begin + + if chf in PUNCTUATOR: + raise HjsonDecodeError("Found a punctuator character when expecting a quoteless string (check your syntax)", s, end); + + while 1: + ch = s[end:end + 1] + + isEol = ch == '\r' or ch == '\n' or ch == '' + if isEol or ch == ',' or \ + ch == '}' or ch == ']' or \ + ch == '#' or \ + ch == '/' and (s[end + 1:end + 2] == '/' or s[end + 1:end + 2] == '*'): + + m = None + mend = end + if next: mend -= 1 + + if chf == 'n' and s[begin:end].strip() == 'null': + return None, end + elif chf == 't' and s[begin:end].strip() == 'true': + return True, end + elif chf == 'f' and s[begin:end].strip() == 'false': + return False, end + elif chf == '-' or chf >= '0' and chf <= '9': + m = NUMBER_RE.match(s, begin) + + if m is not None and m.end() == end: + integer, frac, exp = m.groups() + if frac or exp: + res = context.parse_float(integer + (frac or '') + (exp or '')) + if int(res) == res and abs(res)<1e10: res = int(res) + else: + res = context.parse_int(integer) + return res, end + + if isEol: + return s[begin:end].strip(), end + + end += 1 + +def scanKeyName(s, end, encoding=None, strict=True): + """Scan the string s for a JSON/Hjson key. 
see scanstring""" + + ch, end = getNext(s, end) + + if ch == '"' or ch == '\'': + return scanstring(s, end + 1, encoding, strict) + + begin = end + space = -1 + while 1: + ch = s[end:end + 1] + + if ch == '': + raise HjsonDecodeError("Bad key name (eof)", s, end); + elif ch == ':': + if begin == end: + raise HjsonDecodeError("Found ':' but no key name (for an empty key name use quotes)", s, begin) + elif space >= 0: + if space != end - 1: raise HjsonDecodeError("Found whitespace in your key name (use quotes to include)", s, space) + return s[begin:end].rstrip(), end + else: + return s[begin:end], end + elif ch in WHITESPACE: + if space < 0 or space == end - 1: space = end + elif ch == '{' or ch == '}' or ch == '[' or ch == ']' or ch == ',': + raise HjsonDecodeError("Found '" + ch + "' where a key name was expected (check your syntax or use quotes if the key name includes {}[],: or whitespace)", s, begin) + end += 1 + +def make_scanner(context): + parse_object = context.parse_object + parse_array = context.parse_array + parse_string = context.parse_string + parse_mlstring = context.parse_mlstring + parse_tfnns = context.parse_tfnns + encoding = context.encoding + strict = context.strict + object_hook = context.object_hook + object_pairs_hook = context.object_pairs_hook + memo = context.memo + + def _scan_once(string, idx): + try: + ch = string[idx] + except IndexError: + raise HjsonDecodeError('Expecting value', string, idx) + + if ch == '"' or ch == '\'': + if string[idx:idx + 3] == '\'\'\'': + return parse_mlstring(string, idx) + else: + return parse_string(string, idx + 1, encoding, strict) + elif ch == '{': + return parse_object((string, idx + 1), encoding, strict, + _scan_once, object_hook, object_pairs_hook, memo) + elif ch == '[': + return parse_array((string, idx + 1), _scan_once) + + return parse_tfnns(context, string, idx) + + def scan_once(string, idx): + if idx < 0: raise HjsonDecodeError('Expecting value', string, idx) + try: + return _scan_once(string, 
idx) + finally: + memo.clear() + + def scan_object_once(string, idx): + if idx < 0: raise HjsonDecodeError('Expecting value', string, idx) + try: + return parse_object((string, idx), encoding, strict, + _scan_once, object_hook, object_pairs_hook, memo, True) + finally: + memo.clear() + + return scan_once, scan_object_once + + +def JSONObject(state, encoding, strict, scan_once, object_hook, + object_pairs_hook, memo=None, objectWithoutBraces=False): + (s, end) = state + # Backwards compatibility + if memo is None: + memo = {} + memo_get = memo.setdefault + pairs = [] + + ch, end = getNext(s, end) + + # Trivial empty object + if not objectWithoutBraces and ch == '}': + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + 1 + pairs = {} + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end + 1 + + while True: + key, end = scanKeyName(s, end, encoding, strict) + key = memo_get(key, key) + + ch, end = getNext(s, end) + if ch != ':': + raise HjsonDecodeError("Expecting ':' delimiter", s, end) + + ch, end = getNext(s, end + 1) + + value, end = scan_once(s, end) + pairs.append((key, value)) + + ch, end = getNext(s, end) + + if ch == ',': + ch, end = getNext(s, end + 1) + + if objectWithoutBraces: + if ch == '': break; + else: + if ch == '}': + end += 1 + break + + ch, end = getNext(s, end) + + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + pairs = dict(pairs) + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end + +def JSONArray(state, scan_once): + (s, end) = state + values = [] + + ch, end = getNext(s, end) + + # Look-ahead for trivial empty array + if ch == ']': + return values, end + 1 + elif ch == '': + raise HjsonDecodeError("End of input while parsing an array (did you forget a closing ']'?)", s, end) + _append = values.append + while True: + value, end = scan_once(s, end) + _append(value) + + ch, end = getNext(s, end) + if ch == 
',': + ch, end = getNext(s, end + 1) + + if ch == ']': + end += 1 + break + + ch, end = getNext(s, end) + + return values, end + + +class HjsonDecoder(object): + """Hjson decoder + + Performs the following translations in decoding by default: + + +---------------+-------------------+ + | JSON | Python | + +===============+===================+ + | object | dict | + +---------------+-------------------+ + | array | list | + +---------------+-------------------+ + | string | str, unicode | + +---------------+-------------------+ + | number (int) | int, long | + +---------------+-------------------+ + | number (real) | float | + +---------------+-------------------+ + | true | True | + +---------------+-------------------+ + | false | False | + +---------------+-------------------+ + | null | None | + +---------------+-------------------+ + + """ + + def __init__(self, encoding=None, object_hook=None, parse_float=None, + parse_int=None, strict=True, + object_pairs_hook=None): + """ + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. 
This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). + + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). + + *strict* controls the parser's behavior when it encounters an + invalid control character in a string. The default setting of + ``True`` means that unescaped control characters are parse errors, if + ``False`` then control characters will be allowed in strings. 
+ + """ + if encoding is None: + encoding = DEFAULT_ENCODING + self.encoding = encoding + self.object_hook = object_hook + self.object_pairs_hook = object_pairs_hook + self.parse_float = parse_float or float + self.parse_int = parse_int or int + self.strict = strict + self.parse_object = JSONObject + self.parse_array = JSONArray + self.parse_string = scanstring + self.parse_mlstring = mlscanstring + self.parse_tfnns = scantfnns + self.memo = {} + (self.scan_once, self.scan_object_once) = make_scanner(self) + + def decode(self, s, _PY3=PY3): + """Return the Python representation of ``s`` (a ``str`` or ``unicode`` + instance containing a JSON document) + + """ + if _PY3 and isinstance(s, binary_type): + s = s.decode(self.encoding) + obj, end = self.raw_decode(s) + ch, end = getNext(s, end) + if end != len(s): + raise HjsonDecodeError("Extra data", s, end, len(s)) + return obj + + def raw_decode(self, s, idx=0, _PY3=PY3): + """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` + beginning with a JSON document) and return a 2-tuple of the Python + representation and the index in ``s`` where the document ended. + Optionally, ``idx`` can be used to specify an offset in ``s`` where + the JSON document begins. + + This can be used to decode a JSON document from a string that may + have extraneous data at the end. + + """ + if idx < 0: + # Ensure that raw_decode bails on negative indexes, the regex + # would otherwise mask this behavior. 
#98 + raise HjsonDecodeError('Expecting value', s, idx) + if _PY3 and not isinstance(s, text_type): + raise TypeError("Input string must be text") + # strip UTF-8 bom + if len(s) > idx: + ord0 = ord(s[idx]) + if ord0 == 0xfeff: + idx += 1 + elif ord0 == 0xef and s[idx:idx + 3] == '\xef\xbb\xbf': + idx += 3 + + start_index = idx + ch, idx = getNext(s, idx) + + # If blank or comment only file, return dict + if start_index == 0 and ch == '': + return {}, 0 + + if ch == '{' or ch == '[': + return self.scan_once(s, idx) + else: + # assume we have a root object without braces + try: + return self.scan_object_once(s, idx) + except HjsonDecodeError as e: + # test if we are dealing with a single JSON value instead (true/false/null/num/"") + try: + return self.scan_once(s, idx) + except: + raise e diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/encoder.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..52b77adc770e9eb3958a9dd25ad898cbb394ac28 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/encoder.py @@ -0,0 +1,521 @@ +"""Implementation of JSONEncoder +""" +from __future__ import absolute_import +import re +from operator import itemgetter +from decimal import Decimal +from .compat import u, unichr, binary_type, string_types, integer_types, PY3 +from .decoder import PosInf + +#ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]') +# This is required because u() will mangle the string and ur'' isn't valid +# python3 syntax +ESCAPE = re.compile(u'[\\x00-\\x1f\\\\"\\b\\f\\n\\r\\t\u2028\u2029]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +HAS_UTF8 = re.compile(r'[\x80-\xff]') +ESCAPE_DCT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) + ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) +for i in [0x2028, 0x2029]: 
+ ESCAPE_DCT.setdefault(unichr(i), '\\u%04x' % (i,)) + +FLOAT_REPR = repr + +def encode_basestring(s, _PY3=PY3, _q=u('"')): + """Return a JSON representation of a Python string + + """ + if _PY3: + if isinstance(s, binary_type): + s = s.decode('utf-8') + else: + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + return ESCAPE_DCT[match.group(0)] + return _q + ESCAPE.sub(replace, s) + _q + + +def py_encode_basestring_ascii(s, _PY3=PY3): + """Return an ASCII-only JSON representation of a Python string + + """ + if _PY3: + if isinstance(s, binary_type): + s = s.decode('utf-8') + else: + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + #return '\\u{0:04x}'.format(n) + return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) + return '\\u%04x\\u%04x' % (s1, s2) + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + + +encode_basestring_ascii = ( + py_encode_basestring_ascii) + +class JSONEncoder(object): + """Extensible JSON encoder for Python data structures. 
+ + Supports the following objects and types by default: + + +-------------------+---------------+ + | Python | JSON | + +===================+===============+ + | dict, namedtuple | object | + +-------------------+---------------+ + | list, tuple | array | + +-------------------+---------------+ + | str, unicode | string | + +-------------------+---------------+ + | int, long, float | number | + +-------------------+---------------+ + | True | true | + +-------------------+---------------+ + | False | false | + +-------------------+---------------+ + | None | null | + +-------------------+---------------+ + + To extend this to recognize other objects, subclass and implement a + ``.default()`` method with another method that returns a serializable + object for ``o`` if possible, otherwise it should call the superclass + implementation (to raise ``TypeError``). + + """ + item_separator = ', ' + key_separator = ': ' + + def __init__(self, skipkeys=False, ensure_ascii=True, + check_circular=True, sort_keys=False, + indent=None, separators=None, encoding='utf-8', default=None, + use_decimal=True, namedtuple_as_object=True, + tuple_as_array=True, bigint_as_string=False, + item_sort_key=None, for_json=False, + int_as_string_bitcount=None): + """Constructor for JSONEncoder, with sensible defaults. + + If skipkeys is false, then it is a TypeError to attempt + encoding of keys that are not str, int, long, float or None. If + skipkeys is True, such items are simply skipped. + + If ensure_ascii is true, the output is guaranteed to be str + objects with all incoming unicode characters escaped. If + ensure_ascii is false, the output will be unicode object. + + If check_circular is true, then lists, dicts, and custom encoded + objects will be checked for circular references during encoding to + prevent an infinite recursion (which would cause an OverflowError). + Otherwise, no such check takes place. 
+ + If sort_keys is true, then the output of dictionaries will be + sorted by key; this is useful for regression tests to ensure + that JSON serializations can be compared on a day-to-day basis. + + If indent is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of hjson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If specified, separators should be an (item_separator, key_separator) + tuple. The default is (', ', ': ') if *indent* is ``None`` and + (',', ': ') otherwise. To get the most compact JSON representation, + you should specify (',', ':') to eliminate whitespace. + + If specified, default is a function that gets called for objects + that can't otherwise be serialized. It should return a JSON encodable + version of the object or raise a ``TypeError``. + + If encoding is not None, then all input strings will be + transformed into unicode using that encoding prior to JSON-encoding. + The default is UTF-8. + + If use_decimal is true (not the default), ``decimal.Decimal`` will + be supported directly by the encoder. For the inverse, decode JSON + with ``parse_float=decimal.Decimal``. + + If namedtuple_as_object is true (the default), objects with + ``_asdict()`` methods will be encoded as JSON objects. + + If tuple_as_array is true (the default), tuple (and subclasses) will + be encoded as JSON arrays. + + If bigint_as_string is true (not the default), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. + + If int_as_string_bitcount is a positive number (n), then int of size + greater than or equal to 2**n or lower than or equal to -2**n will be + encoded as strings. 
+ + If specified, item_sort_key is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. + + If for_json is true (not the default), objects with a ``for_json()`` + method will use the return value of that method for encoding as JSON + instead of the object. + + """ + + self.skipkeys = skipkeys + self.ensure_ascii = ensure_ascii + self.check_circular = check_circular + self.sort_keys = sort_keys + self.use_decimal = use_decimal + self.namedtuple_as_object = namedtuple_as_object + self.tuple_as_array = tuple_as_array + self.bigint_as_string = bigint_as_string + self.item_sort_key = item_sort_key + self.for_json = for_json + self.int_as_string_bitcount = int_as_string_bitcount + if indent is not None and not isinstance(indent, string_types): + indent = indent * ' ' + self.indent = indent + if separators is not None: + self.item_separator, self.key_separator = separators + elif indent is not None: + self.item_separator = ',' + if default is not None: + self.default = default + self.encoding = encoding + + def default(self, o): + """Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). + + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + return JSONEncoder.default(self, o) + + """ + raise TypeError(repr(o) + " is not JSON serializable") + + def encode(self, o): + """Return a JSON string representation of a Python data structure. + + >>> from hjson import JSONEncoder + >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo": ["bar", "baz"]}' + + """ + # This is for extremely simple cases and benchmarks. 
+ if isinstance(o, binary_type): + _encoding = self.encoding + if (_encoding is not None and not (_encoding == 'utf-8')): + o = o.decode(_encoding) + if isinstance(o, string_types): + if self.ensure_ascii: + return encode_basestring_ascii(o) + else: + return encode_basestring(o) + # This doesn't pass the iterator directly to ''.join() because the + # exceptions aren't as detailed. The list call should be roughly + # equivalent to the PySequence_Fast that ''.join() would do. + chunks = self.iterencode(o, _one_shot=True) + if not isinstance(chunks, (list, tuple)): + chunks = list(chunks) + if self.ensure_ascii: + return ''.join(chunks) + else: + return u''.join(chunks) + + def iterencode(self, o, _one_shot=False): + """Encode the given object and yield each string + representation as available. + + For example:: + + for chunk in JSONEncoder().iterencode(bigobject): + mysocket.write(chunk) + + """ + if self.check_circular: + markers = {} + else: + markers = None + if self.ensure_ascii: + _encoder = encode_basestring_ascii + else: + _encoder = encode_basestring + if self.encoding != 'utf-8': + def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): + if isinstance(o, binary_type): + o = o.decode(_encoding) + return _orig_encoder(o) + + def floatstr(o, _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf): + # Check for specials. Note that this type of test is processor + # and/or platform-specific, so do tests which don't depend on + # the internals. 
+ + if o != o: + text = 'null' + elif o == _inf: + text = 'null' + elif o == _neginf: + text = 'null' + else: + return _repr(o) + + return text + + key_memo = {} + int_as_string_bitcount = ( + 53 if self.bigint_as_string else self.int_as_string_bitcount) + _iterencode = _make_iterencode( + markers, self.default, _encoder, self.indent, floatstr, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, _one_shot, self.use_decimal, + self.namedtuple_as_object, self.tuple_as_array, + int_as_string_bitcount, + self.item_sort_key, self.encoding, self.for_json, + Decimal=Decimal) + try: + return _iterencode(o, 0) + finally: + key_memo.clear() + + +def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, + _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, + _use_decimal, _namedtuple_as_object, _tuple_as_array, + _int_as_string_bitcount, _item_sort_key, + _encoding,_for_json, + ## HACK: hand-optimized bytecode; turn globals into locals + _PY3=PY3, + ValueError=ValueError, + string_types=string_types, + Decimal=Decimal, + dict=dict, + float=float, + id=id, + integer_types=integer_types, + isinstance=isinstance, + list=list, + str=str, + tuple=tuple, + ): + if _item_sort_key and not callable(_item_sort_key): + raise TypeError("item_sort_key must be None or callable") + elif _sort_keys and not _item_sort_key: + _item_sort_key = itemgetter(0) + + if (_int_as_string_bitcount is not None and + (_int_as_string_bitcount <= 0 or + not isinstance(_int_as_string_bitcount, integer_types))): + raise TypeError("int_as_string_bitcount must be a positive integer") + + def _encode_int(value): + skip_quoting = ( + _int_as_string_bitcount is None + or + _int_as_string_bitcount < 1 + ) + if ( + skip_quoting or + (-1 << _int_as_string_bitcount) + < value < + (1 << _int_as_string_bitcount) + ): + return str(value) + return '"' + str(value) + '"' + + def _iterencode_list(lst, _current_indent_level): + if not lst: + yield '[]' + return + if markers 
is not None: + markerid = id(lst) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = lst + buf = '[' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + (_indent * _current_indent_level) + separator = _item_separator + newline_indent + buf += newline_indent + else: + newline_indent = None + separator = _item_separator + first = True + for value in lst: + if first: + first = False + else: + buf = separator + yield buf + + for chunk in _iterencode(value, _current_indent_level): + yield chunk + + if newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + (_indent * _current_indent_level) + yield ']' + if markers is not None: + del markers[markerid] + + def _stringify_key(key): + if isinstance(key, string_types): # pragma: no cover + pass + elif isinstance(key, binary_type): + key = key.decode(_encoding) + elif isinstance(key, float): + key = _floatstr(key) + elif key is True: + key = 'true' + elif key is False: + key = 'false' + elif key is None: + key = 'null' + elif isinstance(key, integer_types): + key = str(key) + elif _use_decimal and isinstance(key, Decimal): + key = str(key) + elif _skipkeys: + key = None + else: + raise TypeError("key " + repr(key) + " is not a string") + return key + + def _iterencode_dict(dct, _current_indent_level): + if not dct: + yield '{}' + return + if markers is not None: + markerid = id(dct) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = dct + yield '{' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + (_indent * _current_indent_level) + item_separator = _item_separator + newline_indent + yield newline_indent + else: + newline_indent = None + item_separator = _item_separator + first = True + if _PY3: + iteritems = dct.items() + else: + iteritems = dct.iteritems() + if _item_sort_key: + items = [] + for k, v in dct.items(): + if not isinstance(k, string_types): + k 
= _stringify_key(k) + if k is None: + continue + items.append((k, v)) + items.sort(key=_item_sort_key) + else: + items = iteritems + for key, value in items: + if not (_item_sort_key or isinstance(key, string_types)): + key = _stringify_key(key) + if key is None: + # _skipkeys must be True + continue + if first: + first = False + else: + yield item_separator + yield _encoder(key) + yield _key_separator + + for chunk in _iterencode(value, _current_indent_level): + yield chunk + + if newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + (_indent * _current_indent_level) + yield '}' + if markers is not None: + del markers[markerid] + + def _iterencode(o, _current_indent_level): + if (isinstance(o, string_types) or + (_PY3 and isinstance(o, binary_type))): + yield _encoder(o) + elif o is None: + yield 'null' + elif o is True: + yield 'true' + elif o is False: + yield 'false' + elif isinstance(o, integer_types): + yield _encode_int(o) + elif isinstance(o, float): + yield _floatstr(o) + else: + for_json = _for_json and getattr(o, 'for_json', None) + if for_json and callable(for_json): + for chunk in _iterencode(for_json(), _current_indent_level): + yield chunk + elif isinstance(o, list): + for chunk in _iterencode_list(o, _current_indent_level): + yield chunk + else: + _asdict = _namedtuple_as_object and getattr(o, '_asdict', None) + if _asdict and callable(_asdict): + for chunk in _iterencode_dict(_asdict(), _current_indent_level): + yield chunk + elif (_tuple_as_array and isinstance(o, tuple)): + for chunk in _iterencode_list(o, _current_indent_level): + yield chunk + elif isinstance(o, dict): + for chunk in _iterencode_dict(o, _current_indent_level): + yield chunk + elif _use_decimal and isinstance(o, Decimal): + yield str(o) + else: + if markers is not None: + markerid = id(o) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = o + o = _default(o) + for chunk in _iterencode(o, _current_indent_level): + 
yield chunk + if markers is not None: + del markers[markerid] + + return _iterencode diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/encoderH.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/encoderH.py new file mode 100644 index 0000000000000000000000000000000000000000..a4cbfd50bc37789f72ce50cb4ca3af8842380b71 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/encoderH.py @@ -0,0 +1,552 @@ +"""Implementation of HjsonEncoder +""" +from __future__ import absolute_import +import re +from operator import itemgetter +from decimal import Decimal +from .compat import u, unichr, binary_type, string_types, integer_types, PY3 +from .decoder import PosInf + +# This is required because u() will mangle the string and ur'' isn't valid +# python3 syntax +ESCAPE = re.compile(u'[\\x00-\\x1f\\\\"\\b\\f\\n\\r\\t\u2028\u2029\uffff]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +HAS_UTF8 = re.compile(r'[\x80-\xff]') +ESCAPE_DCT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) + ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) +for i in [0x2028, 0x2029, 0xffff]: + ESCAPE_DCT.setdefault(unichr(i), '\\u%04x' % (i,)) + +COMMONRANGE=u'\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff' + +# NEEDSESCAPE tests if the string can be written without escapes +NEEDSESCAPE = re.compile(u'[\\\"\x00-\x1f'+COMMONRANGE+']') +# NEEDSQUOTES tests if the string can be written as a quoteless string (like needsEscape but without \\ and \") +NEEDSQUOTES = re.compile(u'^\\s|^"|^\'|^#|^\\/\\*|^\\/\\/|^\\{|^\\}|^\\[|^\\]|^:|^,|\\s$|[\x00-\x1f'+COMMONRANGE+u']') +# NEEDSESCAPEML tests if the string can be written as a multiline string (like needsEscape but without \n, \r, \\, \", \t) +NEEDSESCAPEML = re.compile(u'\'\'\'|^[\\s]+$|[\x00-\x08\x0b\x0c\x0e-\x1f'+COMMONRANGE+u']') + 
+WHITESPACE = ' \t\n\r' +STARTSWITHNUMBER = re.compile(r'^[\t ]*(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?\s*((,|\]|\}|#|\/\/|\/\*).*)?$'); +STARTSWITHKEYWORD = re.compile(r'^(true|false|null)\s*((,|\]|\}|#|\/\/|\/\*).*)?$'); +NEEDSESCAPENAME = re.compile(r'[,\{\[\}\]\s:#"\']|\/\/|\/\*|'+"'''") + +FLOAT_REPR = repr + +def encode_basestring(s, _PY3=PY3, _q=u('"')): + """Return a JSON representation of a Python string + + """ + if _PY3: + if isinstance(s, binary_type): + s = s.decode('utf-8') + else: + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + return ESCAPE_DCT[match.group(0)] + return _q + ESCAPE.sub(replace, s) + _q + + +def encode_basestring_ascii(s, _PY3=PY3): + """Return an ASCII-only JSON representation of a Python string + + """ + if _PY3: + if isinstance(s, binary_type): + s = s.decode('utf-8') + else: + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + #return '\\u{0:04x}'.format(n) + return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) + return '\\u%04x\\u%04x' % (s1, s2) + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + + +class HjsonEncoder(object): + """Extensible JSON encoder for Python data structures. 
+ + Supports the following objects and types by default: + + +-------------------+---------------+ + | Python | JSON | + +===================+===============+ + | dict, namedtuple | object | + +-------------------+---------------+ + | list, tuple | array | + +-------------------+---------------+ + | str, unicode | string | + +-------------------+---------------+ + | int, long, float | number | + +-------------------+---------------+ + | True | true | + +-------------------+---------------+ + | False | false | + +-------------------+---------------+ + | None | null | + +-------------------+---------------+ + + To extend this to recognize other objects, subclass and implement a + ``.default()`` method with another method that returns a serializable + object for ``o`` if possible, otherwise it should call the superclass + implementation (to raise ``TypeError``). + + """ + + def __init__(self, skipkeys=False, ensure_ascii=True, + check_circular=True, sort_keys=False, + indent=' ', encoding='utf-8', default=None, + use_decimal=True, namedtuple_as_object=True, + tuple_as_array=True, bigint_as_string=False, + item_sort_key=None, for_json=False, + int_as_string_bitcount=None): + """Constructor for HjsonEncoder, with sensible defaults. + + If skipkeys is false, then it is a TypeError to attempt + encoding of keys that are not str, int, long, float or None. If + skipkeys is True, such items are simply skipped. + + If ensure_ascii is true, the output is guaranteed to be str + objects with all incoming unicode characters escaped. If + ensure_ascii is false, the output will be unicode object. + + If check_circular is true, then lists, dicts, and custom encoded + objects will be checked for circular references during encoding to + prevent an infinite recursion (which would cause an OverflowError). + Otherwise, no such check takes place. 
+ + If sort_keys is true, then the output of dictionaries will be + sorted by key; this is useful for regression tests to ensure + that JSON serializations can be compared on a day-to-day basis. + + If indent is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. + + If specified, default is a function that gets called for objects + that can't otherwise be serialized. It should return a JSON encodable + version of the object or raise a ``TypeError``. + + If encoding is not None, then all input strings will be + transformed into unicode using that encoding prior to JSON-encoding. + The default is UTF-8. + + If use_decimal is true (not the default), ``decimal.Decimal`` will + be supported directly by the encoder. For the inverse, decode JSON + with ``parse_float=decimal.Decimal``. + + If namedtuple_as_object is true (the default), objects with + ``_asdict()`` methods will be encoded as JSON objects. + + If tuple_as_array is true (the default), tuple (and subclasses) will + be encoded as JSON arrays. + + If bigint_as_string is true (not the default), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. + + If int_as_string_bitcount is a positive number (n), then int of size + greater than or equal to 2**n or lower than or equal to -2**n will be + encoded as strings. + + If specified, item_sort_key is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. + + If for_json is true (not the default), objects with a ``for_json()`` + method will use the return value of that method for encoding as JSON + instead of the object. 
+ + """ + + self.skipkeys = skipkeys + self.ensure_ascii = ensure_ascii + self.check_circular = check_circular + self.sort_keys = sort_keys + self.use_decimal = use_decimal + self.namedtuple_as_object = namedtuple_as_object + self.tuple_as_array = tuple_as_array + self.bigint_as_string = bigint_as_string + self.item_sort_key = item_sort_key + self.for_json = for_json + self.int_as_string_bitcount = int_as_string_bitcount + if indent is not None and not isinstance(indent, string_types): + indent = indent * ' ' + elif indent is None: + indent = ' ' + self.indent = indent + if default is not None: + self.default = default + self.encoding = encoding + + def default(self, o): + """Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). + + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + return HjsonEncoder.default(self, o) + + """ + raise TypeError(repr(o) + " is not JSON serializable") + + def encode(self, o): + """Return a JSON string representation of a Python data structure. + + >>> from hjson import HjsonEncoder + >>> HjsonEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo": ["bar", "baz"]}' + + """ + # This is for extremely simple cases and benchmarks. + if isinstance(o, binary_type): + _encoding = self.encoding + if (_encoding is not None and not (_encoding == 'utf-8')): + o = o.decode(_encoding) + + # This doesn't pass the iterator directly to ''.join() because the + # exceptions aren't as detailed. The list call should be roughly + # equivalent to the PySequence_Fast that ''.join() would do. 
+ chunks = self.iterencode(o, _one_shot=True) + if not isinstance(chunks, (list, tuple)): + chunks = list(chunks) + if self.ensure_ascii: + return ''.join(chunks) + else: + return u''.join(chunks) + + def iterencode(self, o, _one_shot=False): + """Encode the given object and yield each string + representation as available. + + For example:: + + for chunk in HjsonEncoder().iterencode(bigobject): + mysocket.write(chunk) + + """ + if self.check_circular: + markers = {} + else: + markers = None + if self.ensure_ascii: + _encoder = encode_basestring_ascii + else: + _encoder = encode_basestring + if self.encoding != 'utf-8': + def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): + if isinstance(o, binary_type): + o = o.decode(_encoding) + return _orig_encoder(o) + + def floatstr(o, _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf): + # Check for specials. Note that this type of test is processor + # and/or platform-specific, so do tests which don't depend on + # the internals. + + if o != o or o == _inf or o == _neginf: + return 'null' + else: + return _repr(o) + + key_memo = {} + int_as_string_bitcount = ( + 53 if self.bigint_as_string else self.int_as_string_bitcount) + _iterencode = _make_iterencode( + markers, self.default, _encoder, self.indent, floatstr, + self.sort_keys, self.skipkeys, _one_shot, self.use_decimal, + self.namedtuple_as_object, self.tuple_as_array, + int_as_string_bitcount, + self.item_sort_key, self.encoding, self.for_json, + Decimal=Decimal) + try: + return _iterencode(o, 0, True) + finally: + key_memo.clear() + + +def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, + _sort_keys, _skipkeys, _one_shot, + _use_decimal, _namedtuple_as_object, _tuple_as_array, + _int_as_string_bitcount, _item_sort_key, + _encoding,_for_json, + ## HACK: hand-optimized bytecode; turn globals into locals + _PY3=PY3, + ValueError=ValueError, + string_types=string_types, + Decimal=Decimal, + dict=dict, + float=float, + id=id, + 
        integer_types=integer_types,
        isinstance=isinstance,
        list=list,
        str=str,
        tuple=tuple,
        ):
    # Builds and returns the recursive Hjson ``_iterencode`` generator.
    if _item_sort_key and not callable(_item_sort_key):
        raise TypeError("item_sort_key must be None or callable")
    elif _sort_keys and not _item_sort_key:
        _item_sort_key = itemgetter(0)

    if (_int_as_string_bitcount is not None and
            (_int_as_string_bitcount <= 0 or
             not isinstance(_int_as_string_bitcount, integer_types))):
        raise TypeError("int_as_string_bitcount must be a positive integer")

    def _encode_int(value):
        # Hjson never quotes integers (unlike the plain JSON encoder,
        # which honours the bitcount threshold here).
        return str(value)

    def _stringify_key(key):
        # Coerce a non-string dict key to its JSON string form; returns
        # None (meaning "skip this item") only when _skipkeys is set.
        if isinstance(key, string_types):  # pragma: no cover
            pass
        elif isinstance(key, binary_type):
            key = key.decode(_encoding)
        elif isinstance(key, float):
            key = _floatstr(key)
        elif key is True:
            key = 'true'
        elif key is False:
            key = 'false'
        elif key is None:
            key = 'null'
        elif isinstance(key, integer_types):
            key = str(key)
        elif _use_decimal and isinstance(key, Decimal):
            key = str(key)
        elif _skipkeys:
            key = None
        else:
            raise TypeError("key " + repr(key) + " is not a string")
        return key

    def _encoder_key(name):
        # Emit an object key, quoteless when the Hjson grammar allows it.
        if not name: return '""'

        # Check if we can insert this name without quotes
        if NEEDSESCAPENAME.search(name):
            return _encoder(name)
        else:
            # return without quotes
            return name

    def _encoder_str(str, _current_indent_level):
        # Emit a string value in the cheapest safe Hjson form:
        # quoteless, quoted, multiline ''' ... ''', or fully escaped.
        # NOTE(review): the parameter shadows the builtin ``str``;
        # renaming it is out of scope for a comments-only change.
        if not str: return '""'

        # Check if we can insert this string without quotes
        # see hjson syntax (must not parse as true, false, null or number)

        first = str[0]
        isNumber = False
        if first == '-' or first >= '0' and first <= '9':
            isNumber = STARTSWITHNUMBER.match(str) is not None

        if (NEEDSQUOTES.search(str) or
            isNumber or
            STARTSWITHKEYWORD.match(str) is not None):

            # If the string contains no control characters, no quote characters, and no
            # backslash characters, then we can safely slap some quotes around it.
            # Otherwise we first check if the string can be expressed in multiline
            # format or we must replace the offending characters with safe escape
            # sequences.

            if not NEEDSESCAPE.search(str):
                return '"' + str + '"'
            elif not NEEDSESCAPEML.search(str):
                return _encoder_str_ml(str, _current_indent_level + 1)
            else:
                return _encoder(str)
        else:
            # return without quotes
            return str

    def _encoder_str_ml(str, _current_indent_level):
        # Emit a multiline ''' ... ''' string, indented one extra level.
        a = str.replace('\r', '').split('\n')
        # gap += indent;

        if len(a) == 1:
            # The string contains only a single line. We still use the multiline
            # format as it avoids escaping the \ character (e.g. when used in a
            # regex).
            return "'''" + a[0] + "'''"
        else:
            gap = _indent * _current_indent_level
            res = '\n' + gap + "'''"
            for line in a:
                res += '\n'
                # Empty lines carry no indentation (trailing-space safety).
                if line: res += gap + line
            return res + '\n' + gap + "'''"

    def _iterencode_dict(dct, _current_indent_level, _isRoot=False):
        # Emit an object; non-root objects start on their own line.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            # Circular-reference guard keyed on object identity.
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct

        if not _isRoot:
            yield '\n' + (_indent * _current_indent_level)

        _current_indent_level += 1
        newline_indent = '\n' + (_indent * _current_indent_level)

        yield '{'

        if _PY3:
            iteritems = dct.items()
        else:
            iteritems = dct.iteritems()
        if _item_sort_key:
            # Stringify keys up front so the sort key sees final strings.
            items = []
            for k, v in dct.items():
                if not isinstance(k, string_types):
                    k = _stringify_key(k)
                    if k is None:
                        continue
                items.append((k, v))
            items.sort(key=_item_sort_key)
        else:
            items = iteritems
        for key, value in items:
            if not (_item_sort_key or isinstance(key, string_types)):
                key = _stringify_key(key)
                if key is None:
                    # _skipkeys must be True
                    continue

            yield newline_indent
            yield _encoder_key(key)

            first = True
            for chunk in _iterencode(value, _current_indent_level):
                if first:
                    first = False
                    # Values that begin with a newline (nested containers,
                    # multiline strings) get a bare ':', others ': '.
                    if chunk[0 : 1] == '\n': yield ':'
                    else: yield ': '
                yield chunk

        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]


    def _iterencode_list(lst, _current_indent_level, _isRoot=False):
        # Emit an array; non-root arrays start on their own line.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Circular-reference guard keyed on object identity.
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst

        if not _isRoot:
            yield '\n' + (_indent * _current_indent_level)

        _current_indent_level += 1
        newline_indent = '\n' + (_indent * _current_indent_level)
        yield '['

        for value in lst:
            yield newline_indent

            # Elements are treated as root-like: no extra leading newline.
            for chunk in _iterencode(value, _current_indent_level, True):
                yield chunk

        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield ']'
        if markers is not None:
            del markers[markerid]


    def _iterencode(o, _current_indent_level, _isRoot=False):
        # Dispatch on the value's type; containers recurse.
        if (isinstance(o, string_types) or
            (_PY3 and isinstance(o, binary_type))):
            yield _encoder_str(o, _current_indent_level)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, integer_types):
            yield _encode_int(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        else:
            for_json = _for_json and getattr(o, 'for_json', None)
            if for_json and callable(for_json):
                for chunk in _iterencode(for_json(), _current_indent_level, _isRoot):
                    yield chunk
            elif isinstance(o, list):
                for chunk in _iterencode_list(o, _current_indent_level, _isRoot):
                    yield chunk
            else:
                _asdict = _namedtuple_as_object and getattr(o, '_asdict', None)
                if _asdict and callable(_asdict):
                    for chunk in _iterencode_dict(_asdict(), _current_indent_level, _isRoot):
                        yield chunk
                elif (_tuple_as_array and isinstance(o, tuple)):
                    for chunk in _iterencode_list(o, _current_indent_level, _isRoot):
                        yield chunk
                elif isinstance(o, dict):
                    for chunk in _iterencode_dict(o, _current_indent_level, _isRoot):
                        yield chunk
                elif _use_decimal and isinstance(o, Decimal):
                    yield str(o)
                else:
                    # Unknown type: ask `default` for a substitute, guarding
                    # against a default() that returns its own argument.
                    if markers is not None:
                        markerid = id(o)
                        if markerid in markers:
                            raise ValueError("Circular reference detected")
                        markers[markerid] = o
                    o = _default(o)
                    for chunk in _iterencode(o, _current_indent_level, _isRoot):
                        yield chunk
                    if markers is not None:
                        del markers[markerid]

    return _iterencode
= end[2] + while curr is not end: + yield curr[0] + curr = curr[2] + + def __reversed__(self): + end = self.__end + curr = end[1] + while curr is not end: + yield curr[0] + curr = curr[1] + + def popitem(self, last=True): + if not self: + raise KeyError('dictionary is empty') + # Modified from original to support Python 2.4, see + # http://code.google.com/p/simplejson/issues/detail?id=53 + if last: + key = reversed(self).next() + else: + key = iter(self).next() + value = self.pop(key) + return key, value + + def __reduce__(self): + items = [[k, self[k]] for k in self] + tmp = self.__map, self.__end + del self.__map, self.__end + inst_dict = vars(self).copy() + self.__map, self.__end = tmp + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def keys(self): + return list(self) + + setdefault = DictMixin.setdefault + update = DictMixin.update + pop = DictMixin.pop + values = DictMixin.values + items = DictMixin.items + iterkeys = DictMixin.iterkeys + itervalues = DictMixin.itervalues + iteritems = DictMixin.iteritems + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + + def copy(self): + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + if isinstance(other, OrderedDict): + return len(self)==len(other) and \ + all(p==q for p, q in zip(self.items(), other.items())) + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/scanner.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/scanner.py new file mode 100644 index 0000000000000000000000000000000000000000..3ece06f5460c3b0c3ad52cef1b32a7b4dcca8919 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/scanner.py @@ -0,0 +1,56 @@ +"""JSON 
token scanner +""" +import re + +__all__ = ['HjsonDecodeError'] + +class HjsonDecodeError(ValueError): + """Subclass of ValueError with the following additional properties: + + msg: The unformatted error message + doc: The JSON document being parsed + pos: The start index of doc where parsing failed + end: The end index of doc where parsing failed (may be None) + lineno: The line corresponding to pos + colno: The column corresponding to pos + endlineno: The line corresponding to end (may be None) + endcolno: The column corresponding to end (may be None) + + """ + # Note that this exception is used from _speedups + def __init__(self, msg, doc, pos, end=None): + ValueError.__init__(self, errmsg(msg, doc, pos, end=end)) + self.msg = msg + self.doc = doc + self.pos = pos + self.end = end + self.lineno, self.colno = linecol(doc, pos) + if end is not None: + self.endlineno, self.endcolno = linecol(doc, end) + else: + self.endlineno, self.endcolno = None, None + + def __reduce__(self): + return self.__class__, (self.msg, self.doc, self.pos, self.end) + + +def linecol(doc, pos): + lineno = doc.count('\n', 0, pos) + 1 + if lineno == 1: + colno = pos + 1 + else: + colno = pos - doc.rindex('\n', 0, pos) + return lineno, colno + + +def errmsg(msg, doc, pos, end=None): + lineno, colno = linecol(doc, pos) + msg = msg.replace('%r', repr(doc[pos:pos + 1])) + if end is None: + fmt = '%s: line %d column %d (char %d)' + return fmt % (msg, lineno, colno, pos) + endlineno, endcolno = linecol(doc, end) + fmt = '%s: line %d column %d - line %d column %d (char %d - %d)' + return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end) + + diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__init__.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..15cbdc13cfdd4c9ddc939cc30a9c40a8a9af8d77 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__init__.py @@ -0,0 
+1,61 @@ +from __future__ import absolute_import +import unittest +import doctest +import sys + + +def additional_tests(suite=None): + import hjson + import hjson.encoder + import hjson.decoder + if suite is None: + suite = unittest.TestSuite() + for mod in (hjson, hjson.encoder, hjson.decoder): + suite.addTest(doctest.DocTestSuite(mod)) + return suite + + +def all_tests_suite(): + def get_suite(): + return additional_tests( + unittest.TestLoader().loadTestsFromNames([ + 'hjson.tests.test_hjson', + 'hjson.tests.test_bitsize_int_as_string', + 'hjson.tests.test_bigint_as_string', + 'hjson.tests.test_check_circular', + 'hjson.tests.test_decode', + 'hjson.tests.test_default', + 'hjson.tests.test_dump', + 'hjson.tests.test_encode_basestring_ascii', + 'hjson.tests.test_errors', + 'hjson.tests.test_fail', + 'hjson.tests.test_float', + 'hjson.tests.test_indent', + 'hjson.tests.test_pass1', + 'hjson.tests.test_pass2', + 'hjson.tests.test_pass3', + 'hjson.tests.test_recursion', + 'hjson.tests.test_scanstring', + 'hjson.tests.test_separators', + 'hjson.tests.test_unicode', + 'hjson.tests.test_decimal', + 'hjson.tests.test_tuple', + 'hjson.tests.test_namedtuple', + #'hjson.tests.test_tool', # fails on windows + 'hjson.tests.test_for_json', + ])) + suite = get_suite() + return suite + + +def main(): + runner = unittest.TextTestRunner(verbosity=1 + sys.argv.count('-v')) + suite = all_tests_suite() + raise SystemExit(not runner.run(suite).wasSuccessful()) + + +if __name__ == '__main__': + import os + import sys + sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) + main() diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_bigint_as_string.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_bigint_as_string.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fcf1edfee24947a20aea780b1a9eb22f3113ec5 Binary files /dev/null and 
b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_bigint_as_string.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_bitsize_int_as_string.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_bitsize_int_as_string.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d8a663974d6ed1f3eb0a4527817f811e3038ab8 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_bitsize_int_as_string.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_check_circular.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_check_circular.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..617969149af0f6fbc1917c6dec223f7276b29e11 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_check_circular.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_decode.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_decode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb418fd5eae7e2ff37c6676317bffcf886f11d87 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_decode.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_default.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_default.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78b47a5f5ac3b22a278073bdf0d8e5cbadc3f6ac Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_default.cpython-310.pyc differ diff --git 
a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_dump.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_dump.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33f5900f14890e06b8340650abe1d4d887eb43e4 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_dump.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_encode_basestring_ascii.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_encode_basestring_ascii.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84861e8c716fa849325fd389ba2c4152116866d4 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_encode_basestring_ascii.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_errors.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..638500e97e2e1b60255c628784e2e6eb08e491f1 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_errors.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_fail.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_fail.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6ec181da39bcc16ec9bb4a3cfd620d1ce265459 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_fail.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_float.cpython-310.pyc 
b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_float.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3d40cd747d07300326eda7058d1364c84a8c7b7 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_float.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_hjson.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_hjson.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db4a9ac4c2b334bd1b4534829f2aabf255529f3f Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_hjson.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_indent.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_indent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d78989bc4c7af9ce6b95bf89285f30185f6d5ef8 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_indent.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_item_sort_key.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_item_sort_key.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c14d36acc44c23d3c1853b4d7289db2f0d8c6079 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_item_sort_key.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_namedtuple.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_namedtuple.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b1947799a1a50892089048b55ff216dc3eaa5288 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_namedtuple.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_pass1.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_pass1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8e8058b351c893b54c7ad94932d9780416f53ae Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_pass1.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_pass3.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_pass3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a63dee76431aa58494d3865985733a2bd383bd5 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_pass3.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_recursion.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_recursion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dd1def0ad4665c7b10f607db209c7819ed02d8b Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_recursion.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_separators.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_separators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc49825facbe77b56bcb1387af6f598ab4c917d7 Binary files /dev/null and 
b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_separators.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_tool.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_tool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc49b0496f488068c30428fcb9abb0a17d9ad0a0 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_tool.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_tuple.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_tuple.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0121604f389a054c340d75a51829fbe3ac079877 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_tuple.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_unicode.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_unicode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..808deaace957ac89d7413ee3bb4d932aa9281f65 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/__pycache__/test_unicode.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_bigint_as_string.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_bigint_as_string.py new file mode 100644 index 0000000000000000000000000000000000000000..3cdb04fe33dc6c057d4d2f44cc197b5ecddbb92d --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_bigint_as_string.py @@ -0,0 +1,67 @@ +from unittest import TestCase + +import hjson as json + + +class TestBigintAsString(TestCase): + # Python 2.5, at least the one that ships on Mac OS X, 
calculates + # 2 ** 53 as 0! It manages to calculate 1 << 53 correctly. + values = [(200, 200), + ((1 << 53) - 1, 9007199254740991), + ((1 << 53), '9007199254740992'), + ((1 << 53) + 1, '9007199254740993'), + (-100, -100), + ((-1 << 53), '-9007199254740992'), + ((-1 << 53) - 1, '-9007199254740993'), + ((-1 << 53) + 1, -9007199254740991)] + + options = ( + {"bigint_as_string": True}, + {"int_as_string_bitcount": 53} + ) + + def test_ints(self): + for opts in self.options: + for val, expect in self.values: + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, **opts))) + + def test_lists(self): + for opts in self.options: + for val, expect in self.values: + val = [val, val] + expect = [expect, expect] + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, **opts))) + + def test_dicts(self): + for opts in self.options: + for val, expect in self.values: + val = {'k': val} + expect = {'k': expect} + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, **opts))) + + def test_dict_keys(self): + for opts in self.options: + for val, _ in self.values: + expect = {str(val): 'value'} + val = {val: 'value'} + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, **opts))) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_bitsize_int_as_string.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_bitsize_int_as_string.py new file mode 100644 index 0000000000000000000000000000000000000000..b8955b9c56b6ce5430aa60c31da74ec7d5c65aa7 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_bitsize_int_as_string.py @@ -0,0 +1,73 @@ +from unittest import TestCase + +import hjson as json + + +class TestBitSizeIntAsString(TestCase): + # Python 2.5, at 
least the one that ships on Mac OS X, calculates + # 2 ** 31 as 0! It manages to calculate 1 << 31 correctly. + values = [ + (200, 200), + ((1 << 31) - 1, (1 << 31) - 1), + ((1 << 31), str(1 << 31)), + ((1 << 31) + 1, str((1 << 31) + 1)), + (-100, -100), + ((-1 << 31), str(-1 << 31)), + ((-1 << 31) - 1, str((-1 << 31) - 1)), + ((-1 << 31) + 1, (-1 << 31) + 1), + ] + + def test_invalid_counts(self): + for n in ['foo', -1, 0, 1.0]: + self.assertRaises( + TypeError, + json.dumpsJSON, 0, int_as_string_bitcount=n) + + def test_ints_outside_range_fails(self): + self.assertNotEqual( + str(1 << 15), + json.loads(json.dumpsJSON(1 << 15, int_as_string_bitcount=16)), + ) + + def test_ints(self): + for val, expect in self.values: + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, int_as_string_bitcount=31)), + ) + + def test_lists(self): + for val, expect in self.values: + val = [val, val] + expect = [expect, expect] + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, int_as_string_bitcount=31))) + + def test_dicts(self): + for val, expect in self.values: + val = {'k': val} + expect = {'k': expect} + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, int_as_string_bitcount=31))) + + def test_dict_keys(self): + for val, _ in self.values: + expect = {str(val): 'value'} + val = {val: 'value'} + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, int_as_string_bitcount=31))) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_check_circular.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_check_circular.py new file mode 100644 index 0000000000000000000000000000000000000000..4b1046549427651db8a80cf60617cc0e2b04bf33 --- /dev/null +++ 
b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_check_circular.py @@ -0,0 +1,30 @@ +from unittest import TestCase +import hjson as json + +def default_iterable(obj): + return list(obj) + +class TestCheckCircular(TestCase): + def test_circular_dict(self): + dct = {} + dct['a'] = dct + self.assertRaises(ValueError, json.dumpsJSON, dct) + + def test_circular_list(self): + lst = [] + lst.append(lst) + self.assertRaises(ValueError, json.dumpsJSON, lst) + + def test_circular_composite(self): + dct2 = {} + dct2['a'] = [] + dct2['a'].append(dct2) + self.assertRaises(ValueError, json.dumpsJSON, dct2) + + def test_circular_default(self): + json.dumpsJSON([set()], default=default_iterable) + self.assertRaises(TypeError, json.dumpsJSON, [set()]) + + def test_circular_off_default(self): + json.dumpsJSON([set()], default=default_iterable, check_circular=False) + self.assertRaises(TypeError, json.dumpsJSON, [set()], check_circular=False) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_decimal.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_decimal.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb3910610b9d028fffca530086519d492567afc --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_decimal.py @@ -0,0 +1,71 @@ +import decimal +from decimal import Decimal +from unittest import TestCase +from hjson.compat import StringIO, reload_module + +import hjson as json + +class TestDecimal(TestCase): + NUMS = "1.0", "10.00", "1.1", "1234567890.1234567890", "500" + def dumps(self, obj, **kw): + sio = StringIO() + json.dumpJSON(obj, sio, **kw) + res = json.dumpsJSON(obj, **kw) + self.assertEqual(res, sio.getvalue()) + return res + + def loads(self, s, **kw): + sio = StringIO(s) + res = json.loads(s, **kw) + self.assertEqual(res, json.load(sio, **kw)) + return res + + def test_decimal_encode(self): + for d in map(Decimal, self.NUMS): + self.assertEqual(self.dumps(d, use_decimal=True), 
str(d)) + + def test_decimal_decode(self): + for s in self.NUMS: + self.assertEqual(self.loads(s, parse_float=Decimal), Decimal(s)) + + def test_stringify_key(self): + for d in map(Decimal, self.NUMS): + v = {d: d} + self.assertEqual( + self.loads( + self.dumps(v, use_decimal=True), parse_float=Decimal), + {str(d): d}) + + def test_decimal_roundtrip(self): + for d in map(Decimal, self.NUMS): + # The type might not be the same (int and Decimal) but they + # should still compare equal. + for v in [d, [d], {'': d}]: + self.assertEqual( + self.loads( + self.dumps(v, use_decimal=True), parse_float=Decimal), + v) + + def test_decimal_defaults(self): + d = Decimal('1.1') + # use_decimal=True is the default + self.assertRaises(TypeError, json.dumpsJSON, d, use_decimal=False) + self.assertEqual('1.1', json.dumpsJSON(d)) + self.assertEqual('1.1', json.dumpsJSON(d, use_decimal=True)) + self.assertRaises(TypeError, json.dumpJSON, d, StringIO(), + use_decimal=False) + sio = StringIO() + json.dumpJSON(d, sio) + self.assertEqual('1.1', sio.getvalue()) + sio = StringIO() + json.dumpJSON(d, sio, use_decimal=True) + self.assertEqual('1.1', sio.getvalue()) + + def test_decimal_reload(self): + # Simulate a subinterpreter that reloads the Python modules but not + # the C code https://github.com/simplejson/simplejson/issues/34 + global Decimal + Decimal = reload_module(decimal).Decimal + import hjson.encoder + hjson.encoder.Decimal = Decimal + self.test_decimal_roundtrip() diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_decode.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_decode.py new file mode 100644 index 0000000000000000000000000000000000000000..cdab0efa3ecc50817a432e9820eb33c2431b25f3 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_decode.py @@ -0,0 +1,139 @@ +from __future__ import absolute_import + +import decimal +from unittest import TestCase + +import hjson as json +from hjson import OrderedDict +from 
hjson.compat import StringIO + + +class TestDecode(TestCase): + if not hasattr(TestCase, "assertIs"): + + def assertIs(self, a, b): + self.assertTrue(a is b, "%r is %r" % (a, b)) + + def test_decimal(self): + rval = json.loads("1.1", parse_float=decimal.Decimal) + self.assertTrue(isinstance(rval, decimal.Decimal)) + self.assertEqual(rval, decimal.Decimal("1.1")) + + def test_float(self): + rval = json.loads("1", parse_int=float) + self.assertTrue(isinstance(rval, float)) + self.assertEqual(rval, 1.0) + + def test_decoder_optimizations(self): + # Several optimizations were made that skip over calls to + # the whitespace regex, so this test is designed to try and + # exercise the uncommon cases. The array cases are already covered. + rval = json.loads('{ "key" : "value" , "k":"v" }') + self.assertEqual(rval, {"key": "value", "k": "v"}) + + def test_empty_objects(self): + s = "{}" + self.assertEqual(json.loads(s), eval(s)) + s = "[]" + self.assertEqual(json.loads(s), eval(s)) + s = '""' + self.assertEqual(json.loads(s), eval(s)) + + def test_object_pairs_hook(self): + s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' + p = [ + ("xkd", 1), + ("kcw", 2), + ("art", 3), + ("hxm", 4), + ("qrt", 5), + ("pad", 6), + ("hoy", 7), + ] + self.assertEqual(json.loads(s), eval(s)) + self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p) + self.assertEqual(json.load(StringIO(s), object_pairs_hook=lambda x: x), p) + od = json.loads(s, object_pairs_hook=OrderedDict) + self.assertEqual(od, OrderedDict(p)) + self.assertEqual(type(od), OrderedDict) + # the object_pairs_hook takes priority over the object_hook + self.assertEqual( + json.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), + OrderedDict(p), + ) + + def check_keys_reuse(self, source, loads): + rval = loads(source) + (a, b), (c, d) = sorted(rval[0]), sorted(rval[1]) + self.assertIs(a, c) + self.assertIs(b, d) + + def test_keys_reuse_str(self): + s = u'[{"a_key": 1, "b_\xe9": 2}, 
{"a_key": 3, "b_\xe9": 4}]'.encode("utf8") + self.check_keys_reuse(s, json.loads) + + def test_keys_reuse_unicode(self): + s = u'[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]' + self.check_keys_reuse(s, json.loads) + + def test_empty_strings(self): + self.assertEqual(json.loads('""'), "") + self.assertEqual(json.loads(u'""'), u"") + self.assertEqual(json.loads('[""]'), [""]) + self.assertEqual(json.loads(u'[""]'), [u""]) + + def test_multiline_string(self): + s1 = """ + +hello: ''' + +''' + +""" + s2 = """ + +hello: ''' +''' + +""" + s3 = """ + +hello: '''''' + +""" + s4 = """ + +hello: '' + +""" + s5 = """ + +hello: "" + +""" + self.assertEqual(json.loads(s1), {"hello": ""}) + self.assertEqual(json.loads(s2), {"hello": ""}) + self.assertEqual(json.loads(s3), {"hello": ""}) + self.assertEqual(json.loads(s4), {"hello": ""}) + self.assertEqual(json.loads(s5), {"hello": ""}) + + def test_raw_decode(self): + cls = json.decoder.HjsonDecoder + self.assertEqual(({"a": {}}, 9), cls().raw_decode('{"a": {}}')) + # http://code.google.com/p/simplejson/issues/detail?id=85 + self.assertEqual( + ({"a": {}}, 9), cls(object_pairs_hook=dict).raw_decode('{"a": {}}') + ) + # https://github.com/simplejson/simplejson/pull/38 + self.assertEqual(({"a": {}}, 11), cls().raw_decode(' \n{"a": {}}')) + + def test_bounds_checking(self): + # https://github.com/simplejson/simplejson/issues/98 + j = json.decoder.HjsonDecoder() + for i in [4, 5, 6, -1, -2, -3, -4, -5, -6]: + self.assertRaises(ValueError, j.scan_once, "1234", i) + self.assertRaises(ValueError, j.raw_decode, "1234", i) + x, y = sorted(["128931233", "472389423"], key=id) + diff = id(x) - id(y) + self.assertRaises(ValueError, j.scan_once, y, diff) + self.assertRaises(ValueError, j.raw_decode, y, i) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_default.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_default.py new file mode 100644 index 
0000000000000000000000000000000000000000..cfed1155e9eb05e5437e1f475160f01a48f602b8 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_default.py @@ -0,0 +1,9 @@ +from unittest import TestCase + +import hjson as json + +class TestDefault(TestCase): + def test_default(self): + self.assertEqual( + json.dumpsJSON(type, default=repr), + json.dumpsJSON(repr(type))) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_dump.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_dump.py new file mode 100644 index 0000000000000000000000000000000000000000..2c711b04d110d8dcf7b1be74da4f77a1e4167267 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_dump.py @@ -0,0 +1,130 @@ +from unittest import TestCase +from hjson.compat import StringIO, long_type, b, binary_type, PY3 +import hjson as json + +def as_text_type(s): + if PY3 and isinstance(s, binary_type): + return s.decode('ascii') + return s + +class TestDump(TestCase): + def test_dump(self): + sio = StringIO() + json.dumpJSON({}, sio) + self.assertEqual(sio.getvalue(), '{}') + + def test_constants(self): + for c in [None, True, False]: + self.assertTrue(json.loads(json.dumpsJSON(c)) is c) + self.assertTrue(json.loads(json.dumpsJSON([c]))[0] is c) + self.assertTrue(json.loads(json.dumpsJSON({'a': c}))['a'] is c) + + def test_stringify_key(self): + items = [(b('bytes'), 'bytes'), + (1.0, '1.0'), + (10, '10'), + (True, 'true'), + (False, 'false'), + (None, 'null'), + (long_type(100), '100')] + for k, expect in items: + self.assertEqual( + json.loads(json.dumpsJSON({k: expect})), + {expect: expect}) + self.assertEqual( + json.loads(json.dumpsJSON({k: expect}, sort_keys=True)), + {expect: expect}) + self.assertRaises(TypeError, json.dumpsJSON, {json: 1}) + for v in [{}, {'other': 1}, {b('derp'): 1, 'herp': 2}]: + for sort_keys in [False, True]: + v0 = dict(v) + v0[json] = 1 + v1 = dict((as_text_type(key), val) for (key, val) in v.items()) + 
self.assertEqual( + json.loads(json.dumpsJSON(v0, skipkeys=True, sort_keys=sort_keys)), + v1) + self.assertEqual( + json.loads(json.dumpsJSON({'': v0}, skipkeys=True, sort_keys=sort_keys)), + {'': v1}) + self.assertEqual( + json.loads(json.dumpsJSON([v0], skipkeys=True, sort_keys=sort_keys)), + [v1]) + + def test_dumps(self): + self.assertEqual(json.dumpsJSON({}), '{}') + + def test_encode_truefalse(self): + self.assertEqual(json.dumpsJSON( + {True: False, False: True}, sort_keys=True), + '{"false": true, "true": false}') + self.assertEqual( + json.dumpsJSON( + {2: 3.0, + 4.0: long_type(5), + False: 1, + long_type(6): True, + "7": 0}, + sort_keys=True), + '{"2": 3.0, "4.0": 5, "6": true, "7": 0, "false": 1}') + + def test_ordered_dict(self): + # http://bugs.python.org/issue6105 + items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)] + s = json.dumpsJSON(json.OrderedDict(items)) + self.assertEqual( + s, + '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}') + + def test_indent_unknown_type_acceptance(self): + """ + A test against the regression mentioned at `github issue 29`_. + + The indent parameter should accept any type which pretends to be + an instance of int or long when it comes to being multiplied by + strings, even if it is not actually an int or long, for + backwards compatibility. + + .. _github issue 29: + http://github.com/simplejson/simplejson/issue/29 + """ + + class AwesomeInt(object): + """An awesome reimplementation of integers""" + + def __init__(self, *args, **kwargs): + if len(args) > 0: + # [construct from literals, objects, etc.] + # ... + + # Finally, if args[0] is an integer, store it + if isinstance(args[0], int): + self._int = args[0] + + # [various methods] + + def __mul__(self, other): + # [various ways to multiply AwesomeInt objects] + # ... 
finally, if the right-hand operand is not awesome enough, + # try to do a normal integer multiplication + if hasattr(self, '_int'): + return self._int * other + else: + raise NotImplementedError("To do non-awesome things with" + " this object, please construct it from an integer!") + + s = json.dumpsJSON([0, 1, 2], indent=AwesomeInt(3)) + self.assertEqual(s, '[\n 0,\n 1,\n 2\n]') + + def test_accumulator(self): + # the C API uses an accumulator that collects after 100,000 appends + lst = [0] * 100000 + self.assertEqual(json.loads(json.dumpsJSON(lst)), lst) + + def test_sort_keys(self): + # https://github.com/simplejson/simplejson/issues/106 + for num_keys in range(2, 32): + p = dict((str(x), x) for x in range(num_keys)) + sio = StringIO() + json.dumpJSON(p, sio, sort_keys=True) + self.assertEqual(sio.getvalue(), json.dumpsJSON(p, sort_keys=True)) + self.assertEqual(json.loads(sio.getvalue()), p) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_encode_basestring_ascii.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_encode_basestring_ascii.py new file mode 100644 index 0000000000000000000000000000000000000000..6783eff681a9f2638d57706bf01987c58280c04f --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_encode_basestring_ascii.py @@ -0,0 +1,42 @@ +from unittest import TestCase + +import hjson.encoder +from hjson.compat import b + +CASES = [ + (u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'), + (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'), + (u'controls', '"controls"'), + (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'), + (u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'), + (u' s p a c e d ', '" s p a c e d "'), + (u'\U0001d120', '"\\ud834\\udd20"'), + 
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (b('\xce\xb1\xce\xa9'), '"\\u03b1\\u03a9"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (b('\xce\xb1\xce\xa9'), '"\\u03b1\\u03a9"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (u"`1~!@#$%^&*()_+-={':[,]}|;.?", '"`1~!@#$%^&*()_+-={\':[,]}|;.?"'), + (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'), + (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'), +] + +class TestEncodeBaseStringAscii(TestCase): + def test_py_encode_basestring_ascii(self): + self._test_encode_basestring_ascii(hjson.encoder.encode_basestring_ascii) + + def _test_encode_basestring_ascii(self, encode_basestring_ascii): + fname = encode_basestring_ascii.__name__ + for input_string, expect in CASES: + result = encode_basestring_ascii(input_string) + #self.assertEqual(result, expect, + # '{0!r} != {1!r} for {2}({3!r})'.format( + # result, expect, fname, input_string)) + self.assertEqual(result, expect, + '%r != %r for %s(%r)' % (result, expect, fname, input_string)) + + def test_sorted_dict(self): + items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)] + s = hjson.dumpsJSON(dict(items), sort_keys=True) + self.assertEqual(s, '{"five": 5, "four": 4, "one": 1, "three": 3, "two": 2}') diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_errors.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..fd256043be9c4a0e02575a0c77a0e19db5e69a0c --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_errors.py @@ -0,0 +1,51 @@ +import sys, pickle +from unittest import TestCase + +import hjson as json +from hjson.compat import u, b + +class TestErrors(TestCase): + def test_string_keys_error(self): + data = [{'a': 'A', 'b': (2, 4), 'c': 3.0, ('d',): 'D tuple'}] + self.assertRaises(TypeError, json.dumpsJSON, data) + + def test_decode_error(self): + err = 
None + try: + json.loads('{}\na\nb') + except json.HjsonDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected HjsonDecodeError') + self.assertEqual(err.lineno, 2) + self.assertEqual(err.colno, 1) + self.assertEqual(err.endlineno, 3) + self.assertEqual(err.endcolno, 2) + + def test_scan_error(self): + err = None + for t in (u, b): + try: + json.loads(t('{"asdf": "')) + except json.HjsonDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected HjsonDecodeError') + self.assertEqual(err.lineno, 1) + self.assertEqual(err.colno, 10) + + def test_error_is_pickable(self): + err = None + try: + json.loads('{}\na\nb') + except json.HjsonDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected HjsonDecodeError') + s = pickle.dumps(err) + e = pickle.loads(s) + + self.assertEqual(err.msg, e.msg) + self.assertEqual(err.doc, e.doc) + self.assertEqual(err.pos, e.pos) + self.assertEqual(err.end, e.end) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_fail.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_fail.py new file mode 100644 index 0000000000000000000000000000000000000000..ca591e784aae3e4884c0768cba07d8a26508467e --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_fail.py @@ -0,0 +1,143 @@ +import sys +from unittest import TestCase + +import hjson as json + +# 2007-10-05 +JSONDOCS = [ + # http://json.org/JSON_checker/test/fail1.json + # '"A JSON payload should be an object or array, not a string."', + # http://json.org/JSON_checker/test/fail2.json + '["Unclosed array"', + # http://json.org/JSON_checker/test/fail3.json + #'{unquoted_key: "keys must be quoted"}', + # http://json.org/JSON_checker/test/fail4.json + # '["extra comma",]', + # http://json.org/JSON_checker/test/fail5.json + '["double extra comma",,]', + # http://json.org/JSON_checker/test/fail6.json + '[ , "<-- missing value"]', + # http://json.org/JSON_checker/test/fail7.json + '["Comma after the close"],', + # 
http://json.org/JSON_checker/test/fail8.json + '["Extra close"]]', + # http://json.org/JSON_checker/test/fail9.json + # '{"Extra comma": true,}', + # http://json.org/JSON_checker/test/fail10.json + '{"Extra value after close": true} "misplaced quoted value"', + # http://json.org/JSON_checker/test/fail11.json + '{"Illegal expression": 1 + 2}', + # http://json.org/JSON_checker/test/fail12.json + '{"Illegal invocation": alert()}', + # http://json.org/JSON_checker/test/fail13.json + '{"Numbers cannot have leading zeroes": 013}', + # http://json.org/JSON_checker/test/fail14.json + '{"Numbers cannot be hex": 0x14}', + # http://json.org/JSON_checker/test/fail15.json + '["Illegal backslash escape: \\x15"]', + # http://json.org/JSON_checker/test/fail16.json + '[\\naked]', + # http://json.org/JSON_checker/test/fail17.json + '["Illegal backslash escape: \\017"]', + # http://json.org/JSON_checker/test/fail18.json + # '[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', + # http://json.org/JSON_checker/test/fail19.json + '{"Missing colon" null}', + # http://json.org/JSON_checker/test/fail20.json + '{"Double colon":: null}', + # http://json.org/JSON_checker/test/fail21.json + '{"Comma instead of colon", null}', + # http://json.org/JSON_checker/test/fail22.json + '["Colon instead of comma": false]', + # http://json.org/JSON_checker/test/fail23.json + '["Bad value", truth]', + # http://json.org/JSON_checker/test/fail24.json + #"['single quote']", + # http://json.org/JSON_checker/test/fail25.json + '["\ttab\tcharacter\tin\tstring\t"]', + # http://json.org/JSON_checker/test/fail26.json + '["tab\\ character\\ in\\ string\\ "]', + # http://json.org/JSON_checker/test/fail27.json + '["line\nbreak"]', + # http://json.org/JSON_checker/test/fail28.json + '["line\\\nbreak"]', + # http://json.org/JSON_checker/test/fail29.json + '[0e]', + # http://json.org/JSON_checker/test/fail30.json + '[0e+]', + # http://json.org/JSON_checker/test/fail31.json + '[0e+-1]', + # 
http://json.org/JSON_checker/test/fail32.json + '{"Comma instead if closing brace": true,', + # http://json.org/JSON_checker/test/fail33.json + '["mismatch"}', + # http://code.google.com/p/simplejson/issues/detail?id=3 + u'["A\u001FZ control characters in string"]', + # misc based on coverage + '{', + '{]', + '{"foo": "bar"]', + '{"foo": "bar"', +] + +class TestFail(TestCase): + def test_failures(self): + for idx, doc in enumerate(JSONDOCS): + idx = idx + 1 + try: + json.loads(doc) + except json.HjsonDecodeError: + pass + else: + self.fail("Expected failure for fail%d.json: %r" % (idx, doc)) + + def test_array_decoder_issue46(self): + # http://code.google.com/p/simplejson/issues/detail?id=46 + for doc in [u'[,]', '[,]']: + try: + json.loads(doc) + except json.HjsonDecodeError: + pass + except Exception: + e = sys.exc_info()[1] + self.fail("Unexpected exception raised %r %s" % (e, e)) + else: + self.fail("Unexpected success parsing '[,]'") + + def test_truncated_input(self): + test_cases = [ + ('[', "End of input while parsing an array", 1), + # ('[42', "Expecting ',' delimiter", 3), + ('[42,', 'Expecting value', 4), + ('["', 'Unterminated string starting at', 1), + ('["spam', 'Unterminated string starting at', 1), + # ('["spam"', "Expecting ',' delimiter", 7), + ('["spam",', 'Expecting value', 8), + ('{', 'Bad key name (eof)', 1), + ('{"', 'Unterminated string starting at', 1), + ('{"spam', 'Unterminated string starting at', 1), + ('{"spam"', "Expecting ':' delimiter", 7), + ('{"spam":', 'Expecting value', 8), + # ('{"spam":42', "Expecting ',' delimiter", 10), + ('{"spam":42,', 'Bad key name (eof)', 11), + ('"', 'Unterminated string starting at', 0), + ('"spam', 'Unterminated string starting at', 0), + ('[,', "Found a punctuator character", 1), + ] + for data, msg, idx in test_cases: + try: + json.loads(data) + except json.HjsonDecodeError: + e = sys.exc_info()[1] + self.assertEqual( + e.msg[:len(msg)], + msg, + "%r doesn't start with %r for %r" % (e.msg, msg, 
data)) + self.assertEqual( + e.pos, idx, + "pos %r != %r for %r" % (e.pos, idx, data)) + except Exception: + e = sys.exc_info()[1] + self.fail("Unexpected exception raised %r %s" % (e, e)) + else: + self.fail("Unexpected success parsing '%r'" % (data,)) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_float.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_float.py new file mode 100644 index 0000000000000000000000000000000000000000..567ea427a64efebba1277e65ad7b1c1e4d52d118 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_float.py @@ -0,0 +1,25 @@ +import math +from unittest import TestCase +from hjson.compat import long_type, text_type +import hjson as json +from hjson.decoder import NaN, PosInf, NegInf + +class TestFloat(TestCase): + + def test_degenerates_ignore(self): + for f in (PosInf, NegInf, NaN): + self.assertEqual(json.loads(json.dumpsJSON(f)), None) + + def test_floats(self): + for num in [1617161771.7650001, math.pi, math.pi**100, + math.pi**-100, 3.1]: + self.assertEqual(float(json.dumpsJSON(num)), num) + self.assertEqual(json.loads(json.dumpsJSON(num)), num) + self.assertEqual(json.loads(text_type(json.dumpsJSON(num))), num) + + def test_ints(self): + for num in [1, long_type(1), 1<<32, 1<<64]: + self.assertEqual(json.dumpsJSON(num), str(num)) + self.assertEqual(int(json.dumpsJSON(num)), num) + self.assertEqual(json.loads(json.dumpsJSON(num)), num) + self.assertEqual(json.loads(text_type(json.dumpsJSON(num))), num) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_for_json.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_for_json.py new file mode 100644 index 0000000000000000000000000000000000000000..672b8b278a844d6885d66a2d8380f18846aabb99 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_for_json.py @@ -0,0 +1,97 @@ +import unittest +import hjson as json + + +class ForJson(object): + def for_json(self): + return 
{'for_json': 1} + + +class NestedForJson(object): + def for_json(self): + return {'nested': ForJson()} + + +class ForJsonList(object): + def for_json(self): + return ['list'] + + +class DictForJson(dict): + def for_json(self): + return {'alpha': 1} + + +class ListForJson(list): + def for_json(self): + return ['list'] + + +class TestForJson(unittest.TestCase): + def assertRoundTrip(self, obj, other, for_json=True): + if for_json is None: + # None will use the default + s = json.dumpsJSON(obj) + else: + s = json.dumpsJSON(obj, for_json=for_json) + self.assertEqual( + json.loads(s), + other) + + def test_for_json_encodes_stand_alone_object(self): + self.assertRoundTrip( + ForJson(), + ForJson().for_json()) + + def test_for_json_encodes_object_nested_in_dict(self): + self.assertRoundTrip( + {'hooray': ForJson()}, + {'hooray': ForJson().for_json()}) + + def test_for_json_encodes_object_nested_in_list_within_dict(self): + self.assertRoundTrip( + {'list': [0, ForJson(), 2, 3]}, + {'list': [0, ForJson().for_json(), 2, 3]}) + + def test_for_json_encodes_object_nested_within_object(self): + self.assertRoundTrip( + NestedForJson(), + {'nested': {'for_json': 1}}) + + def test_for_json_encodes_list(self): + self.assertRoundTrip( + ForJsonList(), + ForJsonList().for_json()) + + def test_for_json_encodes_list_within_object(self): + self.assertRoundTrip( + {'nested': ForJsonList()}, + {'nested': ForJsonList().for_json()}) + + def test_for_json_encodes_dict_subclass(self): + self.assertRoundTrip( + DictForJson(a=1), + DictForJson(a=1).for_json()) + + def test_for_json_encodes_list_subclass(self): + self.assertRoundTrip( + ListForJson(['l']), + ListForJson(['l']).for_json()) + + def test_for_json_ignored_if_not_true_with_dict_subclass(self): + for for_json in (None, False): + self.assertRoundTrip( + DictForJson(a=1), + {'a': 1}, + for_json=for_json) + + def test_for_json_ignored_if_not_true_with_list_subclass(self): + for for_json in (None, False): + self.assertRoundTrip( + 
ListForJson(['l']), + ['l'], + for_json=for_json) + + def test_raises_typeerror_if_for_json_not_true_with_object(self): + self.assertRaises(TypeError, json.dumpsJSON, ForJson()) + self.assertRaises(TypeError, json.dumpsJSON, ForJson(), for_json=False) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_hjson.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_hjson.py new file mode 100644 index 0000000000000000000000000000000000000000..725c6abfd7e28db4b88def7217f0361d81900971 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_hjson.py @@ -0,0 +1,65 @@ +from __future__ import with_statement + +import os +import sys +import subprocess +import tempfile +import codecs # dump + +from unittest import TestCase + +import hjson + +class TestAssets(TestCase): + + def __init__(self, *args, **kwargs): + super(TestAssets, self).__init__(*args, **kwargs) + self.assetsDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "assets") + self.assets = self.load('testlist.txt', False).split('\n') + self.maxDiff = None + self.verma, self.vermi = sys.version_info[0:2] + + def load(self, name, cr): + name = os.path.join(self.assetsDir, name) + with open(name, 'rb') as f: + text = f.read().decode('utf-8') + text = text.replace('\r', '') + if cr: text = text.replace('\n', '\r\n') + return text + + def check(self, name, file, inputCr): + text = self.load(file, inputCr) + shouldFail = name[0:4] == "fail" + + try: + data = hjson.loads(text) + self.assertFalse(shouldFail, file) + + text1 = hjson.dumpsJSON(data) + hjson1 = hjson.dumps(data, ensure_ascii=False); + result = hjson.loads(self.load(name + "_result.json", inputCr)) + text2 = hjson.dumpsJSON(result) + hjson2 = self.load(name + "_result.hjson", False) + + # dbg + # with open(name + "_dbg1.txt", "w") as tmp: tmp.write(hjson1.encode("utf-8")) + # with open(name + "_dbg2.txt", "w") as tmp: tmp.write(hjson2.encode("utf-8")) + # with codecs.open(name + "_dbg3.txt", 'w', 
'utf-8') as tmp: hjson.dump(data, tmp) + + if self.verma>2 or self.vermi>6: + # final check fails on py2.6 because of string formatting issues + self.assertEqual(text2, text1, file) + self.assertEqual(hjson2, hjson1, file) + + except hjson.HjsonDecodeError as e: + if not shouldFail: + self.fail("raised error on parsing %s: %r" % (file, e)) + + def test_files(self): + for file in self.assets: + name, sep, ext = file.partition("_test.") + if name.startswith("stringify/quotes") or \ + name.startswith("extra/"): continue # ignore/not supported + + self.check(name, file, True) + self.check(name, file, False) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_indent.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_indent.py new file mode 100644 index 0000000000000000000000000000000000000000..372e276cfad91077b434857d65b67a3592768603 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_indent.py @@ -0,0 +1,86 @@ +from unittest import TestCase +import textwrap + +import hjson as json +from hjson.compat import StringIO + +class TestIndent(TestCase): + def test_indent(self): + h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', + 'i-vhbjkhnth', + {'nifty': 87}, {'field': 'yes', 'morefield': False} ] + + expect = textwrap.dedent("""\ + [ + \t[ + \t\t"blorpie" + \t], + \t[ + \t\t"whoops" + \t], + \t[], + \t"d-shtaeou", + \t"d-nthiouh", + \t"i-vhbjkhnth", + \t{ + \t\t"nifty": 87 + \t}, + \t{ + \t\t"field": "yes", + \t\t"morefield": false + \t} + ]""") + + + d1 = json.dumpsJSON(h) + d2 = json.dumpsJSON(h, indent='\t', sort_keys=True, separators=(',', ': ')) + d3 = json.dumpsJSON(h, indent=' ', sort_keys=True, separators=(',', ': ')) + d4 = json.dumpsJSON(h, indent=2, sort_keys=True, separators=(',', ': ')) + + h1 = json.loads(d1) + h2 = json.loads(d2) + h3 = json.loads(d3) + h4 = json.loads(d4) + + self.assertEqual(h1, h) + self.assertEqual(h2, h) + self.assertEqual(h3, h) + self.assertEqual(h4, h) + 
self.assertEqual(d3, expect.replace('\t', ' ')) + self.assertEqual(d4, expect.replace('\t', ' ')) + # NOTE: Python 2.4 textwrap.dedent converts tabs to spaces, + # so the following is expected to fail. Python 2.4 is not a + # supported platform in hjson 2.1.0+. + self.assertEqual(d2, expect) + + def test_indent0(self): + h = {3: 1} + def check(indent, expected): + d1 = json.dumpsJSON(h, indent=indent) + self.assertEqual(d1, expected) + + sio = StringIO() + json.dumpJSON(h, sio, indent=indent) + self.assertEqual(sio.getvalue(), expected) + + # indent=0 should emit newlines + check(0, '{\n"3": 1\n}') + # indent=None is more compact + check(None, '{"3": 1}') + + def test_separators(self): + lst = [1,2,3,4] + expect = '[\n1,\n2,\n3,\n4\n]' + expect_spaces = '[\n1, \n2, \n3, \n4\n]' + # Ensure that separators still works + self.assertEqual( + expect_spaces, + json.dumpsJSON(lst, indent=0, separators=(', ', ': '))) + # Force the new defaults + self.assertEqual( + expect, + json.dumpsJSON(lst, indent=0, separators=(',', ': '))) + # Added in 2.1.4 + self.assertEqual( + expect, + json.dumpsJSON(lst, indent=0)) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_item_sort_key.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_item_sort_key.py new file mode 100644 index 0000000000000000000000000000000000000000..ee460f598b336a9ccba8bb52ae006a070ae98739 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_item_sort_key.py @@ -0,0 +1,20 @@ +from unittest import TestCase + +import hjson as json +from operator import itemgetter + +class TestItemSortKey(TestCase): + def test_simple_first(self): + a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'} + self.assertEqual( + '{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}', + json.dumpsJSON(a, item_sort_key=json.simple_first)) + 
+ def test_case(self): + a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'} + self.assertEqual( + '{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}', + json.dumpsJSON(a, item_sort_key=itemgetter(0))) + self.assertEqual( + '{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}', + json.dumpsJSON(a, item_sort_key=lambda kv: kv[0].lower())) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_namedtuple.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_namedtuple.py new file mode 100644 index 0000000000000000000000000000000000000000..9d91b0f1d66bcf5e943c7bd204dc45f8a80b5d94 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_namedtuple.py @@ -0,0 +1,122 @@ +from __future__ import absolute_import +import unittest +import hjson as json +from hjson.compat import StringIO + +try: + from collections import namedtuple +except ImportError: + class Value(tuple): + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def _asdict(self): + return {'value': self[0]} + class Point(tuple): + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def _asdict(self): + return {'x': self[0], 'y': self[1]} +else: + Value = namedtuple('Value', ['value']) + Point = namedtuple('Point', ['x', 'y']) + +class DuckValue(object): + def __init__(self, *args): + self.value = Value(*args) + + def _asdict(self): + return self.value._asdict() + +class DuckPoint(object): + def __init__(self, *args): + self.point = Point(*args) + + def _asdict(self): + return self.point._asdict() + +class DeadDuck(object): + _asdict = None + +class DeadDict(dict): + _asdict = None + +CONSTRUCTORS = [ + lambda v: v, + lambda v: [v], + lambda v: [{'key': v}], +] + +class TestNamedTuple(unittest.TestCase): + def 
test_namedtuple_dumps(self): + for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]: + d = v._asdict() + self.assertEqual(d, json.loads(json.dumpsJSON(v))) + self.assertEqual( + d, + json.loads(json.dumpsJSON(v, namedtuple_as_object=True))) + self.assertEqual(d, json.loads(json.dumpsJSON(v, tuple_as_array=False))) + self.assertEqual( + d, + json.loads(json.dumpsJSON(v, namedtuple_as_object=True, + tuple_as_array=False))) + + def test_namedtuple_dumps_false(self): + for v in [Value(1), Point(1, 2)]: + l = list(v) + self.assertEqual( + l, + json.loads(json.dumpsJSON(v, namedtuple_as_object=False))) + self.assertRaises(TypeError, json.dumpsJSON, v, + tuple_as_array=False, namedtuple_as_object=False) + + def test_namedtuple_dump(self): + for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]: + d = v._asdict() + sio = StringIO() + json.dumpJSON(v, sio) + self.assertEqual(d, json.loads(sio.getvalue())) + sio = StringIO() + json.dumpJSON(v, sio, namedtuple_as_object=True) + self.assertEqual( + d, + json.loads(sio.getvalue())) + sio = StringIO() + json.dumpJSON(v, sio, tuple_as_array=False) + self.assertEqual(d, json.loads(sio.getvalue())) + sio = StringIO() + json.dumpJSON(v, sio, namedtuple_as_object=True, + tuple_as_array=False) + self.assertEqual( + d, + json.loads(sio.getvalue())) + + def test_namedtuple_dump_false(self): + for v in [Value(1), Point(1, 2)]: + l = list(v) + sio = StringIO() + json.dumpJSON(v, sio, namedtuple_as_object=False) + self.assertEqual( + l, + json.loads(sio.getvalue())) + self.assertRaises(TypeError, json.dumpJSON, v, StringIO(), + tuple_as_array=False, namedtuple_as_object=False) + + def test_asdict_not_callable_dump(self): + for f in CONSTRUCTORS: + self.assertRaises(TypeError, + json.dumpJSON, f(DeadDuck()), StringIO(), namedtuple_as_object=True) + sio = StringIO() + json.dumpJSON(f(DeadDict()), sio, namedtuple_as_object=True) + self.assertEqual( + json.dumpsJSON(f({})), + sio.getvalue()) + + def 
test_asdict_not_callable_dumps(self): + for f in CONSTRUCTORS: + self.assertRaises(TypeError, + json.dumpsJSON, f(DeadDuck()), namedtuple_as_object=True) + self.assertEqual( + json.dumpsJSON(f({})), + json.dumpsJSON(f(DeadDict()), namedtuple_as_object=True)) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_pass1.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_pass1.py new file mode 100644 index 0000000000000000000000000000000000000000..8ddddd2bad08bfa8ff48c5321021deb05a15f5c8 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_pass1.py @@ -0,0 +1,71 @@ +from unittest import TestCase + +import hjson as json + +# from http://json.org/JSON_checker/test/pass1.json +JSON = r''' +[ + "JSON Test Pattern pass1", + {"object with 1 member":["array with 1 element"]}, + {}, + [], + -42, + true, + false, + null, + { + "integer": 1234567890, + "real": -9876.543210, + "e": 0.123456789e-12, + "E": 1.234567890E+34, + "": 23456789012E66, + "zero": 0, + "one": 1, + "space": " ", + "quote": "\"", + "backslash": "\\", + "controls": "\b\f\n\r\t", + "slash": "/ & \/", + "alpha": "abcdefghijklmnopqrstuvwyz", + "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", + "digit": "0123456789", + "special": "`1~!@#$%^&*()_+-={':[,]}|;.?", + "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", + "true": true, + "false": false, + "null": null, + "array":[ ], + "object":{ }, + "address": "50 St. James Street", + "url": "http://www.JSON.org/", + "comment": "// /* */": " ", + " s p a c e d " :[1,2 , 3 + +, + +4 , 5 , 6 ,7 ],"compact": [1,2,3,4,5,6,7], + "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", + "quotes": "" \u0022 %22 0x22 034 "", + "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" 
+: "A key can be any string" + }, + 0.5 ,98.6 +, +99.44 +, + +1066, +1e1, +0.1e1, +1e-1, +1e00,2e+00,2e-00 +,"rosebud"] +''' + +class TestPass1(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumpsJSON(res) + self.assertEqual(res, json.loads(out)) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_pass2.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_pass2.py new file mode 100644 index 0000000000000000000000000000000000000000..a15db307d877d8796072778def9a66603d731df3 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_pass2.py @@ -0,0 +1,14 @@ +from unittest import TestCase +import hjson as json + +# from http://json.org/JSON_checker/test/pass2.json +JSON = r''' +[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] +''' + +class TestPass2(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumpsJSON(res) + self.assertEqual(res, json.loads(out)) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_pass3.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_pass3.py new file mode 100644 index 0000000000000000000000000000000000000000..cb528e2efae57ac8da5918f7be21bd1e1af6f7eb --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_pass3.py @@ -0,0 +1,20 @@ +from unittest import TestCase + +import hjson as json + +# from http://json.org/JSON_checker/test/pass3.json +JSON = r''' +{ + "JSON Test Pattern pass3": { + "The outermost value": "must be an object or array.", + "In this test": "It is an object." 
+ } +} +''' + +class TestPass3(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumpsJSON(res) + self.assertEqual(res, json.loads(out)) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_recursion.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_recursion.py new file mode 100644 index 0000000000000000000000000000000000000000..56e1f71e3a4be2382e2b9f47312d62aeec42286c --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_recursion.py @@ -0,0 +1,67 @@ +from unittest import TestCase + +import hjson as json + +class JSONTestObject: + pass + + +class RecursiveJSONEncoder(json.JSONEncoder): + recurse = False + def default(self, o): + if o is JSONTestObject: + if self.recurse: + return [JSONTestObject] + else: + return 'JSONTestObject' + return json.JSONEncoder.default(o) + + +class TestRecursion(TestCase): + def test_listrecursion(self): + x = [] + x.append(x) + try: + json.dumpsJSON(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on list recursion") + x = [] + y = [x] + x.append(y) + try: + json.dumpsJSON(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on alternating list recursion") + y = [] + x = [y, y] + # ensure that the marker is cleared + json.dumpsJSON(x) + + def test_dictrecursion(self): + x = {} + x["test"] = x + try: + json.dumpsJSON(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on dict recursion") + x = {} + y = {"a": x, "b": x} + # ensure that the marker is cleared + json.dumpsJSON(y) + + def test_defaultrecursion(self): + enc = RecursiveJSONEncoder() + self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"') + enc.recurse = True + try: + enc.encode(JSONTestObject) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on default recursion") diff --git 
a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_scanstring.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_scanstring.py new file mode 100644 index 0000000000000000000000000000000000000000..ba6557cc7f3f25eefdb511c5dfee7725dffc8bec --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_scanstring.py @@ -0,0 +1,168 @@ +import sys +from unittest import TestCase + +import hjson as json +import hjson.decoder +from hjson.compat import b, PY3 + +class TestScanString(TestCase): + # The bytes type is intentionally not used in most of these tests + # under Python 3 because the decoder immediately coerces to str before + # calling scanstring. In Python 2 we are testing the code paths + # for both unicode and str. + # + # The reason this is done is because Python 3 would require + # entirely different code paths for parsing bytes and str. + # + def test_py_scanstring(self): + self._test_scanstring(hjson.decoder.scanstring) + + def _test_scanstring(self, scanstring): + if sys.maxunicode == 65535: + self.assertEqual( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 6)) + else: + self.assertEqual( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 5)) + + self.assertEqual( + scanstring('"\\u007b"', 1, None, True), + (u'{', 8)) + + self.assertEqual( + scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True), + (u'A JSON payload should be an object or array, not a string.', 60)) + + self.assertEqual( + scanstring('["Unclosed array"', 2, None, True), + (u'Unclosed array', 17)) + + self.assertEqual( + scanstring('["extra comma",]', 2, None, True), + (u'extra comma', 14)) + + self.assertEqual( + scanstring('["double extra comma",,]', 2, None, True), + (u'double extra comma', 21)) + + self.assertEqual( + scanstring('["Comma after the close"],', 2, None, True), + (u'Comma after the close', 24)) + + self.assertEqual( + scanstring('["Extra close"]]', 2, None, True), 
+ (u'Extra close', 14)) + + self.assertEqual( + scanstring('{"Extra comma": true,}', 2, None, True), + (u'Extra comma', 14)) + + self.assertEqual( + scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True), + (u'Extra value after close', 26)) + + self.assertEqual( + scanstring('{"Illegal expression": 1 + 2}', 2, None, True), + (u'Illegal expression', 21)) + + self.assertEqual( + scanstring('{"Illegal invocation": alert()}', 2, None, True), + (u'Illegal invocation', 21)) + + self.assertEqual( + scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True), + (u'Numbers cannot have leading zeroes', 37)) + + self.assertEqual( + scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True), + (u'Numbers cannot be hex', 24)) + + self.assertEqual( + scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True), + (u'Too deep', 30)) + + self.assertEqual( + scanstring('{"Missing colon" null}', 2, None, True), + (u'Missing colon', 16)) + + self.assertEqual( + scanstring('{"Double colon":: null}', 2, None, True), + (u'Double colon', 15)) + + self.assertEqual( + scanstring('{"Comma instead of colon", null}', 2, None, True), + (u'Comma instead of colon', 25)) + + self.assertEqual( + scanstring('["Colon instead of comma": false]', 2, None, True), + (u'Colon instead of comma', 25)) + + self.assertEqual( + scanstring('["Bad value", truth]', 2, None, True), + (u'Bad value', 12)) + + for c in map(chr, range(0x00, 0x1f)): + self.assertEqual( + scanstring(c + '"', 0, None, False), + (c, 2)) + self.assertRaises( + ValueError, + scanstring, c + '"', 0, None, True) + + def test_issue3623(self): + self.assertRaises(ValueError, json.decoder.scanstring, "xxx", 1, + "xxx") + self.assertRaises(UnicodeDecodeError, + json.encoder.encode_basestring_ascii, b("xx\xff")) + + def test_surrogates(self): + scanstring = json.decoder.scanstring + + def assertScan(given, expect, test_utf8=True): + givens = [given] + if not PY3 and test_utf8: + 
givens.append(given.encode('utf8')) + for given in givens: + (res, count) = scanstring(given, 1, None, True) + self.assertEqual(len(given), count) + self.assertEqual(res, expect) + + assertScan( + u'"z\\ud834\\u0079x"', + u'z\ud834yx') + assertScan( + u'"z\\ud834\\udd20x"', + u'z\U0001d120x') + assertScan( + u'"z\\ud834\\ud834\\udd20x"', + u'z\ud834\U0001d120x') + assertScan( + u'"z\\ud834x"', + u'z\ud834x') + assertScan( + u'"z\\udd20x"', + u'z\udd20x') + assertScan( + u'"z\ud834x"', + u'z\ud834x') + # It may look strange to join strings together, but Python is drunk. + # https://gist.github.com/etrepum/5538443 + assertScan( + u'"z\\ud834\udd20x12345"', + u''.join([u'z\ud834', u'\udd20x12345'])) + assertScan( + u'"z\ud834\\udd20x"', + u''.join([u'z\ud834', u'\udd20x'])) + # these have different behavior given UTF8 input, because the surrogate + # pair may be joined (in maxunicode > 65535 builds) + assertScan( + u''.join([u'"z\ud834', u'\udd20x"']), + u''.join([u'z\ud834', u'\udd20x']), + test_utf8=False) + + self.assertRaises(ValueError, + scanstring, u'"z\\ud83x"', 1, None, True) + self.assertRaises(ValueError, + scanstring, u'"z\\ud834\\udd2x"', 1, None, True) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_separators.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_separators.py new file mode 100644 index 0000000000000000000000000000000000000000..3be74efe7ac502e25193e15498279eba88fd4224 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_separators.py @@ -0,0 +1,42 @@ +import textwrap +from unittest import TestCase + +import hjson as json + + +class TestSeparators(TestCase): + def test_separators(self): + h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth', + {'nifty': 87}, {'field': 'yes', 'morefield': False} ] + + expect = textwrap.dedent("""\ + [ + [ + "blorpie" + ] , + [ + "whoops" + ] , + [] , + "d-shtaeou" , + "d-nthiouh" , + "i-vhbjkhnth" , + { + "nifty" : 87 + } , 
+ { + "field" : "yes" , + "morefield" : false + } + ]""") + + + d1 = json.dumpsJSON(h) + d2 = json.dumpsJSON(h, indent=' ', sort_keys=True, separators=(' ,', ' : ')) + + h1 = json.loads(d1) + h2 = json.loads(d2) + + self.assertEqual(h1, h) + self.assertEqual(h2, h) + self.assertEqual(d2, expect) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_tool.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..475fc5aa94be8729da9d0f92f272d2753b6d92cb --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_tool.py @@ -0,0 +1,98 @@ +from __future__ import with_statement +import os +import sys +import textwrap +import unittest +import subprocess +import tempfile +try: + # Python 3.x + from test.support import strip_python_stderr +except ImportError: + # Python 2.6+ + try: + from test.test_support import strip_python_stderr + except ImportError: + # Python 2.5 + import re + def strip_python_stderr(stderr): + return re.sub( + r"\[\d+ refs\]\r?\n?$".encode(), + "".encode(), + stderr).strip() + +class TestTool(unittest.TestCase): + data = """ + + [["blorpie"],[ "whoops" ] , [ + ],\t"d-shtaeou",\r"d-nthiouh", + "i-vhbjkhnth", {"nifty":87}, {"morefield" :\tfalse,"field" + :"yes"} ] + """ + + expect = textwrap.dedent("""\ + [ + [ + blorpie + ] + [ + whoops + ] + [] + d-shtaeou + d-nthiouh + i-vhbjkhnth + { + nifty: 87 + } + { + morefield: false + field: yes + } + ] + """) + + def runTool(self, args=None, data=None): + argv = [sys.executable, '-m', 'hjson.tool'] + if args: + argv.extend(args) + proc = subprocess.Popen(argv, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = proc.communicate(data) + self.assertEqual(strip_python_stderr(err), ''.encode()) + self.assertEqual(proc.returncode, 0) + return out + + def test_stdin_stdout(self): + self.assertEqual( + self.runTool(data=self.data.encode()), + 
self.expect.encode()) + + def test_infile_stdout(self): + with tempfile.NamedTemporaryFile() as infile: + infile.write(self.data.encode()) + infile.flush() + self.assertEqual( + self.runTool(args=[infile.name]), + self.expect.encode()) + + def x_test_infile_outfile(self): + """Not currently an option in tool""" + with tempfile.NamedTemporaryFile() as infile: + infile.write(self.data.encode()) + infile.flush() + # outfile will get overwritten by tool, so the delete + # may not work on some platforms. Do it manually. + outfile = tempfile.NamedTemporaryFile() + try: + self.assertEqual( + self.runTool(args=[infile.name, outfile.name]), + ''.encode()) + with open(outfile.name, 'rb') as f: + self.assertEqual(f.read(), self.expect.encode()) + finally: + outfile.close() + if os.path.exists(outfile.name): + os.unlink(outfile.name) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_tuple.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_tuple.py new file mode 100644 index 0000000000000000000000000000000000000000..9b7289df60759a379d511e127a8cec6ec2b980ac --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_tuple.py @@ -0,0 +1,51 @@ +import unittest + +from hjson.compat import StringIO +import hjson as json + +class TestTuples(unittest.TestCase): + def test_tuple_array_dumps(self): + t = (1, 2, 3) + expect = json.dumpsJSON(list(t)) + # Default is True + self.assertEqual(expect, json.dumpsJSON(t)) + self.assertEqual(expect, json.dumpsJSON(t, tuple_as_array=True)) + self.assertRaises(TypeError, json.dumpsJSON, t, tuple_as_array=False) + # Ensure that the "default" does not get called + self.assertEqual(expect, json.dumpsJSON(t, default=repr)) + self.assertEqual(expect, json.dumpsJSON(t, tuple_as_array=True, + default=repr)) + # Ensure that the "default" gets called + self.assertEqual( + json.dumpsJSON(repr(t)), + json.dumpsJSON(t, tuple_as_array=False, default=repr)) + + def test_tuple_array_dump(self): + t = (1, 2, 
3) + expect = json.dumpsJSON(list(t)) + # Default is True + sio = StringIO() + json.dumpJSON(t, sio) + self.assertEqual(expect, sio.getvalue()) + sio = StringIO() + json.dumpJSON(t, sio, tuple_as_array=True) + self.assertEqual(expect, sio.getvalue()) + self.assertRaises(TypeError, json.dumpJSON, t, StringIO(), + tuple_as_array=False) + # Ensure that the "default" does not get called + sio = StringIO() + json.dumpJSON(t, sio, default=repr) + self.assertEqual(expect, sio.getvalue()) + sio = StringIO() + json.dumpJSON(t, sio, tuple_as_array=True, default=repr) + self.assertEqual(expect, sio.getvalue()) + # Ensure that the "default" gets called + sio = StringIO() + json.dumpJSON(t, sio, tuple_as_array=False, default=repr) + self.assertEqual( + json.dumpsJSON(repr(t)), + sio.getvalue()) + +class TestNamedTuple(unittest.TestCase): + def test_namedtuple_dump(self): + pass diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_unicode.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_unicode.py new file mode 100644 index 0000000000000000000000000000000000000000..d634fe40671742aa16074ce3704c56c504125237 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tests/test_unicode.py @@ -0,0 +1,153 @@ +import sys +import codecs +from unittest import TestCase + +import hjson as json +from hjson.compat import unichr, text_type, b, u, BytesIO + +class TestUnicode(TestCase): + def test_encoding1(self): + encoder = json.JSONEncoder(encoding='utf-8') + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = encoder.encode(u) + js = encoder.encode(s) + self.assertEqual(ju, js) + + def test_encoding2(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = json.dumpsJSON(u, encoding='utf-8') + js = json.dumpsJSON(s, encoding='utf-8') + self.assertEqual(ju, js) + + def test_encoding3(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER 
OMEGA}' + j = json.dumpsJSON(u) + self.assertEqual(j, '"\\u03b1\\u03a9"') + + def test_encoding4(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumpsJSON([u]) + self.assertEqual(j, '["\\u03b1\\u03a9"]') + + def test_encoding5(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumpsJSON(u, ensure_ascii=False) + self.assertEqual(j, u'"' + u + u'"') + + def test_encoding6(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumpsJSON([u], ensure_ascii=False) + self.assertEqual(j, u'["' + u + u'"]') + + def test_big_unicode_encode(self): + u = u'\U0001d120' + self.assertEqual(json.dumpsJSON(u), '"\\ud834\\udd20"') + self.assertEqual(json.dumpsJSON(u, ensure_ascii=False), u'"\U0001d120"') + + def test_big_unicode_decode(self): + u = u'z\U0001d120x' + self.assertEqual(json.loads('"' + u + '"'), u) + self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u) + + def test_unicode_decode(self): + for i in range(0, 0xd7ff): + u = unichr(i) + #s = '"\\u{0:04x}"'.format(i) + s = '"\\u%04x"' % (i,) + self.assertEqual(json.loads(s), u) + + def test_object_pairs_hook_with_unicode(self): + s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' + p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4), + (u"qrt", 5), (u"pad", 6), (u"hoy", 7)] + self.assertEqual(json.loads(s), eval(s)) + self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p) + od = json.loads(s, object_pairs_hook=json.OrderedDict) + self.assertEqual(od, json.OrderedDict(p)) + self.assertEqual(type(od), json.OrderedDict) + # the object_pairs_hook takes priority over the object_hook + self.assertEqual(json.loads(s, + object_pairs_hook=json.OrderedDict, + object_hook=lambda x: None), + json.OrderedDict(p)) + + + def test_default_encoding(self): + self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')), + {'a': u'\xe9'}) + + def test_unicode_preservation(self): + 
self.assertEqual(type(json.loads(u'""')), text_type) + self.assertEqual(type(json.loads(u'"a"')), text_type) + self.assertEqual(type(json.loads(u'["a"]')[0]), text_type) + + def test_ensure_ascii_false_returns_unicode(self): + # http://code.google.com/p/simplejson/issues/detail?id=48 + self.assertEqual(type(json.dumpsJSON([], ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumpsJSON(0, ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumpsJSON({}, ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumpsJSON("", ensure_ascii=False)), text_type) + + def test_ensure_ascii_false_bytestring_encoding(self): + # http://code.google.com/p/simplejson/issues/detail?id=48 + doc1 = {u'quux': b('Arr\xc3\xaat sur images')} + doc2 = {u'quux': u('Arr\xeat sur images')} + doc_ascii = '{"quux": "Arr\\u00eat sur images"}' + doc_unicode = u'{"quux": "Arr\xeat sur images"}' + self.assertEqual(json.dumpsJSON(doc1), doc_ascii) + self.assertEqual(json.dumpsJSON(doc2), doc_ascii) + self.assertEqual(json.dumpsJSON(doc1, ensure_ascii=False), doc_unicode) + self.assertEqual(json.dumpsJSON(doc2, ensure_ascii=False), doc_unicode) + + def test_ensure_ascii_linebreak_encoding(self): + # http://timelessrepo.com/json-isnt-a-javascript-subset + s1 = u'\u2029\u2028' + s2 = s1.encode('utf8') + expect = '"\\u2029\\u2028"' + self.assertEqual(json.dumpsJSON(s1), expect) + self.assertEqual(json.dumpsJSON(s2), expect) + self.assertEqual(json.dumpsJSON(s1, ensure_ascii=False), expect) + self.assertEqual(json.dumpsJSON(s2, ensure_ascii=False), expect) + + def test_invalid_escape_sequences(self): + # incomplete escape sequence + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u1') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u12') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u123') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u1234') + # invalid escape sequence 
+ self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u123x"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u12x4"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u1x34"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ux234"') + if sys.maxunicode > 65535: + # invalid escape sequence for low surrogate + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u0"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u00"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u000"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u000x"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u00x0"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u0x00"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\ux000"') + + def test_ensure_ascii_still_works(self): + # in the ascii range, ensure that everything is the same + for c in map(unichr, range(0, 127)): + self.assertEqual( + json.dumpsJSON(c, ensure_ascii=False), + json.dumpsJSON(c)) + snowman = u'\N{SNOWMAN}' + self.assertEqual( + json.dumpsJSON(c, ensure_ascii=False), + '"' + c + '"') + + def test_strip_bom(self): + content = u"\u3053\u3093\u306b\u3061\u308f" + json_doc = codecs.BOM_UTF8 + b(json.dumpsJSON(content)) + self.assertEqual(json.load(BytesIO(json_doc)), content) + for doc in json_doc, json_doc.decode('utf8'): + self.assertEqual(json.loads(doc), content) diff --git a/evalkit_tf446/lib/python3.10/site-packages/hjson/tool.py b/evalkit_tf446/lib/python3.10/site-packages/hjson/tool.py new file mode 100644 index 0000000000000000000000000000000000000000..0eb273cd871fec8aed213d8dd693af3dfa487096 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/hjson/tool.py @@ -0,0 +1,79 @@ +r"""Command-line tool to validate and pretty-print JSON + +Usage:: + + $ echo '{"json":"obj"}' | hjson + { + 
"json": "obj" + } + +""" +from __future__ import with_statement +import sys +import hjson + +HELP="""Hjson, a user interface for JSON + +Usage: + hjson [options] + hjson [options] + hjson (-h | --help) + hjson (-V | --version) + +Options: + -h --help Show this screen. + -j Output as formatted JSON. + -c Output as JSON. + -V --version Show version. +"""; + +def showerr(msg): + sys.stderr.write(msg) + sys.stderr.write("\n") + +def main(): + format = 'hjson' + args = [] + for arg in sys.argv[1:]: + if arg == '-h' or arg == '--help': + showerr(HELP) + return + elif arg == '-j': format = 'json' + elif arg == '-c': format = 'compact' + elif arg == '-V' or arg == '--version': + showerr('Hjson ' + hjson.__version__) + return + + elif arg[0] == '-': + showerr(HELP) + raise SystemExit('unknown option ' + arg) + else: + args.append(arg) + + outfile = sys.stdout + if len(args) == 0: + infile = sys.stdin + elif len(args) == 1: + infile = open(args[0], 'r') + else: + showerr(HELP) + raise SystemExit('unknown options') + + with infile: + try: + obj = hjson.load(infile, use_decimal=True) + except ValueError: + raise SystemExit(sys.exc_info()[1]) + + with outfile: + if format == 'json': + hjson.dumpJSON(obj, outfile, ensure_ascii=False, use_decimal=True, indent=' ') + elif format == 'compact': + hjson.dumpJSON(obj, outfile, ensure_ascii=False, use_decimal=True, separators=(',', ':')) + else: + hjson.dump(obj, outfile, ensure_ascii=False, use_decimal=True) + + outfile.write('\n') + +if __name__ == '__main__': + main() diff --git a/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__init__.py b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..911a2cbd83467612ad58de157a8647c47a85f242 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__init__.py @@ -0,0 +1,1087 @@ +# This file was automatically generated by SWIG (http://www.swig.org). 
+# Version 4.0.2 +# +# Do not make changes to this file unless you know what you are doing--modify +# the SWIG interface file instead. + +from sys import version_info as _swig_python_version_info +if _swig_python_version_info < (2, 7, 0): + raise RuntimeError("Python 2.7 or later required") + +# Import the low-level C/C++ module +if __package__ or "." in __name__: + from . import _sentencepiece +else: + import _sentencepiece + +try: + import builtins as __builtin__ +except ImportError: + import __builtin__ + +def _swig_repr(self): + try: + strthis = "proxy of " + self.this.__repr__() + except __builtin__.Exception: + strthis = "" + return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) + + +def _swig_setattr_nondynamic_instance_variable(set): + def set_instance_attr(self, name, value): + if name == "thisown": + self.this.own(value) + elif name == "this": + set(self, name, value) + elif hasattr(self, name) and isinstance(getattr(type(self), name), property): + set(self, name, value) + else: + raise AttributeError("You cannot add instance attributes to %s" % self) + return set_instance_attr + + +def _swig_setattr_nondynamic_class_variable(set): + def set_class_attr(cls, name, value): + if hasattr(cls, name) and not isinstance(getattr(cls, name), property): + set(cls, name, value) + else: + raise AttributeError("You cannot add class attributes to %s" % cls) + return set_class_attr + + +def _swig_add_metaclass(metaclass): + """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass""" + def wrapper(cls): + return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) + return wrapper + + +class _SwigNonDynamicMeta(type): + """Meta class to enforce nondynamic attributes (no new attributes) for a class""" + __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__) + + +class ImmutableSentencePieceText_ImmutableSentencePiece(object): + thisown = property(lambda x: 
x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") + __repr__ = _swig_repr + + def __init__(self): + _sentencepiece.ImmutableSentencePieceText_ImmutableSentencePiece_swiginit(self, _sentencepiece.new_ImmutableSentencePieceText_ImmutableSentencePiece()) + __swig_destroy__ = _sentencepiece.delete_ImmutableSentencePieceText_ImmutableSentencePiece + + def _piece(self): + return _sentencepiece.ImmutableSentencePieceText_ImmutableSentencePiece__piece(self) + + def _surface(self): + return _sentencepiece.ImmutableSentencePieceText_ImmutableSentencePiece__surface(self) + + def _id(self): + return _sentencepiece.ImmutableSentencePieceText_ImmutableSentencePiece__id(self) + + def _begin(self): + return _sentencepiece.ImmutableSentencePieceText_ImmutableSentencePiece__begin(self) + + def _end(self): + return _sentencepiece.ImmutableSentencePieceText_ImmutableSentencePiece__end(self) + + piece = property(_piece) + surface = property(_surface) + id = property(_id) + begin = property(_begin) + end = property(_end) + + def __str__(self): + return ('piece: \"{}\"\n' + 'id: {}\n' + 'surface: \"{}\"\n' + 'begin: {}\n' + 'end: {}\n').format(self.piece, self.id, self.surface, + self.begin, self.end) + + def __eq__(self, other): + return self.piece == other.piece and self.id == other.id and self.surface == other.surface and self.begin == other.begin and self.end == other.end + + def __hash__(self): + return hash(str(self)) + + __repr__ = __str__ + + +# Register ImmutableSentencePieceText_ImmutableSentencePiece in _sentencepiece: +_sentencepiece.ImmutableSentencePieceText_ImmutableSentencePiece_swigregister(ImmutableSentencePieceText_ImmutableSentencePiece) + +class ImmutableSentencePieceText(object): + thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") + __repr__ = _swig_repr + + def __init__(self): + _sentencepiece.ImmutableSentencePieceText_swiginit(self, _sentencepiece.new_ImmutableSentencePieceText()) + 
__swig_destroy__ = _sentencepiece.delete_ImmutableSentencePieceText + + def _pieces_size(self): + return _sentencepiece.ImmutableSentencePieceText__pieces_size(self) + + def _pieces(self, index): + return _sentencepiece.ImmutableSentencePieceText__pieces(self, index) + + def _text(self): + return _sentencepiece.ImmutableSentencePieceText__text(self) + + def _score(self): + return _sentencepiece.ImmutableSentencePieceText__score(self) + + def SerializeAsString(self): + return _sentencepiece.ImmutableSentencePieceText_SerializeAsString(self) + + text = property(_text) + score = property(_score) + + class ImmutableSentencePieceIterator: + def __init__(self, proto): + self.proto = proto + self.len = self.proto._pieces_size() + + def __len__(self): + return self.len + + def __getitem__(self, index): + if isinstance(index, slice): + return [self.proto._pieces(i) for i in range(self.len)][index.start:index.stop:index.step] + if index < 0: + index = index + self.len + if index < 0 or index >= self.len: + raise IndexError('piece index is out of range') + return self.proto._pieces(index) + + def __str__(self): + return '\n'.join(['pieces {{\n{}}}'.format(str(x)) for x in self]) + + __repr__ = __str__ + + @property + def pieces(self): + return ImmutableSentencePieceText.ImmutableSentencePieceIterator(self) + + def __eq__(self, other): + return self.SerializeAsString() == other.SerializeAsString() + + def __hash__(self): + return hash(self.SerializeAsString()) + + def __str__(self): + return ('text: \"{}\"\n' + 'score: {}\n' + '{}').format(self.text, self.score, + '\n'.join(['pieces {{\n{}}}'.format(str(x)) for x in self.pieces])) + + __repr__ = __str__ + + +# Register ImmutableSentencePieceText in _sentencepiece: +_sentencepiece.ImmutableSentencePieceText_swigregister(ImmutableSentencePieceText) + +class ImmutableNBestSentencePieceText(object): + thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") + __repr__ = _swig_repr + + def 
__init__(self): + _sentencepiece.ImmutableNBestSentencePieceText_swiginit(self, _sentencepiece.new_ImmutableNBestSentencePieceText()) + __swig_destroy__ = _sentencepiece.delete_ImmutableNBestSentencePieceText + + def _nbests_size(self): + return _sentencepiece.ImmutableNBestSentencePieceText__nbests_size(self) + + def _nbests(self, index): + return _sentencepiece.ImmutableNBestSentencePieceText__nbests(self, index) + + def SerializeAsString(self): + return _sentencepiece.ImmutableNBestSentencePieceText_SerializeAsString(self) + + class ImmutableSentencePieceTextIterator: + def __init__(self, proto): + self.proto = proto + self.len = self.proto._nbests_size() + + def __len__(self): + return self.len + + def __getitem__(self, index): + if isinstance(index, slice): + return [self.proto._nbests(i) for i in range(self.len)][index.start:index.stop:index.step] + if index < 0: + index = index + self.len + if index < 0 or index >= self.len: + raise IndexError('nbests index is out of range') + return self.proto._nbests(index) + + def __str__(self): + return '\n'.join(['nbests {{\n{}}}'.format(str(x)) for x in self]) + + __repr__ = __str__ + + @property + def nbests(self): + return ImmutableNBestSentencePieceText.ImmutableSentencePieceTextIterator(self) + + def __eq__(self, other): + return self.SerializeAsString() == other.SerializeAsString() + + def __hash__(self): + return hash(self.SerializeAsString()) + + def __str__(self): + return '\n'.join(['nbests {{\n{}}}'.format(str(x)) for x in self.nbests]) + + __repr__ = __str__ + + +# Register ImmutableNBestSentencePieceText in _sentencepiece: +_sentencepiece.ImmutableNBestSentencePieceText_swigregister(ImmutableNBestSentencePieceText) + +class SentencePieceProcessor(object): + thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") + __repr__ = _swig_repr + + def __init__(self): + _sentencepiece.SentencePieceProcessor_swiginit(self, _sentencepiece.new_SentencePieceProcessor()) + 
__swig_destroy__ = _sentencepiece.delete_SentencePieceProcessor + + def LoadFromSerializedProto(self, serialized): + return _sentencepiece.SentencePieceProcessor_LoadFromSerializedProto(self, serialized) + + def SetEncodeExtraOptions(self, extra_option): + return _sentencepiece.SentencePieceProcessor_SetEncodeExtraOptions(self, extra_option) + + def SetDecodeExtraOptions(self, extra_option): + return _sentencepiece.SentencePieceProcessor_SetDecodeExtraOptions(self, extra_option) + + def SetVocabulary(self, valid_vocab): + return _sentencepiece.SentencePieceProcessor_SetVocabulary(self, valid_vocab) + + def ResetVocabulary(self): + return _sentencepiece.SentencePieceProcessor_ResetVocabulary(self) + + def LoadVocabulary(self, filename, threshold): + return _sentencepiece.SentencePieceProcessor_LoadVocabulary(self, filename, threshold) + + def CalculateEntropy(self, *args): + return _sentencepiece.SentencePieceProcessor_CalculateEntropy(self, *args) + + def GetPieceSize(self): + return _sentencepiece.SentencePieceProcessor_GetPieceSize(self) + + def PieceToId(self, piece): + return _sentencepiece.SentencePieceProcessor_PieceToId(self, piece) + + def IdToPiece(self, id): + return _sentencepiece.SentencePieceProcessor_IdToPiece(self, id) + + def GetScore(self, id): + return _sentencepiece.SentencePieceProcessor_GetScore(self, id) + + def IsUnknown(self, id): + return _sentencepiece.SentencePieceProcessor_IsUnknown(self, id) + + def IsControl(self, id): + return _sentencepiece.SentencePieceProcessor_IsControl(self, id) + + def IsUnused(self, id): + return _sentencepiece.SentencePieceProcessor_IsUnused(self, id) + + def IsByte(self, id): + return _sentencepiece.SentencePieceProcessor_IsByte(self, id) + + def unk_id(self): + return _sentencepiece.SentencePieceProcessor_unk_id(self) + + def bos_id(self): + return _sentencepiece.SentencePieceProcessor_bos_id(self) + + def eos_id(self): + return _sentencepiece.SentencePieceProcessor_eos_id(self) + + def pad_id(self): + 
return _sentencepiece.SentencePieceProcessor_pad_id(self) + + def serialized_model_proto(self): + return _sentencepiece.SentencePieceProcessor_serialized_model_proto(self) + + def LoadFromFile(self, arg): + return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg) + + def _EncodeAsIds(self, text, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__EncodeAsIds(self, text, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece) + + def _EncodeAsPieces(self, text, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__EncodeAsPieces(self, text, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece) + + def _EncodeAsSerializedProto(self, text, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__EncodeAsSerializedProto(self, text, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece) + + def _EncodeAsImmutableProto(self, text, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__EncodeAsImmutableProto(self, text, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece) + + def _EncodeAsIdsBatch(self, ins, num_threads, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__EncodeAsIdsBatch(self, ins, num_threads, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece) + + def _EncodeAsPiecesBatch(self, ins, num_threads, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__EncodeAsPiecesBatch(self, ins, num_threads, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece) + + def 
_EncodeAsSerializedProtoBatch(self, ins, num_threads, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__EncodeAsSerializedProtoBatch(self, ins, num_threads, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece) + + def _EncodeAsImmutableProtoBatch(self, ins, num_threads, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__EncodeAsImmutableProtoBatch(self, ins, num_threads, enable_sampling, nbest_size, alpha, add_bos, add_eos, reverse, emit_unk_piece) + + def _DecodeIds(self, ids): + return _sentencepiece.SentencePieceProcessor__DecodeIds(self, ids) + + def _DecodePieces(self, pieces): + return _sentencepiece.SentencePieceProcessor__DecodePieces(self, pieces) + + def _DecodeIdsAsSerializedProto(self, ids): + return _sentencepiece.SentencePieceProcessor__DecodeIdsAsSerializedProto(self, ids) + + def _DecodePiecesAsSerializedProto(self, pieces): + return _sentencepiece.SentencePieceProcessor__DecodePiecesAsSerializedProto(self, pieces) + + def _DecodeIdsAsImmutableProto(self, ids): + return _sentencepiece.SentencePieceProcessor__DecodeIdsAsImmutableProto(self, ids) + + def _DecodePiecesAsImmutableProto(self, pieces): + return _sentencepiece.SentencePieceProcessor__DecodePiecesAsImmutableProto(self, pieces) + + def _DecodeIdsBatch(self, ins, num_threads): + return _sentencepiece.SentencePieceProcessor__DecodeIdsBatch(self, ins, num_threads) + + def _DecodeIdsAsSerializedProtoBatch(self, ins, num_threads): + return _sentencepiece.SentencePieceProcessor__DecodeIdsAsSerializedProtoBatch(self, ins, num_threads) + + def _DecodeIdsAsImmutableProtoBatch(self, ins, num_threads): + return _sentencepiece.SentencePieceProcessor__DecodeIdsAsImmutableProtoBatch(self, ins, num_threads) + + def _DecodePiecesBatch(self, ins, num_threads): + return _sentencepiece.SentencePieceProcessor__DecodePiecesBatch(self, 
ins, num_threads) + + def _DecodePiecesAsSerializedProtoBatch(self, ins, num_threads): + return _sentencepiece.SentencePieceProcessor__DecodePiecesAsSerializedProtoBatch(self, ins, num_threads) + + def _DecodePiecesAsImmutableProtoBatch(self, ins, num_threads): + return _sentencepiece.SentencePieceProcessor__DecodePiecesAsImmutableProtoBatch(self, ins, num_threads) + + def _NBestEncodeAsIds(self, text, nbest_size, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__NBestEncodeAsIds(self, text, nbest_size, add_bos, add_eos, reverse, emit_unk_piece) + + def _NBestEncodeAsPieces(self, text, nbest_size, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__NBestEncodeAsPieces(self, text, nbest_size, add_bos, add_eos, reverse, emit_unk_piece) + + def _NBestEncodeAsSerializedProto(self, text, nbest_size, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__NBestEncodeAsSerializedProto(self, text, nbest_size, add_bos, add_eos, reverse, emit_unk_piece) + + def _NBestEncodeAsImmutableProto(self, text, nbest_size, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__NBestEncodeAsImmutableProto(self, text, nbest_size, add_bos, add_eos, reverse, emit_unk_piece) + + def _SampleEncodeAndScoreAsIds(self, text, num_samples, alpha, wor, include_best, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__SampleEncodeAndScoreAsIds(self, text, num_samples, alpha, wor, include_best, add_bos, add_eos, reverse, emit_unk_piece) + + def _SampleEncodeAndScoreAsPieces(self, text, num_samples, alpha, wor, include_best, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__SampleEncodeAndScoreAsPieces(self, text, num_samples, alpha, wor, include_best, add_bos, add_eos, reverse, emit_unk_piece) + + def _SampleEncodeAndScoreAsSerializedProto(self, text, num_samples, 
alpha, wor, include_best, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__SampleEncodeAndScoreAsSerializedProto(self, text, num_samples, alpha, wor, include_best, add_bos, add_eos, reverse, emit_unk_piece) + + def _SampleEncodeAndScoreAsImmutableProto(self, text, num_samples, alpha, wor, include_best, add_bos, add_eos, reverse, emit_unk_piece): + return _sentencepiece.SentencePieceProcessor__SampleEncodeAndScoreAsImmutableProto(self, text, num_samples, alpha, wor, include_best, add_bos, add_eos, reverse, emit_unk_piece) + + def _CalculateEntropy(self, text, alpha): + return _sentencepiece.SentencePieceProcessor__CalculateEntropy(self, text, alpha) + + def _CalculateEntropyBatch(self, ins, alpha, num_threads): + return _sentencepiece.SentencePieceProcessor__CalculateEntropyBatch(self, ins, alpha, num_threads) + + def Init(self, + model_file=None, + model_proto=None, + out_type=int, + add_bos=False, + add_eos=False, + reverse=False, + emit_unk_piece=False, + enable_sampling=False, + nbest_size=-1, + alpha=0.1, + num_threads=-1): + """Initialzie sentencepieceProcessor. + + Args: + model_file: The sentencepiece model file path. + model_proto: The sentencepiece model serialized proto. + out_type: output type. int or str. + add_bos: Add to the result (Default = false) + add_eos: Add to the result (Default = false) / is added after + reversing (if enabled). + reverse: Reverses the tokenized sequence (Default = false) + emit_unk_piece: Emits the unk literal string (Default = false) + nbest_size: sampling parameters for unigram. Invalid in BPE-Dropout. + nbest_size = {0,1}: No sampling is performed. + nbest_size > 1: samples from the nbest_size results. + nbest_size < 0: assuming that nbest_size is infinite and samples + from the all hypothesis (lattice) using + forward-filtering-and-backward-sampling algorithm. + alpha: Soothing parameter for unigram sampling, and dropout probability of + merge operations for BPE-dropout. 
+ num_threads: number of threads in batch processing (Default = -1, auto-detected) + """ + + _sentencepiece_processor_init_native(self) + self._out_type = out_type + self._add_bos = add_bos + self._add_eos = add_eos + self._reverse = reverse + self._emit_unk_piece = emit_unk_piece + self._enable_sampling = enable_sampling + self._nbest_size = nbest_size + self._alpha = alpha + self._num_threads = num_threads + if model_file or model_proto: + self.Load(model_file=model_file, model_proto=model_proto) + + + def Encode(self, + input, + out_type=None, + add_bos=None, + add_eos=None, + reverse=None, + emit_unk_piece=None, + enable_sampling=None, + nbest_size=None, + alpha=None, + num_threads=None): + """Encode text input to segmented ids or tokens. + + Args: + input: input string. accepsts list of string. + out_type: output type. int or str. + add_bos: Add to the result (Default = false) + add_eos: Add to the result (Default = false) / is added after + reversing (if enabled). + reverse: Reverses the tokenized sequence (Default = false) + emit_unk_piece: Emits the unk literal string (Default = false) + nbest_size: sampling parameters for unigram. Invalid in BPE-Dropout. + nbest_size = {0,1}: No sampling is performed. + nbest_size > 1: samples from the nbest_size results. + nbest_size < 0: assuming that nbest_size is infinite and samples + from the all hypothesis (lattice) using + forward-filtering-and-backward-sampling algorithm. + alpha: Soothing parameter for unigram sampling, and merge probability for + BPE-dropout (probablity 'p' in BPE-dropout paper). + num_threads: the number of threads used in the batch processing (Default = -1). 
+ """ + + if out_type is None: + out_type = self._out_type + if add_bos is None: + add_bos = self._add_bos + if add_eos is None: + add_eos = self._add_eos + if reverse is None: + reverse = self._reverse + if emit_unk_piece is None: + emit_unk_piece = self._emit_unk_piece + if enable_sampling is None: + enable_sampling = self._enable_sampling + if nbest_size is None: + nbest_size = self._nbest_size + if alpha is None: + alpha = self._alpha + if num_threads is None: + num_threads = self._num_threads + + if enable_sampling == True and (nbest_size is None or nbest_size == 0 or + nbest_size == 1 or alpha is None): + raise RuntimeError( + 'When enable_sampling is True, We must specify "nbest_size > 1" or "nbest_size = -1", ' + 'and "alpha". "nbest_size" is enabled only on unigram mode ignored in BPE-dropout. ' + 'when "nbest_size = -1" , this method samples from all candidates on the lattice ' + 'instead of nbest segmentations.' + ) + + if num_threads is None or type(num_threads) is not int: + raise RuntimeError('num_threads must be int') + + if type(input) is list: + if out_type is int: + return self._EncodeAsIdsBatch(input, num_threads, enable_sampling, nbest_size, + alpha, add_bos, add_eos, reverse, emit_unk_piece) + if out_type is str: + return self._EncodeAsPiecesBatch(input, num_threads, enable_sampling, nbest_size, + alpha, add_bos, add_eos, reverse, emit_unk_piece) + if out_type == 'serialized_proto' or out_type == 'proto': + return self._EncodeAsSerializedProtoBatch(input, num_threads, enable_sampling, nbest_size, + alpha, add_bos, add_eos, reverse, emit_unk_piece) + if out_type == 'immutable_proto': + return self._EncodeAsImmutableProtoBatch(input, num_threads, enable_sampling, nbest_size, + alpha, add_bos, add_eos, reverse, emit_unk_piece) + + if out_type is int: + return self._EncodeAsIds(input, enable_sampling, nbest_size, + alpha, add_bos, add_eos, reverse, emit_unk_piece) + if out_type is str: + return self._EncodeAsPieces(input, enable_sampling, 
nbest_size, + alpha, add_bos, add_eos, reverse, emit_unk_piece) + if out_type == 'serialized_proto' or out_type == 'proto': + return self._EncodeAsSerializedProto(input, enable_sampling, nbest_size, + alpha, add_bos, add_eos, reverse, emit_unk_piece) + if out_type == 'immutable_proto': + return self._EncodeAsImmutableProto(input, enable_sampling, nbest_size, + alpha, add_bos, add_eos, reverse, emit_unk_piece) + + raise RuntimeError('unknown out_type={}'.format(out_type)) + return None + + + def EncodeAsPieces(self, input, **kwargs): + return self.Encode(input=input, out_type=str, **kwargs) + + + def EncodeAsIds(self, input, **kwargs): + return self.Encode(input=input, out_type=int, **kwargs) + + + def EncodeAsSerializedProto(self, input, **kwargs): + return self.Encode(input=input, out_type='serialized_proto', **kwargs) + + + def EncodeAsImmutableProto(self, input, **kwargs): + return self.Encode(input=input, out_type='immutable_proto', **kwargs) + + + def SampleEncodeAsPieces(self, input, nbest_size=None, alpha=None, **kwargs): + return self.Encode(input=input, nbest_size=nbest_size, alpha=alpha, + out_type=str, enable_sampling=True, **kwargs) + + + def SampleEncodeAsIds(self, input, nbest_size=None, alpha=None,**kwargs): + return self.Encode(input=input, nbest_size=nbest_size, alpha=alpha, + out_type=int, enable_sampling=True, **kwargs) + + + def SampleEncodeAsSerializedProto(self, input, nbest_size=None, alpha=None, **kwargs): + return self.Encode(input=input, nbest_size=nbest_size, alpha=alpha, + out_type='serialized_proto', enable_sampling=True, **kwargs) + + + def SampleEncodeAsImmutableProto(self, input, nbest_size=None, alpha=None, **kwargs): + return self.Encode(input=input, nbest_size=nbest_size, alpha=alpha, + out_type='immutable_proto', enable_sampling=True, **kwargs) + + + def NBestEncode(self, + input, + out_type=None, + add_bos=None, + add_eos=None, + reverse=None, + emit_unk_piece=None, + nbest_size=None): + """NBestEncode text input to segmented 
ids or tokens. + + Args: + input: input string. accepsts list of string. + out_type: output type. int or str. + add_bos: Add to the result (Default = false) + add_eos: Add to the result (Default = false) / is added after reversing (if enabled). + reverse: Reverses the tokenized sequence (Default = false) + emit_unk_piece: Emits the unk literal string (Default = false) + nbest_size: nbest size + """ + + if out_type is None: + out_type = self._out_type + if add_bos is None: + add_bos = self._add_bos + if add_eos is None: + add_eos = self._add_eos + if reverse is None: + reverse = self._reverse + if emit_unk_piece is None: + emit_unk_piece = self._emit_unk_piece + if nbest_size is None: + nbest_size = self._nbest_size + + if nbest_size <= 0: + nbest_size=1 + + def _encode(text): + if out_type is int: + return self._NBestEncodeAsIds(text, nbest_size, + add_bos, add_eos, reverse, emit_unk_piece) + if out_type is str: + return self._NBestEncodeAsPieces(text, nbest_size, + add_bos, add_eos, reverse, emit_unk_piece) + if out_type == 'serialized_proto' or out_type == 'proto': + return self._NBestEncodeAsSerializedProto(text, nbest_size, + add_bos, add_eos, reverse, emit_unk_piece) + if out_type == 'immutable_proto': + return self._NBestEncodeAsImmutableProto(text, nbest_size, + add_bos, add_eos, reverse, emit_unk_piece) + + raise RuntimeError('unknown out_type') + + if type(input) is list: + return [_encode(n) for n in input] + + return _encode(input) + + + def NBestEncodeAsPieces(self, input, nbest_size=None, **kwargs): + return self.NBestEncode(input=input, nbest_size=nbest_size, + out_type=str, **kwargs) + + + def NBestEncodeAsIds(self, input, nbest_size=None, **kwargs): + return self.NBestEncode(input=input, nbest_size=nbest_size, + out_type=int, **kwargs) + + + def NBestEncodeAsSerializedProto(self, input, nbest_size=None, **kwargs): + return self.NBestEncode(input=input, nbest_size=nbest_size, + out_type='serialized_proto', **kwargs) + + + def 
NBestEncodeAsImmutableProto(self, input, nbest_size=None, **kwargs): + return self.NBestEncode(input=input, nbest_size=nbest_size, + out_type='immutable_proto', **kwargs) + + + def SampleEncodeAndScore(self, + input, + out_type=None, + add_bos=None, + add_eos=None, + reverse=None, + emit_unk_piece=None, + num_samples=None, + alpha=None, + wor=None, + include_best=None): + """SampleEncodeAndScore text input to segmented ids or tokens. + + Args: + input: input string. accepsts list of string. + out_type: output type. int or str or 'serialized_proto' or 'immutable_proto' + add_bos: Add to the result (Default = false) + add_eos: Add to the result (Default = false) / is added after reversing (if enabled). + reverse: Reverses the tokenized sequence (Default = false) + emit_unk_piece: Emits the unk literal string (Default = false) + num_samples: How many samples to return (Default = 1) + alpha: inverse temperature for sampling + wor: whether to sample without replacement (Default = false) + include_best: whether to include the best tokenization, requires wor=True (Default = false) + """ + + if out_type is None: + out_type = self._out_type + if add_bos is None: + add_bos = self._add_bos + if add_eos is None: + add_eos = self._add_eos + if reverse is None: + reverse = self._reverse + if emit_unk_piece is None: + emit_unk_piece = self._emit_unk_piece + if num_samples is None: + num_samples = 1 + if alpha is None: + alpha = 1. 
+ if wor is None: + wor = False + if include_best is None: + include_best = False + + if num_samples <= 0: + raise RuntimeError('num_examples must be positive') + + if include_best and not wor: + raise RuntimeError('When include_best is True, We must specify "wor = True".') + + + def _encode(text): + if out_type is int: + return self._SampleEncodeAndScoreAsIds(text, num_samples, alpha, wor, include_best, + add_bos, add_eos, reverse, emit_unk_piece) + if out_type is str: + return self._SampleEncodeAndScoreAsPieces(text, num_samples, alpha, wor, include_best, + add_bos, add_eos, reverse, emit_unk_piece) + + if out_type == 'serialized_proto' or out_type == 'proto': + return self._SampleEncodeAndScoreAsSerializedProto(text, num_samples, alpha, wor, include_best, + add_bos, add_eos, reverse, emit_unk_piece) + + if out_type == 'immutable_proto': + return self._SampleEncodeAndScoreAsImmutableProto(text, num_samples, alpha, wor, include_best, + add_bos, add_eos, reverse, emit_unk_piece) + + raise RuntimeError('unknown output type') + + + if type(input) is list: + return [_encode(n) for n in input] + + return _encode(input) + + + def SampleEncodeAndScoreAsPieces(self, input, num_samples=None, alpha=None, **kwargs): + return self.SampleEncodeAndScore(input=input, num_samples=num_samples, alpha=alpha, + out_type=str, **kwargs) + + + def SampleEncodeAndScoreAsIds(self, input, num_samples=None, alpha=None, **kwargs): + return self.SampleEncodeAndScore(input=input, num_samples=num_samples, alpha=alpha, + out_type=int, **kwargs) + + + def SampleEncodeAndScoreAsSerializedProto(self, input, num_samples=None, alpha=None, **kwargs): + return self.SampleEncodeAndScore(input=input, num_samples=num_samples, alpha=alpha, + out_type='serialized_proto', **kwargs) + + + def SampleEncodeAndScoreAsImmutableProto(self, input, num_samples=None, alpha=None, **kwargs): + return self.SampleEncodeAndScore(input=input, num_samples=num_samples, alpha=alpha, + out_type='immutable_proto', **kwargs) + + 
  def Decode(self, input, out_type=str, num_threads=None):
    """Decode processed id or token sequences back to text.

    Dispatches on both the requested `out_type` and the nesting level of
    `input` (scalar, flat list, or list-of-lists for batch processing).

    Args:
      out_type: output type. str or 'serialized_proto' or 'immutable_proto' (Default = str)
      num_threads: the number of threads used in the batch processing (Default = -1).
    """

    if num_threads is None:
      # Fall back to the instance-wide setting captured in Init().
      num_threads = self._num_threads

    if num_threads is None or type(num_threads) is not int:
      raise RuntimeError('num_threads must be int')

    # NOTE(review): an empty/falsy input short-circuits to '' even when a
    # proto out_type was requested -- callers expecting a proto should check.
    if not input:
      return ''

    if out_type is str:
      # Scalars are wrapped in a one-element list for the native batch API.
      if type(input) is int:
        return self._DecodeIds([input])
      if type(input) is str:
        return self._DecodePieces([input])

      if type(input) is list:
        # Flat list: the first element's type decides ids vs. pieces.
        if len(input) == 0 or type(input[0]) is int:
          return self._DecodeIds(input)
        if type(input[0]) is str:
          return self._DecodePieces(input)

        # List of lists: batch decode across num_threads workers.
        if type(input[0]) is list:
          if len(input[0]) == 0 or type(input[0][0]) is int:
            return self._DecodeIdsBatch(input, num_threads)
          if type(input[0][0]) is str:
            return self._DecodePiecesBatch(input, num_threads)

    if out_type == 'serialized_proto':
      # Same scalar / flat-list / nested-list dispatch as the str branch,
      # but returning the serialized SentencePieceText proto bytes.
      if type(input) is int:
        return self._DecodeIdsAsSerializedProto([input])
      if type(input) is str:
        return self._DecodePiecesAsSerializedProto([input])

      if type(input) is list:
        if len(input) == 0 or type(input[0]) is int:
          return self._DecodeIdsAsSerializedProto(input)
        if type(input[0]) is str:
          return self._DecodePiecesAsSerializedProto(input)

        if type(input[0]) is list:
          if len(input[0]) == 0 or type(input[0][0]) is int:
            return self._DecodeIdsAsSerializedProtoBatch(input, num_threads)
          if type(input[0][0]) is str:
            return self._DecodePiecesAsSerializedProtoBatch(input, num_threads)


    if out_type == 'immutable_proto':
      # Same dispatch, returning the immutable in-memory proto wrapper.
      if type(input) is int:
        return self._DecodeIdsAsImmutableProto([input])
      if type(input) is str:
        return self._DecodePiecesAsImmutableProto([input])

      if type(input) is list:
        if len(input) == 0 or type(input[0]) is int:
          return self._DecodeIdsAsImmutableProto(input)
        if type(input[0]) is str:
          return self._DecodePiecesAsImmutableProto(input)

        if type(input[0]) is list:
          if len(input[0]) == 0 or type(input[0][0]) is int:
            return self._DecodeIdsAsImmutableProtoBatch(input, num_threads)
          if type(input[0][0]) is str:
            return self._DecodePiecesAsImmutableProtoBatch(input, num_threads)


    raise RuntimeError('unknown output or input type')
    return None


  def DecodePieces(self, input, out_type=str, **kwargs):
    # Thin alias: Decode() already dispatches on the element type.
    return self.Decode(input=input, out_type=out_type, **kwargs)


  def DecodeIds(self, input, out_type=str, **kwargs):
    # Thin alias: Decode() already dispatches on the element type.
    return self.Decode(input=input, out_type=out_type, **kwargs)


  def DecodePiecesAsSerializedProto(self, input, out_type='serialized_proto', **kwargs):
    return self.Decode(input=input, out_type=out_type, **kwargs)


  def DecodeIdsAsSerializedProto(self, input, out_type='serialized_proto', **kwargs):
    return self.Decode(input=input, out_type=out_type, **kwargs)


  def DecodePiecesAsImmutableProto(self, input, out_type='immutable_proto', **kwargs):
    return self.Decode(input=input, out_type=out_type, **kwargs)


  def DecodeIdsAsImmutableProto(self, input, out_type='immutable_proto', **kwargs):
    return self.Decode(input=input, out_type=out_type, **kwargs)


  def CalculateEntropy(self, input, alpha, num_threads=None):
    """Calculate sentence entropy"""
    # A list input uses the threaded batch path; a single sentence goes
    # straight to the native single-call binding (num_threads unused there).
    if type(input) is list:
      if num_threads is None:
        num_threads = self._num_threads
      if num_threads is None or type(num_threads) is not int:
        raise RuntimeError('num_threads must be int')
      return self._CalculateEntropyBatch(input, alpha, num_threads)

    return self._CalculateEntropy(input, alpha)


  def piece_size(self):
    # snake_case convenience alias for GetPieceSize().
    return self.GetPieceSize()


  def vocab_size(self):
    # Identical to piece_size(); kept for API compatibility.
    return self.GetPieceSize()


  def __getstate__(self):
    # Pickle support: the processor state is exactly its serialized model.
    return self.serialized_model_proto()


  def __setstate__(self, serialized_model_proto):
    # Re-run native init, then restore the model from the pickled proto.
    self.__init__()
    self.LoadFromSerializedProto(serialized_model_proto)


  def __len__(self):
return self.GetPieceSize() + + + def __getitem__(self, piece): + return self.PieceToId(piece) + + + def Load(self, model_file=None, model_proto=None): + """Overwride SentencePieceProcessor.Load to support both model_file and model_proto. + + Args: + model_file: The sentencepiece model file path. + model_proto: The sentencepiece model serialized proto. Either `model_file` + or `model_proto` must be set. + """ + if model_file and model_proto: + raise RuntimeError('model_file and model_proto must be exclusive.') + if model_proto: + return self.LoadFromSerializedProto(model_proto) + return self.LoadFromFile(model_file) + + +# Register SentencePieceProcessor in _sentencepiece: +_sentencepiece.SentencePieceProcessor_swigregister(SentencePieceProcessor) + + +def SetRandomGeneratorSeed(seed): + return _sentencepiece.SetRandomGeneratorSeed(seed) +class SentencePieceTrainer(object): + thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") + + def __init__(self, *args, **kwargs): + raise AttributeError("No constructor defined") + __repr__ = _swig_repr + + @staticmethod + def _TrainFromString(arg): + return _sentencepiece.SentencePieceTrainer__TrainFromString(arg) + + @staticmethod + def _TrainFromMap(args): + return _sentencepiece.SentencePieceTrainer__TrainFromMap(args) + + @staticmethod + def _TrainFromMap2(args, iter): + return _sentencepiece.SentencePieceTrainer__TrainFromMap2(args, iter) + + @staticmethod + def _TrainFromMap3(args): + return _sentencepiece.SentencePieceTrainer__TrainFromMap3(args) + + @staticmethod + def _TrainFromMap4(args, iter): + return _sentencepiece.SentencePieceTrainer__TrainFromMap4(args, iter) + + @staticmethod + def _Train(arg=None, **kwargs): + """Train Sentencepiece model. 
Accept both kwargs and legacy string arg.""" + if arg is not None and type(arg) is str: + return SentencePieceTrainer._TrainFromString(arg) + + def _encode(value): + """Encode value to CSV..""" + if type(value) is list: + if sys.version_info[0] == 3: + f = StringIO() + else: + f = BytesIO() + writer = csv.writer(f, lineterminator='') + writer.writerow([str(v) for v in value]) + return f.getvalue() + else: + return str(value) + + sentence_iterator = None + model_writer = None + new_kwargs = {} + for key, value in kwargs.items(): + if key in ['sentence_iterator', 'sentence_reader']: + sentence_iterator = value + elif key in ['model_writer']: + model_writer = value + else: + new_kwargs[key] = _encode(value) + + if model_writer: + if sentence_iterator: + model_proto = SentencePieceTrainer._TrainFromMap4(new_kwargs, + sentence_iterator) + else: + model_proto = SentencePieceTrainer._TrainFromMap3(new_kwargs) + model_writer.write(model_proto) + else: + if sentence_iterator: + return SentencePieceTrainer._TrainFromMap2(new_kwargs, sentence_iterator) + else: + return SentencePieceTrainer._TrainFromMap(new_kwargs) + + return None + + @staticmethod + def Train(arg=None, logstream=None, **kwargs): + with _LogStream(ostream=logstream): + SentencePieceTrainer._Train(arg=arg, **kwargs) + + +# Register SentencePieceTrainer in _sentencepiece: +_sentencepiece.SentencePieceTrainer_swigregister(SentencePieceTrainer) + +def SentencePieceTrainer__TrainFromString(arg): + return _sentencepiece.SentencePieceTrainer__TrainFromString(arg) + +def SentencePieceTrainer__TrainFromMap(args): + return _sentencepiece.SentencePieceTrainer__TrainFromMap(args) + +def SentencePieceTrainer__TrainFromMap2(args, iter): + return _sentencepiece.SentencePieceTrainer__TrainFromMap2(args, iter) + +def SentencePieceTrainer__TrainFromMap3(args): + return _sentencepiece.SentencePieceTrainer__TrainFromMap3(args) + +def SentencePieceTrainer__TrainFromMap4(args, iter): + return 
_sentencepiece.SentencePieceTrainer__TrainFromMap4(args, iter) + + + +import re +import csv +import sys +import os +from io import StringIO +from io import BytesIO + + +def _add_snake_case(classname): + """Added snake_cased method from CammelCased method.""" + + snake_map = {} + for k, v in classname.__dict__.items(): + if re.match(r'^[A-Z]+', k): + snake = re.sub(r'(?= v.piece_size()): + raise IndexError('piece id is out of range.') + return func(v, n) + + def _batched_func(self, arg): + if type(arg) is list: + return [_func(self, n) for n in arg] + else: + return _func(self, arg) + + setattr(classname, name, _batched_func) + + +_sentencepiece_processor_init_native = SentencePieceProcessor.__init__ +setattr(SentencePieceProcessor, '__init__', SentencePieceProcessor.Init) + +SentencePieceProcessor.Tokenize = SentencePieceProcessor.Encode +SentencePieceProcessor.Detokenize = SentencePieceProcessor.Decode + +for m in [ + 'PieceToId', 'IdToPiece', 'GetScore', 'IsUnknown', 'IsControl', 'IsUnused', + 'IsByte' +]: + _batchnize(SentencePieceProcessor, m) + +_add_snake_case(SentencePieceProcessor) +_add_snake_case(SentencePieceTrainer) +set_random_generator_seed = SetRandomGeneratorSeed + +from ._version import __version__ + +class _LogStream(object): + def __init__(self, ostream=None): + self.ostream = ostream + if self.ostream is not None: + self.orig_stream_fileno = sys.stderr.fileno() + + def __enter__(self): + if self.ostream is not None: + self.orig_stream_dup = os.dup(self.orig_stream_fileno) + os.dup2(self.ostream.fileno(), self.orig_stream_fileno) + + def __exit__(self, type, value, traceback): + if self.ostream is not None: + os.close(self.orig_stream_fileno) + os.dup2(self.orig_stream_dup, self.orig_stream_fileno) + os.close(self.orig_stream_dup) + self.ostream.close() + + + diff --git a/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/__init__.cpython-310.pyc 
b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e252f637fd26c7711cf05a6b4de2aa5b8289dd2 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/_version.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aa739d1e0c62ff675de2ee140edc817c821e451 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/_version.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/sentencepiece_model_pb2.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/sentencepiece_model_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c3cbe1e169b45112c309358281ed00c191f0deb Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/sentencepiece_model_pb2.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/sentencepiece_pb2.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/sentencepiece_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83b7ccd3551660e713222f11d1de521f3f7b9e2f Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/__pycache__/sentencepiece_pb2.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/_sentencepiece.cpython-310-x86_64-linux-gnu.so b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/_sentencepiece.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 
0000000000000000000000000000000000000000..4793f899dfecee89eff9753c4664272a8cc0e942 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/_sentencepiece.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d7039276b7c130c463512d4efcdf33197d1003225b843fac29696568c163b2d +size 2942720 diff --git a/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/_version.py b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..bd5418d59dea06eadc29ef566e59a92be8aa222e --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/_version.py @@ -0,0 +1 @@ +__version__ = '0.1.99' diff --git a/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/sentencepiece_model_pb2.py b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/sentencepiece_model_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..3b824d4a6ee56e935f347a0f04a204ec10172142 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/sentencepiece_model_pb2.py @@ -0,0 +1,757 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: sentencepiece_model.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='sentencepiece_model.proto', + package='sentencepiece', + syntax='proto2', + serialized_options=b'H\003', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 
\x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03\x12\x17\n\teos_piece\x18/ \x01(\t:\x04\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' +) + + + +_TRAINERSPEC_MODELTYPE = _descriptor.EnumDescriptor( + 
name='ModelType', + full_name='sentencepiece.TrainerSpec.ModelType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='UNIGRAM', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='BPE', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='WORD', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CHAR', index=3, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=1517, + serialized_end=1570, +) +_sym_db.RegisterEnumDescriptor(_TRAINERSPEC_MODELTYPE) + +_MODELPROTO_SENTENCEPIECE_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='sentencepiece.ModelProto.SentencePiece.Type', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='NORMAL', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='UNKNOWN', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CONTROL', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='USER_DEFINED', index=3, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='BYTE', index=4, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + 
_descriptor.EnumValueDescriptor( + name='UNUSED', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=2323, + serialized_end=2407, +) +_sym_db.RegisterEnumDescriptor(_MODELPROTO_SENTENCEPIECE_TYPE) + + +_TRAINERSPEC = _descriptor.Descriptor( + name='TrainerSpec', + full_name='sentencepiece.TrainerSpec', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='input', full_name='sentencepiece.TrainerSpec.input', index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='input_format', full_name='sentencepiece.TrainerSpec.input_format', index=1, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='model_prefix', full_name='sentencepiece.TrainerSpec.model_prefix', index=2, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='model_type', full_name='sentencepiece.TrainerSpec.model_type', index=3, + number=3, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='vocab_size', full_name='sentencepiece.TrainerSpec.vocab_size', index=4, + number=4, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=8000, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='accept_language', full_name='sentencepiece.TrainerSpec.accept_language', index=5, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='self_test_sample_size', full_name='sentencepiece.TrainerSpec.self_test_sample_size', index=6, + number=6, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enable_differential_privacy', full_name='sentencepiece.TrainerSpec.enable_differential_privacy', index=7, + number=50, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='differential_privacy_noise_level', full_name='sentencepiece.TrainerSpec.differential_privacy_noise_level', index=8, + number=51, type=2, cpp_type=6, label=1, + 
has_default_value=True, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='differential_privacy_clipping_threshold', full_name='sentencepiece.TrainerSpec.differential_privacy_clipping_threshold', index=9, + number=52, type=4, cpp_type=4, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='character_coverage', full_name='sentencepiece.TrainerSpec.character_coverage', index=10, + number=10, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.9995), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='input_sentence_size', full_name='sentencepiece.TrainerSpec.input_sentence_size', index=11, + number=11, type=4, cpp_type=4, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='shuffle_input_sentence', full_name='sentencepiece.TrainerSpec.shuffle_input_sentence', index=12, + number=19, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='mining_sentence_size', 
full_name='sentencepiece.TrainerSpec.mining_sentence_size', index=13, + number=12, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='training_sentence_size', full_name='sentencepiece.TrainerSpec.training_sentence_size', index=14, + number=13, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='seed_sentencepiece_size', full_name='sentencepiece.TrainerSpec.seed_sentencepiece_size', index=15, + number=14, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=1000000, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='shrinking_factor', full_name='sentencepiece.TrainerSpec.shrinking_factor', index=16, + number=15, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.75), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='max_sentence_length', full_name='sentencepiece.TrainerSpec.max_sentence_length', index=17, + number=18, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=4192, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='num_threads', full_name='sentencepiece.TrainerSpec.num_threads', index=18, + number=16, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=16, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='num_sub_iterations', full_name='sentencepiece.TrainerSpec.num_sub_iterations', index=19, + number=17, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=2, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='max_sentencepiece_length', full_name='sentencepiece.TrainerSpec.max_sentencepiece_length', index=20, + number=20, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=16, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='split_by_unicode_script', full_name='sentencepiece.TrainerSpec.split_by_unicode_script', index=21, + number=21, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='split_by_number', full_name='sentencepiece.TrainerSpec.split_by_number', index=22, + number=23, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='split_by_whitespace', full_name='sentencepiece.TrainerSpec.split_by_whitespace', index=23, + number=22, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='treat_whitespace_as_suffix', full_name='sentencepiece.TrainerSpec.treat_whitespace_as_suffix', index=24, + number=24, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='allow_whitespace_only_pieces', full_name='sentencepiece.TrainerSpec.allow_whitespace_only_pieces', index=25, + number=26, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='split_digits', full_name='sentencepiece.TrainerSpec.split_digits', index=26, + number=25, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pretokenization_delimiter', full_name='sentencepiece.TrainerSpec.pretokenization_delimiter', index=27, + number=53, type=9, cpp_type=9, label=1, + has_default_value=True, default_value=b"".decode('utf-8'), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='control_symbols', full_name='sentencepiece.TrainerSpec.control_symbols', index=28, + number=30, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='user_defined_symbols', full_name='sentencepiece.TrainerSpec.user_defined_symbols', index=29, + number=31, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='required_chars', full_name='sentencepiece.TrainerSpec.required_chars', index=30, + number=36, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='byte_fallback', full_name='sentencepiece.TrainerSpec.byte_fallback', index=31, + number=35, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='vocabulary_output_piece_score', full_name='sentencepiece.TrainerSpec.vocabulary_output_piece_score', index=32, + number=32, type=8, 
cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='hard_vocab_limit', full_name='sentencepiece.TrainerSpec.hard_vocab_limit', index=33, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='use_all_vocab', full_name='sentencepiece.TrainerSpec.use_all_vocab', index=34, + number=34, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unk_id', full_name='sentencepiece.TrainerSpec.unk_id', index=35, + number=40, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='bos_id', full_name='sentencepiece.TrainerSpec.bos_id', index=36, + number=41, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='eos_id', full_name='sentencepiece.TrainerSpec.eos_id', index=37, + number=42, type=5, cpp_type=1, label=1, + 
has_default_value=True, default_value=2, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pad_id', full_name='sentencepiece.TrainerSpec.pad_id', index=38, + number=43, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=-1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unk_piece', full_name='sentencepiece.TrainerSpec.unk_piece', index=39, + number=45, type=9, cpp_type=9, label=1, + has_default_value=True, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='bos_piece', full_name='sentencepiece.TrainerSpec.bos_piece', index=40, + number=46, type=9, cpp_type=9, label=1, + has_default_value=True, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='eos_piece', full_name='sentencepiece.TrainerSpec.eos_piece', index=41, + number=47, type=9, cpp_type=9, label=1, + has_default_value=True, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pad_piece', full_name='sentencepiece.TrainerSpec.pad_piece', index=42, + number=48, type=9, cpp_type=9, label=1, + 
has_default_value=True, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unk_surface', full_name='sentencepiece.TrainerSpec.unk_surface', index=43, + number=44, type=9, cpp_type=9, label=1, + has_default_value=True, default_value=b" \342\201\207 ".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='train_extremely_large_corpus', full_name='sentencepiece.TrainerSpec.train_extremely_large_corpus', index=44, + number=49, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _TRAINERSPEC_MODELTYPE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(200, 536870912), ], + oneofs=[ + ], + serialized_start=45, + serialized_end=1581, +) + + +_NORMALIZERSPEC = _descriptor.Descriptor( + name='NormalizerSpec', + full_name='sentencepiece.NormalizerSpec', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='sentencepiece.NormalizerSpec.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='precompiled_charsmap', full_name='sentencepiece.NormalizerSpec.precompiled_charsmap', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='add_dummy_prefix', full_name='sentencepiece.NormalizerSpec.add_dummy_prefix', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='remove_extra_whitespaces', full_name='sentencepiece.NormalizerSpec.remove_extra_whitespaces', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='escape_whitespaces', full_name='sentencepiece.NormalizerSpec.escape_whitespaces', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='normalization_rule_tsv', full_name='sentencepiece.NormalizerSpec.normalization_rule_tsv', index=5, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, 
+ is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(200, 536870912), ], + oneofs=[ + ], + serialized_start=1584, + serialized_end=1793, +) + + +_SELFTESTDATA_SAMPLE = _descriptor.Descriptor( + name='Sample', + full_name='sentencepiece.SelfTestData.Sample', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='input', full_name='sentencepiece.SelfTestData.Sample.input', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='expected', full_name='sentencepiece.SelfTestData.Sample.expected', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1864, + serialized_end=1905, +) + +_SELFTESTDATA = _descriptor.Descriptor( + name='SelfTestData', + full_name='sentencepiece.SelfTestData', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='samples', full_name='sentencepiece.SelfTestData.samples', index=0, + number=1, 
type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SELFTESTDATA_SAMPLE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(200, 536870912), ], + oneofs=[ + ], + serialized_start=1795, + serialized_end=1916, +) + + +_MODELPROTO_SENTENCEPIECE = _descriptor.Descriptor( + name='SentencePiece', + full_name='sentencepiece.ModelProto.SentencePiece', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='piece', full_name='sentencepiece.ModelProto.SentencePiece.piece', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='score', full_name='sentencepiece.ModelProto.SentencePiece.score', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type', full_name='sentencepiece.ModelProto.SentencePiece.type', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + 
extensions=[ + ], + nested_types=[], + enum_types=[ + _MODELPROTO_SENTENCEPIECE_TYPE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(200, 536870912), ], + oneofs=[ + ], + serialized_start=2208, + serialized_end=2418, +) + +_MODELPROTO = _descriptor.Descriptor( + name='ModelProto', + full_name='sentencepiece.ModelProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='pieces', full_name='sentencepiece.ModelProto.pieces', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='trainer_spec', full_name='sentencepiece.ModelProto.trainer_spec', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='normalizer_spec', full_name='sentencepiece.ModelProto.normalizer_spec', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='self_test_data', full_name='sentencepiece.ModelProto.self_test_data', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='denormalizer_spec', full_name='sentencepiece.ModelProto.denormalizer_spec', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MODELPROTO_SENTENCEPIECE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(200, 536870912), ], + oneofs=[ + ], + serialized_start=1919, + serialized_end=2429, +) + +_TRAINERSPEC.fields_by_name['model_type'].enum_type = _TRAINERSPEC_MODELTYPE +_TRAINERSPEC_MODELTYPE.containing_type = _TRAINERSPEC +_SELFTESTDATA_SAMPLE.containing_type = _SELFTESTDATA +_SELFTESTDATA.fields_by_name['samples'].message_type = _SELFTESTDATA_SAMPLE +_MODELPROTO_SENTENCEPIECE.fields_by_name['type'].enum_type = _MODELPROTO_SENTENCEPIECE_TYPE +_MODELPROTO_SENTENCEPIECE.containing_type = _MODELPROTO +_MODELPROTO_SENTENCEPIECE_TYPE.containing_type = _MODELPROTO_SENTENCEPIECE +_MODELPROTO.fields_by_name['pieces'].message_type = _MODELPROTO_SENTENCEPIECE +_MODELPROTO.fields_by_name['trainer_spec'].message_type = _TRAINERSPEC +_MODELPROTO.fields_by_name['normalizer_spec'].message_type = _NORMALIZERSPEC +_MODELPROTO.fields_by_name['self_test_data'].message_type = _SELFTESTDATA +_MODELPROTO.fields_by_name['denormalizer_spec'].message_type = _NORMALIZERSPEC +DESCRIPTOR.message_types_by_name['TrainerSpec'] = _TRAINERSPEC +DESCRIPTOR.message_types_by_name['NormalizerSpec'] = _NORMALIZERSPEC +DESCRIPTOR.message_types_by_name['SelfTestData'] = _SELFTESTDATA +DESCRIPTOR.message_types_by_name['ModelProto'] = _MODELPROTO +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +TrainerSpec = 
_reflection.GeneratedProtocolMessageType('TrainerSpec', (_message.Message,), { + 'DESCRIPTOR' : _TRAINERSPEC, + '__module__' : 'sentencepiece_model_pb2' + # @@protoc_insertion_point(class_scope:sentencepiece.TrainerSpec) + }) +_sym_db.RegisterMessage(TrainerSpec) + +NormalizerSpec = _reflection.GeneratedProtocolMessageType('NormalizerSpec', (_message.Message,), { + 'DESCRIPTOR' : _NORMALIZERSPEC, + '__module__' : 'sentencepiece_model_pb2' + # @@protoc_insertion_point(class_scope:sentencepiece.NormalizerSpec) + }) +_sym_db.RegisterMessage(NormalizerSpec) + +SelfTestData = _reflection.GeneratedProtocolMessageType('SelfTestData', (_message.Message,), { + + 'Sample' : _reflection.GeneratedProtocolMessageType('Sample', (_message.Message,), { + 'DESCRIPTOR' : _SELFTESTDATA_SAMPLE, + '__module__' : 'sentencepiece_model_pb2' + # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData.Sample) + }) + , + 'DESCRIPTOR' : _SELFTESTDATA, + '__module__' : 'sentencepiece_model_pb2' + # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData) + }) +_sym_db.RegisterMessage(SelfTestData) +_sym_db.RegisterMessage(SelfTestData.Sample) + +ModelProto = _reflection.GeneratedProtocolMessageType('ModelProto', (_message.Message,), { + + 'SentencePiece' : _reflection.GeneratedProtocolMessageType('SentencePiece', (_message.Message,), { + 'DESCRIPTOR' : _MODELPROTO_SENTENCEPIECE, + '__module__' : 'sentencepiece_model_pb2' + # @@protoc_insertion_point(class_scope:sentencepiece.ModelProto.SentencePiece) + }) + , + 'DESCRIPTOR' : _MODELPROTO, + '__module__' : 'sentencepiece_model_pb2' + # @@protoc_insertion_point(class_scope:sentencepiece.ModelProto) + }) +_sym_db.RegisterMessage(ModelProto) +_sym_db.RegisterMessage(ModelProto.SentencePiece) + + +DESCRIPTOR._options = None +_TRAINERSPEC.fields_by_name['mining_sentence_size']._options = None +_TRAINERSPEC.fields_by_name['training_sentence_size']._options = None +# @@protoc_insertion_point(module_scope) diff --git 
a/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/sentencepiece_pb2.py b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/sentencepiece_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..00f1b28280372317344a4b1ae419a7bb4e5d9623 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/sentencepiece/sentencepiece_pb2.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: sentencepiece.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='sentencepiece.proto', + package='sentencepiece', + syntax='proto2', + serialized_options=b'H\003', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x13sentencepiece.proto\x12\rsentencepiece\"\xdf\x01\n\x11SentencePieceText\x12\x0c\n\x04text\x18\x01 \x01(\t\x12>\n\x06pieces\x18\x02 \x03(\x0b\x32..sentencepiece.SentencePieceText.SentencePiece\x12\r\n\x05score\x18\x03 \x01(\x02\x1a\x62\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\r\x12\x0f\n\x07surface\x18\x03 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x04 \x01(\r\x12\x0b\n\x03\x65nd\x18\x05 \x01(\r*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"J\n\x16NBestSentencePieceText\x12\x30\n\x06nbests\x18\x01 \x03(\x0b\x32 .sentencepiece.SentencePieceTextB\x02H\x03' +) + + + + +_SENTENCEPIECETEXT_SENTENCEPIECE = _descriptor.Descriptor( + name='SentencePiece', + full_name='sentencepiece.SentencePieceText.SentencePiece', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + 
name='piece', full_name='sentencepiece.SentencePieceText.SentencePiece.piece', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='id', full_name='sentencepiece.SentencePieceText.SentencePiece.id', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='surface', full_name='sentencepiece.SentencePieceText.SentencePiece.surface', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='begin', full_name='sentencepiece.SentencePieceText.SentencePiece.begin', index=3, + number=4, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='sentencepiece.SentencePieceText.SentencePiece.end', index=4, + number=5, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], 
+ extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(200, 536870912), ], + oneofs=[ + ], + serialized_start=153, + serialized_end=251, +) + +_SENTENCEPIECETEXT = _descriptor.Descriptor( + name='SentencePieceText', + full_name='sentencepiece.SentencePieceText', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='text', full_name='sentencepiece.SentencePieceText.text', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pieces', full_name='sentencepiece.SentencePieceText.pieces', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='score', full_name='sentencepiece.SentencePieceText.score', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SENTENCEPIECETEXT_SENTENCEPIECE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(200, 536870912), ], + oneofs=[ + ], + serialized_start=39, + serialized_end=262, +) + + +_NBESTSENTENCEPIECETEXT = _descriptor.Descriptor( + 
name='NBestSentencePieceText', + full_name='sentencepiece.NBestSentencePieceText', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='nbests', full_name='sentencepiece.NBestSentencePieceText.nbests', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=264, + serialized_end=338, +) + +_SENTENCEPIECETEXT_SENTENCEPIECE.containing_type = _SENTENCEPIECETEXT +_SENTENCEPIECETEXT.fields_by_name['pieces'].message_type = _SENTENCEPIECETEXT_SENTENCEPIECE +_NBESTSENTENCEPIECETEXT.fields_by_name['nbests'].message_type = _SENTENCEPIECETEXT +DESCRIPTOR.message_types_by_name['SentencePieceText'] = _SENTENCEPIECETEXT +DESCRIPTOR.message_types_by_name['NBestSentencePieceText'] = _NBESTSENTENCEPIECETEXT +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +SentencePieceText = _reflection.GeneratedProtocolMessageType('SentencePieceText', (_message.Message,), { + + 'SentencePiece' : _reflection.GeneratedProtocolMessageType('SentencePiece', (_message.Message,), { + 'DESCRIPTOR' : _SENTENCEPIECETEXT_SENTENCEPIECE, + '__module__' : 'sentencepiece_pb2' + # @@protoc_insertion_point(class_scope:sentencepiece.SentencePieceText.SentencePiece) + }) + , + 'DESCRIPTOR' : _SENTENCEPIECETEXT, + '__module__' : 'sentencepiece_pb2' + # @@protoc_insertion_point(class_scope:sentencepiece.SentencePieceText) + }) +_sym_db.RegisterMessage(SentencePieceText) +_sym_db.RegisterMessage(SentencePieceText.SentencePiece) + +NBestSentencePieceText = 
_reflection.GeneratedProtocolMessageType('NBestSentencePieceText', (_message.Message,), { + 'DESCRIPTOR' : _NBESTSENTENCEPIECETEXT, + '__module__' : 'sentencepiece_pb2' + # @@protoc_insertion_point(class_scope:sentencepiece.NBestSentencePieceText) + }) +_sym_db.RegisterMessage(NBestSentencePieceText) + + +DESCRIPTOR._options = None +# @@protoc_insertion_point(module_scope)