diff --git a/.gitattributes b/.gitattributes index 1261765c6748a6b50d529f863be157517889df33..8cd0a2111a6b5347787daf6bf9d3db200e9640b1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -287,3 +287,6 @@ wemm/lib/python3.10/__pycache__/turtle.cpython-310.pyc filter=lfs diff=lfs merge wemm/lib/python3.10/__pycache__/_pydecimal.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text wemm/lib/python3.10/site-packages/numpy/random/mtrand.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text moondream/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +pllava/lib/libncurses++.a filter=lfs diff=lfs merge=lfs -text +pllava/bin/xz filter=lfs diff=lfs merge=lfs -text +pllava/lib/libbz2.so.1.0.8 filter=lfs diff=lfs merge=lfs -text diff --git a/pllava/bin/xz b/pllava/bin/xz new file mode 100644 index 0000000000000000000000000000000000000000..05ab99d5f781e9e075210b3a65c76b09a203c0ce --- /dev/null +++ b/pllava/bin/xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bce26dc6fa5195f402a34490878bc69f5439321566af07a49739e9b9ec50dbf +size 108336 diff --git a/pllava/lib/libbz2.so.1.0.8 b/pllava/lib/libbz2.so.1.0.8 new file mode 100644 index 0000000000000000000000000000000000000000..7e057fb75b1b533e33984742a4a02254948e177f --- /dev/null +++ b/pllava/lib/libbz2.so.1.0.8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4979469ae49ac144f62202f75bbdd69b17197aedb879d633337c8cf7e4aba301 +size 229016 diff --git a/pllava/lib/libncurses++.a b/pllava/lib/libncurses++.a new file mode 100644 index 0000000000000000000000000000000000000000..592b1b981d3fb155dffb6c4dcc9335849efc088c --- /dev/null +++ b/pllava/lib/libncurses++.a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93b48c40f5d7b07e1a8c4bd9419df55c28e250cca1166be4aafd2fc7caf18823 +size 187604 diff --git a/pllava/lib/python3.10/LICENSE.txt b/pllava/lib/python3.10/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..f26bcf4d2de6eb136e31006ca3ab447d5e488adf --- /dev/null +++ b/pllava/lib/python3.10/LICENSE.txt @@ -0,0 +1,279 @@ +A. HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see https://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations, which became +Zope Corporation. In 2001, the Python Software Foundation (PSF, see +https://www.python.org/psf/) was formed, a non-profit organization +created specifically to own Python-related Intellectual Property. +Zope Corporation was a sponsoring member of the PSF. + +All Python releases are Open Source (see https://opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? 
(1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2 and above 2.1.1 2001-now PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause. According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +Python software and documentation are licensed under the +Python Software Foundation License Version 2. + +Starting with Python 3.8.6, examples, recipes, and other code in +the documentation are dual licensed under the PSF License Version 2 +and the Zero-Clause BSD license. + +Some software incorporated into Python is under different licenses. +The licenses are listed with code falling under that license. + + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 
+ +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. 
+ + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION +---------------------------------------------------------------------- + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/pllava/lib/python3.10/__future__.py b/pllava/lib/python3.10/__future__.py new file mode 100644 index 0000000000000000000000000000000000000000..97dc90c6e4644a71cd19683e31c8624ea3184824 --- /dev/null +++ b/pllava/lib/python3.10/__future__.py @@ -0,0 +1,147 @@ +"""Record of phased-in incompatible language changes. + +Each line is of the form: + + FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," + CompilerFlag ")" + +where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples +of the same form as sys.version_info: + + (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int + PY_MINOR_VERSION, # the 1; an int + PY_MICRO_VERSION, # the 0; an int + PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string + PY_RELEASE_SERIAL # the 3; an int + ) + +OptionalRelease records the first release in which + + from __future__ import FeatureName + +was accepted. + +In the case of MandatoryReleases that have not yet occurred, +MandatoryRelease predicts the release in which the feature will become part +of the language. + +Else MandatoryRelease records when the feature became part of the language; +in releases at or after that, modules no longer need + + from __future__ import FeatureName + +to use the feature in question, but may continue to use such imports. + +MandatoryRelease may also be None, meaning that a planned feature got +dropped. + +Instances of class _Feature have two corresponding methods, +.getOptionalRelease() and .getMandatoryRelease(). 
+ +CompilerFlag is the (bitfield) flag that should be passed in the fourth +argument to the builtin function compile() to enable the feature in +dynamically compiled code. This flag is stored in the .compiler_flag +attribute on _Future instances. These values must match the appropriate +#defines of CO_xxx flags in Include/cpython/compile.h. + +No feature line is ever to be deleted from this file. +""" + +all_feature_names = [ + "nested_scopes", + "generators", + "division", + "absolute_import", + "with_statement", + "print_function", + "unicode_literals", + "barry_as_FLUFL", + "generator_stop", + "annotations", +] + +__all__ = ["all_feature_names"] + all_feature_names + +# The CO_xxx symbols are defined here under the same names defined in +# code.h and used by compile.h, so that an editor search will find them here. +# However, they're not exported in __all__, because they don't really belong to +# this module. +CO_NESTED = 0x0010 # nested_scopes +CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) +CO_FUTURE_DIVISION = 0x20000 # division +CO_FUTURE_ABSOLUTE_IMPORT = 0x40000 # perform absolute imports by default +CO_FUTURE_WITH_STATEMENT = 0x80000 # with statement +CO_FUTURE_PRINT_FUNCTION = 0x100000 # print function +CO_FUTURE_UNICODE_LITERALS = 0x200000 # unicode string literals +CO_FUTURE_BARRY_AS_BDFL = 0x400000 +CO_FUTURE_GENERATOR_STOP = 0x800000 # StopIteration becomes RuntimeError in generators +CO_FUTURE_ANNOTATIONS = 0x1000000 # annotations become strings at runtime + + +class _Feature: + + def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): + self.optional = optionalRelease + self.mandatory = mandatoryRelease + self.compiler_flag = compiler_flag + + def getOptionalRelease(self): + """Return first release in which this feature was recognized. + + This is a 5-tuple, of the same form as sys.version_info. + """ + return self.optional + + def getMandatoryRelease(self): + """Return release in which this feature will become mandatory. + + This is a 5-tuple, of the same form as sys.version_info, or, if + the feature was dropped, is None. 
+ """ + return self.mandatory + + def __repr__(self): + return "_Feature" + repr((self.optional, + self.mandatory, + self.compiler_flag)) + + +nested_scopes = _Feature((2, 1, 0, "beta", 1), + (2, 2, 0, "alpha", 0), + CO_NESTED) + +generators = _Feature((2, 2, 0, "alpha", 1), + (2, 3, 0, "final", 0), + CO_GENERATOR_ALLOWED) + +division = _Feature((2, 2, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_DIVISION) + +absolute_import = _Feature((2, 5, 0, "alpha", 1), + (3, 0, 0, "alpha", 0), + CO_FUTURE_ABSOLUTE_IMPORT) + +with_statement = _Feature((2, 5, 0, "alpha", 1), + (2, 6, 0, "alpha", 0), + CO_FUTURE_WITH_STATEMENT) + +print_function = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_PRINT_FUNCTION) + +unicode_literals = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_UNICODE_LITERALS) + +barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2), + (4, 0, 0, "alpha", 0), + CO_FUTURE_BARRY_AS_BDFL) + +generator_stop = _Feature((3, 5, 0, "beta", 1), + (3, 7, 0, "alpha", 0), + CO_FUTURE_GENERATOR_STOP) + +annotations = _Feature((3, 7, 0, "beta", 1), + (3, 11, 0, "alpha", 0), + CO_FUTURE_ANNOTATIONS) diff --git a/pllava/lib/python3.10/__phello__.foo.py b/pllava/lib/python3.10/__phello__.foo.py new file mode 100644 index 0000000000000000000000000000000000000000..8e8623ee1daacbd61475bb84a840813dd99da18d --- /dev/null +++ b/pllava/lib/python3.10/__phello__.foo.py @@ -0,0 +1 @@ +# This file exists as a helper for the test.test_frozen module. diff --git a/pllava/lib/python3.10/_bootsubprocess.py b/pllava/lib/python3.10/_bootsubprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..014782f616c823bae543909e3b17dad3dccc8cd0 --- /dev/null +++ b/pllava/lib/python3.10/_bootsubprocess.py @@ -0,0 +1,97 @@ +""" +Basic subprocess implementation for POSIX which only uses os functions. Only +implement features required by setup.py to build C extension modules when +subprocess is unavailable. setup.py is not used on Windows. +""" +import os + + +# distutils.spawn used by distutils.command.build_ext +# calls subprocess.Popen().wait() +class Popen: + def __init__(self, cmd, env=None): + self._cmd = cmd + self._env = env + self.returncode = None + + def wait(self): + pid = os.fork() + if pid == 0: + # Child process + try: + if self._env is not None: + os.execve(self._cmd[0], self._cmd, self._env) + else: + os.execv(self._cmd[0], self._cmd) + finally: + os._exit(1) + else: + # Parent process + _, status = os.waitpid(pid, 0) + self.returncode = os.waitstatus_to_exitcode(status) + + return self.returncode + + +def _check_cmd(cmd): + # Use regex [a-zA-Z0-9./-]+: reject empty string, space, etc. 
+ safe_chars = [] + for first, last in (("a", "z"), ("A", "Z"), ("0", "9")): + for ch in range(ord(first), ord(last) + 1): + safe_chars.append(chr(ch)) + safe_chars.append("./-") + safe_chars = ''.join(safe_chars) + + if isinstance(cmd, (tuple, list)): + check_strs = cmd + elif isinstance(cmd, str): + check_strs = [cmd] + else: + return False + + for arg in check_strs: + if not isinstance(arg, str): + return False + if not arg: + # reject empty string + return False + for ch in arg: + if ch not in safe_chars: + return False + + return True + + +# _aix_support used by distutil.util calls subprocess.check_output() +def check_output(cmd, **kwargs): + if kwargs: + raise NotImplementedError(repr(kwargs)) + + if not _check_cmd(cmd): + raise ValueError(f"unsupported command: {cmd!r}") + + tmp_filename = "check_output.tmp" + if not isinstance(cmd, str): + cmd = " ".join(cmd) + cmd = f"{cmd} >{tmp_filename}" + + try: + # system() spawns a shell + status = os.system(cmd) + exitcode = os.waitstatus_to_exitcode(status) + if exitcode: + raise ValueError(f"Command {cmd!r} returned non-zero " + f"exit status {exitcode!r}") + + try: + with open(tmp_filename, "rb") as fp: + stdout = fp.read() + except FileNotFoundError: + stdout = b'' + finally: + try: + os.unlink(tmp_filename) + except OSError: + pass + + return stdout diff --git a/pllava/lib/python3.10/_collections_abc.py b/pllava/lib/python3.10/_collections_abc.py new file mode 100644 index 0000000000000000000000000000000000000000..72fd633cf9ac2f94327b5c48c45b6344e8dad5aa --- /dev/null +++ b/pllava/lib/python3.10/_collections_abc.py @@ -0,0 +1,1171 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +Unit tests are in test_collections. +""" + +from abc import ABCMeta, abstractmethod +import sys + +GenericAlias = type(list[int]) +EllipsisType = type(...) +def _f(): pass +FunctionType = type(_f) +del _f + +__all__ = ["Awaitable", "Coroutine", + "AsyncIterable", "AsyncIterator", "AsyncGenerator", + "Hashable", "Iterable", "Iterator", "Generator", "Reversible", + "Sized", "Container", "Callable", "Collection", + "Set", "MutableSet", + "Mapping", "MutableMapping", + "MappingView", "KeysView", "ItemsView", "ValuesView", + "Sequence", "MutableSequence", + "ByteString", + ] + +# This module has been renamed from collections.abc to _collections_abc to +# speed up interpreter startup. Some of the types such as MutableMapping are +# required early but collections module imports a lot of other modules. +# See issue #19218 +__name__ = "collections.abc" + +# Private list of types that we want to register with the various ABCs +# so that they will pass tests like: +# it = iter(somebytearray) +# assert isinstance(it, Iterable) +# Note: in other implementations, these types might not be distinct +# and they may have their own implementation specific types that +# are not included on this list. +bytes_iterator = type(iter(b'')) +bytearray_iterator = type(iter(bytearray())) +#callable_iterator = ??? 
+dict_keyiterator = type(iter({}.keys())) +dict_valueiterator = type(iter({}.values())) +dict_itemiterator = type(iter({}.items())) +list_iterator = type(iter([])) +list_reverseiterator = type(iter(reversed([]))) +range_iterator = type(iter(range(0))) +longrange_iterator = type(iter(range(1 << 1000))) +set_iterator = type(iter(set())) +str_iterator = type(iter("")) +tuple_iterator = type(iter(())) +zip_iterator = type(iter(zip())) +## views ## +dict_keys = type({}.keys()) +dict_values = type({}.values()) +dict_items = type({}.items()) +## misc ## +mappingproxy = type(type.__dict__) +generator = type((lambda: (yield))()) +## coroutine ## +async def _coro(): pass +_coro = _coro() +coroutine = type(_coro) +_coro.close() # Prevent ResourceWarning +del _coro +## asynchronous generator ## +async def _ag(): yield +_ag = _ag() +async_generator = type(_ag) +del _ag + + +### ONE-TRICK PONIES ### + +def _check_methods(C, *methods): + mro = C.__mro__ + for method in methods: + for B in mro: + if method in B.__dict__: + if B.__dict__[method] is None: + return NotImplemented + break + else: + return NotImplemented + return True + +class Hashable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __hash__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Hashable: + return _check_methods(C, "__hash__") + return NotImplemented + + +class Awaitable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __await__(self): + yield + + @classmethod + def __subclasshook__(cls, C): + if cls is Awaitable: + return _check_methods(C, "__await__") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) + + +class Coroutine(Awaitable): + + __slots__ = () + + @abstractmethod + def send(self, value): + """Send a value into the coroutine. + Return next yielded value or raise StopIteration. + """ + raise StopIteration + + @abstractmethod + def throw(self, typ, val=None, tb=None): + """Raise an exception in the coroutine. + Return next yielded value or raise StopIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + def close(self): + """Raise GeneratorExit inside coroutine. + """ + try: + self.throw(GeneratorExit) + except (GeneratorExit, StopIteration): + pass + else: + raise RuntimeError("coroutine ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is Coroutine: + return _check_methods(C, '__await__', 'send', 'throw', 'close') + return NotImplemented + + +Coroutine.register(coroutine) + + +class AsyncIterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __aiter__(self): + return AsyncIterator() + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterable: + return _check_methods(C, "__aiter__") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) + + +class AsyncIterator(AsyncIterable): + + __slots__ = () + + @abstractmethod + async def __anext__(self): + """Return the next item or raise StopAsyncIteration when exhausted.""" + raise StopAsyncIteration + + def __aiter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterator: + return _check_methods(C, "__anext__", "__aiter__") + return NotImplemented + + +class AsyncGenerator(AsyncIterator): + + __slots__ = () + + async def __anext__(self): + """Return the next item from the asynchronous generator. + When exhausted, raise StopAsyncIteration. 
+ """ + return await self.asend(None) + + @abstractmethod + async def asend(self, value): + """Send a value into the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + """ + raise StopAsyncIteration + + @abstractmethod + async def athrow(self, typ, val=None, tb=None): + """Raise an exception in the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + async def aclose(self): + """Raise GeneratorExit inside coroutine. + """ + try: + await self.athrow(GeneratorExit) + except (GeneratorExit, StopAsyncIteration): + pass + else: + raise RuntimeError("asynchronous generator ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncGenerator: + return _check_methods(C, '__aiter__', '__anext__', + 'asend', 'athrow', 'aclose') + return NotImplemented + + +AsyncGenerator.register(async_generator) + + +class Iterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + return _check_methods(C, "__iter__") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) + + +class Iterator(Iterable): + + __slots__ = () + + @abstractmethod + def __next__(self): + 'Return the next item from the iterator. When exhausted, raise StopIteration' + raise StopIteration + + def __iter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterator: + return _check_methods(C, '__iter__', '__next__') + return NotImplemented + + +Iterator.register(bytes_iterator) +Iterator.register(bytearray_iterator) +#Iterator.register(callable_iterator) +Iterator.register(dict_keyiterator) +Iterator.register(dict_valueiterator) +Iterator.register(dict_itemiterator) +Iterator.register(list_iterator) +Iterator.register(list_reverseiterator) +Iterator.register(range_iterator) +Iterator.register(longrange_iterator) +Iterator.register(set_iterator) +Iterator.register(str_iterator) +Iterator.register(tuple_iterator) +Iterator.register(zip_iterator) + + +class Reversible(Iterable): + + __slots__ = () + + @abstractmethod + def __reversed__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Reversible: + return _check_methods(C, "__reversed__", "__iter__") + return NotImplemented + + +class Generator(Iterator): + + __slots__ = () + + def __next__(self): + """Return the next item from the generator. + When exhausted, raise StopIteration. + """ + return self.send(None) + + @abstractmethod + def send(self, value): + """Send a value into the generator. + Return next yielded value or raise StopIteration. + """ + raise StopIteration + + @abstractmethod + def throw(self, typ, val=None, tb=None): + """Raise an exception in the generator. + Return next yielded value or raise StopIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + def close(self): + """Raise GeneratorExit inside generator. 
+ """ + try: + self.throw(GeneratorExit) + except (GeneratorExit, StopIteration): + pass + else: + raise RuntimeError("generator ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is Generator: + return _check_methods(C, '__iter__', '__next__', + 'send', 'throw', 'close') + return NotImplemented + + +Generator.register(generator) + + +class Sized(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + return _check_methods(C, "__len__") + return NotImplemented + + +class Container(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + return _check_methods(C, "__contains__") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) + + +class Collection(Sized, Iterable, Container): + + __slots__ = () + + @classmethod + def __subclasshook__(cls, C): + if cls is Collection: + return _check_methods(C, "__len__", "__iter__", "__contains__") + return NotImplemented + + +class _CallableGenericAlias(GenericAlias): + """ Represent `Callable[argtypes, resulttype]`. + + This sets ``__args__`` to a tuple containing the flattened ``argtypes`` + followed by ``resulttype``. + + Example: ``Callable[[int, str], float]`` sets ``__args__`` to + ``(int, str, float)``. + """ + + __slots__ = () + + def __new__(cls, origin, args): + if not (isinstance(args, tuple) and len(args) == 2): + raise TypeError( + "Callable must be used as Callable[[arg, ...], result].") + t_args, t_result = args + if isinstance(t_args, list): + args = (*t_args, t_result) + elif not _is_param_expr(t_args): + raise TypeError(f"Expected a list of types, an ellipsis, " + f"ParamSpec, or Concatenate. Got {t_args}") + return super().__new__(cls, origin, args) + + @property + def __parameters__(self): + params = [] + for arg in self.__args__: + if isinstance(arg, type) and not isinstance(arg, GenericAlias): + continue + # Looks like a genericalias + if hasattr(arg, "__parameters__") and isinstance(arg.__parameters__, tuple): + params.extend(arg.__parameters__) + else: + if _is_typevarlike(arg): + params.append(arg) + return tuple(dict.fromkeys(params)) + + def __repr__(self): + if len(self.__args__) == 2 and _is_param_expr(self.__args__[0]): + return super().__repr__() + return (f'collections.abc.Callable' + f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], ' + f'{_type_repr(self.__args__[-1])}]') + + def __reduce__(self): + args = self.__args__ + if not (len(args) == 2 and _is_param_expr(args[0])): + args = list(args[:-1]), args[-1] + return _CallableGenericAlias, (Callable, args) + + def __getitem__(self, item): + # Called during TypeVar substitution, returns the custom subclass + # rather than the default types.GenericAlias object. Most of the + # code is copied from typing's _GenericAlias and the builtin + # types.GenericAlias. + + # A special case in PEP 612 where if X = Callable[P, int], + # then X[int, str] == X[[int, str]]. 
+ param_len = len(self.__parameters__) + if param_len == 0: + raise TypeError(f'{self} is not a generic class') + if not isinstance(item, tuple): + item = (item,) + if (param_len == 1 and _is_param_expr(self.__parameters__[0]) + and item and not _is_param_expr(item[0])): + item = (list(item),) + item_len = len(item) + if item_len != param_len: + raise TypeError(f'Too {"many" if item_len > param_len else "few"}' + f' arguments for {self};' + f' actual {item_len}, expected {param_len}') + subst = dict(zip(self.__parameters__, item)) + new_args = [] + for arg in self.__args__: + if isinstance(arg, type) and not isinstance(arg, GenericAlias): + new_args.append(arg) + continue + if _is_typevarlike(arg): + if _is_param_expr(arg): + arg = subst[arg] + if not _is_param_expr(arg): + raise TypeError(f"Expected a list of types, an ellipsis, " + f"ParamSpec, or Concatenate. Got {arg}") + else: + arg = subst[arg] + # Looks like a GenericAlias + elif hasattr(arg, '__parameters__') and isinstance(arg.__parameters__, tuple): + subparams = arg.__parameters__ + if subparams: + subargs = tuple(subst[x] for x in subparams) + arg = arg[subargs] + if isinstance(arg, tuple): + new_args.extend(arg) + else: + new_args.append(arg) + + # args[0] occurs due to things like Z[[int, str, bool]] from PEP 612 + if not isinstance(new_args[0], list): + t_result = new_args[-1] + t_args = new_args[:-1] + new_args = (t_args, t_result) + return _CallableGenericAlias(Callable, tuple(new_args)) + + +def _is_typevarlike(arg): + obj = type(arg) + # looks like a TypeVar/ParamSpec + return (obj.__module__ == 'typing' + and obj.__name__ in {'ParamSpec', 'TypeVar'}) + +def _is_param_expr(obj): + """Checks if obj matches either a list of types, ``...``, ``ParamSpec`` or + ``_ConcatenateGenericAlias`` from typing.py + """ + if obj is Ellipsis: + return True + if isinstance(obj, list): + return True + obj = type(obj) + names = ('ParamSpec', '_ConcatenateGenericAlias') + return obj.__module__ == 'typing' and any(obj.__name__ == name for name in names) + +def _type_repr(obj): + """Return the repr() of an object, special-casing types (internal helper). + + Copied from :mod:`typing` since collections.abc + shouldn't depend on that module. + """ + if isinstance(obj, GenericAlias): + return repr(obj) + if isinstance(obj, type): + if obj.__module__ == 'builtins': + return obj.__qualname__ + return f'{obj.__module__}.{obj.__qualname__}' + if obj is Ellipsis: + return '...' + if isinstance(obj, FunctionType): + return obj.__name__ + return repr(obj) + + +class Callable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __call__(self, *args, **kwds): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Callable: + return _check_methods(C, "__call__") + return NotImplemented + + __class_getitem__ = classmethod(_CallableGenericAlias) + + +### SETS ### + + +class Set(Collection): + """A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), redefine __le__ and __ge__, + then the other operations will automatically follow suit. 
+ """ + + __slots__ = () + + def __le__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for elem in self: + if elem not in other: + return False + return True + + def __lt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) < len(other) and self.__le__(other) + + def __gt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) > len(other) and self.__ge__(other) + + def __ge__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True + + def __eq__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) == len(other) and self.__le__(other) + + @classmethod + def _from_iterable(cls, it): + '''Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + ''' + return cls(it) + + def __and__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + return self._from_iterable(value for value in other if value in self) + + __rand__ = __and__ + + def isdisjoint(self, other): + 'Return True if two sets have a null intersection.' + for value in other: + if value in self: + return False + return True + + def __or__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + chain = (e for s in (self, other) for e in s) + return self._from_iterable(chain) + + __ror__ = __or__ + + def __sub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in self + if value not in other) + + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + + def __xor__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return (self - other) | (other - self) + + __rxor__ = __xor__ + + def _hash(self): + """Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + """ + MAX = sys.maxsize + MASK = 2 * MAX + 1 + n = len(self) + h = 1927868237 * (n + 1) + h &= MASK + for x in self: + hx = hash(x) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + h ^= (h >> 11) ^ (h >> 25) + h = h * 69069 + 907133923 + h &= MASK + if h > MAX: + h -= MASK + 1 + if h == -1: + h = 590923713 + return h + + +Set.register(frozenset) + + +class MutableSet(Set): + """A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). 
+ + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ + + __slots__ = () + + @abstractmethod + def add(self, value): + """Add an element.""" + raise NotImplementedError + + @abstractmethod + def discard(self, value): + """Remove an element. Do not raise an exception if absent.""" + raise NotImplementedError + + def remove(self, value): + """Remove an element. If not a member, raise a KeyError.""" + if value not in self: + raise KeyError(value) + self.discard(value) + + def pop(self): + """Return the popped value. Raise KeyError if empty.""" + it = iter(self) + try: + value = next(it) + except StopIteration: + raise KeyError from None + self.discard(value) + return value + + def clear(self): + """This is slow (creates N new iterators!) but effective.""" + try: + while True: + self.pop() + except KeyError: + pass + + def __ior__(self, it): + for value in it: + self.add(value) + return self + + def __iand__(self, it): + for value in (self - it): + self.discard(value) + return self + + def __ixor__(self, it): + if it is self: + self.clear() + else: + if not isinstance(it, Set): + it = self._from_iterable(it) + for value in it: + if value in self: + self.discard(value) + else: + self.add(value) + return self + + def __isub__(self, it): + if it is self: + self.clear() + else: + for value in it: + self.discard(value) + return self + + +MutableSet.register(set) + + +### MAPPINGS ### + +class Mapping(Collection): + """A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. + """ + + __slots__ = () + + # Tell ABCMeta.__new__ that this class should have TPFLAGS_MAPPING set. + __abc_tpflags__ = 1 << 6 # Py_TPFLAGS_MAPPING + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' 
+ try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def keys(self): + "D.keys() -> a set-like object providing a view on D's keys" + return KeysView(self) + + def items(self): + "D.items() -> a set-like object providing a view on D's items" + return ItemsView(self) + + def values(self): + "D.values() -> an object providing a view on D's values" + return ValuesView(self) + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + __reversed__ = None + +Mapping.register(mappingproxy) + + +class MappingView(Sized): + + __slots__ = '_mapping', + + def __init__(self, mapping): + self._mapping = mapping + + def __len__(self): + return len(self._mapping) + + def __repr__(self): + return '{0.__class__.__name__}({0._mapping!r})'.format(self) + + __class_getitem__ = classmethod(GenericAlias) + + +class KeysView(MappingView, Set): + + __slots__ = () + + @classmethod + def _from_iterable(cls, it): + return set(it) + + def __contains__(self, key): + return key in self._mapping + + def __iter__(self): + yield from self._mapping + + +KeysView.register(dict_keys) + + +class ItemsView(MappingView, Set): + + __slots__ = () + + @classmethod + def _from_iterable(cls, it): + return set(it) + + def __contains__(self, item): + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v is value or v == value + + def __iter__(self): + for key in self._mapping: + yield (key, self._mapping[key]) + + +ItemsView.register(dict_items) + + +class ValuesView(MappingView, Collection): + + __slots__ = () + + def __contains__(self, value): + for key in self._mapping: + v = self._mapping[key] + if v is value or v == value: + return True + return False + + def __iter__(self): + for key in self._mapping: + yield self._mapping[key] + + +ValuesView.register(dict_values) + + +class MutableMapping(Mapping): + """A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. + """ + + __slots__ = () + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + ''' + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + '''D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + ''' + try: + key = next(iter(self)) + except StopIteration: + raise KeyError from None + value = self[key] + del self[key] + return key, value + + def clear(self): + 'D.clear() -> None. Remove all items from D.' + try: + while True: + self.popitem() + except KeyError: + pass + + def update(self, other=(), /, **kwds): + ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. 
+ If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + ''' + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' + try: + return self[key] + except KeyError: + self[key] = default + return default + + +MutableMapping.register(dict) + + +### SEQUENCES ### + +class Sequence(Reversible, Collection): + """All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + """ + + __slots__ = () + + # Tell ABCMeta.__new__ that this class should have TPFLAGS_SEQUENCE set. + __abc_tpflags__ = 1 << 5 # Py_TPFLAGS_SEQUENCE + + @abstractmethod + def __getitem__(self, index): + raise IndexError + + def __iter__(self): + i = 0 + try: + while True: + v = self[i] + yield v + i += 1 + except IndexError: + return + + def __contains__(self, value): + for v in self: + if v is value or v == value: + return True + return False + + def __reversed__(self): + for i in reversed(range(len(self))): + yield self[i] + + def index(self, value, start=0, stop=None): + '''S.index(value, [start, [stop]]) -> integer -- return first index of value. + Raises ValueError if the value is not present. + + Supporting start and stop arguments is optional, but + recommended. + ''' + if start is not None and start < 0: + start = max(len(self) + start, 0) + if stop is not None and stop < 0: + stop += len(self) + + i = start + while stop is None or i < stop: + try: + v = self[i] + if v is value or v == value: + return i + except IndexError: + break + i += 1 + raise ValueError + + def count(self, value): + 'S.count(value) -> integer -- return number of occurrences of value' + return sum(1 for v in self if v is value or v == value) + +Sequence.register(tuple) +Sequence.register(str) +Sequence.register(range) +Sequence.register(memoryview) + + +class ByteString(Sequence): + """This unifies bytes and bytearray. + + XXX Should add all their methods. + """ + + __slots__ = () + +ByteString.register(bytes) +ByteString.register(bytearray) + + +class MutableSequence(Sequence): + """All the operations on a read-write sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). 
+ """ + + __slots__ = () + + @abstractmethod + def __setitem__(self, index, value): + raise IndexError + + @abstractmethod + def __delitem__(self, index): + raise IndexError + + @abstractmethod + def insert(self, index, value): + 'S.insert(index, value) -- insert value before index' + raise IndexError + + def append(self, value): + 'S.append(value) -- append value to the end of the sequence' + self.insert(len(self), value) + + def clear(self): + 'S.clear() -> None -- remove all items from S' + try: + while True: + self.pop() + except IndexError: + pass + + def reverse(self): + 'S.reverse() -- reverse *IN PLACE*' + n = len(self) + for i in range(n//2): + self[i], self[n-i-1] = self[n-i-1], self[i] + + def extend(self, values): + 'S.extend(iterable) -- extend sequence by appending elements from the iterable' + if values is self: + values = list(values) + for v in values: + self.append(v) + + def pop(self, index=-1): + '''S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + ''' + v = self[index] + del self[index] + return v + + def remove(self, value): + '''S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. + ''' + del self[self.index(value)] + + def __iadd__(self, values): + self.extend(values) + return self + + +MutableSequence.register(list) +MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/pllava/lib/python3.10/_markupbase.py b/pllava/lib/python3.10/_markupbase.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad7e279960f7e1f2bf79d89fe9b905e53f6a12b --- /dev/null +++ b/pllava/lib/python3.10/_markupbase.py @@ -0,0 +1,396 @@ +"""Shared support for scanning document type declarations in HTML and XHTML. + +This module is used as a foundation for the html.parser module. It has no +documented public API and should not be used directly. + +""" + +import re + +_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match +_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match +_commentclose = re.compile(r'--\s*>') +_markedsectionclose = re.compile(r']\s*]\s*>') + +# An analysis of the MS-Word extensions is available at +# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf + +_msmarkedsectionclose = re.compile(r']\s*>') + +del re + + +class ParserBase: + """Parser base class which provides some common support methods used + by the SGML/HTML and XHTML parsers.""" + + def __init__(self): + if self.__class__ is ParserBase: + raise RuntimeError( + "_markupbase.ParserBase must be subclassed") + + def reset(self): + self.lineno = 1 + self.offset = 0 + + def getpos(self): + """Return current line number and offset.""" + return self.lineno, self.offset + + # Internal -- update line number and offset. This should be + # called for each piece of data exactly once, in order -- in other + # words the concatenation of all the input strings to this + # function should be exactly the entire input. + def updatepos(self, i, j): + if i >= j: + return j + rawdata = self.rawdata + nlines = rawdata.count("\n", i, j) + if nlines: + self.lineno = self.lineno + nlines + pos = rawdata.rindex("\n", i, j) # Should not fail + self.offset = j-(pos+1) + else: + self.offset = self.offset + j-i + return j + + _decl_otherchars = '' + + # Internal -- parse declaration (for use by subclasses). 
+    def parse_declaration(self, i):
+        # This is some sort of declaration; in "HTML as
+        # deployed," this should only be the document type
+        # declaration ("<!DOCTYPE html...>").
+        # ISO 8879:1986, however, has more complex
+        # declaration syntax for elements in <!...>, including:
+        # --comment--
+        # [marked section]
+        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
+        # ATTLIST, NOTATION, SHORTREF, USEMAP,
+        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
+        rawdata = self.rawdata
+        j = i + 2
+        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
+        if rawdata[j:j+1] == ">":
+            # the empty comment <!>
+            return j + 1
+        if rawdata[j:j+1] in ("-", ""):
+            # Start of comment followed by buffer boundary,
+            # or just a buffer boundary.
+            return -1
+        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
+        n = len(rawdata)
+        if rawdata[j:j+2] == '--': #comment
+            # Locate --.*-- as the body of the comment
+            return self.parse_comment(i)
+        elif rawdata[j] == '[': #marked section
+            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
+            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
+            # Note that this is extended by Microsoft Office "Save as Web" function
+            # to include [if...] and [endif].
+            return self.parse_marked_section(i)
+        else: #all other declaration elements
+            decltype, j = self._scan_name(j, i)
+        if j < 0:
+            return j
+        if decltype == "doctype":
+            self._decl_otherchars = ''
+        while j < n:
+            c = rawdata[j]
+            if c == ">":
+                # end of declaration syntax
+                data = rawdata[i+2:j]
+                if decltype == "doctype":
+                    self.handle_decl(data)
+                else:
+                    # According to the HTML5 specs sections "8.2.4.44 Bogus
+                    # comment state" and "8.2.4.45 Markup declaration open
+                    # state", a comment token should be emitted.
+                    # Calling unknown_decl provides more flexibility though.
+                    self.unknown_decl(data)
+                return j + 1
+            if c in "\"'":
+                m = _declstringlit_match(rawdata, j)
+                if not m:
+                    return -1 # incomplete
+                j = m.end()
+            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
+                name, j = self._scan_name(j, i)
+            elif c in self._decl_otherchars:
+                j = j + 1
+            elif c == "[":
+                # this could be handled in a separate doctype parser
+                if decltype == "doctype":
+                    j = self._parse_doctype_subset(j + 1, i)
+                elif decltype in {"attlist", "linktype", "link", "element"}:
+                    # must tolerate []'d groups in a content model in an element declaration
+                    # also in data attribute specifications of attlist declaration
+                    # also link type declaration subsets in linktype declarations
+                    # also link attribute specification lists in link declarations
+                    raise AssertionError("unsupported '[' char in %s declaration" % decltype)
+                else:
+                    raise AssertionError("unexpected '[' char in declaration")
+            else:
+                raise AssertionError("unexpected %r char in declaration" % rawdata[j])
+            if j < 0:
+                return j
+        return -1 # incomplete
+
+    # Internal -- parse a marked section
+    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
+    def parse_marked_section(self, i, report=1):
+        rawdata= self.rawdata
+        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
+        sectName, j = self._scan_name( i+3, i )
+        if j < 0:
+            return j
+        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
+            # look for standard ]]> ending
+            match= _markedsectionclose.search(rawdata, i+3)
+        elif sectName in {"if", "else", "endif"}:
+            # look for MS Office ]> ending
+            match= _msmarkedsectionclose.search(rawdata, i+3)
+        else:
+            raise AssertionError(
+                'unknown status keyword %r in marked section' % rawdata[i+3:j]
+            )
+        if not match:
+            return -1
+        if report:
+            j = match.start(0)
+            self.unknown_decl(rawdata[i+3: j])
+        return match.end(0)
+
+    # Internal -- parse comment, return length or -1 if not terminated
+    def parse_comment(self, i, report=1):
+        rawdata = self.rawdata
diff --git a/pllava/lib/python3.10/cgitb.py b/pllava/lib/python3.10/cgitb.py
new file mode 100644
--- /dev/null
+++ b/pllava/lib/python3.10/cgitb.py
+"""More comprehensive traceback formatting for Python scripts.
+
+To enable this module, do:
+
+    import cgitb; cgitb.enable()
+
+at the top of your script.  The optional arguments to enable() are:
+
+    display     - if true, tracebacks are displayed in the web browser
+    logdir      - if set, tracebacks are written to files in this directory
+    context     - number of lines of source code to show for each stack frame
+    format      - 'text' or 'html' controls the output format
+
+By default, tracebacks are displayed but not saved, the context is 5 lines
+and the output format is 'html' (for backwards compatibility with the
+original use of this module)
+
+Alternatively, if you have caught an exception and want cgitb to display it
+for you, call cgitb.handler().  The optional argument to handler() is a
+3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
+The default handler displays output as HTML.
+
+"""
+import inspect
+import keyword
+import linecache
+import os
+import pydoc
+import sys
+import tempfile
+import time
+import tokenize
+import traceback
+
+def reset():
+    """Return a string that resets the CGI and browser to a known state."""
+    return '''<!--: spam
+Content-Type: text/html
+
+<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
+<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
+</font> </font> </font> </script> </object> </blockquote> </pre>
+</table> </table> </table> </table> </table> </font> </font> </font>'''
+
+__UNDEF__ = []                          # a special sentinel object
+def small(text):
+    if text:
+        return '<small>' + text + '</small>'
+    else:
+        return ''
+
+def strong(text):
+    if text:
+        return '<strong>' + text + '</strong>'
+    else:
+        return ''
+
+def grey(text):
+    if text:
+        return '<font color="#909090">' + text + '</font>'
+    else:
+        return ''
+
+def lookup(name, frame, locals):
+    """Find the value for a given name in the given environment."""
+    if name in locals:
+        return 'local', locals[name]
+    if name in frame.f_globals:
+        return 'global', frame.f_globals[name]
+    if '__builtins__' in frame.f_globals:
+        builtins = frame.f_globals['__builtins__']
+        if type(builtins) is type({}):
+            if name in builtins:
+                return 'builtin', builtins[name]
+        else:
+            if hasattr(builtins, name):
+                return 'builtin', getattr(builtins, name)
+    return None, __UNDEF__
+
+def scanvars(reader, frame, locals):
+    """Scan one logical line of Python and look up values of variables used."""
+    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
+    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
+        if ttype == tokenize.NEWLINE: break
+        if ttype == tokenize.NAME and token not in keyword.kwlist:
+            if lasttoken == '.':
+                if parent is not __UNDEF__:
+                    value = getattr(parent, token, __UNDEF__)
+                vars.append((prefix + token, prefix, value))
+            else:
+                where, value = lookup(token, frame, locals)
+                vars.append((token, where, value))
+        elif token == '.':
+            prefix += lasttoken + '.'
+            parent = value
+        else:
+            parent, prefix = None, ''
+        lasttoken = token
+    return vars
+
+def html(einfo, context=5):
+    """Return a nice HTML document describing a given traceback."""
+    etype, evalue, etb = einfo
+    if isinstance(etype, type):
+        etype = etype.__name__
+    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+    date = time.ctime(time.time())
+    head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
+        '<big><big>%s</big></big>' %
+        strong(pydoc.html.escape(str(etype))),
+        '#ffffff', '#6622aa', pyver + '<br>' + date) + '''
+<p>A problem occurred in a Python script.  Here is the sequence of
+function calls leading up to the error, in the order they occurred.</p>'''
+
+    indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>'
+    frames = []
+    records = inspect.getinnerframes(etb, context)
+    for frame, file, lnum, func, lines, index in records:
+        if file:
+            file = os.path.abspath(file)
+            link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
+        else:
+            file = link = '?'
+        args, varargs, varkw, locals = inspect.getargvalues(frame)
+        call = ''
+        if func != '?':
+            call = 'in ' + strong(pydoc.html.escape(func))
+            if func != "<module>":
+                call += inspect.formatargvalues(args, varargs, varkw, locals,
+                    formatvalue=lambda value: '=' + pydoc.html.repr(value))
+
+        highlight = {}
+        def reader(lnum=[lnum]):
+            highlight[lnum[0]] = 1
+            try: return linecache.getline(file, lnum[0])
+            finally: lnum[0] += 1
+        vars = scanvars(reader, frame, locals)
+
+        rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
+                ('<big>&nbsp;</big>', link, call)]
+        if index is not None:
+            i = lnum - index
+            for line in lines:
+                num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;'
+                if i in highlight:
+                    line = '<tt>=&gt;%s%s</tt>' % (num, pydoc.html.preformat(line))
+                    rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
+                else:
+                    line = '<tt>&nbsp;&nbsp;%s%s</tt>' % (num, pydoc.html.preformat(line))
+                    rows.append('<tr><td>%s</td></tr>' % grey(line))
+                i += 1
+
+        done, dump = {}, []
+        for name, where, value in vars:
+            if name in done: continue
+            done[name] = 1
+            if value is not __UNDEF__:
+                if where in ('global', 'builtin'):
+                    name = ('<em>%s</em> ' % where) + strong(name)
+                elif where == 'local':
+                    name = strong(name)
+                else:
+                    name = where + strong(name.split('.')[-1])
+                dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value)))
+            else:
+                dump.append(name + ' <em>undefined</em>')
+
+        rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
+        frames.append('''
+<table width="100%%" cellspacing=0 cellpadding=0 border=0>
+%s</table>''' % '\n'.join(rows))
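+    # Each frame thus renders as one small HTML table: a header row with the
+    # file link and call signature, the highlighted source context, and a
+    # final row dumping the variables referenced on the failing lines.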
+
+    exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))),
+                                pydoc.html.escape(str(evalue)))]
+    for name in dir(evalue):
+        if name[:1] == '_': continue
+        value = pydoc.html.repr(getattr(evalue, name))
+        exception.append('\n<br>%s%s&nbsp;=\n%s' % (indent, name, value))
+
+    return head + ''.join(frames) + ''.join(exception) + '''
+
+
+<!-- The above is a description of an error in a Python program, formatted
+     for a web browser because the 'cgitb' module was enabled.  In case you
+     are not reading this in a web browser, here is the original traceback:
+
+%s
+-->
+''' % pydoc.html.escape(
+          ''.join(traceback.format_exception(etype, evalue, etb)))
+
+def text(einfo, context=5):
+    """Return a plain text document describing a given traceback."""
+    etype, evalue, etb = einfo
+    if isinstance(etype, type):
+        etype = etype.__name__
+    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+    date = time.ctime(time.time())
+    head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
+A problem occurred in a Python script.  Here is the sequence of
+function calls leading up to the error, in the order they occurred.
+'''
+
+    frames = []
+    records = inspect.getinnerframes(etb, context)
+    for frame, file, lnum, func, lines, index in records:
+        file = file and os.path.abspath(file) or '?'
+        args, varargs, varkw, locals = inspect.getargvalues(frame)
+        call = ''
+        if func != '?':
+            call = 'in ' + func
+            if func != "<module>":
+                call += inspect.formatargvalues(args, varargs, varkw, locals,
+                    formatvalue=lambda value: '=' + pydoc.text.repr(value))
+
+        highlight = {}
+        def reader(lnum=[lnum]):
+            highlight[lnum[0]] = 1
+            try: return linecache.getline(file, lnum[0])
+            finally: lnum[0] += 1
+        vars = scanvars(reader, frame, locals)
+
+        rows = [' %s %s' % (file, call)]
+        if index is not None:
+            i = lnum - index
+            for line in lines:
+                num = '%5d ' % i
+                rows.append(num+line.rstrip())
+                i += 1
+
+        done, dump = {}, []
+        for name, where, value in vars:
+            if name in done: continue
+            done[name] = 1
+            if value is not __UNDEF__:
+                if where == 'global': name = 'global ' + name
+                elif where != 'local': name = where + name.split('.')[-1]
+                dump.append('%s = %s' % (name, pydoc.text.repr(value)))
+            else:
+                dump.append(name + ' undefined')
+
+        rows.append('\n'.join(dump))
+        frames.append('\n%s\n' % '\n'.join(rows))
+
+    exception = ['%s: %s' % (str(etype), str(evalue))]
+    for name in dir(evalue):
+        value = pydoc.text.repr(getattr(evalue, name))
+        exception.append('\n%s%s = %s' % (" "*4, name, value))
+
+    return head + ''.join(frames) + ''.join(exception) + '''
+
+The above is a description of an error in a Python program. Here is
+the original traceback:
+
+%s
+''' % ''.join(traceback.format_exception(etype, evalue, etb))
+
+class Hook:
+    """A hook to replace sys.excepthook that shows tracebacks in HTML."""
+
+    def __init__(self, display=1, logdir=None, context=5, file=None,
+                 format="html"):
+        self.display = display          # send tracebacks to browser if true
+        self.logdir = logdir            # log tracebacks to files if not None
+        self.context = context          # number of source code lines per frame
+        self.file = file or sys.stdout  # place to send the output
+        self.format = format
+
+    def __call__(self, etype, evalue, etb):
+        self.handle((etype, evalue, etb))
+
+    def handle(self, info=None):
+        info = info or sys.exc_info()
+        if self.format == "html":
+            self.file.write(reset())
+
+        formatter = (self.format=="html") and html or text
+        plain = False
+        try:
+            doc = formatter(info, self.context)
+        except:                         # just in case something goes wrong
+            doc = ''.join(traceback.format_exception(*info))
+            plain = True
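+        # (If formatting itself fails, fall back to the raw traceback text;
+        # the plain flag makes the display path below HTML-escape it.)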

+        if self.display:
+            if plain:
+                doc = pydoc.html.escape(doc)
+                self.file.write('<pre>' + doc + '</pre>\n')
+            else:
+                self.file.write(doc + '\n')
+        else:
+            self.file.write('<p>
A problem occurred in a Python script.\n') + + if self.logdir is not None: + suffix = ['.txt', '.html'][self.format=="html"] + (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) + + try: + with os.fdopen(fd, 'w') as file: + file.write(doc) + msg = '%s contains the description of this error.' % path + except: + msg = 'Tried to save traceback to %s, but failed.' % path + + if self.format == 'html': + self.file.write('

<p>%s</p>
\n' % msg) + else: + self.file.write(msg + '\n') + try: + self.file.flush() + except: pass + +handler = Hook().handle +def enable(display=1, logdir=None, context=5, format="html"): + """Install an exception handler that formats tracebacks as HTML. + + The optional argument 'display' can be set to 0 to suppress sending the + traceback to the browser, and 'logdir' can be set to a directory to cause + tracebacks to be written to files there.""" + sys.excepthook = Hook(display=display, logdir=logdir, + context=context, format=format) diff --git a/pllava/lib/python3.10/code.py b/pllava/lib/python3.10/code.py new file mode 100644 index 0000000000000000000000000000000000000000..76000f8c8b2c1e1c98f8fb4c831c2ea3e2de268d --- /dev/null +++ b/pllava/lib/python3.10/code.py @@ -0,0 +1,315 @@ +"""Utilities needed to emulate Python's interactive interpreter. + +""" + +# Inspired by similar code by Jeff Epler and Fredrik Lundh. + + +import sys +import traceback +from codeop import CommandCompiler, compile_command + +__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact", + "compile_command"] + +class InteractiveInterpreter: + """Base class for InteractiveConsole. + + This class deals with parsing and interpreter state (the user's + namespace); it doesn't deal with input buffering or prompting or + input file naming (the filename is always passed in explicitly). + + """ + + def __init__(self, locals=None): + """Constructor. + + The optional 'locals' argument specifies the dictionary in + which code will be executed; it defaults to a newly created + dictionary with key "__name__" set to "__console__" and key + "__doc__" set to None. + + """ + if locals is None: + locals = {"__name__": "__console__", "__doc__": None} + self.locals = locals + self.compile = CommandCompiler() + + def runsource(self, source, filename="", symbol="single"): + """Compile and run some source in the interpreter. + + Arguments are as for compile_command(). + + One of several things can happen: + + 1) The input is incorrect; compile_command() raised an + exception (SyntaxError or OverflowError). A syntax traceback + will be printed by calling the showsyntaxerror() method. + + 2) The input is incomplete, and more input is required; + compile_command() returned None. Nothing happens. + + 3) The input is complete; compile_command() returned a code + object. The code is executed by calling self.runcode() (which + also handles run-time exceptions, except for SystemExit). + + The return value is True in case 2, False in the other cases (unless + an exception is raised). The return value can be used to + decide whether to use sys.ps1 or sys.ps2 to prompt the next + line. + + """ + try: + code = self.compile(source, filename, symbol) + except (OverflowError, SyntaxError, ValueError): + # Case 1 + self.showsyntaxerror(filename) + return False + + if code is None: + # Case 2 + return True + + # Case 3 + self.runcode(code) + return False + + def runcode(self, code): + """Execute a code object. + + When an exception occurs, self.showtraceback() is called to + display a traceback. All exceptions are caught except + SystemExit, which is reraised. + + A note about KeyboardInterrupt: this exception may occur + elsewhere in this code, and may not always be caught. The + caller should be prepared to deal with it. + + """ + try: + exec(code, self.locals) + except SystemExit: + raise + except: + self.showtraceback() + + def showsyntaxerror(self, filename=None): + """Display the syntax error that just occurred. 
+ + This doesn't display a stack trace because there isn't one. + + If a filename is given, it is stuffed in the exception instead + of what was there before (because Python's parser always uses + "" when reading from a string). + + The output is written by self.write(), below. + + """ + type, value, tb = sys.exc_info() + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + if filename and type is SyntaxError: + # Work hard to stuff the correct filename in the exception + try: + msg, (dummy_filename, lineno, offset, line) = value.args + except ValueError: + # Not the format we expect; leave it alone + pass + else: + # Stuff in the right filename + value = SyntaxError(msg, (filename, lineno, offset, line)) + sys.last_value = value + if sys.excepthook is sys.__excepthook__: + lines = traceback.format_exception_only(type, value) + self.write(''.join(lines)) + else: + # If someone has set sys.excepthook, we let that take precedence + # over self.write + sys.excepthook(type, value, tb) + + def showtraceback(self): + """Display the exception that just occurred. + + We remove the first stack item because it is our own code. + + The output is written by self.write(), below. + + """ + sys.last_type, sys.last_value, last_tb = ei = sys.exc_info() + sys.last_traceback = last_tb + try: + lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next) + if sys.excepthook is sys.__excepthook__: + self.write(''.join(lines)) + else: + # If someone has set sys.excepthook, we let that take precedence + # over self.write + sys.excepthook(ei[0], ei[1], last_tb) + finally: + last_tb = ei = None + + def write(self, data): + """Write a string. + + The base implementation writes to sys.stderr; a subclass may + replace this with a different implementation. + + """ + sys.stderr.write(data) + + +class InteractiveConsole(InteractiveInterpreter): + """Closely emulate the behavior of the interactive Python interpreter. + + This class builds on InteractiveInterpreter and adds prompting + using the familiar sys.ps1 and sys.ps2, and input buffering. + + """ + + def __init__(self, locals=None, filename=""): + """Constructor. + + The optional locals argument will be passed to the + InteractiveInterpreter base class. + + The optional filename argument should specify the (file)name + of the input stream; it will show up in tracebacks. + + """ + InteractiveInterpreter.__init__(self, locals) + self.filename = filename + self.resetbuffer() + + def resetbuffer(self): + """Reset the input buffer.""" + self.buffer = [] + + def interact(self, banner=None, exitmsg=None): + """Closely emulate the interactive Python console. + + The optional banner argument specifies the banner to print + before the first interaction; by default it prints a banner + similar to the one printed by the real Python interpreter, + followed by the current class name in parentheses (so as not + to confuse this with the real interpreter -- since it's so + close!). + + The optional exitmsg argument specifies the exit message + printed when exiting. Pass the empty string to suppress + printing an exit message. If exitmsg is not given or None, + a default message is printed. + + """ + try: + sys.ps1 + except AttributeError: + sys.ps1 = ">>> " + try: + sys.ps2 + except AttributeError: + sys.ps2 = "... " + cprt = 'Type "help", "copyright", "credits" or "license" for more information.' 
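+        # An illustrative default banner (version details vary by build):
+        #   Python 3.10.x (main, ...) [GCC ...] on linux
+        #   Type "help", "copyright", "credits" or "license" for more information.
+        #   (InteractiveConsole)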
+ if banner is None: + self.write("Python %s on %s\n%s\n(%s)\n" % + (sys.version, sys.platform, cprt, + self.__class__.__name__)) + elif banner: + self.write("%s\n" % str(banner)) + more = 0 + while 1: + try: + if more: + prompt = sys.ps2 + else: + prompt = sys.ps1 + try: + line = self.raw_input(prompt) + except EOFError: + self.write("\n") + break + else: + more = self.push(line) + except KeyboardInterrupt: + self.write("\nKeyboardInterrupt\n") + self.resetbuffer() + more = 0 + if exitmsg is None: + self.write('now exiting %s...\n' % self.__class__.__name__) + elif exitmsg != '': + self.write('%s\n' % exitmsg) + + def push(self, line): + """Push a line to the interpreter. + + The line should not have a trailing newline; it may have + internal newlines. The line is appended to a buffer and the + interpreter's runsource() method is called with the + concatenated contents of the buffer as source. If this + indicates that the command was executed or invalid, the buffer + is reset; otherwise, the command is incomplete, and the buffer + is left as it was after the line was appended. The return + value is 1 if more input is required, 0 if the line was dealt + with in some way (this is the same as runsource()). + + """ + self.buffer.append(line) + source = "\n".join(self.buffer) + more = self.runsource(source, self.filename) + if not more: + self.resetbuffer() + return more + + def raw_input(self, prompt=""): + """Write a prompt and read a line. + + The returned line does not include the trailing newline. + When the user enters the EOF key sequence, EOFError is raised. + + The base implementation uses the built-in function + input(); a subclass may replace this with a different + implementation. + + """ + return input(prompt) + + + +def interact(banner=None, readfunc=None, local=None, exitmsg=None): + """Closely emulate the interactive Python interpreter. + + This is a backwards compatible interface to the InteractiveConsole + class. When readfunc is not specified, it attempts to import the + readline module to enable GNU readline if it is available. + + Arguments (all optional, all default to None): + + banner -- passed to InteractiveConsole.interact() + readfunc -- if not None, replaces InteractiveConsole.raw_input() + local -- passed to InteractiveInterpreter.__init__() + exitmsg -- passed to InteractiveConsole.interact() + + """ + console = InteractiveConsole(local) + if readfunc is not None: + console.raw_input = readfunc + else: + try: + import readline + except ImportError: + pass + console.interact(banner, exitmsg) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('-q', action='store_true', + help="don't print version and copyright messages") + args = parser.parse_args() + if args.q or sys.flags.quiet: + banner = '' + else: + banner = None + interact(banner) diff --git a/pllava/lib/python3.10/colorsys.py b/pllava/lib/python3.10/colorsys.py new file mode 100644 index 0000000000000000000000000000000000000000..0f52512a67d87c571835467b411ec8ec4e691230 --- /dev/null +++ b/pllava/lib/python3.10/colorsys.py @@ -0,0 +1,165 @@ +"""Conversion functions between RGB and other color systems. + +This modules provides two functions for each color system ABC: + + rgb_to_abc(r, g, b) --> a, b, c + abc_to_rgb(a, b, c) --> r, g, b + +All inputs and outputs are triples of floats in the range [0.0...1.0] +(with the exception of I and Q, which covers a slightly larger range). +Inputs outside the valid range may cause exceptions or invalid outputs. 
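+
+Example (illustrative):
+
+    >>> rgb_to_hsv(1.0, 0.0, 0.0)       # pure red
+    (0.0, 1.0, 1.0)
+    >>> hsv_to_rgb(0.0, 1.0, 1.0)       # and back again
+    (1.0, 0.0, 0.0)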
+ +Supported color systems: +RGB: Red, Green, Blue components +YIQ: Luminance, Chrominance (used by composite video signals) +HLS: Hue, Luminance, Saturation +HSV: Hue, Saturation, Value +""" + +# References: +# http://en.wikipedia.org/wiki/YIQ +# http://en.wikipedia.org/wiki/HLS_color_space +# http://en.wikipedia.org/wiki/HSV_color_space + +__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb", + "rgb_to_hsv","hsv_to_rgb"] + +# Some floating point constants + +ONE_THIRD = 1.0/3.0 +ONE_SIXTH = 1.0/6.0 +TWO_THIRD = 2.0/3.0 + +# YIQ: used by composite video signals (linear combinations of RGB) +# Y: perceived grey level (0.0 == black, 1.0 == white) +# I, Q: color components +# +# There are a great many versions of the constants used in these formulae. +# The ones in this library uses constants from the FCC version of NTSC. + +def rgb_to_yiq(r, g, b): + y = 0.30*r + 0.59*g + 0.11*b + i = 0.74*(r-y) - 0.27*(b-y) + q = 0.48*(r-y) + 0.41*(b-y) + return (y, i, q) + +def yiq_to_rgb(y, i, q): + # r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48) + # b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48) + # g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59 + + r = y + 0.9468822170900693*i + 0.6235565819861433*q + g = y - 0.27478764629897834*i - 0.6356910791873801*q + b = y - 1.1085450346420322*i + 1.7090069284064666*q + + if r < 0.0: + r = 0.0 + if g < 0.0: + g = 0.0 + if b < 0.0: + b = 0.0 + if r > 1.0: + r = 1.0 + if g > 1.0: + g = 1.0 + if b > 1.0: + b = 1.0 + return (r, g, b) + + +# HLS: Hue, Luminance, Saturation +# H: position in the spectrum +# L: color lightness +# S: color saturation + +def rgb_to_hls(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + sumc = (maxc+minc) + rangec = (maxc-minc) + l = sumc/2.0 + if minc == maxc: + return 0.0, l, 0.0 + if l <= 0.5: + s = rangec / sumc + else: + s = rangec / (2.0-sumc) + rc = (maxc-r) / rangec + gc = (maxc-g) / rangec + bc = (maxc-b) / rangec + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, l, s + +def hls_to_rgb(h, l, s): + if s == 0.0: + return l, l, l + if l <= 0.5: + m2 = l * (1.0+s) + else: + m2 = l+s-(l*s) + m1 = 2.0*l - m2 + return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) + +def _v(m1, m2, hue): + hue = hue % 1.0 + if hue < ONE_SIXTH: + return m1 + (m2-m1)*hue*6.0 + if hue < 0.5: + return m2 + if hue < TWO_THIRD: + return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 + return m1 + + +# HSV: Hue, Saturation, Value +# H: position in the spectrum +# S: color saturation ("purity") +# V: color brightness + +def rgb_to_hsv(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + v = maxc + if minc == maxc: + return 0.0, 0.0, v + s = (maxc-minc) / maxc + rc = (maxc-r) / (maxc-minc) + gc = (maxc-g) / (maxc-minc) + bc = (maxc-b) / (maxc-minc) + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, s, v + +def hsv_to_rgb(h, s, v): + if s == 0.0: + return v, v, v + i = int(h*6.0) # XXX assume int() truncates! 
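+    # (h*6 selects one of six 60-degree hue sectors; f below is the
+    # fractional position inside that sector -- illustrative: h=0.5
+    # gives i=3, f=0.0)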
+ f = (h*6.0) - i + p = v*(1.0 - s) + q = v*(1.0 - s*f) + t = v*(1.0 - s*(1.0-f)) + i = i%6 + if i == 0: + return v, t, p + if i == 1: + return q, v, p + if i == 2: + return p, v, t + if i == 3: + return p, q, v + if i == 4: + return t, p, v + if i == 5: + return v, p, q + # Cannot get here diff --git a/pllava/lib/python3.10/compileall.py b/pllava/lib/python3.10/compileall.py new file mode 100644 index 0000000000000000000000000000000000000000..50183ea85468aa74c367576a6666c490e4d4de2f --- /dev/null +++ b/pllava/lib/python3.10/compileall.py @@ -0,0 +1,463 @@ +"""Module/script to byte-compile all .py files to .pyc files. + +When called as a script with arguments, this compiles the directories +given as arguments recursively; the -l option prevents it from +recursing into directories. + +Without arguments, if compiles all modules on sys.path, without +recursing into subdirectories. (Even though it should do so for +packages -- for now, you'll have to deal with packages separately.) + +See module py_compile for details of the actual byte-compilation. +""" +import os +import sys +import importlib.util +import py_compile +import struct +import filecmp + +from functools import partial +from pathlib import Path + +__all__ = ["compile_dir","compile_file","compile_path"] + +def _walk_dir(dir, maxlevels, quiet=0): + if quiet < 2 and isinstance(dir, os.PathLike): + dir = os.fspath(dir) + if not quiet: + print('Listing {!r}...'.format(dir)) + try: + names = os.listdir(dir) + except OSError: + if quiet < 2: + print("Can't list {!r}".format(dir)) + names = [] + names.sort() + for name in names: + if name == '__pycache__': + continue + fullname = os.path.join(dir, name) + if not os.path.isdir(fullname): + yield fullname + elif (maxlevels > 0 and name != os.curdir and name != os.pardir and + os.path.isdir(fullname) and not os.path.islink(fullname)): + yield from _walk_dir(fullname, maxlevels=maxlevels - 1, + quiet=quiet) + +def compile_dir(dir, maxlevels=None, ddir=None, force=False, + rx=None, quiet=0, legacy=False, optimize=-1, workers=1, + invalidation_mode=None, *, stripdir=None, + prependdir=None, limit_sl_dest=None, hardlink_dupes=False): + """Byte-compile all modules in the given directory tree. + + Arguments (only dir is required): + + dir: the directory to byte-compile + maxlevels: maximum recursion level (default `sys.getrecursionlimit()`) + ddir: the directory that will be prepended to the path to the + file as it is compiled into each byte-code file. + force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: int or list of optimization levels or -1 for level of + the interpreter. Multiple levels leads to multiple compiled + files each with one optimization level. 
+ workers: maximum number of parallel workers + invalidation_mode: how the up-to-dateness of the pyc will be checked + stripdir: part of path to left-strip from source file path + prependdir: path to prepend to beginning of original file path, applied + after stripdir + limit_sl_dest: ignore symlinks if they are pointing outside of + the defined path + hardlink_dupes: hardlink duplicated pyc files + """ + ProcessPoolExecutor = None + if ddir is not None and (stripdir is not None or prependdir is not None): + raise ValueError(("Destination dir (ddir) cannot be used " + "in combination with stripdir or prependdir")) + if ddir is not None: + stripdir = dir + prependdir = ddir + ddir = None + if workers < 0: + raise ValueError('workers must be greater or equal to 0') + if workers != 1: + # Check if this is a system where ProcessPoolExecutor can function. + from concurrent.futures.process import _check_system_limits + try: + _check_system_limits() + except NotImplementedError: + workers = 1 + else: + from concurrent.futures import ProcessPoolExecutor + if maxlevels is None: + maxlevels = sys.getrecursionlimit() + files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels) + success = True + if workers != 1 and ProcessPoolExecutor is not None: + # If workers == 0, let ProcessPoolExecutor choose + workers = workers or None + with ProcessPoolExecutor(max_workers=workers) as executor: + results = executor.map(partial(compile_file, + ddir=ddir, force=force, + rx=rx, quiet=quiet, + legacy=legacy, + optimize=optimize, + invalidation_mode=invalidation_mode, + stripdir=stripdir, + prependdir=prependdir, + limit_sl_dest=limit_sl_dest, + hardlink_dupes=hardlink_dupes), + files) + success = min(results, default=True) + else: + for file in files: + if not compile_file(file, ddir, force, rx, quiet, + legacy, optimize, invalidation_mode, + stripdir=stripdir, prependdir=prependdir, + limit_sl_dest=limit_sl_dest, + hardlink_dupes=hardlink_dupes): + success = False + return success + +def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, + legacy=False, optimize=-1, + invalidation_mode=None, *, stripdir=None, prependdir=None, + limit_sl_dest=None, hardlink_dupes=False): + """Byte-compile one file. + + Arguments (only fullname is required): + + fullname: the file to byte-compile + ddir: if given, the directory name compiled in to the + byte-code file. + force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: int or list of optimization levels or -1 for level of + the interpreter. Multiple levels leads to multiple compiled + files each with one optimization level. + invalidation_mode: how the up-to-dateness of the pyc will be checked + stripdir: part of path to left-strip from source file path + prependdir: path to prepend to beginning of original file path, applied + after stripdir + limit_sl_dest: ignore symlinks if they are pointing outside of + the defined path. 
+ hardlink_dupes: hardlink duplicated pyc files + """ + + if ddir is not None and (stripdir is not None or prependdir is not None): + raise ValueError(("Destination dir (ddir) cannot be used " + "in combination with stripdir or prependdir")) + + success = True + fullname = os.fspath(fullname) + stripdir = os.fspath(stripdir) if stripdir is not None else None + name = os.path.basename(fullname) + + dfile = None + + if ddir is not None: + dfile = os.path.join(ddir, name) + + if stripdir is not None: + fullname_parts = fullname.split(os.path.sep) + stripdir_parts = stripdir.split(os.path.sep) + ddir_parts = list(fullname_parts) + + for spart, opart in zip(stripdir_parts, fullname_parts): + if spart == opart: + ddir_parts.remove(spart) + + dfile = os.path.join(*ddir_parts) + + if prependdir is not None: + if dfile is None: + dfile = os.path.join(prependdir, fullname) + else: + dfile = os.path.join(prependdir, dfile) + + if isinstance(optimize, int): + optimize = [optimize] + + # Use set() to remove duplicates. + # Use sorted() to create pyc files in a deterministic order. + optimize = sorted(set(optimize)) + + if hardlink_dupes and len(optimize) < 2: + raise ValueError("Hardlinking of duplicated bytecode makes sense " + "only for more than one optimization level") + + if rx is not None: + mo = rx.search(fullname) + if mo: + return success + + if limit_sl_dest is not None and os.path.islink(fullname): + if Path(limit_sl_dest).resolve() not in Path(fullname).resolve().parents: + return success + + opt_cfiles = {} + + if os.path.isfile(fullname): + for opt_level in optimize: + if legacy: + opt_cfiles[opt_level] = fullname + 'c' + else: + if opt_level >= 0: + opt = opt_level if opt_level >= 1 else '' + cfile = (importlib.util.cache_from_source( + fullname, optimization=opt)) + opt_cfiles[opt_level] = cfile + else: + cfile = importlib.util.cache_from_source(fullname) + opt_cfiles[opt_level] = cfile + + head, tail = name[:-3], name[-3:] + if tail == '.py': + if not force: + try: + mtime = int(os.stat(fullname).st_mtime) + expect = struct.pack('<4sLL', importlib.util.MAGIC_NUMBER, + 0, mtime & 0xFFFF_FFFF) + for cfile in opt_cfiles.values(): + with open(cfile, 'rb') as chandle: + actual = chandle.read(12) + if expect != actual: + break + else: + return success + except OSError: + pass + if not quiet: + print('Compiling {!r}...'.format(fullname)) + try: + for index, opt_level in enumerate(optimize): + cfile = opt_cfiles[opt_level] + ok = py_compile.compile(fullname, cfile, dfile, True, + optimize=opt_level, + invalidation_mode=invalidation_mode) + if index > 0 and hardlink_dupes: + previous_cfile = opt_cfiles[optimize[index - 1]] + if filecmp.cmp(cfile, previous_cfile, shallow=False): + os.unlink(cfile) + os.link(previous_cfile, cfile) + except py_compile.PyCompileError as err: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + # escape non-printable characters in msg + encoding = sys.stdout.encoding or sys.getdefaultencoding() + msg = err.msg.encode(encoding, errors='backslashreplace').decode(encoding) + print(msg) + except (SyntaxError, UnicodeError, OSError) as e: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + print(e.__class__.__name__ + ':', e) + else: + if ok == 0: + success = False + return success + +def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0, + legacy=False, 
optimize=-1, + invalidation_mode=None): + """Byte-compile all module on sys.path. + + Arguments (all optional): + + skip_curdir: if true, skip current directory (default True) + maxlevels: max recursion level (default 0) + force: as for compile_dir() (default False) + quiet: as for compile_dir() (default 0) + legacy: as for compile_dir() (default False) + optimize: as for compile_dir() (default -1) + invalidation_mode: as for compiler_dir() + """ + success = True + for dir in sys.path: + if (not dir or dir == os.curdir) and skip_curdir: + if quiet < 2: + print('Skipping current directory') + else: + success = success and compile_dir( + dir, + maxlevels, + None, + force, + quiet=quiet, + legacy=legacy, + optimize=optimize, + invalidation_mode=invalidation_mode, + ) + return success + + +def main(): + """Script main program.""" + import argparse + + parser = argparse.ArgumentParser( + description='Utilities to support installing Python libraries.') + parser.add_argument('-l', action='store_const', const=0, + default=None, dest='maxlevels', + help="don't recurse into subdirectories") + parser.add_argument('-r', type=int, dest='recursion', + help=('control the maximum recursion level. ' + 'if `-l` and `-r` options are specified, ' + 'then `-r` takes precedence.')) + parser.add_argument('-f', action='store_true', dest='force', + help='force rebuild even if timestamps are up to date') + parser.add_argument('-q', action='count', dest='quiet', default=0, + help='output only error messages; -qq will suppress ' + 'the error messages as well.') + parser.add_argument('-b', action='store_true', dest='legacy', + help='use legacy (pre-PEP3147) compiled file locations') + parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None, + help=('directory to prepend to file paths for use in ' + 'compile-time tracebacks and in runtime ' + 'tracebacks in cases where the source file is ' + 'unavailable')) + parser.add_argument('-s', metavar='STRIPDIR', dest='stripdir', + default=None, + help=('part of path to left-strip from path ' + 'to source file - for example buildroot. ' + '`-d` and `-s` options cannot be ' + 'specified together.')) + parser.add_argument('-p', metavar='PREPENDDIR', dest='prependdir', + default=None, + help=('path to add as prefix to path ' + 'to source file - for example / to make ' + 'it absolute when some part is removed ' + 'by `-s` option. 
' + '`-d` and `-p` options cannot be ' + 'specified together.')) + parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None, + help=('skip files matching the regular expression; ' + 'the regexp is searched for in the full path ' + 'of each file considered for compilation')) + parser.add_argument('-i', metavar='FILE', dest='flist', + help=('add all the files and directories listed in ' + 'FILE to the list considered for compilation; ' + 'if "-", names are read from stdin')) + parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*', + help=('zero or more file and directory names ' + 'to compile; if no arguments given, defaults ' + 'to the equivalent of -l sys.path')) + parser.add_argument('-j', '--workers', default=1, + type=int, help='Run compileall concurrently') + invalidation_modes = [mode.name.lower().replace('_', '-') + for mode in py_compile.PycInvalidationMode] + parser.add_argument('--invalidation-mode', + choices=sorted(invalidation_modes), + help=('set .pyc invalidation mode; defaults to ' + '"checked-hash" if the SOURCE_DATE_EPOCH ' + 'environment variable is set, and ' + '"timestamp" otherwise.')) + parser.add_argument('-o', action='append', type=int, dest='opt_levels', + help=('Optimization levels to run compilation with. ' + 'Default is -1 which uses the optimization level ' + 'of the Python interpreter itself (see -O).')) + parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest', + help='Ignore symlinks pointing outsite of the DIR') + parser.add_argument('--hardlink-dupes', action='store_true', + dest='hardlink_dupes', + help='Hardlink duplicated pyc files') + + args = parser.parse_args() + compile_dests = args.compile_dest + + if args.rx: + import re + args.rx = re.compile(args.rx) + + if args.limit_sl_dest == "": + args.limit_sl_dest = None + + if args.recursion is not None: + maxlevels = args.recursion + else: + maxlevels = args.maxlevels + + if args.opt_levels is None: + args.opt_levels = [-1] + + if len(args.opt_levels) == 1 and args.hardlink_dupes: + parser.error(("Hardlinking of duplicated bytecode makes sense " + "only for more than one optimization level.")) + + if args.ddir is not None and ( + args.stripdir is not None or args.prependdir is not None + ): + parser.error("-d cannot be used in combination with -s or -p") + + # if flist is provided then load it + if args.flist: + try: + with (sys.stdin if args.flist=='-' else + open(args.flist, encoding="utf-8")) as f: + for line in f: + compile_dests.append(line.strip()) + except OSError: + if args.quiet < 2: + print("Error reading file list {}".format(args.flist)) + return False + + if args.invalidation_mode: + ivl_mode = args.invalidation_mode.replace('-', '_').upper() + invalidation_mode = py_compile.PycInvalidationMode[ivl_mode] + else: + invalidation_mode = None + + success = True + try: + if compile_dests: + for dest in compile_dests: + if os.path.isfile(dest): + if not compile_file(dest, args.ddir, args.force, args.rx, + args.quiet, args.legacy, + invalidation_mode=invalidation_mode, + stripdir=args.stripdir, + prependdir=args.prependdir, + optimize=args.opt_levels, + limit_sl_dest=args.limit_sl_dest, + hardlink_dupes=args.hardlink_dupes): + success = False + else: + if not compile_dir(dest, maxlevels, args.ddir, + args.force, args.rx, args.quiet, + args.legacy, workers=args.workers, + invalidation_mode=invalidation_mode, + stripdir=args.stripdir, + prependdir=args.prependdir, + optimize=args.opt_levels, + limit_sl_dest=args.limit_sl_dest, + hardlink_dupes=args.hardlink_dupes): + 
success = False + return success + else: + return compile_path(legacy=args.legacy, force=args.force, + quiet=args.quiet, + invalidation_mode=invalidation_mode) + except KeyboardInterrupt: + if args.quiet < 2: + print("\n[interrupted]") + return False + return True + + +if __name__ == '__main__': + exit_status = int(not main()) + sys.exit(exit_status) diff --git a/pllava/lib/python3.10/datetime.py b/pllava/lib/python3.10/datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..d087c9852c7d2a0539a9b5bcd6bab93bc0c1753a --- /dev/null +++ b/pllava/lib/python3.10/datetime.py @@ -0,0 +1,2524 @@ +"""Concrete date/time and related types. + +See http://www.iana.org/time-zones/repository/tz-link.html for +time zone and DST data sources. +""" + +__all__ = ("date", "datetime", "time", "timedelta", "timezone", "tzinfo", + "MINYEAR", "MAXYEAR") + + +import time as _time +import math as _math +import sys +from operator import index as _index + +def _cmp(x, y): + return 0 if x == y else 1 if x > y else -1 + +MINYEAR = 1 +MAXYEAR = 9999 +_MAXORDINAL = 3652059 # date.max.toordinal() + +# Utility functions, adapted from Python's Demo/classes/Dates.py, which +# also assumes the current Gregorian calendar indefinitely extended in +# both directions. Difference: Dates.py calls January 1 of year 0 day +# number 1. The code here calls January 1 of year 1 day number 1. This is +# to match the definition of the "proleptic Gregorian" calendar in Dershowitz +# and Reingold's "Calendrical Calculations", where it's the base calendar +# for all computations. See the book for algorithms for converting between +# proleptic Gregorian ordinals and many other calendar systems. + +# -1 is a placeholder for indexing purposes. +_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + +_DAYS_BEFORE_MONTH = [-1] # -1 is a placeholder for indexing purposes. +dbm = 0 +for dim in _DAYS_IN_MONTH[1:]: + _DAYS_BEFORE_MONTH.append(dbm) + dbm += dim +del dbm, dim + +def _is_leap(year): + "year -> 1 if leap year, else 0." + return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) + +def _days_before_year(year): + "year -> number of days before January 1st of year." + y = year - 1 + return y*365 + y//4 - y//100 + y//400 + +def _days_in_month(year, month): + "year, month -> number of days in that month in that year." + assert 1 <= month <= 12, month + if month == 2 and _is_leap(year): + return 29 + return _DAYS_IN_MONTH[month] + +def _days_before_month(year, month): + "year, month -> number of days in year preceding first day of month." + assert 1 <= month <= 12, 'month must be in 1..12' + return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year)) + +def _ymd2ord(year, month, day): + "year, month, day -> ordinal, considering 01-Jan-0001 as day 1." + assert 1 <= month <= 12, 'month must be in 1..12' + dim = _days_in_month(year, month) + assert 1 <= day <= dim, ('day must be in 1..%d' % dim) + return (_days_before_year(year) + + _days_before_month(year, month) + + day) + +_DI400Y = _days_before_year(401) # number of days in 400 years +_DI100Y = _days_before_year(101) # " " " " 100 " +_DI4Y = _days_before_year(5) # " " " " 4 " + +# A 4-year cycle has an extra leap day over what we'd get from pasting +# together 4 single years. +assert _DI4Y == 4 * 365 + 1 + +# Similarly, a 400-year cycle has an extra leap day over what we'd get from +# pasting together 4 100-year cycles. 
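+# (400 Gregorian years contain 97 leap days: 100 multiples of 4, minus the
+# 4 century years, plus the one year divisible by 400; 100 - 4 + 1 = 97.)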
+assert _DI400Y == 4 * _DI100Y + 1 + +# OTOH, a 100-year cycle has one fewer leap day than we'd get from +# pasting together 25 4-year cycles. +assert _DI100Y == 25 * _DI4Y - 1 + +def _ord2ymd(n): + "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." + + # n is a 1-based index, starting at 1-Jan-1. The pattern of leap years + # repeats exactly every 400 years. The basic strategy is to find the + # closest 400-year boundary at or before n, then work with the offset + # from that boundary to n. Life is much clearer if we subtract 1 from + # n first -- then the values of n at 400-year boundaries are exactly + # those divisible by _DI400Y: + # + # D M Y n n-1 + # -- --- ---- ---------- ---------------- + # 31 Dec -400 -_DI400Y -_DI400Y -1 + # 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary + # ... + # 30 Dec 000 -1 -2 + # 31 Dec 000 0 -1 + # 1 Jan 001 1 0 400-year boundary + # 2 Jan 001 2 1 + # 3 Jan 001 3 2 + # ... + # 31 Dec 400 _DI400Y _DI400Y -1 + # 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary + n -= 1 + n400, n = divmod(n, _DI400Y) + year = n400 * 400 + 1 # ..., -399, 1, 401, ... + + # Now n is the (non-negative) offset, in days, from January 1 of year, to + # the desired date. Now compute how many 100-year cycles precede n. + # Note that it's possible for n100 to equal 4! In that case 4 full + # 100-year cycles precede the desired day, which implies the desired + # day is December 31 at the end of a 400-year cycle. + n100, n = divmod(n, _DI100Y) + + # Now compute how many 4-year cycles precede it. + n4, n = divmod(n, _DI4Y) + + # And now how many single years. Again n1 can be 4, and again meaning + # that the desired day is December 31 at the end of the 4-year cycle. + n1, n = divmod(n, 365) + + year += n100 * 100 + n4 * 4 + n1 + if n1 == 4 or n100 == 4: + assert n == 0 + return year-1, 12, 31 + + # Now the year is correct, and n is the offset from January 1. We find + # the month via an estimate that's either exact or one too large. + leapyear = n1 == 3 and (n4 != 24 or n100 == 3) + assert leapyear == _is_leap(year) + month = (n + 50) >> 5 + preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear) + if preceding > n: # estimate is too large + month -= 1 + preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear) + n -= preceding + assert 0 <= n < _days_in_month(year, month) + + # Now the year and month are correct, and n is the offset from the + # start of that month: we're done! + return year, month, n+1 + +# Month and day names. For localized versions, see the calendar module. +_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] +_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + + +def _build_struct_time(y, m, d, hh, mm, ss, dstflag): + wday = (_ymd2ord(y, m, d) + 6) % 7 + dnum = _days_before_month(y, m) + d + return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag)) + +def _format_time(hh, mm, ss, us, timespec='auto'): + specs = { + 'hours': '{:02d}', + 'minutes': '{:02d}:{:02d}', + 'seconds': '{:02d}:{:02d}:{:02d}', + 'milliseconds': '{:02d}:{:02d}:{:02d}.{:03d}', + 'microseconds': '{:02d}:{:02d}:{:02d}.{:06d}' + } + + if timespec == 'auto': + # Skip trailing microseconds when us==0. 
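+        # (illustrative: hh=12, mm=30, ss=0, us=0 formats as '12:30:00'
+        # rather than '12:30:00.000000')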
+ timespec = 'microseconds' if us else 'seconds' + elif timespec == 'milliseconds': + us //= 1000 + try: + fmt = specs[timespec] + except KeyError: + raise ValueError('Unknown timespec value') + else: + return fmt.format(hh, mm, ss, us) + +def _format_offset(off): + s = '' + if off is not None: + if off.days < 0: + sign = "-" + off = -off + else: + sign = "+" + hh, mm = divmod(off, timedelta(hours=1)) + mm, ss = divmod(mm, timedelta(minutes=1)) + s += "%s%02d:%02d" % (sign, hh, mm) + if ss or ss.microseconds: + s += ":%02d" % ss.seconds + + if ss.microseconds: + s += '.%06d' % ss.microseconds + return s + +# Correctly substitute for %z and %Z escapes in strftime formats. +def _wrap_strftime(object, format, timetuple): + # Don't call utcoffset() or tzname() unless actually needed. + freplace = None # the string to use for %f + zreplace = None # the string to use for %z + Zreplace = None # the string to use for %Z + + # Scan format for %z and %Z escapes, replacing as needed. + newformat = [] + push = newformat.append + i, n = 0, len(format) + while i < n: + ch = format[i] + i += 1 + if ch == '%': + if i < n: + ch = format[i] + i += 1 + if ch == 'f': + if freplace is None: + freplace = '%06d' % getattr(object, + 'microsecond', 0) + newformat.append(freplace) + elif ch == 'z': + if zreplace is None: + zreplace = "" + if hasattr(object, "utcoffset"): + offset = object.utcoffset() + if offset is not None: + sign = '+' + if offset.days < 0: + offset = -offset + sign = '-' + h, rest = divmod(offset, timedelta(hours=1)) + m, rest = divmod(rest, timedelta(minutes=1)) + s = rest.seconds + u = offset.microseconds + if u: + zreplace = '%c%02d%02d%02d.%06d' % (sign, h, m, s, u) + elif s: + zreplace = '%c%02d%02d%02d' % (sign, h, m, s) + else: + zreplace = '%c%02d%02d' % (sign, h, m) + assert '%' not in zreplace + newformat.append(zreplace) + elif ch == 'Z': + if Zreplace is None: + Zreplace = "" + if hasattr(object, "tzname"): + s = object.tzname() + if s is not None: + # strftime is going to have at this: escape % + Zreplace = s.replace('%', '%%') + newformat.append(Zreplace) + else: + push('%') + push(ch) + else: + push('%') + else: + push(ch) + newformat = "".join(newformat) + return _time.strftime(newformat, timetuple) + +# Helpers for parsing the result of isoformat() +def _parse_isoformat_date(dtstr): + # It is assumed that this function will only be called with a + # string of length exactly 10, and (though this is not used) ASCII-only + year = int(dtstr[0:4]) + if dtstr[4] != '-': + raise ValueError('Invalid date separator: %s' % dtstr[4]) + + month = int(dtstr[5:7]) + + if dtstr[7] != '-': + raise ValueError('Invalid date separator') + + day = int(dtstr[8:10]) + + return [year, month, day] + +def _parse_hh_mm_ss_ff(tstr): + # Parses things of the form HH[:MM[:SS[.fff[fff]]]] + len_str = len(tstr) + + time_comps = [0, 0, 0, 0] + pos = 0 + for comp in range(0, 3): + if (len_str - pos) < 2: + raise ValueError('Incomplete time component') + + time_comps[comp] = int(tstr[pos:pos+2]) + + pos += 2 + next_char = tstr[pos:pos+1] + + if not next_char or comp >= 2: + break + + if next_char != ':': + raise ValueError('Invalid time separator: %c' % next_char) + + pos += 1 + + if pos < len_str: + if tstr[pos] != '.': + raise ValueError('Invalid microsecond component') + else: + pos += 1 + + len_remainder = len_str - pos + if len_remainder not in (3, 6): + raise ValueError('Invalid microsecond component') + + time_comps[3] = int(tstr[pos:]) + if len_remainder == 3: + time_comps[3] *= 1000 + + return 
time_comps + +def _parse_isoformat_time(tstr): + # Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]] + len_str = len(tstr) + if len_str < 2: + raise ValueError('Isoformat time too short') + + # This is equivalent to re.search('[+-]', tstr), but faster + tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1) + timestr = tstr[:tz_pos-1] if tz_pos > 0 else tstr + + time_comps = _parse_hh_mm_ss_ff(timestr) + + tzi = None + if tz_pos > 0: + tzstr = tstr[tz_pos:] + + # Valid time zone strings are: + # HH:MM len: 5 + # HH:MM:SS len: 8 + # HH:MM:SS.ffffff len: 15 + + if len(tzstr) not in (5, 8, 15): + raise ValueError('Malformed time zone string') + + tz_comps = _parse_hh_mm_ss_ff(tzstr) + if all(x == 0 for x in tz_comps): + tzi = timezone.utc + else: + tzsign = -1 if tstr[tz_pos - 1] == '-' else 1 + + td = timedelta(hours=tz_comps[0], minutes=tz_comps[1], + seconds=tz_comps[2], microseconds=tz_comps[3]) + + tzi = timezone(tzsign * td) + + time_comps.append(tzi) + + return time_comps + + +# Just raise TypeError if the arg isn't None or a string. +def _check_tzname(name): + if name is not None and not isinstance(name, str): + raise TypeError("tzinfo.tzname() must return None or string, " + "not '%s'" % type(name)) + +# name is the offset-producing method, "utcoffset" or "dst". +# offset is what it returned. +# If offset isn't None or timedelta, raises TypeError. +# If offset is None, returns None. +# Else offset is checked for being in range. +# If it is, its integer value is returned. Else ValueError is raised. +def _check_utc_offset(name, offset): + assert name in ("utcoffset", "dst") + if offset is None: + return + if not isinstance(offset, timedelta): + raise TypeError("tzinfo.%s() must return None " + "or timedelta, not '%s'" % (name, type(offset))) + if not -timedelta(1) < offset < timedelta(1): + raise ValueError("%s()=%s, must be strictly between " + "-timedelta(hours=24) and timedelta(hours=24)" % + (name, offset)) + +def _check_date_fields(year, month, day): + year = _index(year) + month = _index(month) + day = _index(day) + if not MINYEAR <= year <= MAXYEAR: + raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year) + if not 1 <= month <= 12: + raise ValueError('month must be in 1..12', month) + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + raise ValueError('day must be in 1..%d' % dim, day) + return year, month, day + +def _check_time_fields(hour, minute, second, microsecond, fold): + hour = _index(hour) + minute = _index(minute) + second = _index(second) + microsecond = _index(microsecond) + if not 0 <= hour <= 23: + raise ValueError('hour must be in 0..23', hour) + if not 0 <= minute <= 59: + raise ValueError('minute must be in 0..59', minute) + if not 0 <= second <= 59: + raise ValueError('second must be in 0..59', second) + if not 0 <= microsecond <= 999999: + raise ValueError('microsecond must be in 0..999999', microsecond) + if fold not in (0, 1): + raise ValueError('fold must be either 0 or 1', fold) + return hour, minute, second, microsecond, fold + +def _check_tzinfo_arg(tz): + if tz is not None and not isinstance(tz, tzinfo): + raise TypeError("tzinfo argument must be None or of a tzinfo subclass") + +def _cmperror(x, y): + raise TypeError("can't compare '%s' to '%s'" % ( + type(x).__name__, type(y).__name__)) + +def _divide_and_round(a, b): + """divide a by b and round result to the nearest integer + + When the ratio is exactly half-way between two integers, + the even integer is returned. 
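+
+    Illustrative values:
+
+        _divide_and_round(7, 2)   ->  4   (3.5 rounds to the even 4)
+        _divide_and_round(5, 2)   ->  2   (2.5 rounds to the even 2)
+        _divide_and_round(-5, 2)  -> -2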
+ """ + # Based on the reference implementation for divmod_near + # in Objects/longobject.c. + q, r = divmod(a, b) + # round up if either r / b > 0.5, or r / b == 0.5 and q is odd. + # The expression r / b > 0.5 is equivalent to 2 * r > b if b is + # positive, 2 * r < b if b negative. + r *= 2 + greater_than_half = r > b if b > 0 else r < b + if greater_than_half or r == b and q % 2 == 1: + q += 1 + + return q + + +class timedelta: + """Represent the difference between two datetime objects. + + Supported operators: + + - add, subtract timedelta + - unary plus, minus, abs + - compare to timedelta + - multiply, divide by int + + In addition, datetime supports subtraction of two datetime objects + returning a timedelta, and addition or subtraction of a datetime + and a timedelta giving a datetime. + + Representation: (days, seconds, microseconds). Why? Because I + felt like it. + """ + __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' + + def __new__(cls, days=0, seconds=0, microseconds=0, + milliseconds=0, minutes=0, hours=0, weeks=0): + # Doing this efficiently and accurately in C is going to be difficult + # and error-prone, due to ubiquitous overflow possibilities, and that + # C double doesn't have enough bits of precision to represent + # microseconds over 10K years faithfully. The code here tries to make + # explicit where go-fast assumptions can be relied on, in order to + # guide the C implementation; it's way more convoluted than speed- + # ignoring auto-overflow-to-long idiomatic Python could be. + + # XXX Check that all inputs are ints or floats. + + # Final values, all integer. + # s and us fit in 32-bit signed ints; d isn't bounded. + d = s = us = 0 + + # Normalize everything to days, seconds, microseconds. + days += weeks*7 + seconds += minutes*60 + hours*3600 + microseconds += milliseconds*1000 + + # Get rid of all fractions, and normalize s and us. + # Take a deep breath . 
+ if isinstance(days, float): + dayfrac, days = _math.modf(days) + daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.)) + assert daysecondswhole == int(daysecondswhole) # can't overflow + s = int(daysecondswhole) + assert days == int(days) + d = int(days) + else: + daysecondsfrac = 0.0 + d = days + assert isinstance(daysecondsfrac, float) + assert abs(daysecondsfrac) <= 1.0 + assert isinstance(d, int) + assert abs(s) <= 24 * 3600 + # days isn't referenced again before redefinition + + if isinstance(seconds, float): + secondsfrac, seconds = _math.modf(seconds) + assert seconds == int(seconds) + seconds = int(seconds) + secondsfrac += daysecondsfrac + assert abs(secondsfrac) <= 2.0 + else: + secondsfrac = daysecondsfrac + # daysecondsfrac isn't referenced again + assert isinstance(secondsfrac, float) + assert abs(secondsfrac) <= 2.0 + + assert isinstance(seconds, int) + days, seconds = divmod(seconds, 24*3600) + d += days + s += int(seconds) # can't overflow + assert isinstance(s, int) + assert abs(s) <= 2 * 24 * 3600 + # seconds isn't referenced again before redefinition + + usdouble = secondsfrac * 1e6 + assert abs(usdouble) < 2.1e6 # exact value not critical + # secondsfrac isn't referenced again + + if isinstance(microseconds, float): + microseconds = round(microseconds + usdouble) + seconds, microseconds = divmod(microseconds, 1000000) + days, seconds = divmod(seconds, 24*3600) + d += days + s += seconds + else: + microseconds = int(microseconds) + seconds, microseconds = divmod(microseconds, 1000000) + days, seconds = divmod(seconds, 24*3600) + d += days + s += seconds + microseconds = round(microseconds + usdouble) + assert isinstance(s, int) + assert isinstance(microseconds, int) + assert abs(s) <= 3 * 24 * 3600 + assert abs(microseconds) < 3.1e6 + + # Just a little bit of carrying possible for microseconds and seconds. 
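+        # (abs(microseconds) < 3.1e6 and abs(s) <= 3*24*3600 here, so the
+        # divmods below carry only a few seconds into s and days into d)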
+ seconds, us = divmod(microseconds, 1000000) + s += seconds + days, s = divmod(s, 24*3600) + d += days + + assert isinstance(d, int) + assert isinstance(s, int) and 0 <= s < 24*3600 + assert isinstance(us, int) and 0 <= us < 1000000 + + if abs(d) > 999999999: + raise OverflowError("timedelta # of days is too large: %d" % d) + + self = object.__new__(cls) + self._days = d + self._seconds = s + self._microseconds = us + self._hashcode = -1 + return self + + def __repr__(self): + args = [] + if self._days: + args.append("days=%d" % self._days) + if self._seconds: + args.append("seconds=%d" % self._seconds) + if self._microseconds: + args.append("microseconds=%d" % self._microseconds) + if not args: + args.append('0') + return "%s.%s(%s)" % (self.__class__.__module__, + self.__class__.__qualname__, + ', '.join(args)) + + def __str__(self): + mm, ss = divmod(self._seconds, 60) + hh, mm = divmod(mm, 60) + s = "%d:%02d:%02d" % (hh, mm, ss) + if self._days: + def plural(n): + return n, abs(n) != 1 and "s" or "" + s = ("%d day%s, " % plural(self._days)) + s + if self._microseconds: + s = s + ".%06d" % self._microseconds + return s + + def total_seconds(self): + """Total seconds in the duration.""" + return ((self.days * 86400 + self.seconds) * 10**6 + + self.microseconds) / 10**6 + + # Read-only field accessors + @property + def days(self): + """days""" + return self._days + + @property + def seconds(self): + """seconds""" + return self._seconds + + @property + def microseconds(self): + """microseconds""" + return self._microseconds + + def __add__(self, other): + if isinstance(other, timedelta): + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta(self._days + other._days, + self._seconds + other._seconds, + self._microseconds + other._microseconds) + return NotImplemented + + __radd__ = __add__ + + def __sub__(self, other): + if isinstance(other, timedelta): + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta(self._days - other._days, + self._seconds - other._seconds, + self._microseconds - other._microseconds) + return NotImplemented + + def __rsub__(self, other): + if isinstance(other, timedelta): + return -self + other + return NotImplemented + + def __neg__(self): + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta(-self._days, + -self._seconds, + -self._microseconds) + + def __pos__(self): + return self + + def __abs__(self): + if self._days < 0: + return -self + else: + return self + + def __mul__(self, other): + if isinstance(other, int): + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta(self._days * other, + self._seconds * other, + self._microseconds * other) + if isinstance(other, float): + usec = self._to_microseconds() + a, b = other.as_integer_ratio() + return timedelta(0, 0, _divide_and_round(usec * a, b)) + return NotImplemented + + __rmul__ = __mul__ + + def _to_microseconds(self): + return ((self._days * (24*3600) + self._seconds) * 1000000 + + self._microseconds) + + def __floordiv__(self, other): + if not isinstance(other, (int, timedelta)): + return NotImplemented + usec = self._to_microseconds() + if isinstance(other, timedelta): + return usec // other._to_microseconds() + if isinstance(other, int): + return timedelta(0, 0, usec // other) + + def __truediv__(self, other): + if not isinstance(other, (int, float, timedelta)): + 
return NotImplemented + usec = self._to_microseconds() + if isinstance(other, timedelta): + return usec / other._to_microseconds() + if isinstance(other, int): + return timedelta(0, 0, _divide_and_round(usec, other)) + if isinstance(other, float): + a, b = other.as_integer_ratio() + return timedelta(0, 0, _divide_and_round(b * usec, a)) + + def __mod__(self, other): + if isinstance(other, timedelta): + r = self._to_microseconds() % other._to_microseconds() + return timedelta(0, 0, r) + return NotImplemented + + def __divmod__(self, other): + if isinstance(other, timedelta): + q, r = divmod(self._to_microseconds(), + other._to_microseconds()) + return q, timedelta(0, 0, r) + return NotImplemented + + # Comparisons of timedelta objects with other. + + def __eq__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) == 0 + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) <= 0 + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) < 0 + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) >= 0 + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) > 0 + else: + return NotImplemented + + def _cmp(self, other): + assert isinstance(other, timedelta) + return _cmp(self._getstate(), other._getstate()) + + def __hash__(self): + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode + + def __bool__(self): + return (self._days != 0 or + self._seconds != 0 or + self._microseconds != 0) + + # Pickle support. + + def _getstate(self): + return (self._days, self._seconds, self._microseconds) + + def __reduce__(self): + return (self.__class__, self._getstate()) + +timedelta.min = timedelta(-999999999) +timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, + microseconds=999999) +timedelta.resolution = timedelta(microseconds=1) + +class date: + """Concrete date type. + + Constructors: + + __new__() + fromtimestamp() + today() + fromordinal() + + Operators: + + __repr__, __str__ + __eq__, __le__, __lt__, __ge__, __gt__, __hash__ + __add__, __radd__, __sub__ (add/radd only with timedelta arg) + + Methods: + + timetuple() + toordinal() + weekday() + isoweekday(), isocalendar(), isoformat() + ctime() + strftime() + + Properties (readonly): + year, month, day + """ + __slots__ = '_year', '_month', '_day', '_hashcode' + + def __new__(cls, year, month=None, day=None): + """Constructor. + + Arguments: + + year, month, day (required, base 1) + """ + if (month is None and + isinstance(year, (bytes, str)) and len(year) == 4 and + 1 <= ord(year[2:3]) <= 12): + # Pickle support + if isinstance(year, str): + try: + year = year.encode('latin1') + except UnicodeEncodeError: + # More informative error message. + raise ValueError( + "Failed to encode latin1 string when unpickling " + "a date object. " + "pickle.load(data, encoding='latin1') is assumed.") + self = object.__new__(cls) + self.__setstate(year) + self._hashcode = -1 + return self + year, month, day = _check_date_fields(year, month, day) + self = object.__new__(cls) + self._year = year + self._month = month + self._day = day + self._hashcode = -1 + return self + + # Additional constructors + + @classmethod + def fromtimestamp(cls, t): + "Construct a date from a POSIX timestamp (like time.time())." 
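+        # For example, date.fromtimestamp(86_400) is 1970-01-02 in zones at
+        # or east of UTC; the exact result depends on the local time zone.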
+ y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t) + return cls(y, m, d) + + @classmethod + def today(cls): + "Construct a date from time.time()." + t = _time.time() + return cls.fromtimestamp(t) + + @classmethod + def fromordinal(cls, n): + """Construct a date from a proleptic Gregorian ordinal. + + January 1 of year 1 is day 1. Only the year, month and day are + non-zero in the result. + """ + y, m, d = _ord2ymd(n) + return cls(y, m, d) + + @classmethod + def fromisoformat(cls, date_string): + """Construct a date from the output of date.isoformat().""" + if not isinstance(date_string, str): + raise TypeError('fromisoformat: argument must be str') + + try: + assert len(date_string) == 10 + return cls(*_parse_isoformat_date(date_string)) + except Exception: + raise ValueError(f'Invalid isoformat string: {date_string!r}') + + @classmethod + def fromisocalendar(cls, year, week, day): + """Construct a date from the ISO year, week number and weekday. + + This is the inverse of the date.isocalendar() function""" + # Year is bounded this way because 9999-12-31 is (9999, 52, 5) + if not MINYEAR <= year <= MAXYEAR: + raise ValueError(f"Year is out of range: {year}") + + if not 0 < week < 53: + out_of_range = True + + if week == 53: + # ISO years have 53 weeks in them on years starting with a + # Thursday and leap years starting on a Wednesday + first_weekday = _ymd2ord(year, 1, 1) % 7 + if (first_weekday == 4 or (first_weekday == 3 and + _is_leap(year))): + out_of_range = False + + if out_of_range: + raise ValueError(f"Invalid week: {week}") + + if not 0 < day < 8: + raise ValueError(f"Invalid weekday: {day} (range is [1, 7])") + + # Now compute the offset from (Y, 1, 1) in days: + day_offset = (week - 1) * 7 + (day - 1) + + # Calculate the ordinal day for monday, week 1 + day_1 = _isoweek1monday(year) + ord_day = day_1 + day_offset + + return cls(*_ord2ymd(ord_day)) + + # Conversions to string + + def __repr__(self): + """Convert to formal string, for repr(). + + >>> dt = datetime(2010, 1, 1) + >>> repr(dt) + 'datetime.datetime(2010, 1, 1, 0, 0)' + + >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc) + >>> repr(dt) + 'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)' + """ + return "%s.%s(%d, %d, %d)" % (self.__class__.__module__, + self.__class__.__qualname__, + self._year, + self._month, + self._day) + # XXX These shouldn't depend on time.localtime(), because that + # clips the usable dates to [1970 .. 2038). At least ctime() is + # easily done without using strftime() -- that's better too because + # strftime("%c", ...) is locale specific. + + + def ctime(self): + "Return ctime() style string." + weekday = self.toordinal() % 7 or 7 + return "%s %s %2d 00:00:00 %04d" % ( + _DAYNAMES[weekday], + _MONTHNAMES[self._month], + self._day, self._year) + + def strftime(self, fmt): + "Format using strftime()." + return _wrap_strftime(self, fmt, self.timetuple()) + + def __format__(self, fmt): + if not isinstance(fmt, str): + raise TypeError("must be str, not %s" % type(fmt).__name__) + if len(fmt) != 0: + return self.strftime(fmt) + return str(self) + + def isoformat(self): + """Return the date formatted according to ISO. + + This is 'YYYY-MM-DD'. 
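+
+        For example:
+
+        >>> date(2023, 12, 4).isoformat()
+        '2023-12-04'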
+ + References: + - http://www.w3.org/TR/NOTE-datetime + - http://www.cl.cam.ac.uk/~mgk25/iso-time.html + """ + return "%04d-%02d-%02d" % (self._year, self._month, self._day) + + __str__ = isoformat + + # Read-only field accessors + @property + def year(self): + """year (1-9999)""" + return self._year + + @property + def month(self): + """month (1-12)""" + return self._month + + @property + def day(self): + """day (1-31)""" + return self._day + + # Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__, + # __hash__ (and helpers) + + def timetuple(self): + "Return local time tuple compatible with time.localtime()." + return _build_struct_time(self._year, self._month, self._day, + 0, 0, 0, -1) + + def toordinal(self): + """Return proleptic Gregorian ordinal for the year, month and day. + + January 1 of year 1 is day 1. Only the year, month and day values + contribute to the result. + """ + return _ymd2ord(self._year, self._month, self._day) + + def replace(self, year=None, month=None, day=None): + """Return a new date with new values for the specified fields.""" + if year is None: + year = self._year + if month is None: + month = self._month + if day is None: + day = self._day + return type(self)(year, month, day) + + # Comparisons of date objects with other. + + def __eq__(self, other): + if isinstance(other, date): + return self._cmp(other) == 0 + return NotImplemented + + def __le__(self, other): + if isinstance(other, date): + return self._cmp(other) <= 0 + return NotImplemented + + def __lt__(self, other): + if isinstance(other, date): + return self._cmp(other) < 0 + return NotImplemented + + def __ge__(self, other): + if isinstance(other, date): + return self._cmp(other) >= 0 + return NotImplemented + + def __gt__(self, other): + if isinstance(other, date): + return self._cmp(other) > 0 + return NotImplemented + + def _cmp(self, other): + assert isinstance(other, date) + y, m, d = self._year, self._month, self._day + y2, m2, d2 = other._year, other._month, other._day + return _cmp((y, m, d), (y2, m2, d2)) + + def __hash__(self): + "Hash." + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode + + # Computations + + def __add__(self, other): + "Add a date to a timedelta." + if isinstance(other, timedelta): + o = self.toordinal() + other.days + if 0 < o <= _MAXORDINAL: + return type(self).fromordinal(o) + raise OverflowError("result out of range") + return NotImplemented + + __radd__ = __add__ + + def __sub__(self, other): + """Subtract two dates, or a date and a timedelta.""" + if isinstance(other, timedelta): + return self + timedelta(-other.days) + if isinstance(other, date): + days1 = self.toordinal() + days2 = other.toordinal() + return timedelta(days1 - days2) + return NotImplemented + + def weekday(self): + "Return day of the week, where Monday == 0 ... Sunday == 6." + return (self.toordinal() + 6) % 7 + + # Day-of-the-week and week-of-the-year, according to ISO + + def isoweekday(self): + "Return day of the week, where Monday == 1 ... Sunday == 7." + # 1-Jan-0001 is a Monday + return self.toordinal() % 7 or 7 + + def isocalendar(self): + """Return a named tuple containing ISO year, week number, and weekday. + + The first ISO week of the year is the (Mon-Sun) week + containing the year's first Thursday; everything else derives + from that. + + The first week is 1; Monday is 1 ... Sunday is 7. 
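+
+        For example, since 2004 starts on a Thursday, ISO year 2004 begins
+        on Monday, 29 Dec 2003:
+
+        >>> tuple(date(2003, 12, 29).isocalendar())
+        (2004, 1, 1)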
+ + ISO calendar algorithm taken from + http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm + (used with permission) + """ + year = self._year + week1monday = _isoweek1monday(year) + today = _ymd2ord(self._year, self._month, self._day) + # Internally, week and day have origin 0 + week, day = divmod(today - week1monday, 7) + if week < 0: + year -= 1 + week1monday = _isoweek1monday(year) + week, day = divmod(today - week1monday, 7) + elif week >= 52: + if today >= _isoweek1monday(year+1): + year += 1 + week = 0 + return _IsoCalendarDate(year, week+1, day+1) + + # Pickle support. + + def _getstate(self): + yhi, ylo = divmod(self._year, 256) + return bytes([yhi, ylo, self._month, self._day]), + + def __setstate(self, string): + yhi, ylo, self._month, self._day = string + self._year = yhi * 256 + ylo + + def __reduce__(self): + return (self.__class__, self._getstate()) + +_date_class = date # so functions w/ args named "date" can get at the class + +date.min = date(1, 1, 1) +date.max = date(9999, 12, 31) +date.resolution = timedelta(days=1) + + +class tzinfo: + """Abstract base class for time zone info classes. + + Subclasses must override the name(), utcoffset() and dst() methods. + """ + __slots__ = () + + def tzname(self, dt): + "datetime -> string name of time zone." + raise NotImplementedError("tzinfo subclass must override tzname()") + + def utcoffset(self, dt): + "datetime -> timedelta, positive for east of UTC, negative for west of UTC" + raise NotImplementedError("tzinfo subclass must override utcoffset()") + + def dst(self, dt): + """datetime -> DST offset as timedelta, positive for east of UTC. + + Return 0 if DST not in effect. utcoffset() must include the DST + offset. + """ + raise NotImplementedError("tzinfo subclass must override dst()") + + def fromutc(self, dt): + "datetime in UTC -> datetime in local time." + + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + dtoff = dt.utcoffset() + if dtoff is None: + raise ValueError("fromutc() requires a non-None utcoffset() " + "result") + + # See the long comment block at the end of this file for an + # explanation of this algorithm. + dtdst = dt.dst() + if dtdst is None: + raise ValueError("fromutc() requires a non-None dst() result") + delta = dtoff - dtdst + if delta: + dt += delta + dtdst = dt.dst() + if dtdst is None: + raise ValueError("fromutc(): dt.dst gave inconsistent " + "results; cannot convert") + return dt + dtdst + + # Pickle support. + + def __reduce__(self): + getinitargs = getattr(self, "__getinitargs__", None) + if getinitargs: + args = getinitargs() + else: + args = () + getstate = getattr(self, "__getstate__", None) + if getstate: + state = getstate() + else: + state = getattr(self, "__dict__", None) or None + if state is None: + return (self.__class__, args) + else: + return (self.__class__, args, state) + + +class IsoCalendarDate(tuple): + + def __new__(cls, year, week, weekday, /): + return super().__new__(cls, (year, week, weekday)) + + @property + def year(self): + return self[0] + + @property + def week(self): + return self[1] + + @property + def weekday(self): + return self[2] + + def __reduce__(self): + # This code is intended to pickle the object without making the + # class public. 
See https://bugs.python.org/msg352381
+        return (tuple, (tuple(self),))
+
+    def __repr__(self):
+        return (f'{self.__class__.__name__}'
+                f'(year={self[0]}, week={self[1]}, weekday={self[2]})')
+
+
+_IsoCalendarDate = IsoCalendarDate
+del IsoCalendarDate
+_tzinfo_class = tzinfo
+
+class time:
+    """Time with time zone.
+
+    Constructors:
+
+    __new__()
+
+    Operators:
+
+    __repr__, __str__
+    __eq__, __le__, __lt__, __ge__, __gt__, __hash__
+
+    Methods:
+
+    strftime()
+    isoformat()
+    utcoffset()
+    tzname()
+    dst()
+
+    Properties (readonly):
+    hour, minute, second, microsecond, tzinfo, fold
+    """
+    __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold'
+
+    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0):
+        """Constructor.
+
+        Arguments:
+
+        hour, minute, second, microsecond (default to zero)
+        tzinfo (default to None)
+        fold (keyword only, default to zero)
+        """
+        if (isinstance(hour, (bytes, str)) and len(hour) == 6 and
+            ord(hour[0:1])&0x7F < 24):
+            # Pickle support
+            if isinstance(hour, str):
+                try:
+                    hour = hour.encode('latin1')
+                except UnicodeEncodeError:
+                    # More informative error message.
+                    raise ValueError(
+                        "Failed to encode latin1 string when unpickling "
+                        "a time object. "
+                        "pickle.load(data, encoding='latin1') is assumed.")
+            self = object.__new__(cls)
+            self.__setstate(hour, minute or None)
+            self._hashcode = -1
+            return self
+        hour, minute, second, microsecond, fold = _check_time_fields(
+            hour, minute, second, microsecond, fold)
+        _check_tzinfo_arg(tzinfo)
+        self = object.__new__(cls)
+        self._hour = hour
+        self._minute = minute
+        self._second = second
+        self._microsecond = microsecond
+        self._tzinfo = tzinfo
+        self._hashcode = -1
+        self._fold = fold
+        return self
+
+    # Read-only field accessors
+    @property
+    def hour(self):
+        """hour (0-23)"""
+        return self._hour
+
+    @property
+    def minute(self):
+        """minute (0-59)"""
+        return self._minute
+
+    @property
+    def second(self):
+        """second (0-59)"""
+        return self._second
+
+    @property
+    def microsecond(self):
+        """microsecond (0-999999)"""
+        return self._microsecond
+
+    @property
+    def tzinfo(self):
+        """timezone info object"""
+        return self._tzinfo
+
+    @property
+    def fold(self):
+        return self._fold
+
+    # Standard conversions, __hash__ (and helpers)
+
+    # Comparisons of time objects with other.
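+    # A naive time never compares equal to an aware one, and ordering the
+    # two raises TypeError; see the allow_mixed handling in _cmp() below.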
+
+    def __eq__(self, other):
+        if isinstance(other, time):
+            return self._cmp(other, allow_mixed=True) == 0
+        else:
+            return NotImplemented
+
+    def __le__(self, other):
+        if isinstance(other, time):
+            return self._cmp(other) <= 0
+        else:
+            return NotImplemented
+
+    def __lt__(self, other):
+        if isinstance(other, time):
+            return self._cmp(other) < 0
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, time):
+            return self._cmp(other) >= 0
+        else:
+            return NotImplemented
+
+    def __gt__(self, other):
+        if isinstance(other, time):
+            return self._cmp(other) > 0
+        else:
+            return NotImplemented
+
+    def _cmp(self, other, allow_mixed=False):
+        assert isinstance(other, time)
+        mytz = self._tzinfo
+        ottz = other._tzinfo
+        myoff = otoff = None
+
+        if mytz is ottz:
+            base_compare = True
+        else:
+            myoff = self.utcoffset()
+            otoff = other.utcoffset()
+            base_compare = myoff == otoff
+
+        if base_compare:
+            return _cmp((self._hour, self._minute, self._second,
+                         self._microsecond),
+                        (other._hour, other._minute, other._second,
+                         other._microsecond))
+        if myoff is None or otoff is None:
+            if allow_mixed:
+                return 2  # arbitrary non-zero value
+            else:
+                raise TypeError("cannot compare naive and aware times")
+        myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
+        othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
+        return _cmp((myhhmm, self._second, self._microsecond),
+                    (othhmm, other._second, other._microsecond))
+
+    def __hash__(self):
+        """Hash."""
+        if self._hashcode == -1:
+            if self.fold:
+                t = self.replace(fold=0)
+            else:
+                t = self
+            tzoff = t.utcoffset()
+            if not tzoff:  # zero or None
+                self._hashcode = hash(t._getstate()[0])
+            else:
+                h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
+                              timedelta(hours=1))
+                assert not m % timedelta(minutes=1), "whole minute"
+                m //= timedelta(minutes=1)
+                if 0 <= h < 24:
+                    self._hashcode = hash(time(h, m, self.second, self.microsecond))
+                else:
+                    self._hashcode = hash((h, m, self.second, self.microsecond))
+        return self._hashcode
+
+    # Conversion to string
+
+    def _tzstr(self):
+        """Return formatted timezone offset (+xx:xx) or an empty string."""
+        off = self.utcoffset()
+        return _format_offset(off)
+
+    def __repr__(self):
+        """Convert to formal string, for repr()."""
+        if self._microsecond != 0:
+            s = ", %d, %d" % (self._second, self._microsecond)
+        elif self._second != 0:
+            s = ", %d" % self._second
+        else:
+            s = ""
+        s = "%s.%s(%d, %d%s)" % (self.__class__.__module__,
+                                 self.__class__.__qualname__,
+                                 self._hour, self._minute, s)
+        if self._tzinfo is not None:
+            assert s[-1:] == ")"
+            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
+        if self._fold:
+            assert s[-1:] == ")"
+            s = s[:-1] + ", fold=1)"
+        return s
+
+    def isoformat(self, timespec='auto'):
+        """Return the time formatted according to ISO.
+
+        The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional
+        part is omitted if self.microsecond == 0.
+
+        The optional argument timespec specifies the number of additional
+        terms of the time to include. Valid options are 'auto', 'hours',
+        'minutes', 'seconds', 'milliseconds' and 'microseconds'.
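+
+        For example:
+
+        >>> time(12, 30, 59, 123456).isoformat(timespec='milliseconds')
+        '12:30:59.123'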
+ """ + s = _format_time(self._hour, self._minute, self._second, + self._microsecond, timespec) + tz = self._tzstr() + if tz: + s += tz + return s + + __str__ = isoformat + + @classmethod + def fromisoformat(cls, time_string): + """Construct a time from the output of isoformat().""" + if not isinstance(time_string, str): + raise TypeError('fromisoformat: argument must be str') + + try: + return cls(*_parse_isoformat_time(time_string)) + except Exception: + raise ValueError(f'Invalid isoformat string: {time_string!r}') + + + def strftime(self, fmt): + """Format using strftime(). The date part of the timestamp passed + to underlying strftime should not be used. + """ + # The year must be >= 1000 else Python's strftime implementation + # can raise a bogus exception. + timetuple = (1900, 1, 1, + self._hour, self._minute, self._second, + 0, 1, -1) + return _wrap_strftime(self, fmt, timetuple) + + def __format__(self, fmt): + if not isinstance(fmt, str): + raise TypeError("must be str, not %s" % type(fmt).__name__) + if len(fmt) != 0: + return self.strftime(fmt) + return str(self) + + # Timezone functions + + def utcoffset(self): + """Return the timezone offset as timedelta, positive east of UTC + (negative west of UTC).""" + if self._tzinfo is None: + return None + offset = self._tzinfo.utcoffset(None) + _check_utc_offset("utcoffset", offset) + return offset + + def tzname(self): + """Return the timezone name. + + Note that the name is 100% informational -- there's no requirement that + it mean anything in particular. For example, "GMT", "UTC", "-500", + "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. + """ + if self._tzinfo is None: + return None + name = self._tzinfo.tzname(None) + _check_tzname(name) + return name + + def dst(self): + """Return 0 if DST is not in effect, or the DST offset (as timedelta + positive eastward) if DST is in effect. + + This is purely informational; the DST offset has already been added to + the UTC offset returned by utcoffset() if applicable, so there's no + need to consult dst() unless you're interested in displaying the DST + info. + """ + if self._tzinfo is None: + return None + offset = self._tzinfo.dst(None) + _check_utc_offset("dst", offset) + return offset + + def replace(self, hour=None, minute=None, second=None, microsecond=None, + tzinfo=True, *, fold=None): + """Return a new time with new values for the specified fields.""" + if hour is None: + hour = self.hour + if minute is None: + minute = self.minute + if second is None: + second = self.second + if microsecond is None: + microsecond = self.microsecond + if tzinfo is True: + tzinfo = self.tzinfo + if fold is None: + fold = self._fold + return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold) + + # Pickle support. 
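+    # The pickled state is a 6-byte string: hour, minute, second, then three
+    # bytes of microseconds; for pickle protocols above 3, fold is stored in
+    # the high bit of the hour byte.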
+ + def _getstate(self, protocol=3): + us2, us3 = divmod(self._microsecond, 256) + us1, us2 = divmod(us2, 256) + h = self._hour + if self._fold and protocol > 3: + h += 128 + basestate = bytes([h, self._minute, self._second, + us1, us2, us3]) + if self._tzinfo is None: + return (basestate,) + else: + return (basestate, self._tzinfo) + + def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") + h, self._minute, self._second, us1, us2, us3 = string + if h > 127: + self._fold = 1 + self._hour = h - 128 + else: + self._fold = 0 + self._hour = h + self._microsecond = (((us1 << 8) | us2) << 8) | us3 + self._tzinfo = tzinfo + + def __reduce_ex__(self, protocol): + return (self.__class__, self._getstate(protocol)) + + def __reduce__(self): + return self.__reduce_ex__(2) + +_time_class = time # so functions w/ args named "time" can get at the class + +time.min = time(0, 0, 0) +time.max = time(23, 59, 59, 999999) +time.resolution = timedelta(microseconds=1) + + +class datetime(date): + """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]]) + + The year, month and day arguments are required. tzinfo may be None, or an + instance of a tzinfo subclass. The remaining arguments may be ints. + """ + __slots__ = date.__slots__ + time.__slots__ + + def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0, + microsecond=0, tzinfo=None, *, fold=0): + if (isinstance(year, (bytes, str)) and len(year) == 10 and + 1 <= ord(year[2:3])&0x7F <= 12): + # Pickle support + if isinstance(year, str): + try: + year = bytes(year, 'latin1') + except UnicodeEncodeError: + # More informative error message. + raise ValueError( + "Failed to encode latin1 string when unpickling " + "a datetime object. " + "pickle.load(data, encoding='latin1') is assumed.") + self = object.__new__(cls) + self.__setstate(year, month) + self._hashcode = -1 + return self + year, month, day = _check_date_fields(year, month, day) + hour, minute, second, microsecond, fold = _check_time_fields( + hour, minute, second, microsecond, fold) + _check_tzinfo_arg(tzinfo) + self = object.__new__(cls) + self._year = year + self._month = month + self._day = day + self._hour = hour + self._minute = minute + self._second = second + self._microsecond = microsecond + self._tzinfo = tzinfo + self._hashcode = -1 + self._fold = fold + return self + + # Read-only field accessors + @property + def hour(self): + """hour (0-23)""" + return self._hour + + @property + def minute(self): + """minute (0-59)""" + return self._minute + + @property + def second(self): + """second (0-59)""" + return self._second + + @property + def microsecond(self): + """microsecond (0-999999)""" + return self._microsecond + + @property + def tzinfo(self): + """timezone info object""" + return self._tzinfo + + @property + def fold(self): + return self._fold + + @classmethod + def _fromtimestamp(cls, t, utc, tz): + """Construct a datetime from a POSIX timestamp (like time.time()). + + A timezone info object may be passed in as well. 
+ """ + frac, t = _math.modf(t) + us = round(frac * 1e6) + if us >= 1000000: + t += 1 + us -= 1000000 + elif us < 0: + t -= 1 + us += 1000000 + + converter = _time.gmtime if utc else _time.localtime + y, m, d, hh, mm, ss, weekday, jday, dst = converter(t) + ss = min(ss, 59) # clamp out leap seconds if the platform has them + result = cls(y, m, d, hh, mm, ss, us, tz) + if tz is None and not utc: + # As of version 2015f max fold in IANA database is + # 23 hours at 1969-09-30 13:00:00 in Kwajalein. + # Let's probe 24 hours in the past to detect a transition: + max_fold_seconds = 24 * 3600 + + # On Windows localtime_s throws an OSError for negative values, + # thus we can't perform fold detection for values of time less + # than the max time fold. See comments in _datetimemodule's + # version of this method for more details. + if t < max_fold_seconds and sys.platform.startswith("win"): + return result + + y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6] + probe1 = cls(y, m, d, hh, mm, ss, us, tz) + trans = result - probe1 - timedelta(0, max_fold_seconds) + if trans.days < 0: + y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6] + probe2 = cls(y, m, d, hh, mm, ss, us, tz) + if probe2 == result: + result._fold = 1 + elif tz is not None: + result = tz.fromutc(result) + return result + + @classmethod + def fromtimestamp(cls, t, tz=None): + """Construct a datetime from a POSIX timestamp (like time.time()). + + A timezone info object may be passed in as well. + """ + _check_tzinfo_arg(tz) + + return cls._fromtimestamp(t, tz is not None, tz) + + @classmethod + def utcfromtimestamp(cls, t): + """Construct a naive UTC datetime from a POSIX timestamp.""" + return cls._fromtimestamp(t, True, None) + + @classmethod + def now(cls, tz=None): + "Construct a datetime from time.time() and optional time zone info." + t = _time.time() + return cls.fromtimestamp(t, tz) + + @classmethod + def utcnow(cls): + "Construct a UTC datetime from time.time()." + t = _time.time() + return cls.utcfromtimestamp(t) + + @classmethod + def combine(cls, date, time, tzinfo=True): + "Construct a datetime from a given date and a given time." + if not isinstance(date, _date_class): + raise TypeError("date argument must be a date instance") + if not isinstance(time, _time_class): + raise TypeError("time argument must be a time instance") + if tzinfo is True: + tzinfo = time.tzinfo + return cls(date.year, date.month, date.day, + time.hour, time.minute, time.second, time.microsecond, + tzinfo, fold=time.fold) + + @classmethod + def fromisoformat(cls, date_string): + """Construct a datetime from the output of datetime.isoformat().""" + if not isinstance(date_string, str): + raise TypeError('fromisoformat: argument must be str') + + # Split this at the separator + dstr = date_string[0:10] + tstr = date_string[11:] + + try: + date_components = _parse_isoformat_date(dstr) + except ValueError: + raise ValueError(f'Invalid isoformat string: {date_string!r}') + + if tstr: + try: + time_components = _parse_isoformat_time(tstr) + except ValueError: + raise ValueError(f'Invalid isoformat string: {date_string!r}') + else: + time_components = [0, 0, 0, 0, None] + + return cls(*(date_components + time_components)) + + def timetuple(self): + "Return local time tuple compatible with time.localtime()." 
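+        # tm_isdst in the result is -1 for a naive datetime, otherwise 0 or 1
+        # depending on whether dst() reports a nonzero offset.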
+ dst = self.dst() + if dst is None: + dst = -1 + elif dst: + dst = 1 + else: + dst = 0 + return _build_struct_time(self.year, self.month, self.day, + self.hour, self.minute, self.second, + dst) + + def _mktime(self): + """Return integer POSIX timestamp.""" + epoch = datetime(1970, 1, 1) + max_fold_seconds = 24 * 3600 + t = (self - epoch) // timedelta(0, 1) + def local(u): + y, m, d, hh, mm, ss = _time.localtime(u)[:6] + return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1) + + # Our goal is to solve t = local(u) for u. + a = local(t) - t + u1 = t - a + t1 = local(u1) + if t1 == t: + # We found one solution, but it may not be the one we need. + # Look for an earlier solution (if `fold` is 0), or a + # later one (if `fold` is 1). + u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold] + b = local(u2) - u2 + if a == b: + return u1 + else: + b = t1 - u1 + assert a != b + u2 = t - b + t2 = local(u2) + if t2 == t: + return u2 + if t1 == t: + return u1 + # We have found both offsets a and b, but neither t - a nor t - b is + # a solution. This means t is in the gap. + return (max, min)[self.fold](u1, u2) + + + def timestamp(self): + "Return POSIX timestamp as float" + if self._tzinfo is None: + s = self._mktime() + return s + self.microsecond / 1e6 + else: + return (self - _EPOCH).total_seconds() + + def utctimetuple(self): + "Return UTC time tuple compatible with time.gmtime()." + offset = self.utcoffset() + if offset: + self -= offset + y, m, d = self.year, self.month, self.day + hh, mm, ss = self.hour, self.minute, self.second + return _build_struct_time(y, m, d, hh, mm, ss, 0) + + def date(self): + "Return the date part." + return date(self._year, self._month, self._day) + + def time(self): + "Return the time part, with tzinfo None." + return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold) + + def timetz(self): + "Return the time part, with same tzinfo." 
+        return time(self.hour, self.minute, self.second, self.microsecond,
+                    self._tzinfo, fold=self.fold)
+
+    def replace(self, year=None, month=None, day=None, hour=None,
+                minute=None, second=None, microsecond=None, tzinfo=True,
+                *, fold=None):
+        """Return a new datetime with new values for the specified fields."""
+        if year is None:
+            year = self.year
+        if month is None:
+            month = self.month
+        if day is None:
+            day = self.day
+        if hour is None:
+            hour = self.hour
+        if minute is None:
+            minute = self.minute
+        if second is None:
+            second = self.second
+        if microsecond is None:
+            microsecond = self.microsecond
+        if tzinfo is True:
+            tzinfo = self.tzinfo
+        if fold is None:
+            fold = self.fold
+        return type(self)(year, month, day, hour, minute, second,
+                          microsecond, tzinfo, fold=fold)
+
+    def _local_timezone(self):
+        if self.tzinfo is None:
+            ts = self._mktime()
+        else:
+            ts = (self - _EPOCH) // timedelta(seconds=1)
+        localtm = _time.localtime(ts)
+        local = datetime(*localtm[:6])
+        # Extract TZ data
+        gmtoff = localtm.tm_gmtoff
+        zone = localtm.tm_zone
+        return timezone(timedelta(seconds=gmtoff), zone)
+
+    def astimezone(self, tz=None):
+        if tz is None:
+            tz = self._local_timezone()
+        elif not isinstance(tz, tzinfo):
+            raise TypeError("tz argument must be an instance of tzinfo")
+
+        mytz = self.tzinfo
+        if mytz is None:
+            mytz = self._local_timezone()
+            myoffset = mytz.utcoffset(self)
+        else:
+            myoffset = mytz.utcoffset(self)
+            if myoffset is None:
+                mytz = self.replace(tzinfo=None)._local_timezone()
+                myoffset = mytz.utcoffset(self)
+
+        if tz is mytz:
+            return self
+
+        # Convert self to UTC, and attach the new time zone object.
+        utc = (self - myoffset).replace(tzinfo=tz)
+
+        # Convert from UTC to tz's local time.
+        return tz.fromutc(utc)
+
+    # Ways to produce a string.
+
+    def ctime(self):
+        "Return ctime() style string."
+        weekday = self.toordinal() % 7 or 7
+        return "%s %s %2d %02d:%02d:%02d %04d" % (
+            _DAYNAMES[weekday],
+            _MONTHNAMES[self._month],
+            self._day,
+            self._hour, self._minute, self._second,
+            self._year)
+
+    def isoformat(self, sep='T', timespec='auto'):
+        """Return the time formatted according to ISO.
+
+        The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
+        By default, the fractional part is omitted if self.microsecond == 0.
+
+        If self.tzinfo is not None, the UTC offset is also attached, giving
+        a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
+
+        Optional argument sep specifies the separator between date and
+        time, default 'T'.
+
+        The optional argument timespec specifies the number of additional
+        terms of the time to include. Valid options are 'auto', 'hours',
+        'minutes', 'seconds', 'milliseconds' and 'microseconds'.
+        """
+        s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
+             _format_time(self._hour, self._minute, self._second,
+                          self._microsecond, timespec))
+
+        off = self.utcoffset()
+        tz = _format_offset(off)
+        if tz:
+            s += tz
+
+        return s
+
+    def __repr__(self):
+        """Convert to formal string, for repr()."""
+        L = [self._year, self._month, self._day,  # These are never zero
+             self._hour, self._minute, self._second, self._microsecond]
+        if L[-1] == 0:
+            del L[-1]
+        if L[-1] == 0:
+            del L[-1]
+        s = "%s.%s(%s)" % (self.__class__.__module__,
+                           self.__class__.__qualname__,
+                           ", ".join(map(str, L)))
+        if self._tzinfo is not None:
+            assert s[-1:] == ")"
+            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
+        if self._fold:
+            assert s[-1:] == ")"
+            s = s[:-1] + ", fold=1)"
+        return s
+
+    def __str__(self):
+        "Convert to string, for str()."
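+        # e.g. str(datetime(2002, 12, 25)) == '2002-12-25 00:00:00'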
+ return self.isoformat(sep=' ') + + @classmethod + def strptime(cls, date_string, format): + 'string, format -> new datetime parsed from a string (like time.strptime()).' + import _strptime + return _strptime._strptime_datetime(cls, date_string, format) + + def utcoffset(self): + """Return the timezone offset as timedelta positive east of UTC (negative west of + UTC).""" + if self._tzinfo is None: + return None + offset = self._tzinfo.utcoffset(self) + _check_utc_offset("utcoffset", offset) + return offset + + def tzname(self): + """Return the timezone name. + + Note that the name is 100% informational -- there's no requirement that + it mean anything in particular. For example, "GMT", "UTC", "-500", + "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. + """ + if self._tzinfo is None: + return None + name = self._tzinfo.tzname(self) + _check_tzname(name) + return name + + def dst(self): + """Return 0 if DST is not in effect, or the DST offset (as timedelta + positive eastward) if DST is in effect. + + This is purely informational; the DST offset has already been added to + the UTC offset returned by utcoffset() if applicable, so there's no + need to consult dst() unless you're interested in displaying the DST + info. + """ + if self._tzinfo is None: + return None + offset = self._tzinfo.dst(self) + _check_utc_offset("dst", offset) + return offset + + # Comparisons of datetime objects with other. + + def __eq__(self, other): + if isinstance(other, datetime): + return self._cmp(other, allow_mixed=True) == 0 + elif not isinstance(other, date): + return NotImplemented + else: + return False + + def __le__(self, other): + if isinstance(other, datetime): + return self._cmp(other) <= 0 + elif not isinstance(other, date): + return NotImplemented + else: + _cmperror(self, other) + + def __lt__(self, other): + if isinstance(other, datetime): + return self._cmp(other) < 0 + elif not isinstance(other, date): + return NotImplemented + else: + _cmperror(self, other) + + def __ge__(self, other): + if isinstance(other, datetime): + return self._cmp(other) >= 0 + elif not isinstance(other, date): + return NotImplemented + else: + _cmperror(self, other) + + def __gt__(self, other): + if isinstance(other, datetime): + return self._cmp(other) > 0 + elif not isinstance(other, date): + return NotImplemented + else: + _cmperror(self, other) + + def _cmp(self, other, allow_mixed=False): + assert isinstance(other, datetime) + mytz = self._tzinfo + ottz = other._tzinfo + myoff = otoff = None + + if mytz is ottz: + base_compare = True + else: + myoff = self.utcoffset() + otoff = other.utcoffset() + # Assume that allow_mixed means that we are called from __eq__ + if allow_mixed: + if myoff != self.replace(fold=not self.fold).utcoffset(): + return 2 + if otoff != other.replace(fold=not other.fold).utcoffset(): + return 2 + base_compare = myoff == otoff + + if base_compare: + return _cmp((self._year, self._month, self._day, + self._hour, self._minute, self._second, + self._microsecond), + (other._year, other._month, other._day, + other._hour, other._minute, other._second, + other._microsecond)) + if myoff is None or otoff is None: + if allow_mixed: + return 2 # arbitrary non-zero value + else: + raise TypeError("cannot compare naive and aware datetimes") + # XXX What follows could be done more efficiently... + diff = self - other # this will take offsets into account + if diff.days < 0: + return -1 + return diff and 1 or 0 + + def __add__(self, other): + "Add a datetime and a timedelta." 
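+        # e.g. datetime(2021, 1, 31, 23) + timedelta(hours=2) is
+        # datetime(2021, 2, 1, 1, 0); any tzinfo is carried over unchanged.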
+ if not isinstance(other, timedelta): + return NotImplemented + delta = timedelta(self.toordinal(), + hours=self._hour, + minutes=self._minute, + seconds=self._second, + microseconds=self._microsecond) + delta += other + hour, rem = divmod(delta.seconds, 3600) + minute, second = divmod(rem, 60) + if 0 < delta.days <= _MAXORDINAL: + return type(self).combine(date.fromordinal(delta.days), + time(hour, minute, second, + delta.microseconds, + tzinfo=self._tzinfo)) + raise OverflowError("result out of range") + + __radd__ = __add__ + + def __sub__(self, other): + "Subtract two datetimes, or a datetime and a timedelta." + if not isinstance(other, datetime): + if isinstance(other, timedelta): + return self + -other + return NotImplemented + + days1 = self.toordinal() + days2 = other.toordinal() + secs1 = self._second + self._minute * 60 + self._hour * 3600 + secs2 = other._second + other._minute * 60 + other._hour * 3600 + base = timedelta(days1 - days2, + secs1 - secs2, + self._microsecond - other._microsecond) + if self._tzinfo is other._tzinfo: + return base + myoff = self.utcoffset() + otoff = other.utcoffset() + if myoff == otoff: + return base + if myoff is None or otoff is None: + raise TypeError("cannot mix naive and timezone-aware time") + return base + otoff - myoff + + def __hash__(self): + if self._hashcode == -1: + if self.fold: + t = self.replace(fold=0) + else: + t = self + tzoff = t.utcoffset() + if tzoff is None: + self._hashcode = hash(t._getstate()[0]) + else: + days = _ymd2ord(self.year, self.month, self.day) + seconds = self.hour * 3600 + self.minute * 60 + self.second + self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff) + return self._hashcode + + # Pickle support. + + def _getstate(self, protocol=3): + yhi, ylo = divmod(self._year, 256) + us2, us3 = divmod(self._microsecond, 256) + us1, us2 = divmod(us2, 256) + m = self._month + if self._fold and protocol > 3: + m += 128 + basestate = bytes([yhi, ylo, m, self._day, + self._hour, self._minute, self._second, + us1, us2, us3]) + if self._tzinfo is None: + return (basestate,) + else: + return (basestate, self._tzinfo) + + def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") + (yhi, ylo, m, self._day, self._hour, + self._minute, self._second, us1, us2, us3) = string + if m > 127: + self._fold = 1 + self._month = m - 128 + else: + self._fold = 0 + self._month = m + self._year = yhi * 256 + ylo + self._microsecond = (((us1 << 8) | us2) << 8) | us3 + self._tzinfo = tzinfo + + def __reduce_ex__(self, protocol): + return (self.__class__, self._getstate(protocol)) + + def __reduce__(self): + return self.__reduce_ex__(2) + + +datetime.min = datetime(1, 1, 1) +datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999) +datetime.resolution = timedelta(microseconds=1) + + +def _isoweek1monday(year): + # Helper to calculate the day number of the Monday starting week 1 + # XXX This could be done more efficiently + THURSDAY = 3 + firstday = _ymd2ord(year, 1, 1) + firstweekday = (firstday + 6) % 7 # See weekday() above + week1monday = firstday - firstweekday + if firstweekday > THURSDAY: + week1monday += 7 + return week1monday + + +class timezone(tzinfo): + __slots__ = '_offset', '_name' + + # Sentinel value to disallow None + _Omitted = object() + def __new__(cls, offset, name=_Omitted): + if not isinstance(offset, timedelta): + raise TypeError("offset must be a timedelta") + if name is cls._Omitted: + if not offset: + return 
cls.utc + name = None + elif not isinstance(name, str): + raise TypeError("name must be a string") + if not cls._minoffset <= offset <= cls._maxoffset: + raise ValueError("offset must be a timedelta " + "strictly between -timedelta(hours=24) and " + "timedelta(hours=24).") + return cls._create(offset, name) + + @classmethod + def _create(cls, offset, name=None): + self = tzinfo.__new__(cls) + self._offset = offset + self._name = name + return self + + def __getinitargs__(self): + """pickle support""" + if self._name is None: + return (self._offset,) + return (self._offset, self._name) + + def __eq__(self, other): + if isinstance(other, timezone): + return self._offset == other._offset + return NotImplemented + + def __hash__(self): + return hash(self._offset) + + def __repr__(self): + """Convert to formal string, for repr(). + + >>> tz = timezone.utc + >>> repr(tz) + 'datetime.timezone.utc' + >>> tz = timezone(timedelta(hours=-5), 'EST') + >>> repr(tz) + "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')" + """ + if self is self.utc: + return 'datetime.timezone.utc' + if self._name is None: + return "%s.%s(%r)" % (self.__class__.__module__, + self.__class__.__qualname__, + self._offset) + return "%s.%s(%r, %r)" % (self.__class__.__module__, + self.__class__.__qualname__, + self._offset, self._name) + + def __str__(self): + return self.tzname(None) + + def utcoffset(self, dt): + if isinstance(dt, datetime) or dt is None: + return self._offset + raise TypeError("utcoffset() argument must be a datetime instance" + " or None") + + def tzname(self, dt): + if isinstance(dt, datetime) or dt is None: + if self._name is None: + return self._name_from_offset(self._offset) + return self._name + raise TypeError("tzname() argument must be a datetime instance" + " or None") + + def dst(self, dt): + if isinstance(dt, datetime) or dt is None: + return None + raise TypeError("dst() argument must be a datetime instance" + " or None") + + def fromutc(self, dt): + if isinstance(dt, datetime): + if dt.tzinfo is not self: + raise ValueError("fromutc: dt.tzinfo " + "is not self") + return dt + self._offset + raise TypeError("fromutc() argument must be a datetime instance" + " or None") + + _maxoffset = timedelta(hours=24, microseconds=-1) + _minoffset = -_maxoffset + + @staticmethod + def _name_from_offset(delta): + if not delta: + return 'UTC' + if delta < timedelta(0): + sign = '-' + delta = -delta + else: + sign = '+' + hours, rest = divmod(delta, timedelta(hours=1)) + minutes, rest = divmod(rest, timedelta(minutes=1)) + seconds = rest.seconds + microseconds = rest.microseconds + if microseconds: + return (f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}' + f'.{microseconds:06d}') + if seconds: + return f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}' + return f'UTC{sign}{hours:02d}:{minutes:02d}' + +timezone.utc = timezone._create(timedelta(0)) +# bpo-37642: These attributes are rounded to the nearest minute for backwards +# compatibility, even though the constructor will accept a wider range of +# values. This may change in the future. +timezone.min = timezone._create(-timedelta(hours=23, minutes=59)) +timezone.max = timezone._create(timedelta(hours=23, minutes=59)) +_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc) + +# Some time zone algebra. For a datetime x, let +# x.n = x stripped of its timezone -- its naive time. 
+# x.o = x.utcoffset(), and assuming that doesn't raise an exception or
+#       return None
+# x.d = x.dst(), and assuming that doesn't raise an exception or
+#       return None
+# x.s = x's standard offset, x.o - x.d
+#
+# Now some derived rules, where k is a duration (timedelta).
+#
+# 1. x.o = x.s + x.d
+#    This follows from the definition of x.s.
+#
+# 2. If x and y have the same tzinfo member, x.s = y.s.
+#    This is actually a requirement, an assumption we need to make about
+#    sane tzinfo classes.
+#
+# 3. The naive UTC time corresponding to x is x.n - x.o.
+#    This is again a requirement for a sane tzinfo class.
+#
+# 4. (x+k).s = x.s
+#    This follows from #2, and that datetime.timetz+timedelta preserves tzinfo.
+#
+# 5. (x+k).n = x.n + k
+#    Again follows from how arithmetic is defined.
+#
+# Now we can explain tz.fromutc(x). Let's assume it's an interesting case
+# (meaning that the various tzinfo methods exist, and don't blow up or return
+# None when called).
+#
+# The function wants to return a datetime y with timezone tz, equivalent to x.
+# x is already in UTC.
+#
+# By #3, we want
+#
+#     y.n - y.o = x.n                             [1]
+#
+# The algorithm starts by attaching tz to x.n, and calling that y. So
+# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
+# becomes true; in effect, we want to solve [2] for k:
+#
+#     (y+k).n - (y+k).o = x.n                     [2]
+#
+# By #1, this is the same as
+#
+#     (y+k).n - ((y+k).s + (y+k).d) = x.n         [3]
+#
+# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
+# Substituting that into [3],
+#
+#     x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
+#     k - (y+k).s - (y+k).d = 0; rearranging,
+#     k = (y+k).s + (y+k).d; by #4, (y+k).s == y.s, so
+#     k = y.s + (y+k).d
+#
+# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
+# approximate k by ignoring the (y+k).d term at first. Note that k can't be
+# very large, since all offset-returning methods return a duration of magnitude
+# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
+# be 0, so ignoring it has no consequence then.
+#
+# In any case, the new value is
+#
+#     z = y + y.s                                 [4]
+#
+# It's helpful to step back and look at [4] from a higher level: it's simply
+# mapping from UTC to tz's standard time.
+#
+# At this point, if
+#
+#     z.n - z.o = x.n                             [5]
+#
+# we have an equivalent time, and are almost done. The insecurity here is
+# at the start of daylight time. Picture US Eastern for concreteness. The wall
+# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
+# sense then. The docs ask that an Eastern tzinfo class consider such a time to
+# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
+# on the day DST starts. We want to return the 1:MM EST spelling because that's
+# the only spelling that makes sense on the local wall clock.
+#
+# In fact, if [5] holds at this point, we do have the standard-time spelling,
+# but that takes a bit of proof. We first prove a stronger result. What's the
+# difference between the LHS and RHS of [5]? Let
+#
+#     diff = x.n - (z.n - z.o)                    [6]
+#
+# Now
+#     z.n =                     by [4]
+#     (y + y.s).n =             by #5
+#     y.n + y.s =               since y.n = x.n
+#     x.n + y.s =               since z and y have the same tzinfo member,
+#                               y.s = z.s by #2
+#     x.n + z.s
+#
+# Plugging that back into [6] gives
+#
+#     diff =
+#     x.n - ((x.n + z.s) - z.o) =    expanding
+#     x.n - x.n - z.s + z.o =        cancelling
+#     - z.s + z.o =                  by #1
+#     z.d
+#
+# So diff = z.d.
+#
+# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
+# spelling we wanted in the endcase described above. We're done. Contrarily,
+# if z.d = 0, then we have a UTC equivalent, and are also done.
+#
+# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
+# add to z (in effect, z is in tz's standard time, and we need to shift the
+# local clock into tz's daylight time).
+#
+# Let
+#
+#     z' = z + z.d = z + diff                     [7]
+#
+# and we can again ask whether
+#
+#     z'.n - z'.o = x.n                           [8]
+#
+# If so, we're done. If not, the tzinfo class is insane, according to the
+# assumptions we've made. This also requires a bit of proof. As before, let's
+# compute the difference between the LHS and RHS of [8] (and skipping some of
+# the justifications for the kinds of substitutions we've done several times
+# already):
+#
+#     diff' = x.n - (z'.n - z'.o) =               replacing z'.n via [7]
+#             x.n - (z.n + diff - z'.o) =         replacing diff via [6]
+#             x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
+#             x.n - z.n - x.n + z.n - z.o + z'.o =    cancel x.n
+#             - z.n + z.n - z.o + z'.o =              cancel z.n
+#             - z.o + z'.o =                          #1 twice
+#             -z.s - z.d + z'.s + z'.d =              z and z' have same tzinfo
+#             z'.d - z.d
+#
+# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
+# we've found the UTC-equivalent so are done. In fact, we stop with [7] and
+# return z', not bothering to compute z'.d.
+#
+# How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
+# a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
+# would have to change the result dst() returns: we start in DST, and moving
+# a little further into it takes us out of DST.
+#
+# There isn't a sane case where this can happen. The closest it gets is at
+# the end of DST, where there's an hour in UTC with no spelling in a hybrid
+# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
+# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
+# UTC) because the docs insist on that, but 0:MM is taken as being in daylight
+# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
+# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
+# standard time. Since that's what the local clock *does*, we want to map both
+# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
+# in local time, but so it goes -- it's the way the local clock works.
+#
+# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
+# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
+# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
+# (correctly) concludes that z' is not UTC-equivalent to x.
+#
+# Because we know z.d said z was in daylight time (else [5] would have held and
+# we would have stopped then), and we know z.d != z'.d (else [8] would have held
+# and we have stopped then), and there are only 2 possible values dst() can
+# return in Eastern, it follows that z'.d must be 0 (which it is in the example,
+# but the reasoning doesn't depend on the example -- it depends on there being
+# two possible dst() outcomes, one zero and the other non-zero). Therefore
+# z' must be in standard time, and is the spelling we want in this case.
+#
+# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
+# concerned (because it takes z' as being in standard time rather than the
+# daylight time we intend here), but returning it gives the real-life "local
+# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
+# tz.
+#
+# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
+# the 1:MM standard time spelling we want.
+#
+# So how can this break? One of the assumptions must be violated. Two
+# possibilities:
+#
+# 1) [2] effectively says that y.s is invariant across all y belonging to a
+#    given time zone. This isn't true if, for political reasons or continental
+#    drift, a region decides to change its base offset from UTC.
+#
+# 2) There may be versions of "double daylight" time where the tail end of
+#    the analysis gives up a step too early. I haven't thought about that
+#    enough to say.
+#
+# In any case, it's clear that the default fromutc() is strong enough to handle
+# "almost all" time zones: so long as the standard offset is invariant, it
+# doesn't matter if daylight time transition points change from year to year, or
+# if daylight time is skipped in some years; it doesn't matter how large or
+# small dst() may get within its bounds; and it doesn't even matter if some
+# perverse time zone returns a negative dst(). So a breaking case must be
+# pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
+
+try:
+    from _datetime import *
+except ImportError:
+    pass
+else:
+    # Clean up unused names
+    del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
+         _DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
+         _check_date_fields, _check_time_fields,
+         _check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
+         _date_class, _days_before_month, _days_before_year, _days_in_month,
+         _format_time, _format_offset, _index, _is_leap, _isoweek1monday, _math,
+         _ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
+         _divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
+         _parse_hh_mm_ss_ff, _IsoCalendarDate)
+    # XXX Since import * above excludes names that start with _,
+    # docstring does not get overwritten. In the future, it may be
+    # appropriate to maintain a single module level docstring and
+    # remove the following line.
+    from _datetime import __doc__
diff --git a/pllava/lib/python3.10/dis.py b/pllava/lib/python3.10/dis.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe5d24e88058f745b7df7ddab4c5efc23e7bb27e
--- /dev/null
+++ b/pllava/lib/python3.10/dis.py
@@ -0,0 +1,540 @@
+"""Disassembler of Python byte code into mnemonics."""
+
+import sys
+import types
+import collections
+import io
+
+from opcode import *
+from opcode import __all__ as _opcodes_all
+
+__all__ = ["code_info", "dis", "disassemble", "distb", "disco",
+           "findlinestarts", "findlabels", "show_code",
+           "get_instructions", "Instruction", "Bytecode"] + _opcodes_all
+del _opcodes_all
+
+_have_code = (types.MethodType, types.FunctionType, types.CodeType,
+              classmethod, staticmethod, type)
+
+FORMAT_VALUE = opmap['FORMAT_VALUE']
+FORMAT_VALUE_CONVERTERS = (
+    (None, ''),
+    (str, 'str'),
+    (repr, 'repr'),
+    (ascii, 'ascii'),
+)
+MAKE_FUNCTION = opmap['MAKE_FUNCTION']
+MAKE_FUNCTION_FLAGS = ('defaults', 'kwdefaults', 'annotations', 'closure')
+
+
+def _try_compile(source, name):
+    """Attempts to compile the given source, first as an expression and
+       then as a statement if the first approach fails.
+
+       Utility function to accept strings in functions that otherwise
+       expect code objects
+    """
+    try:
+        c = compile(source, name, 'eval')
+    except SyntaxError:
+        c = compile(source, name, 'exec')
+    return c
+
+def dis(x=None, *, file=None, depth=None):
+    """Disassemble classes, methods, functions, and other compiled objects.
+
+    With no argument, disassemble the last traceback.
+
+    Compiled objects currently include generator objects, async generator
+    objects, and coroutine objects, all of which store their code object
+    in a special attribute.
+    """
+    if x is None:
+        distb(file=file)
+        return
+    # Extract functions from methods.
+    if hasattr(x, '__func__'):
+        x = x.__func__
+    # Extract compiled code objects from...
+    if hasattr(x, '__code__'):  # ...a function, or
+        x = x.__code__
+    elif hasattr(x, 'gi_code'):  #...a generator object, or
+        x = x.gi_code
+    elif hasattr(x, 'ag_code'):  #...an asynchronous generator object, or
+        x = x.ag_code
+    elif hasattr(x, 'cr_code'):  #...a coroutine.
+        x = x.cr_code
+    # Perform the disassembly.
+    if hasattr(x, '__dict__'):  # Class or module
+        items = sorted(x.__dict__.items())
+        for name, x1 in items:
+            if isinstance(x1, _have_code):
+                print("Disassembly of %s:" % name, file=file)
+                try:
+                    dis(x1, file=file, depth=depth)
+                except TypeError as msg:
+                    print("Sorry:", msg, file=file)
+                print(file=file)
+    elif hasattr(x, 'co_code'):  # Code object
+        _disassemble_recursive(x, file=file, depth=depth)
+    elif isinstance(x, (bytes, bytearray)):  # Raw bytecode
+        _disassemble_bytes(x, file=file)
+    elif isinstance(x, str):  # Source code
+        _disassemble_str(x, file=file, depth=depth)
+    else:
+        raise TypeError("don't know how to disassemble %s objects" %
+                        type(x).__name__)
+
+def distb(tb=None, *, file=None):
+    """Disassemble a traceback (default: last traceback)."""
+    if tb is None:
+        try:
+            tb = sys.last_traceback
+        except AttributeError:
+            raise RuntimeError("no last traceback to disassemble") from None
+        while tb.tb_next: tb = tb.tb_next
+    disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file)
+
+# The inspect module interrogates this dictionary to build its
+# list of CO_* constants. It is also used by pretty_flags to
+# turn the co_flags field into a human readable list.
+COMPILER_FLAG_NAMES = {
+     1: "OPTIMIZED",
+     2: "NEWLOCALS",
+     4: "VARARGS",
+     8: "VARKEYWORDS",
+    16: "NESTED",
+    32: "GENERATOR",
+    64: "NOFREE",
+   128: "COROUTINE",
+   256: "ITERABLE_COROUTINE",
+   512: "ASYNC_GENERATOR",
+}
+
+def pretty_flags(flags):
+    """Return pretty representation of code flags."""
+    names = []
+    for i in range(32):
+        flag = 1<<i
+        if flags & flag:
+            names.append(COMPILER_FLAG_NAMES.get(flag, hex(flag)))
+            flags ^= flag
+            if not flags:
+                break
+    else:
+        names.append(hex(flags))
+    return ", ".join(names)
+
+def _get_code_object(x):
+    """Helper to handle methods, compiled or raw code objects, and strings."""
+    # Extract functions from methods.
+    if hasattr(x, '__func__'):
+        x = x.__func__
+    # Extract compiled code objects from...
+    if hasattr(x, '__code__'):  # ...a function, or
+        x = x.__code__
+    elif hasattr(x, 'gi_code'):  #...a generator object, or
+        x = x.gi_code
+    elif hasattr(x, 'ag_code'):  #...an asynchronous generator object, or
+        x = x.ag_code
+    elif hasattr(x, 'cr_code'):  #...a coroutine.
+        x = x.cr_code
+    # Handle source code.
+    if isinstance(x, str):
+        x = _try_compile(x, "<disassembly>")
+    # By now, if we don't have a code object, we can't disassemble x.
+ if hasattr(x, 'co_code'): + return x + raise TypeError("don't know how to disassemble %s objects" % + type(x).__name__) + +def code_info(x): + """Formatted details of methods, functions, or code.""" + return _format_code_info(_get_code_object(x)) + +def _format_code_info(co): + lines = [] + lines.append("Name: %s" % co.co_name) + lines.append("Filename: %s" % co.co_filename) + lines.append("Argument count: %s" % co.co_argcount) + lines.append("Positional-only arguments: %s" % co.co_posonlyargcount) + lines.append("Kw-only arguments: %s" % co.co_kwonlyargcount) + lines.append("Number of locals: %s" % co.co_nlocals) + lines.append("Stack size: %s" % co.co_stacksize) + lines.append("Flags: %s" % pretty_flags(co.co_flags)) + if co.co_consts: + lines.append("Constants:") + for i_c in enumerate(co.co_consts): + lines.append("%4d: %r" % i_c) + if co.co_names: + lines.append("Names:") + for i_n in enumerate(co.co_names): + lines.append("%4d: %s" % i_n) + if co.co_varnames: + lines.append("Variable names:") + for i_n in enumerate(co.co_varnames): + lines.append("%4d: %s" % i_n) + if co.co_freevars: + lines.append("Free variables:") + for i_n in enumerate(co.co_freevars): + lines.append("%4d: %s" % i_n) + if co.co_cellvars: + lines.append("Cell variables:") + for i_n in enumerate(co.co_cellvars): + lines.append("%4d: %s" % i_n) + return "\n".join(lines) + +def show_code(co, *, file=None): + """Print details of methods, functions, or code to *file*. + + If *file* is not provided, the output is printed on stdout. + """ + print(code_info(co), file=file) + +_Instruction = collections.namedtuple("_Instruction", + "opname opcode arg argval argrepr offset starts_line is_jump_target") + +_Instruction.opname.__doc__ = "Human readable name for operation" +_Instruction.opcode.__doc__ = "Numeric code for operation" +_Instruction.arg.__doc__ = "Numeric argument to operation (if any), otherwise None" +_Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg" +_Instruction.argrepr.__doc__ = "Human readable description of operation argument" +_Instruction.offset.__doc__ = "Start index of operation within bytecode sequence" +_Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None" +_Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False" + +_OPNAME_WIDTH = 20 +_OPARG_WIDTH = 5 + +class Instruction(_Instruction): + """Details for a bytecode operation + + Defined fields: + opname - human readable name for operation + opcode - numeric code for operation + arg - numeric argument to operation (if any), otherwise None + argval - resolved arg value (if known), otherwise same as arg + argrepr - human readable description of operation argument + offset - start index of operation within bytecode sequence + starts_line - line started by this opcode (if any), otherwise None + is_jump_target - True if other code jumps to here, otherwise False + """ + + def _disassemble(self, lineno_width=3, mark_as_current=False, offset_width=4): + """Format instruction details for inclusion in disassembly output + + *lineno_width* sets the width of the line number field (0 omits it) + *mark_as_current* inserts a '-->' marker arrow as part of the line + *offset_width* sets the width of the instruction offset field + """ + fields = [] + # Column: Source code line number + if lineno_width: + if self.starts_line is not None: + lineno_fmt = "%%%dd" % lineno_width + fields.append(lineno_fmt % self.starts_line) + else: + fields.append(' ' * lineno_width) + # 
Column: Current instruction indicator
+        if mark_as_current:
+            fields.append('-->')
+        else:
+            fields.append('   ')
+        # Column: Jump target marker
+        if self.is_jump_target:
+            fields.append('>>')
+        else:
+            fields.append('  ')
+        # Column: Instruction offset from start of code sequence
+        fields.append(repr(self.offset).rjust(offset_width))
+        # Column: Opcode name
+        fields.append(self.opname.ljust(_OPNAME_WIDTH))
+        # Column: Opcode argument
+        if self.arg is not None:
+            fields.append(repr(self.arg).rjust(_OPARG_WIDTH))
+            # Column: Opcode argument details
+            if self.argrepr:
+                fields.append('(' + self.argrepr + ')')
+        return ' '.join(fields).rstrip()
+
+
+def get_instructions(x, *, first_line=None):
+    """Iterator for the opcodes in methods, functions or code
+
+    Generates a series of Instruction named tuples giving the details of
+    each operation in the supplied code.
+
+    If *first_line* is not None, it indicates the line number that should
+    be reported for the first source line in the disassembled code.
+    Otherwise, the source line information (if any) is taken directly from
+    the disassembled code object.
+    """
+    co = _get_code_object(x)
+    cell_names = co.co_cellvars + co.co_freevars
+    linestarts = dict(findlinestarts(co))
+    if first_line is not None:
+        line_offset = first_line - co.co_firstlineno
+    else:
+        line_offset = 0
+    return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
+                                   co.co_consts, cell_names, linestarts,
+                                   line_offset)
+
+def _get_const_info(const_index, const_list):
+    """Helper to get optional details about const references
+
+       Returns the dereferenced constant and its repr if the constant
+       list is defined.
+       Otherwise returns the constant index and its repr().
+    """
+    argval = const_index
+    if const_list is not None:
+        argval = const_list[const_index]
+    return argval, repr(argval)
+
+def _get_name_info(name_index, name_list):
+    """Helper to get optional details about named references
+
+       Returns the dereferenced name as both value and repr if the name
+       list is defined.
+       Otherwise returns the name index and its repr().
+    """
+    argval = name_index
+    if name_list is not None:
+        argval = name_list[name_index]
+        argrepr = argval
+    else:
+        argrepr = repr(argval)
+    return argval, argrepr
+
+
+def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
+                            cells=None, linestarts=None, line_offset=0):
+    """Iterate over the instructions in a bytecode string.
+
+    Generates a sequence of Instruction namedtuples giving the details of each
+    opcode.  Additional information about the code's runtime environment
+    (e.g. variable names, constants) can be specified using optional
+    arguments.
+
+    """
+    labels = findlabels(code)
+    starts_line = None
+    for offset, op, arg in _unpack_opargs(code):
+        if linestarts is not None:
+            starts_line = linestarts.get(offset, None)
+            if starts_line is not None:
+                starts_line += line_offset
+        is_jump_target = offset in labels
+        argval = None
+        argrepr = ''
+        if arg is not None:
+            #  Set argval to the dereferenced value of the argument when
+            #  available, and argrepr to the string representation of argval.
+            #    _disassemble_bytes needs the string repr of the
+            #    raw name index for LOAD_GLOBAL, LOAD_CONST, etc.
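+            #    (Editorial illustration: with constants=('x',), a LOAD_CONST
+            #    with arg 0 resolves to argval 'x' and argrepr "'x'"; with
+            #    constants=None it stays at argval 0, argrepr '0'.)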
+            argval = arg
+            if op in hasconst:
+                argval, argrepr = _get_const_info(arg, constants)
+            elif op in hasname:
+                argval, argrepr = _get_name_info(arg, names)
+            elif op in hasjabs:
+                argval = arg*2
+                argrepr = "to " + repr(argval)
+            elif op in hasjrel:
+                argval = offset + 2 + arg*2
+                argrepr = "to " + repr(argval)
+            elif op in haslocal:
+                argval, argrepr = _get_name_info(arg, varnames)
+            elif op in hascompare:
+                argval = cmp_op[arg]
+                argrepr = argval
+            elif op in hasfree:
+                argval, argrepr = _get_name_info(arg, cells)
+            elif op == FORMAT_VALUE:
+                argval, argrepr = FORMAT_VALUE_CONVERTERS[arg & 0x3]
+                argval = (argval, bool(arg & 0x4))
+                if argval[1]:
+                    if argrepr:
+                        argrepr += ', '
+                    argrepr += 'with format'
+            elif op == MAKE_FUNCTION:
+                argrepr = ', '.join(s for i, s in enumerate(MAKE_FUNCTION_FLAGS)
+                                    if arg & (1<<i))
+        yield Instruction(opname[op], op,
+                          arg, argval, argrepr,
+                          offset, starts_line, is_jump_target)
+
+def disassemble(co, lasti=-1, *, file=None):
+    """Disassemble a code object."""
+    cell_names = co.co_cellvars + co.co_freevars
+    linestarts = dict(findlinestarts(co))
+    _disassemble_bytes(co.co_code, lasti, co.co_varnames, co.co_names,
+                       co.co_consts, cell_names, linestarts, file=file)
+
+def _disassemble_recursive(co, *, file=None, depth=None):
+    disassemble(co, file=file)
+    if depth is None or depth > 0:
+        if depth is not None:
+            depth = depth - 1
+        for x in co.co_consts:
+            if hasattr(x, 'co_code'):
+                print(file=file)
+                print("Disassembly of %r:" % (x,), file=file)
+                _disassemble_recursive(x, file=file, depth=depth)
+
+def _disassemble_bytes(code, lasti=-1, varnames=None, names=None,
+                       constants=None, cells=None, linestarts=None,
+                       *, file=None, line_offset=0):
+    # Omit the line number column entirely if we have no line number info
+    show_lineno = bool(linestarts)
+    if show_lineno:
+        maxlineno = max(linestarts.values()) + line_offset
+        if maxlineno >= 1000:
+            lineno_width = len(str(maxlineno))
+        else:
+            lineno_width = 3
+    else:
+        lineno_width = 0
+    maxoffset = len(code) - 2
+    if maxoffset >= 10000:
+        offset_width = len(str(maxoffset))
+    else:
+        offset_width = 4
+    for instr in _get_instructions_bytes(code, varnames, names,
+                                         constants, cells, linestarts,
+                                         line_offset=line_offset):
+        new_source_line = (show_lineno and
+                           instr.starts_line is not None and
+                           instr.offset > 0)
+        if new_source_line:
+            print(file=file)
+        is_current_instr = instr.offset == lasti
+        print(instr._disassemble(lineno_width, is_current_instr, offset_width),
+              file=file)
+
+def _disassemble_str(source, **kwargs):
+    """Compile the source string, then disassemble the code object."""
+    _disassemble_recursive(_try_compile(source, '<dis>'), **kwargs)
+
+disco = disassemble                     # XXX For backwards compatibility
+
+def _unpack_opargs(code):
+    extended_arg = 0
+    for i in range(0, len(code), 2):
+        op = code[i]
+        if op >= HAVE_ARGUMENT:
+            arg = code[i+1] | extended_arg
+            extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
+        else:
+            arg = None
+            extended_arg = 0
+        yield (i, op, arg)
+
+def findlabels(code):
+    """Detect all offsets in a byte code which are jump targets.
+
+    Return the list of offsets.
+
+    """
+    labels = []
+    for offset, op, arg in _unpack_opargs(code):
+        if arg is not None:
+            if op in hasjrel:
+                label = offset + 2 + arg*2
+            elif op in hasjabs:
+                label = arg*2
+            else:
+                continue
+            if label not in labels:
+                labels.append(label)
+    return labels
+
+def findlinestarts(code):
+    """Find the offsets in a byte code which are start of lines in the source.
+
+    Generate pairs (offset, lineno)
+    """
+    lastline = None
+    for start, end, line in code.co_lines():
+        if line is not None and line != lastline:
+            lastline = line
+            yield start, line
+    return
+
+
+class Bytecode:
+    """The bytecode operations of a piece of code
+
+    Instantiate this with a function, method, other compiled object, string of
+    code, or a code object (as returned by compile()).
+
+    Iterating over this yields the bytecode operations as Instruction instances.
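+
+    A minimal illustrative example (editorial addition; the exact opnames
+    printed depend on the Python version):
+
+        for instr in Bytecode("x = 1"):
+            print(instr.opname)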
+ """ + def __init__(self, x, *, first_line=None, current_offset=None): + self.codeobj = co = _get_code_object(x) + if first_line is None: + self.first_line = co.co_firstlineno + self._line_offset = 0 + else: + self.first_line = first_line + self._line_offset = first_line - co.co_firstlineno + self._cell_names = co.co_cellvars + co.co_freevars + self._linestarts = dict(findlinestarts(co)) + self._original_object = x + self.current_offset = current_offset + + def __iter__(self): + co = self.codeobj + return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names, + co.co_consts, self._cell_names, + self._linestarts, + line_offset=self._line_offset) + + def __repr__(self): + return "{}({!r})".format(self.__class__.__name__, + self._original_object) + + @classmethod + def from_traceback(cls, tb): + """ Construct a Bytecode from the given traceback """ + while tb.tb_next: + tb = tb.tb_next + return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti) + + def info(self): + """Return formatted information about the code object.""" + return _format_code_info(self.codeobj) + + def dis(self): + """Return a formatted view of the bytecode operations.""" + co = self.codeobj + if self.current_offset is not None: + offset = self.current_offset + else: + offset = -1 + with io.StringIO() as output: + _disassemble_bytes(co.co_code, varnames=co.co_varnames, + names=co.co_names, constants=co.co_consts, + cells=self._cell_names, + linestarts=self._linestarts, + line_offset=self._line_offset, + file=output, + lasti=offset) + return output.getvalue() + + +def _test(): + """Simple test program to disassemble a file.""" + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('infile', type=argparse.FileType('rb'), nargs='?', default='-') + args = parser.parse_args() + with args.infile as infile: + source = infile.read() + code = compile(source, args.infile.name, "exec") + dis(code) + +if __name__ == "__main__": + _test() diff --git a/pllava/lib/python3.10/filecmp.py b/pllava/lib/python3.10/filecmp.py new file mode 100644 index 0000000000000000000000000000000000000000..70a4b23c982205d4e68d3aab5c7277fe1cbfdaae --- /dev/null +++ b/pllava/lib/python3.10/filecmp.py @@ -0,0 +1,313 @@ +"""Utilities for comparing files and directories. + +Classes: + dircmp + +Functions: + cmp(f1, f2, shallow=True) -> int + cmpfiles(a, b, common) -> ([], [], []) + clear_cache() + +""" + +import os +import stat +from itertools import filterfalse +from types import GenericAlias + +__all__ = ['clear_cache', 'cmp', 'dircmp', 'cmpfiles', 'DEFAULT_IGNORES'] + +_cache = {} +BUFSIZE = 8*1024 + +DEFAULT_IGNORES = [ + 'RCS', 'CVS', 'tags', '.git', '.hg', '.bzr', '_darcs', '__pycache__'] + +def clear_cache(): + """Clear the filecmp cache.""" + _cache.clear() + +def cmp(f1, f2, shallow=True): + """Compare two files. + + Arguments: + + f1 -- First file name + + f2 -- Second file name + + shallow -- treat files as identical if their stat signatures (type, size, + mtime) are identical. Otherwise, files are considered different + if their sizes or contents differ. [default: True] + + Return value: + + True if the files are the same, False otherwise. + + This function uses a cache for past comparisons and the results, + with cache entries invalidated if their stat information + changes. The cache may be cleared by calling clear_cache(). 
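+
+    Illustrative usage (editorial addition; the paths are hypothetical):
+
+        same = cmp('old/config.ini', 'new/config.ini', shallow=False)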
+ + """ + + s1 = _sig(os.stat(f1)) + s2 = _sig(os.stat(f2)) + if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG: + return False + if shallow and s1 == s2: + return True + if s1[1] != s2[1]: + return False + + outcome = _cache.get((f1, f2, s1, s2)) + if outcome is None: + outcome = _do_cmp(f1, f2) + if len(_cache) > 100: # limit the maximum size of the cache + clear_cache() + _cache[f1, f2, s1, s2] = outcome + return outcome + +def _sig(st): + return (stat.S_IFMT(st.st_mode), + st.st_size, + st.st_mtime) + +def _do_cmp(f1, f2): + bufsize = BUFSIZE + with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2: + while True: + b1 = fp1.read(bufsize) + b2 = fp2.read(bufsize) + if b1 != b2: + return False + if not b1: + return True + +# Directory comparison class. +# +class dircmp: + """A class that manages the comparison of 2 directories. + + dircmp(a, b, ignore=None, hide=None) + A and B are directories. + IGNORE is a list of names to ignore, + defaults to DEFAULT_IGNORES. + HIDE is a list of names to hide, + defaults to [os.curdir, os.pardir]. + + High level usage: + x = dircmp(dir1, dir2) + x.report() -> prints a report on the differences between dir1 and dir2 + or + x.report_partial_closure() -> prints report on differences between dir1 + and dir2, and reports on common immediate subdirectories. + x.report_full_closure() -> like report_partial_closure, + but fully recursive. + + Attributes: + left_list, right_list: The files in dir1 and dir2, + filtered by hide and ignore. + common: a list of names in both dir1 and dir2. + left_only, right_only: names only in dir1, dir2. + common_dirs: subdirectories in both dir1 and dir2. + common_files: files in both dir1 and dir2. + common_funny: names in both dir1 and dir2 where the type differs between + dir1 and dir2, or the name is not stat-able. + same_files: list of identical files. + diff_files: list of filenames which differ. + funny_files: list of files which could not be compared. + subdirs: a dictionary of dircmp instances (or MyDirCmp instances if this + object is of type MyDirCmp, a subclass of dircmp), keyed by names + in common_dirs. 
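+
+    Note (editorial addition): the attributes above are computed lazily, on
+    first access, via the methodmap/__getattr__ machinery defined below; e.g.
+    reading x.diff_files triggers the phase3 file comparison on demand.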
+ """ + + def __init__(self, a, b, ignore=None, hide=None): # Initialize + self.left = a + self.right = b + if hide is None: + self.hide = [os.curdir, os.pardir] # Names never to be shown + else: + self.hide = hide + if ignore is None: + self.ignore = DEFAULT_IGNORES + else: + self.ignore = ignore + + def phase0(self): # Compare everything except common subdirectories + self.left_list = _filter(os.listdir(self.left), + self.hide+self.ignore) + self.right_list = _filter(os.listdir(self.right), + self.hide+self.ignore) + self.left_list.sort() + self.right_list.sort() + + def phase1(self): # Compute common names + a = dict(zip(map(os.path.normcase, self.left_list), self.left_list)) + b = dict(zip(map(os.path.normcase, self.right_list), self.right_list)) + self.common = list(map(a.__getitem__, filter(b.__contains__, a))) + self.left_only = list(map(a.__getitem__, filterfalse(b.__contains__, a))) + self.right_only = list(map(b.__getitem__, filterfalse(a.__contains__, b))) + + def phase2(self): # Distinguish files, directories, funnies + self.common_dirs = [] + self.common_files = [] + self.common_funny = [] + + for x in self.common: + a_path = os.path.join(self.left, x) + b_path = os.path.join(self.right, x) + + ok = 1 + try: + a_stat = os.stat(a_path) + except OSError: + # print('Can\'t stat', a_path, ':', why.args[1]) + ok = 0 + try: + b_stat = os.stat(b_path) + except OSError: + # print('Can\'t stat', b_path, ':', why.args[1]) + ok = 0 + + if ok: + a_type = stat.S_IFMT(a_stat.st_mode) + b_type = stat.S_IFMT(b_stat.st_mode) + if a_type != b_type: + self.common_funny.append(x) + elif stat.S_ISDIR(a_type): + self.common_dirs.append(x) + elif stat.S_ISREG(a_type): + self.common_files.append(x) + else: + self.common_funny.append(x) + else: + self.common_funny.append(x) + + def phase3(self): # Find out differences between common files + xx = cmpfiles(self.left, self.right, self.common_files) + self.same_files, self.diff_files, self.funny_files = xx + + def phase4(self): # Find out differences between common subdirectories + # A new dircmp (or MyDirCmp if dircmp was subclassed) object is created + # for each common subdirectory, + # these are stored in a dictionary indexed by filename. 
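+        # (Editorial illustration: for dircmp('a', 'b') with a common
+        # subdirectory 'src', subdirs['src'] compares 'a/src' with 'b/src'.)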
+ # The hide and ignore properties are inherited from the parent + self.subdirs = {} + for x in self.common_dirs: + a_x = os.path.join(self.left, x) + b_x = os.path.join(self.right, x) + self.subdirs[x] = self.__class__(a_x, b_x, self.ignore, self.hide) + + def phase4_closure(self): # Recursively call phase4() on subdirectories + self.phase4() + for sd in self.subdirs.values(): + sd.phase4_closure() + + def report(self): # Print a report on the differences between a and b + # Output format is purposely lousy + print('diff', self.left, self.right) + if self.left_only: + self.left_only.sort() + print('Only in', self.left, ':', self.left_only) + if self.right_only: + self.right_only.sort() + print('Only in', self.right, ':', self.right_only) + if self.same_files: + self.same_files.sort() + print('Identical files :', self.same_files) + if self.diff_files: + self.diff_files.sort() + print('Differing files :', self.diff_files) + if self.funny_files: + self.funny_files.sort() + print('Trouble with common files :', self.funny_files) + if self.common_dirs: + self.common_dirs.sort() + print('Common subdirectories :', self.common_dirs) + if self.common_funny: + self.common_funny.sort() + print('Common funny cases :', self.common_funny) + + def report_partial_closure(self): # Print reports on self and on subdirs + self.report() + for sd in self.subdirs.values(): + print() + sd.report() + + def report_full_closure(self): # Report on self and subdirs recursively + self.report() + for sd in self.subdirs.values(): + print() + sd.report_full_closure() + + methodmap = dict(subdirs=phase4, + same_files=phase3, diff_files=phase3, funny_files=phase3, + common_dirs = phase2, common_files=phase2, common_funny=phase2, + common=phase1, left_only=phase1, right_only=phase1, + left_list=phase0, right_list=phase0) + + def __getattr__(self, attr): + if attr not in self.methodmap: + raise AttributeError(attr) + self.methodmap[attr](self) + return getattr(self, attr) + + __class_getitem__ = classmethod(GenericAlias) + + +def cmpfiles(a, b, common, shallow=True): + """Compare common files in two directories. + + a, b -- directory names + common -- list of file names found in both directories + shallow -- if true, do comparison based solely on stat() information + + Returns a tuple of three lists: + files that compare equal + files that are different + filenames that aren't regular files. + + """ + res = ([], [], []) + for x in common: + ax = os.path.join(a, x) + bx = os.path.join(b, x) + res[_cmp(ax, bx, shallow)].append(x) + return res + + +# Compare two files. +# Return: +# 0 for equal +# 1 for different +# 2 for funny cases (can't stat, etc.) +# +def _cmp(a, b, sh, abs=abs, cmp=cmp): + try: + return not abs(cmp(a, b, sh)) + except OSError: + return 2 + + +# Return a copy with items that occur in skip removed. +# +def _filter(flist, skip): + return list(filterfalse(skip.__contains__, flist)) + + +# Demonstration and testing. 
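+# (Editorial note: demo() below is invoked as "python filecmp.py [-r] dir1
+# dir2"; -r selects the fully recursive report_full_closure() output.)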
+#
+def demo():
+    import sys
+    import getopt
+    options, args = getopt.getopt(sys.argv[1:], 'r')
+    if len(args) != 2:
+        raise getopt.GetoptError('need exactly two args', None)
+    dd = dircmp(args[0], args[1])
+    if ('-r', '') in options:
+        dd.report_full_closure()
+    else:
+        dd.report()
+
+if __name__ == '__main__':
+    demo()
diff --git a/pllava/lib/python3.10/fileinput.py b/pllava/lib/python3.10/fileinput.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bd19906dcf5d276b9259d5e1147cde270f372de
--- /dev/null
+++ b/pllava/lib/python3.10/fileinput.py
@@ -0,0 +1,462 @@
+"""Helper class to quickly write a loop over all standard input files.
+
+Typical use is:
+
+    import fileinput
+    for line in fileinput.input(encoding="utf-8"):
+        process(line)
+
+This iterates over the lines of all files listed in sys.argv[1:],
+defaulting to sys.stdin if the list is empty. If a filename is '-' it
+is also replaced by sys.stdin and the optional arguments mode and
+openhook are ignored. To specify an alternative list of filenames,
+pass it as the argument to input(). A single file name is also allowed.
+
+Functions filename(), lineno() return the filename and cumulative line
+number of the line that has just been read; filelineno() returns its
+line number in the current file; isfirstline() returns true iff the
+line just read is the first line of its file; isstdin() returns true
+iff the line was read from sys.stdin. Function nextfile() closes the
+current file so that the next iteration will read the first line from
+the next file (if any); lines not read from the file will not count
+towards the cumulative line count; the filename is not changed until
+after the first line of the next file has been read. Function close()
+closes the sequence.
+
+Before any lines have been read, filename() returns None and both line
+numbers are zero; nextfile() has no effect. After all lines have been
+read, filename() and the line number functions return the values
+pertaining to the last line read; nextfile() has no effect.
+
+All files are opened in text mode by default; you can override this by
+setting the mode parameter to input() or FileInput.__init__().
+If an I/O error occurs during opening or reading a file, the OSError
+exception is raised.
+
+If sys.stdin is used more than once, the second and further use will
+return no lines, except perhaps for interactive use, or if it has been
+explicitly reset (e.g. using sys.stdin.seek(0)).
+
+Empty files are opened and immediately closed; the only time their
+presence in the list of filenames is noticeable at all is when the
+last file opened is empty.
+
+It is possible that the last line of a file doesn't end in a newline
+character; otherwise lines are returned including the trailing
+newline.
+
+Class FileInput is the implementation; its methods filename(),
+lineno(), filelineno(), isfirstline(), isstdin(), nextfile() and close()
+correspond to the functions in the module. In addition it has a
+readline() method which returns the next input line, and a
+__getitem__() method which implements the sequence behavior. The
+sequence must be accessed in strictly sequential order; sequence
+access and readline() cannot be mixed.
+
+Optional in-place filtering: if the keyword argument inplace=1 is
+passed to input() or to the FileInput constructor, the file is moved
+to a backup file and standard output is directed to the input file.
+This makes it possible to write a filter that rewrites its input file
+in place. If the keyword argument backup=".<some extension>" is also
+given, it specifies the extension for the backup file, and the backup
+file remains around; by default, the extension is ".bak" and it is
+deleted when the output file is closed. In-place filtering is
+disabled when standard input is read. XXX The current implementation
+does not work for MS-DOS 8+3 filesystems.
+"""
+
+import io
+import sys, os
+from types import GenericAlias
+
+__all__ = ["input", "close", "nextfile", "filename", "lineno", "filelineno",
+           "fileno", "isfirstline", "isstdin", "FileInput", "hook_compressed",
+           "hook_encoded"]
+
+_state = None
+
+def input(files=None, inplace=False, backup="", *, mode="r", openhook=None,
+          encoding=None, errors=None):
+    """Return an instance of the FileInput class, which can be iterated.
+
+    The parameters are passed to the constructor of the FileInput class.
+    The returned instance, in addition to being an iterator,
+    keeps global state for the functions of this module.
+    """
+    global _state
+    if _state and _state._file:
+        raise RuntimeError("input() already active")
+    _state = FileInput(files, inplace, backup, mode=mode, openhook=openhook,
+                       encoding=encoding, errors=errors)
+    return _state
+
+def close():
+    """Close the sequence."""
+    global _state
+    state = _state
+    _state = None
+    if state:
+        state.close()
+
+def nextfile():
+    """
+    Close the current file so that the next iteration will read the first
+    line from the next file (if any); lines not read from the file will
+    not count towards the cumulative line count. The filename is not
+    changed until after the first line of the next file has been read.
+    Before the first line has been read, this function has no effect;
+    it cannot be used to skip the first file. After the last line of the
+    last file has been read, this function has no effect.
+    """
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.nextfile()
+
+def filename():
+    """
+    Return the name of the file currently being read.
+    Before the first line has been read, returns None.
+    """
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.filename()
+
+def lineno():
+    """
+    Return the cumulative line number of the line that has just been read.
+    Before the first line has been read, returns 0. After the last line
+    of the last file has been read, returns the line number of that line.
+    """
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.lineno()
+
+def filelineno():
+    """
+    Return the line number in the current file. Before the first line
+    has been read, returns 0. After the last line of the last file has
+    been read, returns the line number of that line within the file.
+    """
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.filelineno()
+
+def fileno():
+    """
+    Return the file number of the current file. When no file is currently
+    opened, returns -1.
+    """
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.fileno()
+
+def isfirstline():
+    """
+    Returns true if the line just read is the first line of its file,
+    otherwise returns false.
+    """
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.isfirstline()
+
+def isstdin():
+    """
+    Returns true if the last line was read from sys.stdin,
+    otherwise returns false.
+    """
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.isstdin()
+
+class FileInput:
+    """FileInput([files[, inplace[, backup]]], *, mode=None, openhook=None)
+
+    Class FileInput is the implementation of the module; its methods
+    filename(), lineno(), filelineno(), isfirstline(), isstdin(), fileno(),
+    nextfile() and close() correspond to the functions of the same name
+    in the module.
+    In addition it has a readline() method which returns the next
+    input line, and a __getitem__() method which implements the
+    sequence behavior. The sequence must be accessed in strictly
+    sequential order; random access and readline() cannot be mixed.
+    """
+
+    def __init__(self, files=None, inplace=False, backup="", *,
+                 mode="r", openhook=None, encoding=None, errors=None):
+        if isinstance(files, str):
+            files = (files,)
+        elif isinstance(files, os.PathLike):
+            files = (os.fspath(files), )
+        else:
+            if files is None:
+                files = sys.argv[1:]
+            if not files:
+                files = ('-',)
+            else:
+                files = tuple(files)
+        self._files = files
+        self._inplace = inplace
+        self._backup = backup
+        self._savestdout = None
+        self._output = None
+        self._filename = None
+        self._startlineno = 0
+        self._filelineno = 0
+        self._file = None
+        self._isstdin = False
+        self._backupfilename = None
+        self._encoding = encoding
+        self._errors = errors
+
+        # We can not use io.text_encoding() here because old openhook doesn't
+        # take encoding parameter.
+        if (sys.flags.warn_default_encoding and
+                "b" not in mode and encoding is None and openhook is None):
+            import warnings
+            warnings.warn("'encoding' argument not specified.",
+                          EncodingWarning, 2)
+
+        # restrict mode argument to reading modes
+        if mode not in ('r', 'rU', 'U', 'rb'):
+            raise ValueError("FileInput opening mode must be one of "
+                             "'r', 'rU', 'U' and 'rb'")
+        if 'U' in mode:
+            import warnings
+            warnings.warn("'U' mode is deprecated",
+                          DeprecationWarning, 2)
+        self._mode = mode
+        self._write_mode = mode.replace('r', 'w') if 'U' not in mode else 'w'
+        if openhook:
+            if inplace:
+                raise ValueError("FileInput cannot use an opening hook in inplace mode")
+            if not callable(openhook):
+                raise ValueError("FileInput openhook must be callable")
+        self._openhook = openhook
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        try:
+            self.nextfile()
+        finally:
+            self._files = ()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        while True:
+            line = self._readline()
+            if line:
+                self._filelineno += 1
+                return line
+            if not self._file:
+                raise StopIteration
+            self.nextfile()
+            # repeat with next file
+
+    def __getitem__(self, i):
+        import warnings
+        warnings.warn(
+            "Support for indexing FileInput objects is deprecated. "
+            "Use iterator protocol instead.",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        if i != self.lineno():
+            raise RuntimeError("accessing lines out of order")
+        try:
+            return self.__next__()
+        except StopIteration:
+            raise IndexError("end of input reached")
+
+    def nextfile(self):
+        savestdout = self._savestdout
+        self._savestdout = None
+        if savestdout:
+            sys.stdout = savestdout
+
+        output = self._output
+        self._output = None
+        try:
+            if output:
+                output.close()
+        finally:
+            file = self._file
+            self._file = None
+            try:
+                del self._readline  # restore FileInput._readline
+            except AttributeError:
+                pass
+            try:
+                if file and not self._isstdin:
+                    file.close()
+            finally:
+                backupfilename = self._backupfilename
+                self._backupfilename = None
+                if backupfilename and not self._backup:
+                    try: os.unlink(backupfilename)
+                    except OSError: pass
+
+                self._isstdin = False
+
+    def readline(self):
+        while True:
+            line = self._readline()
+            if line:
+                self._filelineno += 1
+                return line
+            if not self._file:
+                return line
+            self.nextfile()
+            # repeat with next file
+
+    def _readline(self):
+        if not self._files:
+            if 'b' in self._mode:
+                return b''
+            else:
+                return ''
+        self._filename = self._files[0]
+        self._files = self._files[1:]
+        self._startlineno = self.lineno()
+        self._filelineno = 0
+        self._file = None
+        self._isstdin = False
+        self._backupfilename = 0
+
+        # EncodingWarning is emitted in __init__() already
+        if "b" not in self._mode:
+            encoding = self._encoding or "locale"
+        else:
+            encoding = None
+
+        if self._filename == '-':
+            self._filename = '<stdin>'
+            if 'b' in self._mode:
+                self._file = getattr(sys.stdin, 'buffer', sys.stdin)
+            else:
+                self._file = sys.stdin
+            self._isstdin = True
+        else:
+            if self._inplace:
+                self._backupfilename = (
+                    os.fspath(self._filename) + (self._backup or ".bak"))
+                try:
+                    os.unlink(self._backupfilename)
+                except OSError:
+                    pass
+                # The next few lines may raise OSError
+                os.rename(self._filename, self._backupfilename)
+                self._file = open(self._backupfilename, self._mode,
+                                  encoding=encoding, errors=self._errors)
+                try:
+                    perm = os.fstat(self._file.fileno()).st_mode
+                except OSError:
+                    self._output = open(self._filename, self._write_mode,
+                                        encoding=encoding, errors=self._errors)
+                else:
+                    mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
+                    if hasattr(os, 'O_BINARY'):
+                        mode |= os.O_BINARY
+
+                    fd = os.open(self._filename, mode, perm)
+                    self._output = os.fdopen(fd, self._write_mode,
+                                             encoding=encoding, errors=self._errors)
+                    try:
+                        os.chmod(self._filename, perm)
+                    except OSError:
+                        pass
+                self._savestdout = sys.stdout
+                sys.stdout = self._output
+            else:
+                # This may raise OSError
+                if self._openhook:
+                    # Custom hooks made previous to Python 3.10 didn't have
+                    # encoding argument
+                    if self._encoding is None:
+                        self._file = self._openhook(self._filename, self._mode)
+                    else:
+                        self._file = self._openhook(
+                            self._filename, self._mode, encoding=self._encoding, errors=self._errors)
+                else:
+                    self._file = open(self._filename, self._mode, encoding=encoding, errors=self._errors)
+        self._readline = self._file.readline  # hide FileInput._readline
+        return self._readline()
+
+    def filename(self):
+        return self._filename
+
+    def lineno(self):
+        return self._startlineno + self._filelineno
+
+    def filelineno(self):
+        return self._filelineno
+
+    def fileno(self):
+        if self._file:
+            try:
+                return self._file.fileno()
+            except ValueError:
+                return -1
+        else:
+            return -1
+
+    def isfirstline(self):
+        return self._filelineno == 1
+
+    def isstdin(self):
+        return self._isstdin
+
+    __class_getitem__ = classmethod(GenericAlias)
+
+
+def hook_compressed(filename, mode, *, encoding=None, errors=None):
+    if encoding is None and "b" not in mode:  # EncodingWarning is emitted in FileInput() already.
+        encoding = "locale"
+    ext = os.path.splitext(filename)[1]
+    if ext == '.gz':
+        import gzip
+        stream = gzip.open(filename, mode)
+    elif ext == '.bz2':
+        import bz2
+        stream = bz2.BZ2File(filename, mode)
+    else:
+        return open(filename, mode, encoding=encoding, errors=errors)
+
+    # gzip and bz2 are binary mode by default.
+    if "b" not in mode:
+        stream = io.TextIOWrapper(stream, encoding=encoding, errors=errors)
+    return stream
+
+
+def hook_encoded(encoding, errors=None):
+    def openhook(filename, mode):
+        return open(filename, mode, encoding=encoding, errors=errors)
+    return openhook
+
+
+def _test():
+    import getopt
+    inplace = False
+    backup = False
+    opts, args = getopt.getopt(sys.argv[1:], "ib:")
+    for o, a in opts:
+        if o == '-i': inplace = True
+        if o == '-b': backup = a
+    for line in input(args, inplace=inplace, backup=backup):
+        if line[-1:] == '\n': line = line[:-1]
+        if line[-1:] == '\r': line = line[:-1]
+        print("%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
+                                   isfirstline() and "*" or "", line))
+    print("%d: %s[%d]" % (lineno(), filename(), filelineno()))
+
+if __name__ == '__main__':
+    _test()
diff --git a/pllava/lib/python3.10/fractions.py b/pllava/lib/python3.10/fractions.py
new file mode 100644
index 0000000000000000000000000000000000000000..96047beb4546a5384e23e2963f45abe5186dd9cc
--- /dev/null
+++ b/pllava/lib/python3.10/fractions.py
@@ -0,0 +1,748 @@
+# Originally contributed by Sjoerd Mullender.
+# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
+
+"""Fraction, infinite-precision, real numbers."""
+
+from decimal import Decimal
+import math
+import numbers
+import operator
+import re
+import sys
+
+__all__ = ['Fraction']
+
+
+# Constants related to the hash implementation; hash(x) is based
+# on the reduction of x modulo the prime _PyHASH_MODULUS.
+_PyHASH_MODULUS = sys.hash_info.modulus
+# Value to be used for rationals that reduce to infinity modulo
+# _PyHASH_MODULUS.
+_PyHASH_INF = sys.hash_info.inf
+
+_RATIONAL_FORMAT = re.compile(r"""
+    \A\s*                      # optional whitespace at the start, then
+    (?P<sign>[-+]?)            # an optional sign, then
+    (?=\d|\.\d)                # lookahead for digit or .digit
+    (?P<num>\d*)               # numerator (possibly empty)
+    (?:                        # followed by
+       (?:/(?P<denom>\d+))?    # an optional denominator
+    |                          # or
+       (?:\.(?P<decimal>\d*))? # an optional fractional part
+       (?:E(?P<exp>[-+]?\d+))? # and optional exponent
+    )
+    \s*\Z                      # and optional whitespace to finish
+""", re.VERBOSE | re.IGNORECASE)
+
+
+class Fraction(numbers.Rational):
+    """This class implements rational numbers.
+
+    In the two-argument form of the constructor, Fraction(8, 6) will
+    produce a rational number equivalent to 4/3. Both arguments must
+    be Rational. The numerator defaults to 0 and the denominator
+    defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
+
+    Fractions can also be constructed from:
+
+      - numeric strings similar to those accepted by the
+        float constructor (for example, '-2.3' or '1e10')
+
+      - strings of the form '123/456'
+
+      - float and Decimal instances
+
+      - other Rational instances (including integers)
+
+    """
+
+    __slots__ = ('_numerator', '_denominator')
+
+    # We're immutable, so use __new__ not __init__
+    def __new__(cls, numerator=0, denominator=None, *, _normalize=True):
+        """Constructs a Rational.
+
+        Takes a string like '3/2' or '1.5', another Rational instance, a
+        numerator/denominator pair, or a float.
+ + Examples + -------- + + >>> Fraction(10, -8) + Fraction(-5, 4) + >>> Fraction(Fraction(1, 7), 5) + Fraction(1, 35) + >>> Fraction(Fraction(1, 7), Fraction(2, 3)) + Fraction(3, 14) + >>> Fraction('314') + Fraction(314, 1) + >>> Fraction('-35/4') + Fraction(-35, 4) + >>> Fraction('3.1415') # conversion from numeric string + Fraction(6283, 2000) + >>> Fraction('-47e-2') # string may include a decimal exponent + Fraction(-47, 100) + >>> Fraction(1.47) # direct construction from float (exact conversion) + Fraction(6620291452234629, 4503599627370496) + >>> Fraction(2.25) + Fraction(9, 4) + >>> Fraction(Decimal('1.47')) + Fraction(147, 100) + + """ + self = super(Fraction, cls).__new__(cls) + + if denominator is None: + if type(numerator) is int: + self._numerator = numerator + self._denominator = 1 + return self + + elif isinstance(numerator, numbers.Rational): + self._numerator = numerator.numerator + self._denominator = numerator.denominator + return self + + elif isinstance(numerator, (float, Decimal)): + # Exact conversion + self._numerator, self._denominator = numerator.as_integer_ratio() + return self + + elif isinstance(numerator, str): + # Handle construction from strings. + m = _RATIONAL_FORMAT.match(numerator) + if m is None: + raise ValueError('Invalid literal for Fraction: %r' % + numerator) + numerator = int(m.group('num') or '0') + denom = m.group('denom') + if denom: + denominator = int(denom) + else: + denominator = 1 + decimal = m.group('decimal') + if decimal: + scale = 10**len(decimal) + numerator = numerator * scale + int(decimal) + denominator *= scale + exp = m.group('exp') + if exp: + exp = int(exp) + if exp >= 0: + numerator *= 10**exp + else: + denominator *= 10**-exp + if m.group('sign') == '-': + numerator = -numerator + + else: + raise TypeError("argument should be a string " + "or a Rational instance") + + elif type(numerator) is int is type(denominator): + pass # *very* normal case + + elif (isinstance(numerator, numbers.Rational) and + isinstance(denominator, numbers.Rational)): + numerator, denominator = ( + numerator.numerator * denominator.denominator, + denominator.numerator * numerator.denominator + ) + else: + raise TypeError("both arguments should be " + "Rational instances") + + if denominator == 0: + raise ZeroDivisionError('Fraction(%s, 0)' % numerator) + if _normalize: + g = math.gcd(numerator, denominator) + if denominator < 0: + g = -g + numerator //= g + denominator //= g + self._numerator = numerator + self._denominator = denominator + return self + + @classmethod + def from_float(cls, f): + """Converts a finite float to a rational number, exactly. + + Beware that Fraction.from_float(0.3) != Fraction(3, 10). + + """ + if isinstance(f, numbers.Integral): + return cls(f) + elif not isinstance(f, float): + raise TypeError("%s.from_float() only takes floats, not %r (%s)" % + (cls.__name__, f, type(f).__name__)) + return cls(*f.as_integer_ratio()) + + @classmethod + def from_decimal(cls, dec): + """Converts a finite Decimal instance to a rational number, exactly.""" + from decimal import Decimal + if isinstance(dec, numbers.Integral): + dec = Decimal(int(dec)) + elif not isinstance(dec, Decimal): + raise TypeError( + "%s.from_decimal() only takes Decimals, not %r (%s)" % + (cls.__name__, dec, type(dec).__name__)) + return cls(*dec.as_integer_ratio()) + + def as_integer_ratio(self): + """Return the integer ratio as a tuple. + + Return a tuple of two integers, whose ratio is equal to the + Fraction and with a positive denominator. 
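+
+        Illustrative doctest (editorial addition):
+
+        >>> Fraction(10, -8).as_integer_ratio()
+        (-5, 4)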
+ """ + return (self._numerator, self._denominator) + + def limit_denominator(self, max_denominator=1000000): + """Closest Fraction to self with denominator at most max_denominator. + + >>> Fraction('3.141592653589793').limit_denominator(10) + Fraction(22, 7) + >>> Fraction('3.141592653589793').limit_denominator(100) + Fraction(311, 99) + >>> Fraction(4321, 8765).limit_denominator(10000) + Fraction(4321, 8765) + + """ + # Algorithm notes: For any real number x, define a *best upper + # approximation* to x to be a rational number p/q such that: + # + # (1) p/q >= x, and + # (2) if p/q > r/s >= x then s > q, for any rational r/s. + # + # Define *best lower approximation* similarly. Then it can be + # proved that a rational number is a best upper or lower + # approximation to x if, and only if, it is a convergent or + # semiconvergent of the (unique shortest) continued fraction + # associated to x. + # + # To find a best rational approximation with denominator <= M, + # we find the best upper and lower approximations with + # denominator <= M and take whichever of these is closer to x. + # In the event of a tie, the bound with smaller denominator is + # chosen. If both denominators are equal (which can happen + # only when max_denominator == 1 and self is midway between + # two integers) the lower bound---i.e., the floor of self, is + # taken. + + if max_denominator < 1: + raise ValueError("max_denominator should be at least 1") + if self._denominator <= max_denominator: + return Fraction(self) + + p0, q0, p1, q1 = 0, 1, 1, 0 + n, d = self._numerator, self._denominator + while True: + a = n//d + q2 = q0+a*q1 + if q2 > max_denominator: + break + p0, q0, p1, q1 = p1, q1, p0+a*p1, q2 + n, d = d, n-a*d + + k = (max_denominator-q0)//q1 + bound1 = Fraction(p0+k*p1, q0+k*q1) + bound2 = Fraction(p1, q1) + if abs(bound2 - self) <= abs(bound1-self): + return bound2 + else: + return bound1 + + @property + def numerator(a): + return a._numerator + + @property + def denominator(a): + return a._denominator + + def __repr__(self): + """repr(self)""" + return '%s(%s, %s)' % (self.__class__.__name__, + self._numerator, self._denominator) + + def __str__(self): + """str(self)""" + if self._denominator == 1: + return str(self._numerator) + else: + return '%s/%s' % (self._numerator, self._denominator) + + def _operator_fallbacks(monomorphic_operator, fallback_operator): + """Generates forward and reverse operators given a purely-rational + operator and a function from the operator module. + + Use this like: + __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op) + + In general, we want to implement the arithmetic operations so + that mixed-mode operations either call an implementation whose + author knew about the types of both arguments, or convert both + to the nearest built in type and do the operation there. In + Fraction, that means that we define __add__ and __radd__ as: + + def __add__(self, other): + # Both types have numerators/denominator attributes, + # so do the operation directly + if isinstance(other, (int, Fraction)): + return Fraction(self.numerator * other.denominator + + other.numerator * self.denominator, + self.denominator * other.denominator) + # float and complex don't have those operations, but we + # know about those types, so special case them. + elif isinstance(other, float): + return float(self) + other + elif isinstance(other, complex): + return complex(self) + other + # Let the other type take over. 
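+            # (e.g. a third-party numeric type whose __radd__ knows about
+            # Fraction -- an editorial, hypothetical illustration)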
+ return NotImplemented + + def __radd__(self, other): + # radd handles more types than add because there's + # nothing left to fall back to. + if isinstance(other, numbers.Rational): + return Fraction(self.numerator * other.denominator + + other.numerator * self.denominator, + self.denominator * other.denominator) + elif isinstance(other, Real): + return float(other) + float(self) + elif isinstance(other, Complex): + return complex(other) + complex(self) + return NotImplemented + + + There are 5 different cases for a mixed-type addition on + Fraction. I'll refer to all of the above code that doesn't + refer to Fraction, float, or complex as "boilerplate". 'r' + will be an instance of Fraction, which is a subtype of + Rational (r : Fraction <: Rational), and b : B <: + Complex. The first three involve 'r + b': + + 1. If B <: Fraction, int, float, or complex, we handle + that specially, and all is well. + 2. If Fraction falls back to the boilerplate code, and it + were to return a value from __add__, we'd miss the + possibility that B defines a more intelligent __radd__, + so the boilerplate should return NotImplemented from + __add__. In particular, we don't handle Rational + here, even though we could get an exact answer, in case + the other type wants to do something special. + 3. If B <: Fraction, Python tries B.__radd__ before + Fraction.__add__. This is ok, because it was + implemented with knowledge of Fraction, so it can + handle those instances before delegating to Real or + Complex. + + The next two situations describe 'b + r'. We assume that b + didn't know about Fraction in its implementation, and that it + uses similar boilerplate code: + + 4. If B <: Rational, then __radd_ converts both to the + builtin rational type (hey look, that's us) and + proceeds. + 5. Otherwise, __radd__ tries to find the nearest common + base ABC, and fall back to its builtin type. Since this + class doesn't subclass a concrete type, there's no + implementation to fall back to, so we need to try as + hard as possible to return an actual value, or the user + will get a TypeError. + + """ + def forward(a, b): + if isinstance(b, (int, Fraction)): + return monomorphic_operator(a, b) + elif isinstance(b, float): + return fallback_operator(float(a), b) + elif isinstance(b, complex): + return fallback_operator(complex(a), b) + else: + return NotImplemented + forward.__name__ = '__' + fallback_operator.__name__ + '__' + forward.__doc__ = monomorphic_operator.__doc__ + + def reverse(b, a): + if isinstance(a, numbers.Rational): + # Includes ints. + return monomorphic_operator(a, b) + elif isinstance(a, numbers.Real): + return fallback_operator(float(a), float(b)) + elif isinstance(a, numbers.Complex): + return fallback_operator(complex(a), complex(b)) + else: + return NotImplemented + reverse.__name__ = '__r' + fallback_operator.__name__ + '__' + reverse.__doc__ = monomorphic_operator.__doc__ + + return forward, reverse + + # Rational arithmetic algorithms: Knuth, TAOCP, Volume 2, 4.5.1. + # + # Assume input fractions a and b are normalized. + # + # 1) Consider addition/subtraction. + # + # Let g = gcd(da, db). Then + # + # na nb na*db ± nb*da + # a ± b == -- ± -- == ------------- == + # da db da*db + # + # na*(db//g) ± nb*(da//g) t + # == ----------------------- == - + # (da*db)//g d + # + # Now, if g > 1, we're working with smaller integers. + # + # Note, that t, (da//g) and (db//g) are pairwise coprime. 
+ # + # Indeed, (da//g) and (db//g) share no common factors (they were + # removed) and da is coprime with na (since input fractions are + # normalized), hence (da//g) and na are coprime. By symmetry, + # (db//g) and nb are coprime too. Then, + # + # gcd(t, da//g) == gcd(na*(db//g), da//g) == 1 + # gcd(t, db//g) == gcd(nb*(da//g), db//g) == 1 + # + # Above allows us optimize reduction of the result to lowest + # terms. Indeed, + # + # g2 = gcd(t, d) == gcd(t, (da//g)*(db//g)*g) == gcd(t, g) + # + # t//g2 t//g2 + # a ± b == ----------------------- == ---------------- + # (da//g)*(db//g)*(g//g2) (da//g)*(db//g2) + # + # is a normalized fraction. This is useful because the unnormalized + # denominator d could be much larger than g. + # + # We should special-case g == 1 (and g2 == 1), since 60.8% of + # randomly-chosen integers are coprime: + # https://en.wikipedia.org/wiki/Coprime_integers#Probability_of_coprimality + # Note, that g2 == 1 always for fractions, obtained from floats: here + # g is a power of 2 and the unnormalized numerator t is an odd integer. + # + # 2) Consider multiplication + # + # Let g1 = gcd(na, db) and g2 = gcd(nb, da), then + # + # na*nb na*nb (na//g1)*(nb//g2) + # a*b == ----- == ----- == ----------------- + # da*db db*da (db//g1)*(da//g2) + # + # Note, that after divisions we're multiplying smaller integers. + # + # Also, the resulting fraction is normalized, because each of + # two factors in the numerator is coprime to each of the two factors + # in the denominator. + # + # Indeed, pick (na//g1). It's coprime with (da//g2), because input + # fractions are normalized. It's also coprime with (db//g1), because + # common factors are removed by g1 == gcd(na, db). + # + # As for addition/subtraction, we should special-case g1 == 1 + # and g2 == 1 for same reason. That happens also for multiplying + # rationals, obtained from floats. + + def _add(a, b): + """a + b""" + na, da = a.numerator, a.denominator + nb, db = b.numerator, b.denominator + g = math.gcd(da, db) + if g == 1: + return Fraction(na * db + da * nb, da * db, _normalize=False) + s = da // g + t = na * (db // g) + nb * s + g2 = math.gcd(t, g) + if g2 == 1: + return Fraction(t, s * db, _normalize=False) + return Fraction(t // g2, s * (db // g2), _normalize=False) + + __add__, __radd__ = _operator_fallbacks(_add, operator.add) + + def _sub(a, b): + """a - b""" + na, da = a.numerator, a.denominator + nb, db = b.numerator, b.denominator + g = math.gcd(da, db) + if g == 1: + return Fraction(na * db - da * nb, da * db, _normalize=False) + s = da // g + t = na * (db // g) - nb * s + g2 = math.gcd(t, g) + if g2 == 1: + return Fraction(t, s * db, _normalize=False) + return Fraction(t // g2, s * (db // g2), _normalize=False) + + __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub) + + def _mul(a, b): + """a * b""" + na, da = a.numerator, a.denominator + nb, db = b.numerator, b.denominator + g1 = math.gcd(na, db) + if g1 > 1: + na //= g1 + db //= g1 + g2 = math.gcd(nb, da) + if g2 > 1: + nb //= g2 + da //= g2 + return Fraction(na * nb, db * da, _normalize=False) + + __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul) + + def _div(a, b): + """a / b""" + # Same as _mul(), with inversed b. 
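+        # (Editorial worked example: (3/4) / (2/3) has g1 = gcd(3, 2) = 1 and
+        # g2 = gcd(3, 4) = 1, so n, d = 3*3, 2*4, giving Fraction(9, 8).)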
+ na, da = a.numerator, a.denominator + nb, db = b.numerator, b.denominator + g1 = math.gcd(na, nb) + if g1 > 1: + na //= g1 + nb //= g1 + g2 = math.gcd(db, da) + if g2 > 1: + da //= g2 + db //= g2 + n, d = na * db, nb * da + if d < 0: + n, d = -n, -d + return Fraction(n, d, _normalize=False) + + __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv) + + def _floordiv(a, b): + """a // b""" + return (a.numerator * b.denominator) // (a.denominator * b.numerator) + + __floordiv__, __rfloordiv__ = _operator_fallbacks(_floordiv, operator.floordiv) + + def _divmod(a, b): + """(a // b, a % b)""" + da, db = a.denominator, b.denominator + div, n_mod = divmod(a.numerator * db, da * b.numerator) + return div, Fraction(n_mod, da * db) + + __divmod__, __rdivmod__ = _operator_fallbacks(_divmod, divmod) + + def _mod(a, b): + """a % b""" + da, db = a.denominator, b.denominator + return Fraction((a.numerator * db) % (b.numerator * da), da * db) + + __mod__, __rmod__ = _operator_fallbacks(_mod, operator.mod) + + def __pow__(a, b): + """a ** b + + If b is not an integer, the result will be a float or complex + since roots are generally irrational. If b is an integer, the + result will be rational. + + """ + if isinstance(b, numbers.Rational): + if b.denominator == 1: + power = b.numerator + if power >= 0: + return Fraction(a._numerator ** power, + a._denominator ** power, + _normalize=False) + elif a._numerator >= 0: + return Fraction(a._denominator ** -power, + a._numerator ** -power, + _normalize=False) + else: + return Fraction((-a._denominator) ** -power, + (-a._numerator) ** -power, + _normalize=False) + else: + # A fractional power will generally produce an + # irrational number. + return float(a) ** float(b) + else: + return float(a) ** b + + def __rpow__(b, a): + """a ** b""" + if b._denominator == 1 and b._numerator >= 0: + # If a is an int, keep it that way if possible. + return a ** b._numerator + + if isinstance(a, numbers.Rational): + return Fraction(a.numerator, a.denominator) ** b + + if b._denominator == 1: + return a ** b._numerator + + return a ** float(b) + + def __pos__(a): + """+a: Coerces a subclass instance to Fraction""" + return Fraction(a._numerator, a._denominator, _normalize=False) + + def __neg__(a): + """-a""" + return Fraction(-a._numerator, a._denominator, _normalize=False) + + def __abs__(a): + """abs(a)""" + return Fraction(abs(a._numerator), a._denominator, _normalize=False) + + def __trunc__(a): + """trunc(a)""" + if a._numerator < 0: + return -(-a._numerator // a._denominator) + else: + return a._numerator // a._denominator + + def __floor__(a): + """math.floor(a)""" + return a.numerator // a.denominator + + def __ceil__(a): + """math.ceil(a)""" + # The negations cleverly convince floordiv to return the ceiling. + return -(-a.numerator // a.denominator) + + def __round__(self, ndigits=None): + """round(self, ndigits) + + Rounds half toward even. + """ + if ndigits is None: + floor, remainder = divmod(self.numerator, self.denominator) + if remainder * 2 < self.denominator: + return floor + elif remainder * 2 > self.denominator: + return floor + 1 + # Deal with the half case: + elif floor % 2 == 0: + return floor + else: + return floor + 1 + shift = 10**abs(ndigits) + # See _operator_fallbacks.forward to check that the results of + # these operations will always be Fraction and therefore have + # round(). 
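+        # (Editorial worked example: round(Fraction(355, 113), 2) uses
+        # shift = 100 and round(Fraction(35500, 113)) == 314, giving
+        # Fraction(314, 100) == Fraction(157, 50).)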
+ if ndigits > 0: + return Fraction(round(self * shift), shift) + else: + return Fraction(round(self / shift) * shift) + + def __hash__(self): + """hash(self)""" + + # To make sure that the hash of a Fraction agrees with the hash + # of a numerically equal integer, float or Decimal instance, we + # follow the rules for numeric hashes outlined in the + # documentation. (See library docs, 'Built-in Types'). + + try: + dinv = pow(self._denominator, -1, _PyHASH_MODULUS) + except ValueError: + # ValueError means there is no modular inverse. + hash_ = _PyHASH_INF + else: + # The general algorithm now specifies that the absolute value of + # the hash is + # (|N| * dinv) % P + # where N is self._numerator and P is _PyHASH_MODULUS. That's + # optimized here in two ways: first, for a non-negative int i, + # hash(i) == i % P, but the int hash implementation doesn't need + # to divide, and is faster than doing % P explicitly. So we do + # hash(|N| * dinv) + # instead. Second, N is unbounded, so its product with dinv may + # be arbitrarily expensive to compute. The final answer is the + # same if we use the bounded |N| % P instead, which can again + # be done with an int hash() call. If 0 <= i < P, hash(i) == i, + # so this nested hash() call wastes a bit of time making a + # redundant copy when |N| < P, but can save an arbitrarily large + # amount of computation for large |N|. + hash_ = hash(hash(abs(self._numerator)) * dinv) + result = hash_ if self._numerator >= 0 else -hash_ + return -2 if result == -1 else result + + def __eq__(a, b): + """a == b""" + if type(b) is int: + return a._numerator == b and a._denominator == 1 + if isinstance(b, numbers.Rational): + return (a._numerator == b.numerator and + a._denominator == b.denominator) + if isinstance(b, numbers.Complex) and b.imag == 0: + b = b.real + if isinstance(b, float): + if math.isnan(b) or math.isinf(b): + # comparisons with an infinity or nan should behave in + # the same way for any finite a, so treat a as zero. + return 0.0 == b + else: + return a == a.from_float(b) + else: + # Since a doesn't know how to compare with b, let's give b + # a chance to compare itself with a. + return NotImplemented + + def _richcmp(self, other, op): + """Helper for comparison operators, for internal use only. + + Implement comparison between a Rational instance `self`, and + either another Rational instance or a float `other`. If + `other` is not a Rational instance or a float, return + NotImplemented. `op` should be one of the six standard + comparison operators. + + """ + # convert other to a Rational instance where reasonable. + if isinstance(other, numbers.Rational): + return op(self._numerator * other.denominator, + self._denominator * other.numerator) + if isinstance(other, float): + if math.isnan(other) or math.isinf(other): + return op(0.0, other) + else: + return op(self, self.from_float(other)) + else: + return NotImplemented + + def __lt__(a, b): + """a < b""" + return a._richcmp(b, operator.lt) + + def __gt__(a, b): + """a > b""" + return a._richcmp(b, operator.gt) + + def __le__(a, b): + """a <= b""" + return a._richcmp(b, operator.le) + + def __ge__(a, b): + """a >= b""" + return a._richcmp(b, operator.ge) + + def __bool__(a): + """a != 0""" + # bpo-39274: Use bool() because (a._numerator != 0) can return an + # object which is not a bool. 
+        return bool(a._numerator)
+
+    # support for pickling, copy, and deepcopy
+
+    def __reduce__(self):
+        return (self.__class__, (str(self),))
+
+    def __copy__(self):
+        if type(self) == Fraction:
+            return self     # I'm immutable; therefore I am my own clone
+        return self.__class__(self._numerator, self._denominator)
+
+    def __deepcopy__(self, memo):
+        if type(self) == Fraction:
+            return self     # My components are also immutable
+        return self.__class__(self._numerator, self._denominator)
diff --git a/pllava/lib/python3.10/functools.py b/pllava/lib/python3.10/functools.py
new file mode 100644
index 0000000000000000000000000000000000000000..305ceb450a71c4a7bb8112a353cc1c94cd8f7c59
--- /dev/null
+++ b/pllava/lib/python3.10/functools.py
@@ -0,0 +1,992 @@
+"""functools.py - Tools for working with functions and callable objects
+"""
+# Python module wrapper for _functools C module
+# to allow utilities written in Python to be added
+# to the functools module.
+# Written by Nick Coghlan <ncoghlan at gmail.com>,
+# Raymond Hettinger <python at rcn.com>,
+# and Łukasz Langa <lukasz at langa.pl>.
+#   Copyright (C) 2006-2013 Python Software Foundation.
+# See C source code for _functools credits/copyright
+
+__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
+           'total_ordering', 'cache', 'cmp_to_key', 'lru_cache', 'reduce',
+           'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod',
+           'cached_property']
+
+from abc import get_cache_token
+from collections import namedtuple
+# import types, weakref  # Deferred to single_dispatch()
+from reprlib import recursive_repr
+from _thread import RLock
+from types import GenericAlias
+
+
+################################################################################
+### update_wrapper() and wraps() decorator
+################################################################################
+
+# update_wrapper() and wraps() are tools to help write
+# wrapper functions that can handle naive introspection
+
+WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
+                       '__annotations__')
+WRAPPER_UPDATES = ('__dict__',)
+def update_wrapper(wrapper,
+                   wrapped,
+                   assigned = WRAPPER_ASSIGNMENTS,
+                   updated = WRAPPER_UPDATES):
+    """Update a wrapper function to look like the wrapped function
+
+       wrapper is the function to be updated
+       wrapped is the original function
+       assigned is a tuple naming the attributes assigned directly
+       from the wrapped function to the wrapper function (defaults to
+       functools.WRAPPER_ASSIGNMENTS)
+       updated is a tuple naming the attributes of the wrapper that
+       are updated with the corresponding attribute from the wrapped
+       function (defaults to functools.WRAPPER_UPDATES)
+    """
+    for attr in assigned:
+        try:
+            value = getattr(wrapped, attr)
+        except AttributeError:
+            pass
+        else:
+            setattr(wrapper, attr, value)
+    for attr in updated:
+        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+    # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
+    # from the wrapped function when updating __dict__
+    wrapper.__wrapped__ = wrapped
+    # Return the wrapper so this can be used as a decorator via partial()
+    return wrapper
+
+def wraps(wrapped,
+          assigned = WRAPPER_ASSIGNMENTS,
+          updated = WRAPPER_UPDATES):
+    """Decorator factory to apply update_wrapper() to a wrapper function
+
+       Returns a decorator that invokes update_wrapper() with the decorated
+       function as the wrapper argument and the arguments to wraps() as the
+       remaining arguments. Default arguments are as for update_wrapper().
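+
+       For example (illustrative; my_len is an arbitrary name):
+
+           >>> @wraps(len)
+           ... def my_len(obj):
+           ...     return len(obj)
+           >>> my_len.__name__
+           'len'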
+ This is a convenience function to simplify applying partial() to + update_wrapper(). + """ + return partial(update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) + + +################################################################################ +### total_ordering class decorator +################################################################################ + +# The total ordering functions all invoke the root magic method directly +# rather than using the corresponding operator. This avoids possible +# infinite recursion that could occur when the operator dispatch logic +# detects a NotImplemented result and then calls a reflected method. + +def _gt_from_lt(self, other, NotImplemented=NotImplemented): + 'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).' + op_result = type(self).__lt__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result and self != other + +def _le_from_lt(self, other, NotImplemented=NotImplemented): + 'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).' + op_result = type(self).__lt__(self, other) + if op_result is NotImplemented: + return op_result + return op_result or self == other + +def _ge_from_lt(self, other, NotImplemented=NotImplemented): + 'Return a >= b. Computed by @total_ordering from (not a < b).' + op_result = type(self).__lt__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result + +def _ge_from_le(self, other, NotImplemented=NotImplemented): + 'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).' + op_result = type(self).__le__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result or self == other + +def _lt_from_le(self, other, NotImplemented=NotImplemented): + 'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).' + op_result = type(self).__le__(self, other) + if op_result is NotImplemented: + return op_result + return op_result and self != other + +def _gt_from_le(self, other, NotImplemented=NotImplemented): + 'Return a > b. Computed by @total_ordering from (not a <= b).' + op_result = type(self).__le__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result + +def _lt_from_gt(self, other, NotImplemented=NotImplemented): + 'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).' + op_result = type(self).__gt__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result and self != other + +def _ge_from_gt(self, other, NotImplemented=NotImplemented): + 'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).' + op_result = type(self).__gt__(self, other) + if op_result is NotImplemented: + return op_result + return op_result or self == other + +def _le_from_gt(self, other, NotImplemented=NotImplemented): + 'Return a <= b. Computed by @total_ordering from (not a > b).' + op_result = type(self).__gt__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result + +def _le_from_ge(self, other, NotImplemented=NotImplemented): + 'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).' + op_result = type(self).__ge__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result or self == other + +def _gt_from_ge(self, other, NotImplemented=NotImplemented): + 'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).' 
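+    # E.g. (illustrative): for a class that defines only __ge__ (and
+    # __eq__), @total_ordering derives x > y as (x >= y) and (x != y).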
+ op_result = type(self).__ge__(self, other) + if op_result is NotImplemented: + return op_result + return op_result and self != other + +def _lt_from_ge(self, other, NotImplemented=NotImplemented): + 'Return a < b. Computed by @total_ordering from (not a >= b).' + op_result = type(self).__ge__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result + +_convert = { + '__lt__': [('__gt__', _gt_from_lt), + ('__le__', _le_from_lt), + ('__ge__', _ge_from_lt)], + '__le__': [('__ge__', _ge_from_le), + ('__lt__', _lt_from_le), + ('__gt__', _gt_from_le)], + '__gt__': [('__lt__', _lt_from_gt), + ('__ge__', _ge_from_gt), + ('__le__', _le_from_gt)], + '__ge__': [('__le__', _le_from_ge), + ('__gt__', _gt_from_ge), + ('__lt__', _lt_from_ge)] +} + +def total_ordering(cls): + """Class decorator that fills in missing ordering methods""" + # Find user-defined comparisons (not those inherited from object). + roots = {op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)} + if not roots: + raise ValueError('must define at least one ordering operation: < > <= >=') + root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ + for opname, opfunc in _convert[root]: + if opname not in roots: + opfunc.__name__ = opname + setattr(cls, opname, opfunc) + return cls + + +################################################################################ +### cmp_to_key() function converter +################################################################################ + +def cmp_to_key(mycmp): + """Convert a cmp= function into a key= function""" + class K(object): + __slots__ = ['obj'] + def __init__(self, obj): + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) < 0 + def __gt__(self, other): + return mycmp(self.obj, other.obj) > 0 + def __eq__(self, other): + return mycmp(self.obj, other.obj) == 0 + def __le__(self, other): + return mycmp(self.obj, other.obj) <= 0 + def __ge__(self, other): + return mycmp(self.obj, other.obj) >= 0 + __hash__ = None + return K + +try: + from _functools import cmp_to_key +except ImportError: + pass + + +################################################################################ +### reduce() sequence to a single item +################################################################################ + +_initial_missing = object() + +def reduce(function, sequence, initial=_initial_missing): + """ + reduce(function, iterable[, initial]) -> value + + Apply a function of two arguments cumulatively to the items of a sequence + or iterable, from left to right, so as to reduce the iterable to a single + value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates + ((((1+2)+3)+4)+5). If initial is present, it is placed before the items + of the iterable in the calculation, and serves as a default when the + iterable is empty. 
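+
+    For example (illustrative), reduce(lambda x, y: x+y, [1, 2, 3], 10)
+    returns 16, computed as (((10+1)+2)+3).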
+ """ + + it = iter(sequence) + + if initial is _initial_missing: + try: + value = next(it) + except StopIteration: + raise TypeError( + "reduce() of empty iterable with no initial value") from None + else: + value = initial + + for element in it: + value = function(value, element) + + return value + +try: + from _functools import reduce +except ImportError: + pass + + +################################################################################ +### partial() argument application +################################################################################ + +# Purely functional, no descriptor behaviour +class partial: + """New function with partial application of the given arguments + and keywords. + """ + + __slots__ = "func", "args", "keywords", "__dict__", "__weakref__" + + def __new__(cls, func, /, *args, **keywords): + if not callable(func): + raise TypeError("the first argument must be callable") + + if hasattr(func, "func"): + args = func.args + args + keywords = {**func.keywords, **keywords} + func = func.func + + self = super(partial, cls).__new__(cls) + + self.func = func + self.args = args + self.keywords = keywords + return self + + def __call__(self, /, *args, **keywords): + keywords = {**self.keywords, **keywords} + return self.func(*self.args, *args, **keywords) + + @recursive_repr() + def __repr__(self): + qualname = type(self).__qualname__ + args = [repr(self.func)] + args.extend(repr(x) for x in self.args) + args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items()) + if type(self).__module__ == "functools": + return f"functools.{qualname}({', '.join(args)})" + return f"{qualname}({', '.join(args)})" + + def __reduce__(self): + return type(self), (self.func,), (self.func, self.args, + self.keywords or None, self.__dict__ or None) + + def __setstate__(self, state): + if not isinstance(state, tuple): + raise TypeError("argument to __setstate__ must be a tuple") + if len(state) != 4: + raise TypeError(f"expected 4 items in state, got {len(state)}") + func, args, kwds, namespace = state + if (not callable(func) or not isinstance(args, tuple) or + (kwds is not None and not isinstance(kwds, dict)) or + (namespace is not None and not isinstance(namespace, dict))): + raise TypeError("invalid partial state") + + args = tuple(args) # just in case it's a subclass + if kwds is None: + kwds = {} + elif type(kwds) is not dict: # XXX does it need to be *exactly* dict? + kwds = dict(kwds) + if namespace is None: + namespace = {} + + self.__dict__ = namespace + self.func = func + self.args = args + self.keywords = kwds + +try: + from _functools import partial +except ImportError: + pass + +# Descriptor version +class partialmethod(object): + """Method descriptor with partial application of the given arguments + and keywords. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. 
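+
+    For example (illustrative):
+
+        class Cell:
+            def set_state(self, state):
+                self.state = state
+            set_alive = partialmethod(set_state, True)
+            set_dead = partialmethod(set_state, False)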
+ """ + + def __init__(self, func, /, *args, **keywords): + if not callable(func) and not hasattr(func, "__get__"): + raise TypeError("{!r} is not callable or a descriptor" + .format(func)) + + # func could be a descriptor like classmethod which isn't callable, + # so we can't inherit from partial (it verifies func is callable) + if isinstance(func, partialmethod): + # flattening is mandatory in order to place cls/self before all + # other arguments + # it's also more efficient since only one function will be called + self.func = func.func + self.args = func.args + args + self.keywords = {**func.keywords, **keywords} + else: + self.func = func + self.args = args + self.keywords = keywords + + def __repr__(self): + args = ", ".join(map(repr, self.args)) + keywords = ", ".join("{}={!r}".format(k, v) + for k, v in self.keywords.items()) + format_string = "{module}.{cls}({func}, {args}, {keywords})" + return format_string.format(module=self.__class__.__module__, + cls=self.__class__.__qualname__, + func=self.func, + args=args, + keywords=keywords) + + def _make_unbound_method(self): + def _method(cls_or_self, /, *args, **keywords): + keywords = {**self.keywords, **keywords} + return self.func(cls_or_self, *self.args, *args, **keywords) + _method.__isabstractmethod__ = self.__isabstractmethod__ + _method._partialmethod = self + return _method + + def __get__(self, obj, cls=None): + get = getattr(self.func, "__get__", None) + result = None + if get is not None: + new_func = get(obj, cls) + if new_func is not self.func: + # Assume __get__ returning something new indicates the + # creation of an appropriate callable + result = partial(new_func, *self.args, **self.keywords) + try: + result.__self__ = new_func.__self__ + except AttributeError: + pass + if result is None: + # If the underlying descriptor didn't do anything, treat this + # like an instance method + result = self._make_unbound_method().__get__(obj, cls) + return result + + @property + def __isabstractmethod__(self): + return getattr(self.func, "__isabstractmethod__", False) + + __class_getitem__ = classmethod(GenericAlias) + + +# Helper functions + +def _unwrap_partial(func): + while isinstance(func, partial): + func = func.func + return func + +################################################################################ +### LRU Cache function decorator +################################################################################ + +_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) + +class _HashedSeq(list): + """ This class guarantees that hash() will be called no more than once + per element. This is important because the lru_cache() will hash + the key multiple times on a cache miss. + + """ + + __slots__ = 'hashvalue' + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + +def _make_key(args, kwds, typed, + kwd_mark = (object(),), + fasttypes = {int, str}, + tuple=tuple, type=type, len=len): + """Make a cache key from optionally typed positional and keyword arguments + + The key is constructed in a way that is flat as possible rather than + as a nested structure that would take more memory. + + If there is only a single argument and its data type is known to cache + its hash value, then that argument is returned without a wrapper. This + saves space and improves lookup speed. + + """ + # All of code below relies on kwds preserving the order input by the user. + # Formerly, we sorted() the kwds before looping. 
The new way is *much* + # faster; however, it means that f(x=1, y=2) will now be treated as a + # distinct call from f(y=2, x=1) which will be cached separately. + key = args + if kwds: + key += kwd_mark + for item in kwds.items(): + key += item + if typed: + key += tuple(type(v) for v in args) + if kwds: + key += tuple(type(v) for v in kwds.values()) + elif len(key) == 1 and type(key[0]) in fasttypes: + return key[0] + return _HashedSeq(key) + +def lru_cache(maxsize=128, typed=False): + """Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) + with f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) + + """ + + # Users should only access the lru_cache through its public API: + # cache_info, cache_clear, and f.__wrapped__ + # The internals of the lru_cache are encapsulated for thread safety and + # to allow the implementation to change (including a possible C version). + + if isinstance(maxsize, int): + # Negative maxsize is treated as 0 + if maxsize < 0: + maxsize = 0 + elif callable(maxsize) and isinstance(typed, bool): + # The user_function was passed in directly via the maxsize argument + user_function, maxsize = maxsize, 128 + wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) + wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed} + return update_wrapper(wrapper, user_function) + elif maxsize is not None: + raise TypeError( + 'Expected first argument to be an integer, a callable, or None') + + def decorating_function(user_function): + wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) + wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed} + return update_wrapper(wrapper, user_function) + + return decorating_function + +def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo): + # Constants shared by all lru cache instances: + sentinel = object() # unique object used to signal cache misses + make_key = _make_key # build a key from the function arguments + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields + + cache = {} + hits = misses = 0 + full = False + cache_get = cache.get # bound method to lookup a key or return None + cache_len = cache.__len__ # get cache size without calling len() + lock = RLock() # because linkedlist updates aren't threadsafe + root = [] # root of the circular doubly linked list + root[:] = [root, root, None, None] # initialize by pointing to self + + if maxsize == 0: + + def wrapper(*args, **kwds): + # No caching -- just a statistics update + nonlocal misses + misses += 1 + result = user_function(*args, **kwds) + return result + + elif maxsize is None: + + def wrapper(*args, **kwds): + # Simple caching without ordering or size limit + nonlocal hits, misses + key = make_key(args, kwds, typed) + result = cache_get(key, sentinel) + if result is not sentinel: + hits += 1 + return result + misses += 1 + result = user_function(*args, **kwds) + cache[key] = result + return result + + else: + + def wrapper(*args, **kwds): + 
# Size limited caching that tracks accesses by recency + nonlocal root, hits, misses, full + key = make_key(args, kwds, typed) + with lock: + link = cache_get(key) + if link is not None: + # Move the link to the front of the circular queue + link_prev, link_next, _key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + hits += 1 + return result + misses += 1 + result = user_function(*args, **kwds) + with lock: + if key in cache: + # Getting here means that this same key was added to the + # cache while the lock was released. Since the link + # update is already done, we need only return the + # computed result and update the count of misses. + pass + elif full: + # Use the old root to store the new key and result. + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + # Empty the oldest link and make it the new root. + # Keep a reference to the old key and old result to + # prevent their ref counts from going to zero during the + # update. That will prevent potentially arbitrary object + # clean-up code (i.e. __del__) from running while we're + # still adjusting the links. + root = oldroot[NEXT] + oldkey = root[KEY] + oldresult = root[RESULT] + root[KEY] = root[RESULT] = None + # Now update the cache dictionary. + del cache[oldkey] + # Save the potentially reentrant cache[key] assignment + # for last, after the root and links have been put in + # a consistent state. + cache[key] = oldroot + else: + # Put result in a new link at the front of the queue. + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + # Use the cache_len bound method instead of the len() function + # which could potentially be wrapped in an lru_cache itself. + full = (cache_len() >= maxsize) + return result + + def cache_info(): + """Report cache statistics""" + with lock: + return _CacheInfo(hits, misses, maxsize, cache_len()) + + def cache_clear(): + """Clear the cache and cache statistics""" + nonlocal hits, misses, full + with lock: + cache.clear() + root[:] = [root, root, None, None] + hits = misses = 0 + full = False + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return wrapper + +try: + from _functools import _lru_cache_wrapper +except ImportError: + pass + + +################################################################################ +### cache -- simplified access to the infinity cache +################################################################################ + +def cache(user_function, /): + 'Simple lightweight unbounded cache. Sometimes called "memoize".' + return lru_cache(maxsize=None)(user_function) + + +################################################################################ +### singledispatch() - single-dispatch generic function decorator +################################################################################ + +def _c3_merge(sequences): + """Merges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from https://www.python.org/download/releases/2.3/mro/. 
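+
+    For example (illustrative), for class C(A, B) the merge of
+    [[C], [A, object], [B, object], [A, B]] is [C, A, B, object].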
+ + """ + result = [] + while True: + sequences = [s for s in sequences if s] # purge empty sequences + if not sequences: + return result + for s1 in sequences: # find merge candidates among seq heads + candidate = s1[0] + for s2 in sequences: + if candidate in s2[1:]: + candidate = None + break # reject the current head, it appears later + else: + break + if candidate is None: + raise RuntimeError("Inconsistent hierarchy") + result.append(candidate) + # remove the chosen candidate + for seq in sequences: + if seq[0] == candidate: + del seq[0] + +def _c3_mro(cls, abcs=None): + """Computes the method resolution order using extended C3 linearization. + + If no *abcs* are given, the algorithm works exactly like the built-in C3 + linearization used for method resolution. + + If given, *abcs* is a list of abstract base classes that should be inserted + into the resulting MRO. Unrelated ABCs are ignored and don't end up in the + result. The algorithm inserts ABCs where their functionality is introduced, + i.e. issubclass(cls, abc) returns True for the class itself but returns + False for all its direct base classes. Implicit ABCs for a given class + (either registered or inferred from the presence of a special method like + __len__) are inserted directly after the last ABC explicitly listed in the + MRO of said class. If two implicit ABCs end up next to each other in the + resulting MRO, their ordering depends on the order of types in *abcs*. + + """ + for i, base in enumerate(reversed(cls.__bases__)): + if hasattr(base, '__abstractmethods__'): + boundary = len(cls.__bases__) - i + break # Bases up to the last explicit ABC are considered first. + else: + boundary = 0 + abcs = list(abcs) if abcs else [] + explicit_bases = list(cls.__bases__[:boundary]) + abstract_bases = [] + other_bases = list(cls.__bases__[boundary:]) + for base in abcs: + if issubclass(cls, base) and not any( + issubclass(b, base) for b in cls.__bases__ + ): + # If *cls* is the class that introduces behaviour described by + # an ABC *base*, insert said ABC to its MRO. + abstract_bases.append(base) + for base in abstract_bases: + abcs.remove(base) + explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] + abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] + other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] + return _c3_merge( + [[cls]] + + explicit_c3_mros + abstract_c3_mros + other_c3_mros + + [explicit_bases] + [abstract_bases] + [other_bases] + ) + +def _compose_mro(cls, types): + """Calculates the method resolution order for a given class *cls*. + + Includes relevant abstract base classes (with their respective bases) from + the *types* iterable. Uses a modified C3 linearization algorithm. + + """ + bases = set(cls.__mro__) + # Remove entries which are already present in the __mro__ or unrelated. + def is_related(typ): + return (typ not in bases and hasattr(typ, '__mro__') + and not isinstance(typ, GenericAlias) + and issubclass(cls, typ)) + types = [n for n in types if is_related(n)] + # Remove entries which are strict bases of other entries (they will end up + # in the MRO anyway. + def is_strict_base(typ): + for other in types: + if typ != other and typ in other.__mro__: + return True + return False + types = [n for n in types if not is_strict_base(n)] + # Subclasses of the ABCs in *types* which are also implemented by + # *cls* can be used to stabilize ABC ordering. 
+ type_set = set(types) + mro = [] + for typ in types: + found = [] + for sub in typ.__subclasses__(): + if sub not in bases and issubclass(cls, sub): + found.append([s for s in sub.__mro__ if s in type_set]) + if not found: + mro.append(typ) + continue + # Favor subclasses with the biggest number of useful bases + found.sort(key=len, reverse=True) + for sub in found: + for subcls in sub: + if subcls not in mro: + mro.append(subcls) + return _c3_mro(cls, abcs=mro) + +def _find_impl(cls, registry): + """Returns the best matching implementation from *registry* for type *cls*. + + Where there is no registered implementation for a specific type, its method + resolution order is used to find a more generic implementation. + + Note: if *registry* does not contain an implementation for the base + *object* type, this function may return None. + + """ + mro = _compose_mro(cls, registry.keys()) + match = None + for t in mro: + if match is not None: + # If *match* is an implicit ABC but there is another unrelated, + # equally matching implicit ABC, refuse the temptation to guess. + if (t in registry and t not in cls.__mro__ + and match not in cls.__mro__ + and not issubclass(match, t)): + raise RuntimeError("Ambiguous dispatch: {} or {}".format( + match, t)) + break + if t in registry: + match = t + return registry.get(match) + +def singledispatch(func): + """Single-dispatch generic function decorator. + + Transforms a function into a generic function, which can have different + behaviours depending upon the type of its first argument. The decorated + function acts as the default implementation, and additional + implementations can be registered using the register() attribute of the + generic function. + """ + # There are many programs that use functools without singledispatch, so we + # trade-off making singledispatch marginally slower for the benefit of + # making start-up of such applications slightly faster. + import types, weakref + + registry = {} + dispatch_cache = weakref.WeakKeyDictionary() + cache_token = None + + def dispatch(cls): + """generic_func.dispatch(cls) -> + + Runs the dispatch algorithm to return the best available implementation + for the given *cls* registered on *generic_func*. + + """ + nonlocal cache_token + if cache_token is not None: + current_token = get_cache_token() + if cache_token != current_token: + dispatch_cache.clear() + cache_token = current_token + try: + impl = dispatch_cache[cls] + except KeyError: + try: + impl = registry[cls] + except KeyError: + impl = _find_impl(cls, registry) + dispatch_cache[cls] = impl + return impl + + def _is_valid_dispatch_type(cls): + return isinstance(cls, type) and not isinstance(cls, GenericAlias) + + def register(cls, func=None): + """generic_func.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_func*. + + """ + nonlocal cache_token + if _is_valid_dispatch_type(cls): + if func is None: + return lambda f: register(cls, f) + else: + if func is not None: + raise TypeError( + f"Invalid first argument to `register()`. " + f"{cls!r} is not a class." + ) + ann = getattr(cls, '__annotations__', {}) + if not ann: + raise TypeError( + f"Invalid first argument to `register()`: {cls!r}. " + f"Use either `@register(some_class)` or plain `@register` " + f"on an annotated function." 
+ ) + func = cls + + # only import typing if annotation parsing is necessary + from typing import get_type_hints + argname, cls = next(iter(get_type_hints(func).items())) + if not _is_valid_dispatch_type(cls): + raise TypeError( + f"Invalid annotation for {argname!r}. " + f"{cls!r} is not a class." + ) + + registry[cls] = func + if cache_token is None and hasattr(cls, '__abstractmethods__'): + cache_token = get_cache_token() + dispatch_cache.clear() + return func + + def wrapper(*args, **kw): + if not args: + raise TypeError(f'{funcname} requires at least ' + '1 positional argument') + + return dispatch(args[0].__class__)(*args, **kw) + + funcname = getattr(func, '__name__', 'singledispatch function') + registry[object] = func + wrapper.register = register + wrapper.dispatch = dispatch + wrapper.registry = types.MappingProxyType(registry) + wrapper._clear_cache = dispatch_cache.clear + update_wrapper(wrapper, func) + return wrapper + + +# Descriptor version +class singledispatchmethod: + """Single-dispatch generic method descriptor. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. + """ + + def __init__(self, func): + if not callable(func) and not hasattr(func, "__get__"): + raise TypeError(f"{func!r} is not callable or a descriptor") + + self.dispatcher = singledispatch(func) + self.func = func + + def register(self, cls, method=None): + """generic_method.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_method*. + """ + return self.dispatcher.register(cls, func=method) + + def __get__(self, obj, cls=None): + def _method(*args, **kwargs): + method = self.dispatcher.dispatch(args[0].__class__) + return method.__get__(obj, cls)(*args, **kwargs) + + _method.__isabstractmethod__ = self.__isabstractmethod__ + _method.register = self.register + update_wrapper(_method, self.func) + return _method + + @property + def __isabstractmethod__(self): + return getattr(self.func, '__isabstractmethod__', False) + + +################################################################################ +### cached_property() - computed once per instance, cached as attribute +################################################################################ + +_NOT_FOUND = object() + + +class cached_property: + def __init__(self, func): + self.func = func + self.attrname = None + self.__doc__ = func.__doc__ + self.lock = RLock() + + def __set_name__(self, owner, name): + if self.attrname is None: + self.attrname = name + elif name != self.attrname: + raise TypeError( + "Cannot assign the same cached_property to two different names " + f"({self.attrname!r} and {name!r})." + ) + + def __get__(self, instance, owner=None): + if instance is None: + return self + if self.attrname is None: + raise TypeError( + "Cannot use cached_property instance without calling __set_name__ on it.") + try: + cache = instance.__dict__ + except AttributeError: # not all objects have __dict__ (e.g. class defines slots) + msg = ( + f"No '__dict__' attribute on {type(instance).__name__!r} " + f"instance to cache {self.attrname!r} property." 
+            )
+            raise TypeError(msg) from None
+        val = cache.get(self.attrname, _NOT_FOUND)
+        if val is _NOT_FOUND:
+            with self.lock:
+                # check if another thread filled cache while we awaited lock
+                val = cache.get(self.attrname, _NOT_FOUND)
+                if val is _NOT_FOUND:
+                    val = self.func(instance)
+                    try:
+                        cache[self.attrname] = val
+                    except TypeError:
+                        msg = (
+                            f"The '__dict__' attribute on {type(instance).__name__!r} instance "
+                            f"does not support item assignment for caching {self.attrname!r} property."
+                        )
+                        raise TypeError(msg) from None
+        return val
+
+    __class_getitem__ = classmethod(GenericAlias)
diff --git a/pllava/lib/python3.10/getopt.py b/pllava/lib/python3.10/getopt.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d4cab1bac360dda9d49dd8eef258f23dc252a2b
--- /dev/null
+++ b/pllava/lib/python3.10/getopt.py
@@ -0,0 +1,215 @@
+"""Parser for command line options.
+
+This module helps scripts to parse the command line arguments in
+sys.argv.  It supports the same conventions as the Unix getopt()
+function (including the special meanings of arguments of the form `-'
+and `--').  Long options similar to those supported by GNU software
+may be used as well via an optional third argument.  This module
+provides two functions and an exception:
+
+getopt() -- Parse command line options
+gnu_getopt() -- Like getopt(), but allow option and non-option arguments
+to be intermixed.
+GetoptError -- exception (class) raised with 'opt' attribute, which is the
+option involved with the exception.
+"""
+
+# Long option support added by Lars Wirzenius <liw@iki.fi>.
+#
+# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
+# to class-based exceptions.
+#
+# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
+#
+# TODO for gnu_getopt():
+#
+# - GNU getopt_long_only mechanism
+# - allow the caller to specify ordering
+# - RETURN_IN_ORDER option
+# - GNU extension with '-' as first character of option string
+# - optional arguments, specified by double colons
+# - an option string with a W followed by semicolon should
+#   treat "-W foo" as "--foo"
+
+__all__ = ["GetoptError","error","getopt","gnu_getopt"]
+
+import os
+try:
+    from gettext import gettext as _
+except ImportError:
+    # Bootstrapping Python: gettext's dependencies not built yet
+    def _(s): return s
+
+class GetoptError(Exception):
+    opt = ''
+    msg = ''
+    def __init__(self, msg, opt=''):
+        self.msg = msg
+        self.opt = opt
+        Exception.__init__(self, msg, opt)
+
+    def __str__(self):
+        return self.msg
+
+error = GetoptError # backward compatibility
+
+def getopt(args, shortopts, longopts = []):
+    """getopt(args, options[, long_options]) -> opts, args
+
+    Parses command line options and parameter list.  args is the
+    argument list to be parsed, without the leading reference to the
+    running program.  Typically, this means "sys.argv[1:]".  shortopts
+    is the string of option letters that the script wants to
+    recognize, with options that require an argument followed by a
+    colon (i.e., the same format that Unix getopt() uses).  If
+    specified, longopts is a list of strings with the names of the
+    long options which should be supported.  The leading '--'
+    characters should not be included in the option name.  Options
+    which require an argument should be followed by an equal sign
+    ('=').
+
+    The return value consists of two elements: the first is a list of
+    (option, value) pairs; the second is the list of program arguments
+    left after the option list was stripped (this is a trailing slice
+    of the first argument). 
Each option-and-value pair returned has + the option as its first element, prefixed with a hyphen (e.g., + '-x'), and the option argument as its second element, or an empty + string if the option has no argument. The options occur in the + list in the same order in which they were found, thus allowing + multiple occurrences. Long and short options may be mixed. + + """ + + opts = [] + if type(longopts) == type(""): + longopts = [longopts] + else: + longopts = list(longopts) + while args and args[0].startswith('-') and args[0] != '-': + if args[0] == '--': + args = args[1:] + break + if args[0].startswith('--'): + opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) + else: + opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) + + return opts, args + +def gnu_getopt(args, shortopts, longopts = []): + """getopt(args, options[, long_options]) -> opts, args + + This function works like getopt(), except that GNU style scanning + mode is used by default. This means that option and non-option + arguments may be intermixed. The getopt() function stops + processing options as soon as a non-option argument is + encountered. + + If the first character of the option string is `+', or if the + environment variable POSIXLY_CORRECT is set, then option + processing stops as soon as a non-option argument is encountered. + + """ + + opts = [] + prog_args = [] + if isinstance(longopts, str): + longopts = [longopts] + else: + longopts = list(longopts) + + # Allow options after non-option arguments? + if shortopts.startswith('+'): + shortopts = shortopts[1:] + all_options_first = True + elif os.environ.get("POSIXLY_CORRECT"): + all_options_first = True + else: + all_options_first = False + + while args: + if args[0] == '--': + prog_args += args[1:] + break + + if args[0][:2] == '--': + opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) + elif args[0][:1] == '-' and args[0] != '-': + opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) + else: + if all_options_first: + prog_args += args + break + else: + prog_args.append(args[0]) + args = args[1:] + + return opts, prog_args + +def do_longs(opts, opt, longopts, args): + try: + i = opt.index('=') + except ValueError: + optarg = None + else: + opt, optarg = opt[:i], opt[i+1:] + + has_arg, opt = long_has_args(opt, longopts) + if has_arg: + if optarg is None: + if not args: + raise GetoptError(_('option --%s requires argument') % opt, opt) + optarg, args = args[0], args[1:] + elif optarg is not None: + raise GetoptError(_('option --%s must not have an argument') % opt, opt) + opts.append(('--' + opt, optarg or '')) + return opts, args + +# Return: +# has_arg? +# full option name +def long_has_args(opt, longopts): + possibilities = [o for o in longopts if o.startswith(opt)] + if not possibilities: + raise GetoptError(_('option --%s not recognized') % opt, opt) + # Is there an exact match? + if opt in possibilities: + return False, opt + elif opt + '=' in possibilities: + return True, opt + # No exact match, so better be unique. 
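+    # E.g. (illustrative): with longopts ['alpha=', 'beta'], opt 'al'
+    # uniquely matches 'alpha=' and returns (True, 'alpha').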
+ if len(possibilities) > 1: + # XXX since possibilities contains all valid continuations, might be + # nice to work them into the error msg + raise GetoptError(_('option --%s not a unique prefix') % opt, opt) + assert len(possibilities) == 1 + unique_match = possibilities[0] + has_arg = unique_match.endswith('=') + if has_arg: + unique_match = unique_match[:-1] + return has_arg, unique_match + +def do_shorts(opts, optstring, shortopts, args): + while optstring != '': + opt, optstring = optstring[0], optstring[1:] + if short_has_arg(opt, shortopts): + if optstring == '': + if not args: + raise GetoptError(_('option -%s requires argument') % opt, + opt) + optstring, args = args[0], args[1:] + optarg, optstring = optstring, '' + else: + optarg = '' + opts.append(('-' + opt, optarg)) + return opts, args + +def short_has_arg(opt, shortopts): + for i in range(len(shortopts)): + if opt == shortopts[i] != ':': + return shortopts.startswith(':', i+1) + raise GetoptError(_('option -%s not recognized') % opt, opt) + +if __name__ == '__main__': + import sys + print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"])) diff --git a/pllava/lib/python3.10/getpass.py b/pllava/lib/python3.10/getpass.py new file mode 100644 index 0000000000000000000000000000000000000000..6970d8adfbab3673de681ef05059732dffa8f64c --- /dev/null +++ b/pllava/lib/python3.10/getpass.py @@ -0,0 +1,185 @@ +"""Utilities to get a password and/or the current user name. + +getpass(prompt[, stream]) - Prompt for a password, with echo turned off. +getuser() - Get the user name from the environment or password database. + +GetPassWarning - This UserWarning is issued when getpass() cannot prevent + echoing of the password contents while reading. + +On Windows, the msvcrt module will be used. + +""" + +# Authors: Piers Lauder (original) +# Guido van Rossum (Windows support and cleanup) +# Gregory P. Smith (tty support & GetPassWarning) + +import contextlib +import io +import os +import sys +import warnings + +__all__ = ["getpass","getuser","GetPassWarning"] + + +class GetPassWarning(UserWarning): pass + + +def unix_getpass(prompt='Password: ', stream=None): + """Prompt for a password, with echo turned off. + + Args: + prompt: Written on stream to ask for the input. Default: 'Password: ' + stream: A writable file object to display the prompt. Defaults to + the tty. If no tty is available defaults to sys.stderr. + Returns: + The seKr3t input. + Raises: + EOFError: If our input tty or stdin was closed. + GetPassWarning: When we were unable to turn echo off on the input. + + Always restores terminal settings before returning. + """ + passwd = None + with contextlib.ExitStack() as stack: + try: + # Always try reading and writing directly on the tty first. + fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY) + tty = io.FileIO(fd, 'w+') + stack.enter_context(tty) + input = io.TextIOWrapper(tty) + stack.enter_context(input) + if not stream: + stream = input + except OSError: + # If that fails, see if stdin can be controlled. 
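+            # (E.g., illustrative: /dev/tty cannot be opened when the
+            # process has no controlling terminal, as for daemons.)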
+ stack.close() + try: + fd = sys.stdin.fileno() + except (AttributeError, ValueError): + fd = None + passwd = fallback_getpass(prompt, stream) + input = sys.stdin + if not stream: + stream = sys.stderr + + if fd is not None: + try: + old = termios.tcgetattr(fd) # a copy to save + new = old[:] + new[3] &= ~termios.ECHO # 3 == 'lflags' + tcsetattr_flags = termios.TCSAFLUSH + if hasattr(termios, 'TCSASOFT'): + tcsetattr_flags |= termios.TCSASOFT + try: + termios.tcsetattr(fd, tcsetattr_flags, new) + passwd = _raw_input(prompt, stream, input=input) + finally: + termios.tcsetattr(fd, tcsetattr_flags, old) + stream.flush() # issue7208 + except termios.error: + if passwd is not None: + # _raw_input succeeded. The final tcsetattr failed. Reraise + # instead of leaving the terminal in an unknown state. + raise + # We can't control the tty or stdin. Give up and use normal IO. + # fallback_getpass() raises an appropriate warning. + if stream is not input: + # clean up unused file objects before blocking + stack.close() + passwd = fallback_getpass(prompt, stream) + + stream.write('\n') + return passwd + + +def win_getpass(prompt='Password: ', stream=None): + """Prompt for password with echo off, using Windows getwch().""" + if sys.stdin is not sys.__stdin__: + return fallback_getpass(prompt, stream) + + for c in prompt: + msvcrt.putwch(c) + pw = "" + while 1: + c = msvcrt.getwch() + if c == '\r' or c == '\n': + break + if c == '\003': + raise KeyboardInterrupt + if c == '\b': + pw = pw[:-1] + else: + pw = pw + c + msvcrt.putwch('\r') + msvcrt.putwch('\n') + return pw + + +def fallback_getpass(prompt='Password: ', stream=None): + warnings.warn("Can not control echo on the terminal.", GetPassWarning, + stacklevel=2) + if not stream: + stream = sys.stderr + print("Warning: Password input may be echoed.", file=stream) + return _raw_input(prompt, stream) + + +def _raw_input(prompt="", stream=None, input=None): + # This doesn't save the string in the GNU readline history. + if not stream: + stream = sys.stderr + if not input: + input = sys.stdin + prompt = str(prompt) + if prompt: + try: + stream.write(prompt) + except UnicodeEncodeError: + # Use replace error handler to get as much as possible printed. + prompt = prompt.encode(stream.encoding, 'replace') + prompt = prompt.decode(stream.encoding) + stream.write(prompt) + stream.flush() + # NOTE: The Python C API calls flockfile() (and unlock) during readline. + line = input.readline() + if not line: + raise EOFError + if line[-1] == '\n': + line = line[:-1] + return line + + +def getuser(): + """Get the username from the environment or password database. + + First try various environment variables, then the password + database. This works on Windows as long as USERNAME is set. 
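+
+    For example (illustrative; the result depends on the environment):
+
+        >>> getpass.getuser()
+        'alice'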
+ + """ + + for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'): + user = os.environ.get(name) + if user: + return user + + # If this fails, the exception will "explain" why + import pwd + return pwd.getpwuid(os.getuid())[0] + +# Bind the name getpass to the appropriate function +try: + import termios + # it's possible there is an incompatible termios from the + # McMillan Installer, make sure we have a UNIX-compatible termios + termios.tcgetattr, termios.tcsetattr +except (ImportError, AttributeError): + try: + import msvcrt + except ImportError: + getpass = fallback_getpass + else: + getpass = win_getpass +else: + getpass = unix_getpass diff --git a/pllava/lib/python3.10/gettext.py b/pllava/lib/python3.10/gettext.py new file mode 100644 index 0000000000000000000000000000000000000000..77b67aef4204c909931d6eb1f75854324ef796a9 --- /dev/null +++ b/pllava/lib/python3.10/gettext.py @@ -0,0 +1,788 @@ +"""Internationalization and localization support. + +This module provides internationalization (I18N) and localization (L10N) +support for your Python programs by providing an interface to the GNU gettext +message catalog library. + +I18N refers to the operation by which a program is made aware of multiple +languages. L10N refers to the adaptation of your program, once +internationalized, to the local language and cultural habits. + +""" + +# This module represents the integration of work, contributions, feedback, and +# suggestions from the following people: +# +# Martin von Loewis, who wrote the initial implementation of the underlying +# C-based libintlmodule (later renamed _gettext), along with a skeletal +# gettext.py implementation. +# +# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule, +# which also included a pure-Python implementation to read .mo files if +# intlmodule wasn't available. +# +# James Henstridge, who also wrote a gettext.py module, which has some +# interesting, but currently unsupported experimental features: the notion of +# a Catalog class and instances, and the ability to add to a catalog file via +# a Python API. +# +# Barry Warsaw integrated these modules, wrote the .install() API and code, +# and conformed all C and Python code to Python's coding standards. +# +# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this +# module. +# +# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs. +# +# TODO: +# - Lazy loading of .mo files. Currently the entire catalog is loaded into +# memory, but that's probably bad for large translated programs. Instead, +# the lexical sort of original strings in GNU .mo files should be exploited +# to do binary searches and lazy initializations. Or you might want to use +# the undocumented double-hash algorithm for .mo files with hash tables, but +# you'll need to study the GNU gettext code to do this. +# +# - Support Solaris .mo file formats. Unfortunately, we've been unable to +# find this format documented anywhere. + + +import os +import re +import sys + + +__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog', + 'find', 'translation', 'install', 'textdomain', 'bindtextdomain', + 'bind_textdomain_codeset', + 'dgettext', 'dngettext', 'gettext', 'lgettext', 'ldgettext', + 'ldngettext', 'lngettext', 'ngettext', + 'pgettext', 'dpgettext', 'npgettext', 'dnpgettext', + ] + +_default_localedir = os.path.join(sys.base_prefix, 'share', 'locale') + +# Expression parsing for plural form selection. +# +# The gettext library supports a small subset of C syntax. 
The only
+# incompatible difference is that integer literals starting with zero are
+# decimal.
+#
+# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms
+# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y
+
+_token_pattern = re.compile(r"""
+        (?P<WHITESPACES>[ \t]+)                    | # spaces and horizontal tabs
+        (?P<NUMBER>[0-9]+\b)                       | # decimal integer
+        (?P<NAME>n\b)                              | # only n is allowed
+        (?P<PARENTHESIS>[()])                      |
+        (?P<OPERATOR>[-*/%+?:]|[><!]=?|==|&&|\|\|) | # !, *, /, %, +, -, <, >,
+                                                     # <=, >=, ==, !=, &&, ||,
+                                                     # ? :
+                                                     # unary and bitwise ops
+                                                     # not allowed
+        (?P<INVALID>\w+|.)                         # invalid token
+    """, re.VERBOSE|re.DOTALL)
+
+def _tokenize(plural):
+    for mo in re.finditer(_token_pattern, plural):
+        kind = mo.lastgroup
+        if kind == 'WHITESPACES':
+            continue
+        value = mo.group(kind)
+        if kind == 'INVALID':
+            raise ValueError('invalid token in plural form: %s' % value)
+        yield value
+    yield ''
+
+def _error(value):
+    if value:
+        return ValueError('unexpected token in plural form: %s' % value)
+    else:
+        return ValueError('unexpected end of plural form')
+
+_binary_ops = (
+    ('||',),
+    ('&&',),
+    ('==', '!='),
+    ('<', '>', '<=', '>='),
+    ('+', '-'),
+    ('*', '/', '%'),
+)
+_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops}
+_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'}
+
+def _parse(tokens, priority=-1):
+    result = ''
+    nexttok = next(tokens)
+    while nexttok == '!':
+        result += 'not '
+        nexttok = next(tokens)
+
+    if nexttok == '(':
+        sub, nexttok = _parse(tokens)
+        result = '%s(%s)' % (result, sub)
+        if nexttok != ')':
+            raise ValueError('unbalanced parenthesis in plural form')
+    elif nexttok == 'n':
+        result = '%s%s' % (result, nexttok)
+    else:
+        try:
+            value = int(nexttok, 10)
+        except ValueError:
+            raise _error(nexttok) from None
+        result = '%s%d' % (result, value)
+    nexttok = next(tokens)
+
+    j = 100
+    while nexttok in _binary_ops:
+        i = _binary_ops[nexttok]
+        if i < priority:
+            break
+        # Break chained comparisons
+        if i in (3, 4) and j in (3, 4):  # '==', '!=', '<', '>', '<=', '>='
+            result = '(%s)' % result
+        # Replace some C operators by their Python equivalents
+        op = _c2py_ops.get(nexttok, nexttok)
+        right, nexttok = _parse(tokens, i + 1)
+        result = '%s %s %s' % (result, op, right)
+        j = i
+    if j == priority == 4:  # '<', '>', '<=', '>='
+        result = '(%s)' % result
+
+    if nexttok == '?' and priority <= 0:
+        if_true, nexttok = _parse(tokens, 0)
+        if nexttok != ':':
+            raise _error(nexttok)
+        if_false, nexttok = _parse(tokens)
+        result = '%s if %s else %s' % (if_true, result, if_false)
+        if priority == 0:
+            result = '(%s)' % result
+
+    return result, nexttok
+
+def _as_int(n):
+    try:
+        i = round(n)
+    except TypeError:
+        raise TypeError('Plural value must be an integer, got %s' %
+                        (n.__class__.__name__,)) from None
+    import warnings
+    warnings.warn('Plural value must be an integer, got %s' %
+                  (n.__class__.__name__,),
+                  DeprecationWarning, 4)
+    return n
+
+def c2py(plural):
+    """Gets a C expression as used in PO files for plural forms and returns a
+    Python function that implements an equivalent expression.
+    """
+
+    if len(plural) > 1000:
+        raise ValueError('plural form expression is too long')
+    try:
+        result, nexttok = _parse(_tokenize(plural))
+        if nexttok:
+            raise _error(nexttok)
+
+        depth = 0
+        for c in result:
+            if c == '(':
+                depth += 1
+                if depth > 20:
+                    # Python compiler limit is about 90.
+                    # The most complex example has 2.
+ raise ValueError('plural form expression is too complex') + elif c == ')': + depth -= 1 + + ns = {'_as_int': _as_int} + exec('''if True: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + ''' % result, ns) + return ns['func'] + except RecursionError: + # Recursion error can be raised in _parse() or exec(). + raise ValueError('plural form expression is too complex') + + +def _expand_lang(loc): + import locale + loc = locale.normalize(loc) + COMPONENT_CODESET = 1 << 0 + COMPONENT_TERRITORY = 1 << 1 + COMPONENT_MODIFIER = 1 << 2 + # split up the locale into its base components + mask = 0 + pos = loc.find('@') + if pos >= 0: + modifier = loc[pos:] + loc = loc[:pos] + mask |= COMPONENT_MODIFIER + else: + modifier = '' + pos = loc.find('.') + if pos >= 0: + codeset = loc[pos:] + loc = loc[:pos] + mask |= COMPONENT_CODESET + else: + codeset = '' + pos = loc.find('_') + if pos >= 0: + territory = loc[pos:] + loc = loc[:pos] + mask |= COMPONENT_TERRITORY + else: + territory = '' + language = loc + ret = [] + for i in range(mask+1): + if not (i & ~mask): # if all components for this combo exist ... + val = language + if i & COMPONENT_TERRITORY: val += territory + if i & COMPONENT_CODESET: val += codeset + if i & COMPONENT_MODIFIER: val += modifier + ret.append(val) + ret.reverse() + return ret + + + +class NullTranslations: + def __init__(self, fp=None): + self._info = {} + self._charset = None + self._output_charset = None + self._fallback = None + if fp is not None: + self._parse(fp) + + def _parse(self, fp): + pass + + def add_fallback(self, fallback): + if self._fallback: + self._fallback.add_fallback(fallback) + else: + self._fallback = fallback + + def gettext(self, message): + if self._fallback: + return self._fallback.gettext(message) + return message + + def lgettext(self, message): + import warnings + warnings.warn('lgettext() is deprecated, use gettext() instead', + DeprecationWarning, 2) + import locale + if self._fallback: + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', r'.*\blgettext\b.*', + DeprecationWarning) + return self._fallback.lgettext(message) + if self._output_charset: + return message.encode(self._output_charset) + return message.encode(locale.getpreferredencoding()) + + def ngettext(self, msgid1, msgid2, n): + if self._fallback: + return self._fallback.ngettext(msgid1, msgid2, n) + if n == 1: + return msgid1 + else: + return msgid2 + + def lngettext(self, msgid1, msgid2, n): + import warnings + warnings.warn('lngettext() is deprecated, use ngettext() instead', + DeprecationWarning, 2) + import locale + if self._fallback: + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', r'.*\blngettext\b.*', + DeprecationWarning) + return self._fallback.lngettext(msgid1, msgid2, n) + if n == 1: + tmsg = msgid1 + else: + tmsg = msgid2 + if self._output_charset: + return tmsg.encode(self._output_charset) + return tmsg.encode(locale.getpreferredencoding()) + + def pgettext(self, context, message): + if self._fallback: + return self._fallback.pgettext(context, message) + return message + + def npgettext(self, context, msgid1, msgid2, n): + if self._fallback: + return self._fallback.npgettext(context, msgid1, msgid2, n) + if n == 1: + return msgid1 + else: + return msgid2 + + def info(self): + return self._info + + def charset(self): + return self._charset + + def output_charset(self): + import warnings + warnings.warn('output_charset() is deprecated', + DeprecationWarning, 2) + return self._output_charset + + def 
set_output_charset(self, charset):
+        import warnings
+        warnings.warn('set_output_charset() is deprecated',
+                      DeprecationWarning, 2)
+        self._output_charset = charset
+
+    def install(self, names=None):
+        import builtins
+        builtins.__dict__['_'] = self.gettext
+        if names is not None:
+            allowed = {'gettext', 'lgettext', 'lngettext',
+                       'ngettext', 'npgettext', 'pgettext'}
+            for name in allowed & set(names):
+                builtins.__dict__[name] = getattr(self, name)
+
+
+class GNUTranslations(NullTranslations):
+    # Magic number of .mo files
+    LE_MAGIC = 0x950412de
+    BE_MAGIC = 0xde120495
+
+    # The encoding of a msgctxt and a msgid in a .mo file is
+    # msgctxt + "\x04" + msgid (gettext version >= 0.15)
+    CONTEXT = "%s\x04%s"
+
+    # Acceptable .mo versions
+    VERSIONS = (0, 1)
+
+    def _get_versions(self, version):
+        """Returns a tuple of major version, minor version"""
+        return (version >> 16, version & 0xffff)
+
+    def _parse(self, fp):
+        """Override this method to support alternative .mo formats."""
+        # Delay struct import for speeding up gettext import when .mo files
+        # are not used.
+        from struct import unpack
+        filename = getattr(fp, 'name', '')
+        # Parse the .mo file header, which consists of 5 little endian 32
+        # bit words.
+        self._catalog = catalog = {}
+        self.plural = lambda n: int(n != 1) # germanic plural by default
+        buf = fp.read()
+        buflen = len(buf)
+        # Are we big endian or little endian?
+        magic = unpack('<I', buf[:4])[0]
+        if magic == self.LE_MAGIC:
+            version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
+            ii = '<II'
+        elif magic == self.BE_MAGIC:
+            version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
+            ii = '>II'
+        else:
+            raise OSError(0, 'Bad magic number', filename)
+
+        major_version, minor_version = self._get_versions(version)
+
+        if major_version not in self.VERSIONS:
+            raise OSError(0, 'Bad version number ' + str(major_version), filename)
+
+        # Now put all messages from the .mo file buffer into the catalog
+        # dictionary.
+        for i in range(0, msgcount):
+            mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
+            mend = moff + mlen
+            tlen, toff = unpack(ii, buf[transidx:transidx+8])
+            tend = toff + tlen
+            if mend < buflen and tend < buflen:
+                msg = buf[moff:mend]
+                tmsg = buf[toff:tend]
+            else:
+                raise OSError(0, 'File is corrupt', filename)
+            # See if we're looking at GNU .mo conventions for metadata
+            if mlen == 0:
+                # Catalog description
+                lastk = None
+                for b_item in tmsg.split(b'\n'):
+                    item = b_item.decode().strip()
+                    if not item:
+                        continue
+                    # Skip over comment lines:
+                    if item.startswith('#-#-#-#-#') and item.endswith('#-#-#-#-#'):
+                        continue
+                    k = v = None
+                    if ':' in item:
+                        k, v = item.split(':', 1)
+                        k = k.strip().lower()
+                        v = v.strip()
+                        self._info[k] = v
+                        lastk = k
+                    elif lastk:
+                        self._info[lastk] += '\n' + item
+                    if k == 'content-type':
+                        self._charset = v.split('charset=')[1]
+                    elif k == 'plural-forms':
+                        v = v.split(';')
+                        plural = v[1].split('plural=')[1]
+                        self.plural = c2py(plural)
+            # Note: we unconditionally convert both msgids and msgstrs to
+            # Unicode using the character encoding specified in the charset
+            # parameter of the Content-Type header.  The gettext documentation
+            # strongly encourages msgids to be us-ascii, but some applications
+            # require alternative encodings (e.g. Zope's ZCML and ZPT).  For
+            # traditional gettext applications, the msgid conversion will
+            # cause no problems since us-ascii should always be a subset of
+            # the charset encoding.  We may want to fall back to 8-bit msgids
+            # if the Unicode conversion fails.
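+            # For example (illustrative): msg b'file\x00files' with
+            # tmsg b'Datei\x00Dateien' stores catalog[('file', 0)] ==
+            # 'Datei' and catalog[('file', 1)] == 'Dateien'.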
+ charset = self._charset or 'ascii' + if b'\x00' in msg: + # Plural forms + msgid1, msgid2 = msg.split(b'\x00') + tmsg = tmsg.split(b'\x00') + msgid1 = str(msgid1, charset) + for i, x in enumerate(tmsg): + catalog[(msgid1, i)] = str(x, charset) + else: + catalog[str(msg, charset)] = str(tmsg, charset) + # advance to next entry in the seek tables + masteridx += 8 + transidx += 8 + + def lgettext(self, message): + import warnings + warnings.warn('lgettext() is deprecated, use gettext() instead', + DeprecationWarning, 2) + import locale + missing = object() + tmsg = self._catalog.get(message, missing) + if tmsg is missing: + if self._fallback: + return self._fallback.lgettext(message) + tmsg = message + if self._output_charset: + return tmsg.encode(self._output_charset) + return tmsg.encode(locale.getpreferredencoding()) + + def lngettext(self, msgid1, msgid2, n): + import warnings + warnings.warn('lngettext() is deprecated, use ngettext() instead', + DeprecationWarning, 2) + import locale + try: + tmsg = self._catalog[(msgid1, self.plural(n))] + except KeyError: + if self._fallback: + return self._fallback.lngettext(msgid1, msgid2, n) + if n == 1: + tmsg = msgid1 + else: + tmsg = msgid2 + if self._output_charset: + return tmsg.encode(self._output_charset) + return tmsg.encode(locale.getpreferredencoding()) + + def gettext(self, message): + missing = object() + tmsg = self._catalog.get(message, missing) + if tmsg is missing: + if self._fallback: + return self._fallback.gettext(message) + return message + return tmsg + + def ngettext(self, msgid1, msgid2, n): + try: + tmsg = self._catalog[(msgid1, self.plural(n))] + except KeyError: + if self._fallback: + return self._fallback.ngettext(msgid1, msgid2, n) + if n == 1: + tmsg = msgid1 + else: + tmsg = msgid2 + return tmsg + + def pgettext(self, context, message): + ctxt_msg_id = self.CONTEXT % (context, message) + missing = object() + tmsg = self._catalog.get(ctxt_msg_id, missing) + if tmsg is missing: + if self._fallback: + return self._fallback.pgettext(context, message) + return message + return tmsg + + def npgettext(self, context, msgid1, msgid2, n): + ctxt_msg_id = self.CONTEXT % (context, msgid1) + try: + tmsg = self._catalog[ctxt_msg_id, self.plural(n)] + except KeyError: + if self._fallback: + return self._fallback.npgettext(context, msgid1, msgid2, n) + if n == 1: + tmsg = msgid1 + else: + tmsg = msgid2 + return tmsg + + +# Locate a .mo file using the gettext strategy +def find(domain, localedir=None, languages=None, all=False): + # Get some reasonable defaults for arguments that were not supplied + if localedir is None: + localedir = _default_localedir + if languages is None: + languages = [] + for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'): + val = os.environ.get(envar) + if val: + languages = val.split(':') + break + if 'C' not in languages: + languages.append('C') + # now normalize and expand the languages + nelangs = [] + for lang in languages: + for nelang in _expand_lang(lang): + if nelang not in nelangs: + nelangs.append(nelang) + # select a language + if all: + result = [] + else: + result = None + for lang in nelangs: + if lang == 'C': + break + mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain) + if os.path.exists(mofile): + if all: + result.append(mofile) + else: + return mofile + return result + + + +# a mapping between absolute .mo file path and Translation object +_translations = {} +_unspecified = ['unspecified'] + +def translation(domain, localedir=None, languages=None, + class_=None, 
fallback=False, codeset=_unspecified): + if class_ is None: + class_ = GNUTranslations + mofiles = find(domain, localedir, languages, all=True) + if not mofiles: + if fallback: + return NullTranslations() + from errno import ENOENT + raise FileNotFoundError(ENOENT, + 'No translation file found for domain', domain) + # Avoid opening, reading, and parsing the .mo file after it's been done + # once. + result = None + for mofile in mofiles: + key = (class_, os.path.abspath(mofile)) + t = _translations.get(key) + if t is None: + with open(mofile, 'rb') as fp: + t = _translations.setdefault(key, class_(fp)) + # Copy the translation object to allow setting fallbacks and + # output charset. All other instance data is shared with the + # cached object. + # Delay copy import for speeding up gettext import when .mo files + # are not used. + import copy + t = copy.copy(t) + if codeset is not _unspecified: + import warnings + warnings.warn('parameter codeset is deprecated', + DeprecationWarning, 2) + if codeset: + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', r'.*\bset_output_charset\b.*', + DeprecationWarning) + t.set_output_charset(codeset) + if result is None: + result = t + else: + result.add_fallback(t) + return result + + +def install(domain, localedir=None, codeset=_unspecified, names=None): + t = translation(domain, localedir, fallback=True, codeset=codeset) + t.install(names) + + + +# a mapping b/w domains and locale directories +_localedirs = {} +# a mapping b/w domains and codesets +_localecodesets = {} +# current global domain, `messages' used for compatibility w/ GNU gettext +_current_domain = 'messages' + + +def textdomain(domain=None): + global _current_domain + if domain is not None: + _current_domain = domain + return _current_domain + + +def bindtextdomain(domain, localedir=None): + global _localedirs + if localedir is not None: + _localedirs[domain] = localedir + return _localedirs.get(domain, _default_localedir) + + +def bind_textdomain_codeset(domain, codeset=None): + import warnings + warnings.warn('bind_textdomain_codeset() is deprecated', + DeprecationWarning, 2) + global _localecodesets + if codeset is not None: + _localecodesets[domain] = codeset + return _localecodesets.get(domain) + + +def dgettext(domain, message): + try: + t = translation(domain, _localedirs.get(domain, None)) + except OSError: + return message + return t.gettext(message) + +def ldgettext(domain, message): + import warnings + warnings.warn('ldgettext() is deprecated, use dgettext() instead', + DeprecationWarning, 2) + import locale + codeset = _localecodesets.get(domain) + try: + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', r'.*\bparameter codeset\b.*', + DeprecationWarning) + t = translation(domain, _localedirs.get(domain, None), codeset=codeset) + except OSError: + return message.encode(codeset or locale.getpreferredencoding()) + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', r'.*\blgettext\b.*', + DeprecationWarning) + return t.lgettext(message) + +def dngettext(domain, msgid1, msgid2, n): + try: + t = translation(domain, _localedirs.get(domain, None)) + except OSError: + if n == 1: + return msgid1 + else: + return msgid2 + return t.ngettext(msgid1, msgid2, n) + +def ldngettext(domain, msgid1, msgid2, n): + import warnings + warnings.warn('ldngettext() is deprecated, use dngettext() instead', + DeprecationWarning, 2) + import locale + codeset = _localecodesets.get(domain) + try: + with warnings.catch_warnings(): + 
warnings.filterwarnings('ignore', r'.*\bparameter codeset\b.*', + DeprecationWarning) + t = translation(domain, _localedirs.get(domain, None), codeset=codeset) + except OSError: + if n == 1: + tmsg = msgid1 + else: + tmsg = msgid2 + return tmsg.encode(codeset or locale.getpreferredencoding()) + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', r'.*\blngettext\b.*', + DeprecationWarning) + return t.lngettext(msgid1, msgid2, n) + + +def dpgettext(domain, context, message): + try: + t = translation(domain, _localedirs.get(domain, None)) + except OSError: + return message + return t.pgettext(context, message) + + +def dnpgettext(domain, context, msgid1, msgid2, n): + try: + t = translation(domain, _localedirs.get(domain, None)) + except OSError: + if n == 1: + return msgid1 + else: + return msgid2 + return t.npgettext(context, msgid1, msgid2, n) + + +def gettext(message): + return dgettext(_current_domain, message) + +def lgettext(message): + import warnings + warnings.warn('lgettext() is deprecated, use gettext() instead', + DeprecationWarning, 2) + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', r'.*\bldgettext\b.*', + DeprecationWarning) + return ldgettext(_current_domain, message) + +def ngettext(msgid1, msgid2, n): + return dngettext(_current_domain, msgid1, msgid2, n) + +def lngettext(msgid1, msgid2, n): + import warnings + warnings.warn('lngettext() is deprecated, use ngettext() instead', + DeprecationWarning, 2) + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', r'.*\bldngettext\b.*', + DeprecationWarning) + return ldngettext(_current_domain, msgid1, msgid2, n) + + +def pgettext(context, message): + return dpgettext(_current_domain, context, message) + + +def npgettext(context, msgid1, msgid2, n): + return dnpgettext(_current_domain, context, msgid1, msgid2, n) + + +# dcgettext() has been deemed unnecessary and is not implemented. + +# James Henstridge's Catalog constructor from GNOME gettext. Documented usage +# was: +# +# import gettext +# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR) +# _ = cat.gettext +# print _('Hello World') + +# The resulting catalog object currently don't support access through a +# dictionary API, which was supported (but apparently unused) in GNOME +# gettext. + +Catalog = translation diff --git a/pllava/lib/python3.10/glob.py b/pllava/lib/python3.10/glob.py new file mode 100644 index 0000000000000000000000000000000000000000..9fc08f45df115a0196636e6b3a10d316607b56eb --- /dev/null +++ b/pllava/lib/python3.10/glob.py @@ -0,0 +1,237 @@ +"""Filename globbing utility.""" + +import contextlib +import os +import re +import fnmatch +import itertools +import stat +import sys + +__all__ = ["glob", "iglob", "escape"] + +def glob(pathname, *, root_dir=None, dir_fd=None, recursive=False): + """Return a list of paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + """ + return list(iglob(pathname, root_dir=root_dir, dir_fd=dir_fd, recursive=recursive)) + +def iglob(pathname, *, root_dir=None, dir_fd=None, recursive=False): + """Return an iterator which yields the paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. 
However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + """ + sys.audit("glob.glob", pathname, recursive) + sys.audit("glob.glob/2", pathname, recursive, root_dir, dir_fd) + if root_dir is not None: + root_dir = os.fspath(root_dir) + else: + root_dir = pathname[:0] + it = _iglob(pathname, root_dir, dir_fd, recursive, False) + if not pathname or recursive and _isrecursive(pathname[:2]): + try: + s = next(it) # skip empty string + if s: + it = itertools.chain((s,), it) + except StopIteration: + pass + return it + +def _iglob(pathname, root_dir, dir_fd, recursive, dironly): + dirname, basename = os.path.split(pathname) + if not has_magic(pathname): + assert not dironly + if basename: + if _lexists(_join(root_dir, pathname), dir_fd): + yield pathname + else: + # Patterns ending with a slash should match only directories + if _isdir(_join(root_dir, dirname), dir_fd): + yield pathname + return + if not dirname: + if recursive and _isrecursive(basename): + yield from _glob2(root_dir, basename, dir_fd, dironly) + else: + yield from _glob1(root_dir, basename, dir_fd, dironly) + return + # `os.path.split()` returns the argument itself as a dirname if it is a + # drive or UNC path. Prevent an infinite recursion if a drive or UNC path + # contains magic characters (i.e. r'\\?\C:'). + if dirname != pathname and has_magic(dirname): + dirs = _iglob(dirname, root_dir, dir_fd, recursive, True) + else: + dirs = [dirname] + if has_magic(basename): + if recursive and _isrecursive(basename): + glob_in_dir = _glob2 + else: + glob_in_dir = _glob1 + else: + glob_in_dir = _glob0 + for dirname in dirs: + for name in glob_in_dir(_join(root_dir, dirname), basename, dir_fd, dironly): + yield os.path.join(dirname, name) + +# These 2 helper functions non-recursively glob inside a literal directory. +# They return a list of basenames. _glob1 accepts a pattern while _glob0 +# takes a literal basename (so it only has to check for its existence). + +def _glob1(dirname, pattern, dir_fd, dironly): + names = _listdir(dirname, dir_fd, dironly) + if not _ishidden(pattern): + names = (x for x in names if not _ishidden(x)) + return fnmatch.filter(names, pattern) + +def _glob0(dirname, basename, dir_fd, dironly): + if basename: + if _lexists(_join(dirname, basename), dir_fd): + return [basename] + else: + # `os.path.split()` returns an empty basename for paths ending with a + # directory separator. 'q*x/' should match only directories. + if _isdir(dirname, dir_fd): + return [basename] + return [] + +# Following functions are not public but can be used by third-party code. + +def glob0(dirname, pattern): + return _glob0(dirname, pattern, None, False) + +def glob1(dirname, pattern): + return _glob1(dirname, pattern, None, False) + +# This helper function recursively yields relative pathnames inside a literal +# directory. + +def _glob2(dirname, pattern, dir_fd, dironly): + assert _isrecursive(pattern) + yield pattern[:0] + yield from _rlistdir(dirname, dir_fd, dironly) + +# If dironly is false, yields all file names inside a directory. +# If dironly is true, yields only directory names. 
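+# (Illustrative aside, not from the upstream file: these helpers back the
+# public glob()/iglob() API, e.g. glob.glob('src/**/*.py', recursive=True)
+# also matches files in subdirectories of 'src', while names starting with
+# a dot are only matched by patterns that themselves start with a dot.)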
+def _iterdir(dirname, dir_fd, dironly): + try: + fd = None + fsencode = None + if dir_fd is not None: + if dirname: + fd = arg = os.open(dirname, _dir_open_flags, dir_fd=dir_fd) + else: + arg = dir_fd + if isinstance(dirname, bytes): + fsencode = os.fsencode + elif dirname: + arg = dirname + elif isinstance(dirname, bytes): + arg = bytes(os.curdir, 'ASCII') + else: + arg = os.curdir + try: + with os.scandir(arg) as it: + for entry in it: + try: + if not dironly or entry.is_dir(): + if fsencode is not None: + yield fsencode(entry.name) + else: + yield entry.name + except OSError: + pass + finally: + if fd is not None: + os.close(fd) + except OSError: + return + +def _listdir(dirname, dir_fd, dironly): + with contextlib.closing(_iterdir(dirname, dir_fd, dironly)) as it: + return list(it) + +# Recursively yields relative pathnames inside a literal directory. +def _rlistdir(dirname, dir_fd, dironly): + names = _listdir(dirname, dir_fd, dironly) + for x in names: + if not _ishidden(x): + yield x + path = _join(dirname, x) if dirname else x + for y in _rlistdir(path, dir_fd, dironly): + yield _join(x, y) + + +def _lexists(pathname, dir_fd): + # Same as os.path.lexists(), but with dir_fd + if dir_fd is None: + return os.path.lexists(pathname) + try: + os.lstat(pathname, dir_fd=dir_fd) + except (OSError, ValueError): + return False + else: + return True + +def _isdir(pathname, dir_fd): + # Same as os.path.isdir(), but with dir_fd + if dir_fd is None: + return os.path.isdir(pathname) + try: + st = os.stat(pathname, dir_fd=dir_fd) + except (OSError, ValueError): + return False + else: + return stat.S_ISDIR(st.st_mode) + +def _join(dirname, basename): + # It is common if dirname or basename is empty + if not dirname or not basename: + return dirname or basename + return os.path.join(dirname, basename) + +magic_check = re.compile('([*?[])') +magic_check_bytes = re.compile(b'([*?[])') + +def has_magic(s): + if isinstance(s, bytes): + match = magic_check_bytes.search(s) + else: + match = magic_check.search(s) + return match is not None + +def _ishidden(path): + return path[0] in ('.', b'.'[0]) + +def _isrecursive(pattern): + if isinstance(pattern, bytes): + return pattern == b'**' + else: + return pattern == '**' + +def escape(pathname): + """Escape all special characters. + """ + # Escaping is done by wrapping any of "*?[" between square brackets. + # Metacharacters do not work in the drive part and shouldn't be escaped. + drive, pathname = os.path.splitdrive(pathname) + if isinstance(pathname, bytes): + pathname = magic_check_bytes.sub(br'[\1]', pathname) + else: + pathname = magic_check.sub(r'[\1]', pathname) + return drive + pathname + + +_dir_open_flags = os.O_RDONLY | getattr(os, 'O_DIRECTORY', 0) diff --git a/pllava/lib/python3.10/gzip.py b/pllava/lib/python3.10/gzip.py new file mode 100644 index 0000000000000000000000000000000000000000..475ec326c0c982bf2b31603d64d788ba6d2d35ca --- /dev/null +++ b/pllava/lib/python3.10/gzip.py @@ -0,0 +1,609 @@ +"""Functions that read and write gzipped files. 
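+
+(Illustrative addition, not part of the upstream docstring: the module-level
+helpers round-trip data in memory, e.g. decompress(compress(b'payload'))
+returns b'payload', and open('log.gz', 'rt') layers text decoding on top of
+a GzipFile.)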
+
+The user of the file doesn't have to worry about the compression,
+but random access is not allowed."""
+
+# based on Andrew Kuchling's minigzip.py distributed with the zlib module
+
+import struct, sys, time, os
+import zlib
+import builtins
+import io
+import _compression
+
+__all__ = ["BadGzipFile", "GzipFile", "open", "compress", "decompress"]
+
+FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
+
+READ, WRITE = 1, 2
+
+_COMPRESS_LEVEL_FAST = 1
+_COMPRESS_LEVEL_TRADEOFF = 6
+_COMPRESS_LEVEL_BEST = 9
+
+
+def open(filename, mode="rb", compresslevel=_COMPRESS_LEVEL_BEST,
+         encoding=None, errors=None, newline=None):
+    """Open a gzip-compressed file in binary or text mode.
+
+    The filename argument can be an actual filename (a str or bytes object), or
+    an existing file object to read from or write to.
+
+    The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or "ab" for
+    binary mode, or "rt", "wt", "xt" or "at" for text mode. The default mode is
+    "rb", and the default compresslevel is 9.
+
+    For binary mode, this function is equivalent to the GzipFile constructor:
+    GzipFile(filename, mode, compresslevel). In this case, the encoding, errors
+    and newline arguments must not be provided.
+
+    For text mode, a GzipFile object is created, and wrapped in an
+    io.TextIOWrapper instance with the specified encoding, error handling
+    behavior, and line ending(s).
+
+    """
+    if "t" in mode:
+        if "b" in mode:
+            raise ValueError("Invalid mode: %r" % (mode,))
+    else:
+        if encoding is not None:
+            raise ValueError("Argument 'encoding' not supported in binary mode")
+        if errors is not None:
+            raise ValueError("Argument 'errors' not supported in binary mode")
+        if newline is not None:
+            raise ValueError("Argument 'newline' not supported in binary mode")
+
+    gz_mode = mode.replace("t", "")
+    if isinstance(filename, (str, bytes, os.PathLike)):
+        binary_file = GzipFile(filename, gz_mode, compresslevel)
+    elif hasattr(filename, "read") or hasattr(filename, "write"):
+        binary_file = GzipFile(None, gz_mode, compresslevel, filename)
+    else:
+        raise TypeError("filename must be a str or bytes object, or a file")
+
+    if "t" in mode:
+        encoding = io.text_encoding(encoding)
+        return io.TextIOWrapper(binary_file, encoding, errors, newline)
+    else:
+        return binary_file
+
+def write32u(output, value):
+    # The L format writes the bit pattern correctly whether signed
+    # or unsigned.
+    output.write(struct.pack("<L", value))
+
+
+class _PaddedFile:
+    """Minimal read-only file object that prepends a string to the contents
+    of an actual file. Shouldn't be used outside of gzip.py, as it lacks
+    essential functionality."""
+
+    def __init__(self, f, prepend=b''):
+        self._buffer = prepend
+        self._length = len(prepend)
+        self.file = f
+        self._read = 0
+
+    def read(self, size):
+        if self._read is None:
+            return self.file.read(size)
+        if self._read + size <= self._length:
+            read = self._read
+            self._read += size
+            return self._buffer[read:self._read]
+        else:
+            read = self._read
+            self._read = None
+            return self._buffer[read:] + \
+                   self.file.read(size-self._length+read)
+
+    def prepend(self, prepend=b''):
+        if self._read is None:
+            self._buffer = prepend
+        else:  # Assume data was read since the last prepend() call
+            self._read -= len(prepend)
+            return
+        self._length = len(self._buffer)
+        self._read = 0
+
+    def seek(self, off):
+        self._read = None
+        self._buffer = None
+        return self.file.seek(off)
+
+    def seekable(self):
+        return True  # Allows fast-forwarding even in unseekable streams
+
+
+class BadGzipFile(OSError):
+    """Exception raised in some cases for invalid gzip files."""
+
+
+class GzipFile(_compression.BaseStream):
+    """The GzipFile class simulates most of the methods of a file object with
+    the exception of the truncate() method.
+
+    This class only supports opening files in binary mode. If you need to open a
+    compressed file in text mode, use the gzip.open() function.
+
+    """
+
+    # Overridden with internal file object to be closed, if only a filename
+    # is passed in
+    myfileobj = None
+
+    def __init__(self, filename=None, mode=None,
+                 compresslevel=_COMPRESS_LEVEL_BEST, fileobj=None, mtime=None):
+        """Constructor for the GzipFile class.
+
+        At least one of fileobj and filename must be given a
+        non-trivial value.
+
+        The new class instance is based on fileobj, which can be a regular
+        file, an io.BytesIO object, or any other object which simulates a file.
+        It defaults to None, in which case filename is opened to provide
+        a file object.
+
+        When fileobj is not None, the filename argument is only used to be
+        included in the gzip file header, which may include the original
+        filename of the uncompressed file.  It defaults to the filename of
+        fileobj, if discernible; otherwise, it defaults to the empty string,
+        and in this case the original filename is not included in the header.
+
+        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or
+        'xb' depending on whether the file will be read or written.  The default
+        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
+        A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
+        'wb', 'a' and 'ab', and 'x' and 'xb'.
+
+        The compresslevel argument is an integer from 0 to 9 controlling the
+        level of compression; 1 is fastest and produces the least compression,
+        and 9 is slowest and produces the most compression. 0 is no compression
+        at all. The default is 9.
+
+        The mtime argument is an optional numeric timestamp to be written
+        to the last modification time field in the stream when compressing.
+        If omitted or None, the current time is used.
+
+        """
+
+        if mode and ('t' in mode or 'U' in mode):
+            raise ValueError("Invalid mode: {!r}".format(mode))
+        if mode and 'b' not in mode:
+            mode += 'b'
+        if fileobj is None:
+            fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
+        if filename is None:
+            filename = getattr(fileobj, 'name', '')
+            if not isinstance(filename, (str, bytes)):
+                filename = ''
+        else:
+            filename = os.fspath(filename)
+        origmode = mode
+        if mode is None:
+            mode = getattr(fileobj, 'mode', 'rb')
+
+        if mode.startswith('r'):
+            self.mode = READ
+            raw = _GzipReader(fileobj)
+            self._buffer = io.BufferedReader(raw)
+            self.name = filename
+
+        elif mode.startswith(('w', 'a', 'x')):
+            if origmode is None:
+                import warnings
+                warnings.warn(
+                    "GzipFile was opened for writing, but this will "
+                    "change in future Python releases.  "
+                    "Specify the mode argument for opening it for writing.",
+                    FutureWarning, 2)
+            self.mode = WRITE
+            self._init_write(filename)
+            self.compress = zlib.compressobj(compresslevel,
+                                             zlib.DEFLATED,
+                                             -zlib.MAX_WBITS,
+                                             zlib.DEF_MEM_LEVEL,
+                                             0)
+            self._write_mtime = mtime
+        else:
+            raise ValueError("Invalid mode: {!r}".format(mode))
+
+        self.fileobj = fileobj
+
+        if self.mode == WRITE:
+            self._write_gzip_header(compresslevel)
+
+    @property
+    def filename(self):
+        import warnings
+        warnings.warn("use the name attribute", DeprecationWarning, 2)
+        if self.mode == WRITE and self.name[-3:] != ".gz":
+            return self.name + ".gz"
+        return self.name
+
+    @property
+    def mtime(self):
+        """Last modification time read from stream, or None"""
+        return self._buffer.raw._last_mtime
+
+    def __repr__(self):
+        s = repr(self.fileobj)
+        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
+
+    def _init_write(self, filename):
+        self.name = filename
+        self.crc = zlib.crc32(b"")
+        self.size = 0
+        self.writebuf = []
+        self.bufsize = 0
+        self.offset = 0  # Current file offset for seek(), tell(), etc
+
+    def _write_gzip_header(self, compresslevel):
+        self.fileobj.write(b'\037\213')             # magic header
+        self.fileobj.write(b'\010')                 # compression method
+        try:
+            # RFC 1952 requires the FNAME field to be Latin-1. Do not
+            # include filenames that cannot be represented that way.
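+            # (Illustrative note, not from the upstream file: together with
+            # the magic, method and flag bytes, the 4-byte mtime, XFL and OS
+            # bytes written in this method, a member starts with the fixed
+            # 10-byte RFC 1952 header, e.g. 1f 8b 08 08 <mtime> 02 ff for a
+            # best-compression member that carries a FNAME field.)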
+ fname = os.path.basename(self.name) + if not isinstance(fname, bytes): + fname = fname.encode('latin-1') + if fname.endswith(b'.gz'): + fname = fname[:-3] + except UnicodeEncodeError: + fname = b'' + flags = 0 + if fname: + flags = FNAME + self.fileobj.write(chr(flags).encode('latin-1')) + mtime = self._write_mtime + if mtime is None: + mtime = time.time() + write32u(self.fileobj, int(mtime)) + if compresslevel == _COMPRESS_LEVEL_BEST: + xfl = b'\002' + elif compresslevel == _COMPRESS_LEVEL_FAST: + xfl = b'\004' + else: + xfl = b'\000' + self.fileobj.write(xfl) + self.fileobj.write(b'\377') + if fname: + self.fileobj.write(fname + b'\000') + + def write(self,data): + self._check_not_closed() + if self.mode != WRITE: + import errno + raise OSError(errno.EBADF, "write() on read-only GzipFile object") + + if self.fileobj is None: + raise ValueError("write() on closed GzipFile object") + + if isinstance(data, (bytes, bytearray)): + length = len(data) + else: + # accept any data that supports the buffer protocol + data = memoryview(data) + length = data.nbytes + + if length > 0: + self.fileobj.write(self.compress.compress(data)) + self.size += length + self.crc = zlib.crc32(data, self.crc) + self.offset += length + + return length + + def read(self, size=-1): + self._check_not_closed() + if self.mode != READ: + import errno + raise OSError(errno.EBADF, "read() on write-only GzipFile object") + return self._buffer.read(size) + + def read1(self, size=-1): + """Implements BufferedIOBase.read1() + + Reads up to a buffer's worth of data if size is negative.""" + self._check_not_closed() + if self.mode != READ: + import errno + raise OSError(errno.EBADF, "read1() on write-only GzipFile object") + + if size < 0: + size = io.DEFAULT_BUFFER_SIZE + return self._buffer.read1(size) + + def peek(self, n): + self._check_not_closed() + if self.mode != READ: + import errno + raise OSError(errno.EBADF, "peek() on write-only GzipFile object") + return self._buffer.peek(n) + + @property + def closed(self): + return self.fileobj is None + + def close(self): + fileobj = self.fileobj + if fileobj is None: + return + self.fileobj = None + try: + if self.mode == WRITE: + fileobj.write(self.compress.flush()) + write32u(fileobj, self.crc) + # self.size may exceed 2 GiB, or even 4 GiB + write32u(fileobj, self.size & 0xffffffff) + elif self.mode == READ: + self._buffer.close() + finally: + myfileobj = self.myfileobj + if myfileobj: + self.myfileobj = None + myfileobj.close() + + def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH): + self._check_not_closed() + if self.mode == WRITE: + # Ensure the compressor's buffer is flushed + self.fileobj.write(self.compress.flush(zlib_mode)) + self.fileobj.flush() + + def fileno(self): + """Invoke the underlying file object's fileno() method. + + This will raise AttributeError if the underlying file object + doesn't support fileno(). 
+        """
+        return self.fileobj.fileno()
+
+    def rewind(self):
+        '''Return the uncompressed stream file position indicator to the
+        beginning of the file'''
+        if self.mode != READ:
+            raise OSError("Can't rewind in write mode")
+        self._buffer.seek(0)
+
+    def readable(self):
+        return self.mode == READ
+
+    def writable(self):
+        return self.mode == WRITE
+
+    def seekable(self):
+        return True
+
+    def seek(self, offset, whence=io.SEEK_SET):
+        if self.mode == WRITE:
+            if whence != io.SEEK_SET:
+                if whence == io.SEEK_CUR:
+                    offset = self.offset + offset
+                else:
+                    raise ValueError('Seek from end not supported')
+            if offset < self.offset:
+                raise OSError('Negative seek in write mode')
+            count = offset - self.offset
+            chunk = b'\0' * 1024
+            for i in range(count // 1024):
+                self.write(chunk)
+            self.write(b'\0' * (count % 1024))
+        elif self.mode == READ:
+            self._check_not_closed()
+            return self._buffer.seek(offset, whence)
+
+        return self.offset
+
+    def readline(self, size=-1):
+        self._check_not_closed()
+        return self._buffer.readline(size)
+
+
+class _GzipReader(_compression.DecompressReader):
+    def __init__(self, fp):
+        super().__init__(_PaddedFile(fp), zlib.decompressobj,
+                         wbits=-zlib.MAX_WBITS)
+        # Set flag indicating start of a new member
+        self._new_member = True
+        self._last_mtime = None
+
+    def _init_read(self):
+        self._crc = zlib.crc32(b"")
+        self._stream_size = 0  # Decompressed size of unconcatenated stream
+
+    def _read_exact(self, n):
+        '''Read exactly *n* bytes from `self._fp`
+
+        This method is required because self._fp may be unbuffered,
+        i.e. return short reads.
+        '''
+
+        data = self._fp.read(n)
+        while len(data) < n:
+            b = self._fp.read(n - len(data))
+            if not b:
+                raise EOFError("Compressed file ended before the "
+                               "end-of-stream marker was reached")
+            data += b
+        return data
+
+    def _read_gzip_header(self):
+        magic = self._fp.read(2)
+        if magic == b'':
+            return False
+
+        if magic != b'\037\213':
+            raise BadGzipFile('Not a gzipped file (%r)' % magic)
+
+        (method, flag,
+         self._last_mtime) = struct.unpack("<BBIxx", self._read_exact(8))
+        if method != 8:
+            raise BadGzipFile('Unknown compression method')
+
+        if flag & FEXTRA:
+            # Read & discard the extra field, if present
+            extra_len, = struct.unpack("<H", self._read_exact(2))
+            self._read_exact(extra_len)
+        if flag & FNAME:
+            # Read and discard a null-terminated string containing the filename
+            while True:
+                s = self._fp.read(1)
+                if not s or s==b'\000':
+                    break
+        if flag & FCOMMENT:
+            # Read and discard a null-terminated string containing a comment
+            while True:
+                s = self._fp.read(1)
+                if not s or s==b'\000':
+                    break
+        if flag & FHCRC:
+            self._read_exact(2)     # Read & discard the 16-bit header CRC
+        return True
+
+    def read(self, size=-1):
+        if size < 0:
+            return self.readall()
+        # size=0 is special because decompress(max_length=0) is not supported
+        if not size:
+            return b""
+
+        # For certain input data, a single
+        # call to decompress() may not return
+        # any data. In this case, retry until we get some data or reach EOF.
+        while True:
+            if self._decompressor.eof:
+                # Ending case: we've come to the end of a member in the file,
+                # so finish up this member, and read a new gzip header.
+                # Check the CRC and file size, and set the flag so we read
+                # a new member
+                self._read_eof()
+                self._new_member = True
+                self._decompressor = self._decomp_factory(
+                    **self._decomp_args)
+
+            if self._new_member:
+                # If the _new_member flag is set, we have to
+                # jump to the next member, if there is one.
+                self._init_read()
+                if not self._read_gzip_header():
+                    self._size = self._pos
+                    return b""
+                self._new_member = False
+
+            # Read a chunk of data from the file
+            buf = self._fp.read(io.DEFAULT_BUFFER_SIZE)
+
+            uncompress = self._decompressor.decompress(buf, size)
+            if self._decompressor.unconsumed_tail != b"":
+                self._fp.prepend(self._decompressor.unconsumed_tail)
+            elif self._decompressor.unused_data != b"":
+                # Prepend the already read bytes to the fileobj so they can
+                # be seen by _read_eof() and _read_gzip_header()
+                self._fp.prepend(self._decompressor.unused_data)
+
+            if uncompress != b"":
+                break
+            if buf == b"":
+                raise EOFError("Compressed file ended before the "
+                               "end-of-stream marker was reached")
+
+        self._add_read_data( uncompress )
+        self._pos += len(uncompress)
+        return uncompress
+
+    def _add_read_data(self, data):
+        self._crc = zlib.crc32(data, self._crc)
+        self._stream_size = self._stream_size + len(data)
+
+    def _read_eof(self):
+        # We've read to the end of the file.
+        # We check that the computed CRC and size of the
+        # uncompressed data matches the stored values.  Note that the size
+        # stored is the true file size mod 2**32.
+        crc32, isize = struct.unpack("<II", self._read_exact(8))
+        if crc32 != self._crc:
+            raise BadGzipFile("CRC check failed %s != %s" % (hex(crc32),
+                                                             hex(self._crc)))
+        elif isize != (self._stream_size & 0xffffffff):
+            raise BadGzipFile("Incorrect length of data produced")
+
+        # Gzip files can be padded with zeroes and still have archives.
+        # Consume all zero bytes and set the file position to the first
+        # non-zero byte. See http://www.gzip.org/#faq8
+        c = b"\x00"
+        while c == b"\x00":
+            c = self._fp.read(1)
+        if c:
+            self._fp.prepend(c)
+
+    def _rewind(self):
+        super()._rewind()
+        self._new_member = True
+
+
+def compress(data, compresslevel=_COMPRESS_LEVEL_BEST, *, mtime=None):
+    """Compress data in one shot and return the compressed string.
+    Optional argument is the compression level, in range of 0-9.
+    """
+    buf = io.BytesIO()
+    with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel, mtime=mtime) as f:
+        f.write(data)
+    return buf.getvalue()
+
+def decompress(data):
+    """Decompress a gzip compressed string in one shot.
+    Return the decompressed string.
+    """
+    with GzipFile(fileobj=io.BytesIO(data)) as f:
+        return f.read()
diff --git a/pllava/lib/python3.10/heapq.py b/pllava/lib/python3.10/heapq.py
new file mode 100644
--- /dev/null
+++ b/pllava/lib/python3.10/heapq.py
+"""Heap queue algorithm (a.k.a. priority queue).
+
+Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
+all k, counting elements from 0.  For the sake of comparison,
+non-existing elements are considered to be infinite.  The interesting
+property of a heap is that a[0] is always its smallest element.
+
+Usage:
+
+heap = []            # creates an empty heap
+heappush(heap, item) # pushes a new item on the heap
+item = heappop(heap) # pops the smallest item from the heap
+item = heap[0]       # smallest item on the heap without popping it
+heapify(x)           # transforms list into a heap, in-place, in linear time
+item = heapreplace(heap, item) # pops and returns smallest item, and adds
+                               # new item; the heap size is unchanged
+
+Our API differs from textbook heap algorithms as follows:
+
+- We use 0-based indexing.  This makes the relationship between the
+  index for a node and the indexes for its children slightly less
+  obvious, but is more suitable since Python uses 0-based indexing.
+
+- Our heappop() method returns the smallest item, not the largest.
+
+These two make it possible to view the heap as a regular Python list
+without surprises: heap[0] is the smallest item, and heap.sort()
+maintains the heap invariant!
+"""
+
+# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
+
+__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
+           'nlargest', 'nsmallest', 'heappushpop']
+
+def heappush(heap, item):
+    """Push item onto heap, maintaining the heap invariant."""
+    heap.append(item)
+    _siftdown(heap, 0, len(heap)-1)
+
+def heappop(heap):
+    """Pop the smallest item off the heap, maintaining the heap invariant."""
+    lastelt = heap.pop()    # raises appropriate IndexError if heap is empty
+    if heap:
+        returnitem = heap[0]
+        heap[0] = lastelt
+        _siftup(heap, 0)
+        return returnitem
+    return lastelt
+
+def heapreplace(heap, item):
+    """Pop and return the current smallest value, and add the new item.
+
+    This is more efficient than heappop() followed by heappush(), and can be
+    more appropriate when using a fixed-size heap.  Note that the value
+    returned may be larger than item!  That constrains reasonable uses of
+    this routine unless written as part of a conditional replacement:
+
+        if item > heap[0]:
+            item = heapreplace(heap, item)
+    """
+    returnitem = heap[0]    # raises appropriate IndexError if heap is empty
+    heap[0] = item
+    _siftup(heap, 0)
+    return returnitem
+
+def heappushpop(heap, item):
+    """Fast version of a heappush followed by a heappop."""
+    if heap and heap[0] < item:
+        item, heap[0] = heap[0], item
+        _siftup(heap, 0)
+    return item
+
+def heapify(x):
+    """Transform list into a heap, in-place, in O(len(x)) time."""
+    n = len(x)
+    # Transform bottom-up.  The largest index there's any point to looking at
+    # is the largest with a child index in-range, so must have 2*i + 1 < n,
+    # or i < (n-1)/2.  If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
+    # j-1 is the largest, which is n//2 - 1.  If n is odd = 2*j+1, this is
+    # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
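+    # (Worked example, added note: for n = 10 the only in-range child of
+    # index 4 is 2*4 + 1 = 9, so 4 = n//2 - 1 is the last parent and the
+    # loop below sifts indices 4, 3, 2, 1, 0.)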
+ for i in reversed(range(n//2)): + _siftup(x, i) + +def _heappop_max(heap): + """Maxheap version of a heappop.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup_max(heap, 0) + return returnitem + return lastelt + +def _heapreplace_max(heap, item): + """Maxheap version of a heappop followed by a heappush.""" + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup_max(heap, 0) + return returnitem + +def _heapify_max(x): + """Transform list into a maxheap, in-place, in O(len(x)) time.""" + n = len(x) + for i in reversed(range(n//2)): + _siftup_max(x, i) + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if newitem < parent: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom comparison methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. +# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) more efficient +# for sorting. + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. 
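+        # (Added note: the comparison below uses only __lt__; `not a < b`
+        # picks the right child whenever it is less than or equal to the
+        # left one, so ties go to the right child.)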
+ rightpos = childpos + 1 + if rightpos < endpos and not heap[childpos] < heap[rightpos]: + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + +def _siftdown_max(heap, startpos, pos): + 'Maxheap variant of _siftdown' + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if parent < newitem: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +def _siftup_max(heap, pos): + 'Maxheap variant of _siftup' + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the larger child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of larger child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[rightpos] < heap[childpos]: + childpos = rightpos + # Move the larger child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown_max(heap, startpos, pos) + +def merge(*iterables, key=None, reverse=False): + '''Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + If *key* is not None, applies a key function to each element to determine + its sort order. 
+ + >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) + ['dog', 'cat', 'fish', 'horse', 'kangaroo'] + + ''' + + h = [] + h_append = h.append + + if reverse: + _heapify = _heapify_max + _heappop = _heappop_max + _heapreplace = _heapreplace_max + direction = -1 + else: + _heapify = heapify + _heappop = heappop + _heapreplace = heapreplace + direction = 1 + + if key is None: + for order, it in enumerate(map(iter, iterables)): + try: + next = it.__next__ + h_append([next(), order * direction, next]) + except StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + value, order, next = s = h[0] + yield value + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except StopIteration: + _heappop(h) # remove empty iterator + if h: + # fast case when only a single iterator remains + value, order, next = h[0] + yield value + yield from next.__self__ + return + + for order, it in enumerate(map(iter, iterables)): + try: + next = it.__next__ + value = next() + h_append([key(value), order * direction, value, next]) + except StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + key_value, order, value, next = s = h[0] + yield value + value = next() + s[0] = key(value) + s[2] = value + _heapreplace(h, s) + except StopIteration: + _heappop(h) + if h: + key_value, order, value, next = h[0] + yield value + yield from next.__self__ + + +# Algorithm notes for nlargest() and nsmallest() +# ============================================== +# +# Make a single pass over the data while keeping the k most extreme values +# in a heap. Memory consumption is limited to keeping k values in a list. +# +# Measured performance for random inputs: +# +# number of comparisons +# n inputs k-extreme values (average of 5 trials) % more than min() +# ------------- ---------------- --------------------- ----------------- +# 1,000 100 3,317 231.7% +# 10,000 100 14,046 40.5% +# 100,000 100 105,749 5.7% +# 1,000,000 100 1,007,751 0.8% +# 10,000,000 100 10,009,401 0.1% +# +# Theoretical number of comparisons for k smallest of n random inputs: +# +# Step Comparisons Action +# ---- -------------------------- --------------------------- +# 1 1.66 * k heapify the first k-inputs +# 2 n - k compare remaining elements to top of heap +# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap +# 4 k * lg2(k) - (k/2) final sort of the k most extreme values +# +# Combining and simplifying for a rough estimate gives: +# +# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k)) +# +# Computing the number of comparisons for step 3: +# ----------------------------------------------- +# * For the i-th new value from the iterable, the probability of being in the +# k most extreme values is k/i. For example, the probability of the 101st +# value seen being in the 100 most extreme values is 100/101. +# * If the value is a new extreme value, the cost of inserting it into the +# heap is 1 + log(k, 2). 
+# * The probability times the cost gives: +# (k/i) * (1 + log(k, 2)) +# * Summing across the remaining n-k elements gives: +# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1)) +# * This reduces to: +# (H(n) - H(k)) * k * (1 + log(k, 2)) +# * Where H(n) is the n-th harmonic number estimated by: +# gamma = 0.5772156649 +# H(n) = log(n, e) + gamma + 1 / (2 * n) +# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence +# * Substituting the H(n) formula: +# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2) +# +# Worst-case for step 3: +# ---------------------- +# In the worst case, the input data is reversed sorted so that every new element +# must be inserted in the heap: +# +# comparisons = 1.66 * k + log(k, 2) * (n - k) +# +# Alternative Algorithms +# ---------------------- +# Other algorithms were not used because they: +# 1) Took much more auxiliary memory, +# 2) Made multiple passes over the data. +# 3) Made more comparisons in common cases (small k, large n, semi-random input). +# See the more detailed comparison of approach at: +# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest + +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable, key=key)[:n] + """ + + # Short-cut for n==1 is to use min() + if n == 1: + it = iter(iterable) + sentinel = object() + result = min(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + # put the range(n) first so that zip() doesn't + # consume one too many elements from the iterator + result = [(elem, i) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + if elem < top: + _heapreplace(result, (elem, order)) + top, _order = result[0] + order += 1 + result.sort() + return [elem for (elem, order) in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + k = key(elem) + if k < top: + _heapreplace(result, (k, order, elem)) + top, _order, _elem = result[0] + order += 1 + result.sort() + return [elem for (k, order, elem) in result] + +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. 
+ + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() + if n == 1: + it = iter(iterable) + sentinel = object() + result = max(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + if top < elem: + _heapreplace(result, (elem, order)) + top, _order = result[0] + order -= 1 + result.sort(reverse=True) + return [elem for (elem, order) in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + k = key(elem) + if top < k: + _heapreplace(result, (k, order, elem)) + top, _order, _elem = result[0] + order -= 1 + result.sort(reverse=True) + return [elem for (k, order, elem) in result] + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass +try: + from _heapq import _heapreplace_max +except ImportError: + pass +try: + from _heapq import _heapify_max +except ImportError: + pass +try: + from _heapq import _heappop_max +except ImportError: + pass + + +if __name__ == "__main__": + + import doctest # pragma: no cover + print(doctest.testmod()) # pragma: no cover diff --git a/pllava/lib/python3.10/hmac.py b/pllava/lib/python3.10/hmac.py new file mode 100644 index 0000000000000000000000000000000000000000..8b4f920db954ca8e5e5844c6b91b52ea56d44d81 --- /dev/null +++ b/pllava/lib/python3.10/hmac.py @@ -0,0 +1,219 @@ +"""HMAC (Keyed-Hashing for Message Authentication) module. + +Implements the HMAC algorithm as described by RFC 2104. +""" + +import warnings as _warnings +try: + import _hashlib as _hashopenssl +except ImportError: + _hashopenssl = None + _functype = None + from _operator import _compare_digest as compare_digest +else: + compare_digest = _hashopenssl.compare_digest + _functype = type(_hashopenssl.openssl_sha256) # builtin type + +import hashlib as _hashlib + +trans_5C = bytes((x ^ 0x5C) for x in range(256)) +trans_36 = bytes((x ^ 0x36) for x in range(256)) + +# The size of the digests returned by HMAC depends on the underlying +# hashing module used. Use digest_size from the instance of HMAC instead. +digest_size = None + + +class HMAC: + """RFC 2104 HMAC class. Also complies with RFC 4231. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + blocksize = 64 # 512-bit HMAC; can be changed in subclasses. + + __slots__ = ( + "_hmac", "_inner", "_outer", "block_size", "digest_size" + ) + + def __init__(self, key, msg=None, digestmod=''): + """Create a new HMAC object. + + key: bytes or buffer, key for the keyed hash object. + msg: bytes or buffer, Initial input for the hash or None. + digestmod: A hash name suitable for hashlib.new(). *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + + Required as of 3.8, despite its position after the optional + msg argument. 
Passing it as a keyword argument is + recommended, though not required for legacy API reasons. + """ + + if not isinstance(key, (bytes, bytearray)): + raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__) + + if not digestmod: + raise TypeError("Missing required parameter 'digestmod'.") + + if _hashopenssl and isinstance(digestmod, (str, _functype)): + try: + self._init_hmac(key, msg, digestmod) + except _hashopenssl.UnsupportedDigestmodError: + self._init_old(key, msg, digestmod) + else: + self._init_old(key, msg, digestmod) + + def _init_hmac(self, key, msg, digestmod): + self._hmac = _hashopenssl.hmac_new(key, msg, digestmod=digestmod) + self.digest_size = self._hmac.digest_size + self.block_size = self._hmac.block_size + + def _init_old(self, key, msg, digestmod): + if callable(digestmod): + digest_cons = digestmod + elif isinstance(digestmod, str): + digest_cons = lambda d=b'': _hashlib.new(digestmod, d) + else: + digest_cons = lambda d=b'': digestmod.new(d) + + self._hmac = None + self._outer = digest_cons() + self._inner = digest_cons() + self.digest_size = self._inner.digest_size + + if hasattr(self._inner, 'block_size'): + blocksize = self._inner.block_size + if blocksize < 16: + _warnings.warn('block_size of %d seems too small; using our ' + 'default of %d.' % (blocksize, self.blocksize), + RuntimeWarning, 2) + blocksize = self.blocksize + else: + _warnings.warn('No block_size attribute on given digest object; ' + 'Assuming %d.' % (self.blocksize), + RuntimeWarning, 2) + blocksize = self.blocksize + + if len(key) > blocksize: + key = digest_cons(key).digest() + + # self.blocksize is the default blocksize. self.block_size is + # effective block size as well as the public API attribute. + self.block_size = blocksize + + key = key.ljust(blocksize, b'\0') + self._outer.update(key.translate(trans_5C)) + self._inner.update(key.translate(trans_36)) + if msg is not None: + self.update(msg) + + @property + def name(self): + if self._hmac: + return self._hmac.name + else: + return f"hmac-{self._inner.name}" + + def update(self, msg): + """Feed data from msg into this hashing object.""" + inst = self._hmac or self._inner + inst.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + # Call __new__ directly to avoid the expensive __init__. + other = self.__class__.__new__(self.__class__) + other.digest_size = self.digest_size + if self._hmac: + other._hmac = self._hmac.copy() + other._inner = other._outer = None + else: + other._hmac = None + other._inner = self._inner.copy() + other._outer = self._outer.copy() + return other + + def _current(self): + """Return a hash object for the current state. + + To be used only internally with digest() and hexdigest(). + """ + if self._hmac: + return self._hmac + else: + h = self._outer.copy() + h.update(self._inner.digest()) + return h + + def digest(self): + """Return the hash value of this hashing object. + + This returns the hmac value as bytes. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self._current() + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. + """ + h = self._current() + return h.hexdigest() + +def new(key, msg=None, digestmod=''): + """Create a new hashing object and return it. + + key: bytes or buffer, The starting key for the hash. 
+ msg: bytes or buffer, Initial input for the hash, or None. + digestmod: A hash name suitable for hashlib.new(). *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + + Required as of 3.8, despite its position after the optional + msg argument. Passing it as a keyword argument is + recommended, though not required for legacy API reasons. + + You can now feed arbitrary bytes into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + or hexdigest() methods. + """ + return HMAC(key, msg, digestmod) + + +def digest(key, msg, digest): + """Fast inline implementation of HMAC. + + key: bytes or buffer, The key for the keyed hash object. + msg: bytes or buffer, Input message. + digest: A hash name suitable for hashlib.new() for best performance. *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + """ + if _hashopenssl is not None and isinstance(digest, (str, _functype)): + try: + return _hashopenssl.hmac_digest(key, msg, digest) + except _hashopenssl.UnsupportedDigestmodError: + pass + + if callable(digest): + digest_cons = digest + elif isinstance(digest, str): + digest_cons = lambda d=b'': _hashlib.new(digest, d) + else: + digest_cons = lambda d=b'': digest.new(d) + + inner = digest_cons() + outer = digest_cons() + blocksize = getattr(inner, 'block_size', 64) + if len(key) > blocksize: + key = digest_cons(key).digest() + key = key + b'\x00' * (blocksize - len(key)) + inner.update(key.translate(trans_36)) + outer.update(key.translate(trans_5C)) + inner.update(msg) + outer.update(inner.digest()) + return outer.digest() diff --git a/pllava/lib/python3.10/imaplib.py b/pllava/lib/python3.10/imaplib.py new file mode 100644 index 0000000000000000000000000000000000000000..73184396d894a384b435e44204c4b8ff0163f4f6 --- /dev/null +++ b/pllava/lib/python3.10/imaplib.py @@ -0,0 +1,1649 @@ +"""IMAP4 client. + +Based on RFC 2060. + +Public class: IMAP4 +Public variable: Debug +Public functions: Internaldate2tuple + Int2AP + ParseFlags + Time2Internaldate +""" + +# Author: Piers Lauder December 1997. +# +# Authentication code contributed by Donn Cave June 1998. +# String method conversion by ESR, February 2001. +# GET/SETACL contributed by Anthony Baxter April 2001. +# IMAP4_SSL contributed by Tino Lange March 2002. +# GET/SETQUOTA contributed by Andreas Zeidler June 2002. +# PROXYAUTH contributed by Rick Holbert November 2002. +# GET/SETANNOTATION contributed by Tomas Lindroos June 2005. + +__version__ = "2.58" + +import binascii, errno, random, re, socket, subprocess, sys, time, calendar +from datetime import datetime, timezone, timedelta +from io import DEFAULT_BUFFER_SIZE + +try: + import ssl + HAVE_SSL = True +except ImportError: + HAVE_SSL = False + +__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple", + "Int2AP", "ParseFlags", "Time2Internaldate"] + +# Globals + +CRLF = b'\r\n' +Debug = 0 +IMAP4_PORT = 143 +IMAP4_SSL_PORT = 993 +AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first + +# Maximal line length when calling readline(). This is to prevent +# reading arbitrary length lines. RFC 3501 and 2060 (IMAP 4rev1) +# don't specify a line length. RFC 2683 suggests limiting client +# command lines to 1000 octets and that servers should be prepared +# to accept command lines up to 8000 octets, so we used to use 10K here. +# In the modern world (eg: gmail) the response to, for example, a +# search command can be quite large, so we now use 1M. 
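+# (Added note, not from the upstream file: readline() below enforces this
+# cap, raising IMAP4.error rather than buffering an unbounded line.)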
+_MAXLINE = 1000000
+
+
+#       Commands
+
+Commands = {
+        # name            valid states
+        'APPEND':       ('AUTH', 'SELECTED'),
+        'AUTHENTICATE': ('NONAUTH',),
+        'CAPABILITY':   ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
+        'CHECK':        ('SELECTED',),
+        'CLOSE':        ('SELECTED',),
+        'COPY':         ('SELECTED',),
+        'CREATE':       ('AUTH', 'SELECTED'),
+        'DELETE':       ('AUTH', 'SELECTED'),
+        'DELETEACL':    ('AUTH', 'SELECTED'),
+        'ENABLE':       ('AUTH', ),
+        'EXAMINE':      ('AUTH', 'SELECTED'),
+        'EXPUNGE':      ('SELECTED',),
+        'FETCH':        ('SELECTED',),
+        'GETACL':       ('AUTH', 'SELECTED'),
+        'GETANNOTATION':('AUTH', 'SELECTED'),
+        'GETQUOTA':     ('AUTH', 'SELECTED'),
+        'GETQUOTAROOT': ('AUTH', 'SELECTED'),
+        'MYRIGHTS':     ('AUTH', 'SELECTED'),
+        'LIST':         ('AUTH', 'SELECTED'),
+        'LOGIN':        ('NONAUTH',),
+        'LOGOUT':       ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
+        'LSUB':         ('AUTH', 'SELECTED'),
+        'MOVE':         ('SELECTED',),
+        'NAMESPACE':    ('AUTH', 'SELECTED'),
+        'NOOP':         ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
+        'PARTIAL':      ('SELECTED',),                                  # NB: obsolete
+        'PROXYAUTH':    ('AUTH',),
+        'RENAME':       ('AUTH', 'SELECTED'),
+        'SEARCH':       ('SELECTED',),
+        'SELECT':       ('AUTH', 'SELECTED'),
+        'SETACL':       ('AUTH', 'SELECTED'),
+        'SETANNOTATION':('AUTH', 'SELECTED'),
+        'SETQUOTA':     ('AUTH', 'SELECTED'),
+        'SORT':         ('SELECTED',),
+        'STARTTLS':     ('NONAUTH',),
+        'STATUS':       ('AUTH', 'SELECTED'),
+        'STORE':        ('SELECTED',),
+        'SUBSCRIBE':    ('AUTH', 'SELECTED'),
+        'THREAD':       ('SELECTED',),
+        'UID':          ('SELECTED',),
+        'UNSUBSCRIBE':  ('AUTH', 'SELECTED'),
+        'UNSELECT':     ('SELECTED',),
+        }
+
+#       Patterns to match server responses
+
+Continuation = re.compile(br'\+( (?P<data>.*))?')
+Flags = re.compile(br'.*FLAGS \((?P<flags>[^\)]*)\)')
+InternalDate = re.compile(br'.*INTERNALDATE "'
+        br'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
+        br' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
+        br' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
+        br'"')
+# Literal is no longer used; kept for backward compatibility.
+Literal = re.compile(br'.*{(?P<size>\d+)}$', re.ASCII)
+MapCRLF = re.compile(br'\r\n|\r|\n')
+# We no longer exclude the ']' character from the data portion of the response
+# code, even though it violates the RFC.  Popular IMAP servers such as Gmail
+# allow flags with ']', and there are programs (including imaplib!) that can
+# produce them.  The problem with this is if the 'text' portion of the response
+# includes a ']' we'll parse the response wrong (which is the point of the RFC
+# restriction).  However, that seems less likely to be a problem in practice
+# than being unable to correctly parse flags that include ']' chars, which
+# was reported as a real-world problem in issue #21815.
+Response_code = re.compile(br'\[(?P<type>[A-Z-]+)( (?P<data>.*))?\]')
+Untagged_response = re.compile(br'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
+# Untagged_status is no longer used; kept for backward compatibility
+Untagged_status = re.compile(
+    br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?', re.ASCII)
+# We compile these in _mode_xxx.
+_Literal = br'.*{(?P<size>\d+)}$'
+_Untagged_status = br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?'
+
+
+
+class IMAP4:
+
+    r"""IMAP4 client class.
+
+    Instantiate with: IMAP4([host[, port[, timeout=None]]])
+
+            host - host's name (default: localhost);
+            port - port number (default: standard IMAP4 port).
+            timeout - socket timeout (default: None)
+                      If timeout is not given or is None,
+                      the global default socket timeout is used
+
+    All IMAP4rev1 commands are supported by methods of the same
+    name (in lower-case).
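+
+    A minimal usage sketch (an illustrative addition, not part of the
+    upstream docstring; host, credentials and message set are placeholders):
+
+        with IMAP4('imap.example.com') as M:
+            M.login('user', 'password')
+            M.select()
+            typ, data = M.search(None, 'ALL')
+            typ, msg = M.fetch(data[0].split()[0], '(RFC822)')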
+
+    All arguments to commands are converted to strings, except for
+    AUTHENTICATE, and the last argument to APPEND which is passed as
+    an IMAP4 literal.  If necessary (the string contains any
+    non-printing characters or white-space and isn't enclosed with
+    either parentheses or double quotes) each string is quoted.
+    However, the 'password' argument to the LOGIN command is always
+    quoted.  If you want to avoid having an argument string quoted
+    (eg: the 'flags' argument to STORE) then enclose the string in
+    parentheses (eg: "(\Deleted)").
+
+    Each command returns a tuple: (type, [data, ...]) where 'type'
+    is usually 'OK' or 'NO', and 'data' is either the text from the
+    tagged response, or untagged results from command. Each 'data'
+    is either a string, or a tuple. If a tuple, then the first part
+    is the header of the response, and the second part contains
+    the data (ie: 'literal' value).
+
+    Errors raise the exception class <instance>.error("<reason>").
+    IMAP4 server errors raise <instance>.abort("<reason>"),
+    which is a sub-class of 'error'. Mailbox status changes
+    from READ-WRITE to READ-ONLY raise the exception class
+    <instance>.readonly("<reason>"), which is a sub-class of 'abort'.
+
+    "error" exceptions imply a program error.
+    "abort" exceptions imply the connection should be reset, and
+    the command re-tried.
+    "readonly" exceptions imply the command should be re-tried.
+
+    Note: to use this module, you must read the RFCs pertaining to the
+    IMAP4 protocol, as the semantics of the arguments to each IMAP4
+    command are left to the invoker, not to mention the results. Also,
+    most IMAP servers implement a sub-set of the commands available here.
+    """
+
+    class error(Exception): pass    # Logical errors - debug required
+    class abort(error): pass        # Service errors - close and retry
+    class readonly(abort): pass     # Mailbox status changed to READ-ONLY
+
+    def __init__(self, host='', port=IMAP4_PORT, timeout=None):
+        self.debug = Debug
+        self.state = 'LOGOUT'
+        self.literal = None             # A literal argument to a command
+        self.tagged_commands = {}       # Tagged commands awaiting response
+        self.untagged_responses = {}    # {typ: [data, ...], ...}
+        self.continuation_response = ''  # Last continuation response
+        self.is_readonly = False        # READ-ONLY desired state
+        self.tagnum = 0
+        self._tls_established = False
+        self._mode_ascii()
+
+        # Open socket to server.
+
+        self.open(host, port, timeout)
+
+        try:
+            self._connect()
+        except Exception:
+            try:
+                self.shutdown()
+            except OSError:
+                pass
+            raise
+
+    def _mode_ascii(self):
+        self.utf8_enabled = False
+        self._encoding = 'ascii'
+        self.Literal = re.compile(_Literal, re.ASCII)
+        self.Untagged_status = re.compile(_Untagged_status, re.ASCII)
+
+
+    def _mode_utf8(self):
+        self.utf8_enabled = True
+        self._encoding = 'utf-8'
+        self.Literal = re.compile(_Literal)
+        self.Untagged_status = re.compile(_Untagged_status)
+
+
+    def _connect(self):
+        # Create unique tag for this session,
+        # and compile tagged response matcher.
+
+        self.tagpre = Int2AP(random.randint(4096, 65535))
+        self.tagre = re.compile(br'(?P<tag>'
+                        + self.tagpre
+                        + br'\d+) (?P<type>[A-Z]+) (?P<data>.*)', re.ASCII)
+
+        # Get server welcome message,
+        # request and store CAPABILITY response.
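+        # (Added note: a '* PREAUTH ...' greeting moves the session straight
+        # to the AUTH state, while '* OK ...' leaves it in NONAUTH, as the
+        # checks below show.)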
+
+        if __debug__:
+            self._cmd_log_len = 10
+            self._cmd_log_idx = 0
+            self._cmd_log = {}           # Last `_cmd_log_len' interactions
+            if self.debug >= 1:
+                self._mesg('imaplib version %s' % __version__)
+                self._mesg('new IMAP4 connection, tag=%s' % self.tagpre)
+
+        self.welcome = self._get_response()
+        if 'PREAUTH' in self.untagged_responses:
+            self.state = 'AUTH'
+        elif 'OK' in self.untagged_responses:
+            self.state = 'NONAUTH'
+        else:
+            raise self.error(self.welcome)
+
+        self._get_capabilities()
+        if __debug__:
+            if self.debug >= 3:
+                self._mesg('CAPABILITIES: %r' % (self.capabilities,))
+
+        for version in AllowedVersions:
+            if not version in self.capabilities:
+                continue
+            self.PROTOCOL_VERSION = version
+            return
+
+        raise self.error('server not IMAP4 compliant')
+
+
+    def __getattr__(self, attr):
+        #       Allow UPPERCASE variants of IMAP4 command methods.
+        if attr in Commands:
+            return getattr(self, attr.lower())
+        raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        if self.state == "LOGOUT":
+            return
+
+        try:
+            self.logout()
+        except OSError:
+            pass
+
+
+    #       Overridable methods
+
+
+    def _create_socket(self, timeout):
+        # Default value of IMAP4.host is '', but socket.getaddrinfo()
+        # (which is used by socket.create_connection()) expects None
+        # as a default value for host.
+        if timeout is not None and not timeout:
+            raise ValueError('Non-blocking socket (timeout=0) is not supported')
+        host = None if not self.host else self.host
+        sys.audit("imaplib.open", self, self.host, self.port)
+        address = (host, self.port)
+        if timeout is not None:
+            return socket.create_connection(address, timeout)
+        return socket.create_connection(address)
+
+    def open(self, host='', port=IMAP4_PORT, timeout=None):
+        """Setup connection to remote server on "host:port"
+            (default: localhost:standard IMAP4 port).
+        This connection will be used by the routines:
+            read, readline, send, shutdown.
+        """
+        self.host = host
+        self.port = port
+        self.sock = self._create_socket(timeout)
+        self.file = self.sock.makefile('rb')
+
+
+    def read(self, size):
+        """Read 'size' bytes from remote."""
+        return self.file.read(size)
+
+
+    def readline(self):
+        """Read line from remote."""
+        line = self.file.readline(_MAXLINE + 1)
+        if len(line) > _MAXLINE:
+            raise self.error("got more than %d bytes" % _MAXLINE)
+        return line
+
+
+    def send(self, data):
+        """Send data to remote."""
+        sys.audit("imaplib.send", self, data)
+        self.sock.sendall(data)
+
+
+    def shutdown(self):
+        """Close I/O established in "open"."""
+        self.file.close()
+        try:
+            self.sock.shutdown(socket.SHUT_RDWR)
+        except OSError as exc:
+            # The server might already have closed the connection.
+            # On Windows, this may result in WSAEINVAL (error 10022):
+            # An invalid operation was attempted.
+            if (exc.errno != errno.ENOTCONN
+                    and getattr(exc, 'winerror', 0) != 10022):
+                raise
+        finally:
+            self.sock.close()
+
+
+    def socket(self):
+        """Return socket instance used to connect to IMAP4 server.
+
+        socket = <instance>.socket()
+        """
+        return self.sock
+
+
+
+    #       Utility methods
+
+
+    def recent(self):
+        """Return most recent 'RECENT' responses if any exist,
+        else prompt server for an update using the 'NOOP' command.
+
+        (typ, [data]) = <instance>.recent()
+
+        'data' is None if no new messages,
+        else list of RECENT responses, most recent last.
+ """ + name = 'RECENT' + typ, dat = self._untagged_response('OK', [None], name) + if dat[-1]: + return typ, dat + typ, dat = self.noop() # Prod server for response + return self._untagged_response(typ, dat, name) + + + def response(self, code): + """Return data for response 'code' if received, or None. + + Old value for response 'code' is cleared. + + (code, [data]) = .response(code) + """ + return self._untagged_response(code, [None], code.upper()) + + + + # IMAP4 commands + + + def append(self, mailbox, flags, date_time, message): + """Append message to named mailbox. + + (typ, [data]) = .append(mailbox, flags, date_time, message) + + All args except `message' can be None. + """ + name = 'APPEND' + if not mailbox: + mailbox = 'INBOX' + if flags: + if (flags[0],flags[-1]) != ('(',')'): + flags = '(%s)' % flags + else: + flags = None + if date_time: + date_time = Time2Internaldate(date_time) + else: + date_time = None + literal = MapCRLF.sub(CRLF, message) + if self.utf8_enabled: + literal = b'UTF8 (' + literal + b')' + self.literal = literal + return self._simple_command(name, mailbox, flags, date_time) + + + def authenticate(self, mechanism, authobject): + """Authenticate command - requires response processing. + + 'mechanism' specifies which authentication mechanism is to + be used - it must appear in .capabilities in the + form AUTH=. + + 'authobject' must be a callable object: + + data = authobject(response) + + It will be called to process server continuation responses; the + response argument it is passed will be a bytes. It should return bytes + data that will be base64 encoded and sent to the server. It should + return None if the client abort response '*' should be sent instead. + """ + mech = mechanism.upper() + # XXX: shouldn't this code be removed, not commented out? + #cap = 'AUTH=%s' % mech + #if not cap in self.capabilities: # Let the server decide! + # raise self.error("Server doesn't allow %s authentication." % mech) + self.literal = _Authenticator(authobject).process + typ, dat = self._simple_command('AUTHENTICATE', mech) + if typ != 'OK': + raise self.error(dat[-1].decode('utf-8', 'replace')) + self.state = 'AUTH' + return typ, dat + + + def capability(self): + """(typ, [data]) = .capability() + Fetch capabilities list from server.""" + + name = 'CAPABILITY' + typ, dat = self._simple_command(name) + return self._untagged_response(typ, dat, name) + + + def check(self): + """Checkpoint mailbox on server. + + (typ, [data]) = .check() + """ + return self._simple_command('CHECK') + + + def close(self): + """Close currently selected mailbox. + + Deleted messages are removed from writable mailbox. + This is the recommended command before 'LOGOUT'. + + (typ, [data]) = .close() + """ + try: + typ, dat = self._simple_command('CLOSE') + finally: + self.state = 'AUTH' + return typ, dat + + + def copy(self, message_set, new_mailbox): + """Copy 'message_set' messages onto end of 'new_mailbox'. + + (typ, [data]) = .copy(message_set, new_mailbox) + """ + return self._simple_command('COPY', message_set, new_mailbox) + + + def create(self, mailbox): + """Create new mailbox. + + (typ, [data]) = .create(mailbox) + """ + return self._simple_command('CREATE', mailbox) + + + def delete(self, mailbox): + """Delete old mailbox. + + (typ, [data]) = .delete(mailbox) + """ + return self._simple_command('DELETE', mailbox) + + def deleteacl(self, mailbox, who): + """Delete the ACLs (remove any rights) set for who on mailbox. 
+ + (typ, [data]) = .deleteacl(mailbox, who) + """ + return self._simple_command('DELETEACL', mailbox, who) + + def enable(self, capability): + """Send an RFC5161 enable string to the server. + + (typ, [data]) = .enable(capability) + """ + if 'ENABLE' not in self.capabilities: + raise IMAP4.error("Server does not support ENABLE") + typ, data = self._simple_command('ENABLE', capability) + if typ == 'OK' and 'UTF8=ACCEPT' in capability.upper(): + self._mode_utf8() + return typ, data + + def expunge(self): + """Permanently remove deleted items from selected mailbox. + + Generates 'EXPUNGE' response for each deleted message. + + (typ, [data]) = .expunge() + + 'data' is list of 'EXPUNGE'd message numbers in order received. + """ + name = 'EXPUNGE' + typ, dat = self._simple_command(name) + return self._untagged_response(typ, dat, name) + + + def fetch(self, message_set, message_parts): + """Fetch (parts of) messages. + + (typ, [data, ...]) = .fetch(message_set, message_parts) + + 'message_parts' should be a string of selected parts + enclosed in parentheses, eg: "(UID BODY[TEXT])". + + 'data' are tuples of message part envelope and data. + """ + name = 'FETCH' + typ, dat = self._simple_command(name, message_set, message_parts) + return self._untagged_response(typ, dat, name) + + + def getacl(self, mailbox): + """Get the ACLs for a mailbox. + + (typ, [data]) = .getacl(mailbox) + """ + typ, dat = self._simple_command('GETACL', mailbox) + return self._untagged_response(typ, dat, 'ACL') + + + def getannotation(self, mailbox, entry, attribute): + """(typ, [data]) = .getannotation(mailbox, entry, attribute) + Retrieve ANNOTATIONs.""" + + typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute) + return self._untagged_response(typ, dat, 'ANNOTATION') + + + def getquota(self, root): + """Get the quota root's resource usage and limits. + + Part of the IMAP4 QUOTA extension defined in rfc2087. + + (typ, [data]) = .getquota(root) + """ + typ, dat = self._simple_command('GETQUOTA', root) + return self._untagged_response(typ, dat, 'QUOTA') + + + def getquotaroot(self, mailbox): + """Get the list of quota roots for the named mailbox. + + (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = .getquotaroot(mailbox) + """ + typ, dat = self._simple_command('GETQUOTAROOT', mailbox) + typ, quota = self._untagged_response(typ, dat, 'QUOTA') + typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT') + return typ, [quotaroot, quota] + + + def list(self, directory='""', pattern='*'): + """List mailbox names in directory matching pattern. + + (typ, [data]) = .list(directory='""', pattern='*') + + 'data' is list of LIST responses. + """ + name = 'LIST' + typ, dat = self._simple_command(name, directory, pattern) + return self._untagged_response(typ, dat, name) + + + def login(self, user, password): + """Identify client using plaintext password. + + (typ, [data]) = .login(user, password) + + NB: 'password' will be quoted. + """ + typ, dat = self._simple_command('LOGIN', user, self._quote(password)) + if typ != 'OK': + raise self.error(dat[-1]) + self.state = 'AUTH' + return typ, dat + + + def login_cram_md5(self, user, password): + """ Force use of CRAM-MD5 authentication. + + (typ, [data]) = .login_cram_md5(user, password) + """ + self.user, self.password = user, password + return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH) + + + def _CRAM_MD5_AUTH(self, challenge): + """ Authobject to use with CRAM-MD5 authentication. 
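+
+        (Illustrative, after the example in RFC 2195: for user 'tim' the
+        reply is 'tim b913a602c7eda7a495b4e6e7334d3890' -- the user name,
+        a space, and the hex HMAC-MD5 digest of the server challenge.)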
""" + import hmac + pwd = (self.password.encode('utf-8') if isinstance(self.password, str) + else self.password) + return self.user + " " + hmac.HMAC(pwd, challenge, 'md5').hexdigest() + + + def logout(self): + """Shutdown connection to server. + + (typ, [data]) = .logout() + + Returns server 'BYE' response. + """ + self.state = 'LOGOUT' + typ, dat = self._simple_command('LOGOUT') + self.shutdown() + return typ, dat + + + def lsub(self, directory='""', pattern='*'): + """List 'subscribed' mailbox names in directory matching pattern. + + (typ, [data, ...]) = .lsub(directory='""', pattern='*') + + 'data' are tuples of message part envelope and data. + """ + name = 'LSUB' + typ, dat = self._simple_command(name, directory, pattern) + return self._untagged_response(typ, dat, name) + + def myrights(self, mailbox): + """Show my ACLs for a mailbox (i.e. the rights that I have on mailbox). + + (typ, [data]) = .myrights(mailbox) + """ + typ,dat = self._simple_command('MYRIGHTS', mailbox) + return self._untagged_response(typ, dat, 'MYRIGHTS') + + def namespace(self): + """ Returns IMAP namespaces ala rfc2342 + + (typ, [data, ...]) = .namespace() + """ + name = 'NAMESPACE' + typ, dat = self._simple_command(name) + return self._untagged_response(typ, dat, name) + + + def noop(self): + """Send NOOP command. + + (typ, [data]) = .noop() + """ + if __debug__: + if self.debug >= 3: + self._dump_ur(self.untagged_responses) + return self._simple_command('NOOP') + + + def partial(self, message_num, message_part, start, length): + """Fetch truncated part of a message. + + (typ, [data, ...]) = .partial(message_num, message_part, start, length) + + 'data' is tuple of message part envelope and data. + """ + name = 'PARTIAL' + typ, dat = self._simple_command(name, message_num, message_part, start, length) + return self._untagged_response(typ, dat, 'FETCH') + + + def proxyauth(self, user): + """Assume authentication as "user". + + Allows an authorised administrator to proxy into any user's + mailbox. + + (typ, [data]) = .proxyauth(user) + """ + + name = 'PROXYAUTH' + return self._simple_command('PROXYAUTH', user) + + + def rename(self, oldmailbox, newmailbox): + """Rename old mailbox name to new. + + (typ, [data]) = .rename(oldmailbox, newmailbox) + """ + return self._simple_command('RENAME', oldmailbox, newmailbox) + + + def search(self, charset, *criteria): + """Search mailbox for matching messages. + + (typ, [data]) = .search(charset, criterion, ...) + + 'data' is space separated list of matching message numbers. + If UTF8 is enabled, charset MUST be None. + """ + name = 'SEARCH' + if charset: + if self.utf8_enabled: + raise IMAP4.error("Non-None charset not valid in UTF8 mode") + typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria) + else: + typ, dat = self._simple_command(name, *criteria) + return self._untagged_response(typ, dat, name) + + + def select(self, mailbox='INBOX', readonly=False): + """Select a mailbox. + + Flush all untagged responses. + + (typ, [data]) = .select(mailbox='INBOX', readonly=False) + + 'data' is count of messages in mailbox ('EXISTS' response). + + Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so + other responses should be obtained via .response('FLAGS') etc. + """ + self.untagged_responses = {} # Flush old responses. 
+ self.is_readonly = readonly + if readonly: + name = 'EXAMINE' + else: + name = 'SELECT' + typ, dat = self._simple_command(name, mailbox) + if typ != 'OK': + self.state = 'AUTH' # Might have been 'SELECTED' + return typ, dat + self.state = 'SELECTED' + if 'READ-ONLY' in self.untagged_responses \ + and not readonly: + if __debug__: + if self.debug >= 1: + self._dump_ur(self.untagged_responses) + raise self.readonly('%s is not writable' % mailbox) + return typ, self.untagged_responses.get('EXISTS', [None]) + + + def setacl(self, mailbox, who, what): + """Set a mailbox acl. + + (typ, [data]) = .setacl(mailbox, who, what) + """ + return self._simple_command('SETACL', mailbox, who, what) + + + def setannotation(self, *args): + """(typ, [data]) = .setannotation(mailbox[, entry, attribute]+) + Set ANNOTATIONs.""" + + typ, dat = self._simple_command('SETANNOTATION', *args) + return self._untagged_response(typ, dat, 'ANNOTATION') + + + def setquota(self, root, limits): + """Set the quota root's resource limits. + + (typ, [data]) = .setquota(root, limits) + """ + typ, dat = self._simple_command('SETQUOTA', root, limits) + return self._untagged_response(typ, dat, 'QUOTA') + + + def sort(self, sort_criteria, charset, *search_criteria): + """IMAP4rev1 extension SORT command. + + (typ, [data]) = .sort(sort_criteria, charset, search_criteria, ...) + """ + name = 'SORT' + #if not name in self.capabilities: # Let the server decide! + # raise self.error('unimplemented extension command: %s' % name) + if (sort_criteria[0],sort_criteria[-1]) != ('(',')'): + sort_criteria = '(%s)' % sort_criteria + typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria) + return self._untagged_response(typ, dat, name) + + + def starttls(self, ssl_context=None): + name = 'STARTTLS' + if not HAVE_SSL: + raise self.error('SSL support missing') + if self._tls_established: + raise self.abort('TLS session already established') + if name not in self.capabilities: + raise self.abort('TLS not supported by server') + # Generate a default SSL context if none was passed. + if ssl_context is None: + ssl_context = ssl._create_stdlib_context() + typ, dat = self._simple_command(name) + if typ == 'OK': + self.sock = ssl_context.wrap_socket(self.sock, + server_hostname=self.host) + self.file = self.sock.makefile('rb') + self._tls_established = True + self._get_capabilities() + else: + raise self.error("Couldn't establish TLS session") + return self._untagged_response(typ, dat, name) + + + def status(self, mailbox, names): + """Request named status conditions for mailbox. + + (typ, [data]) = .status(mailbox, names) + """ + name = 'STATUS' + #if self.PROTOCOL_VERSION == 'IMAP4': # Let the server decide! + # raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name) + typ, dat = self._simple_command(name, mailbox, names) + return self._untagged_response(typ, dat, name) + + + def store(self, message_set, command, flags): + """Alters flag dispositions for messages in mailbox. + + (typ, [data]) = .store(message_set, command, flags) + """ + if (flags[0],flags[-1]) != ('(',')'): + flags = '(%s)' % flags # Avoid quoting the flags + typ, dat = self._simple_command('STORE', message_set, command, flags) + return self._untagged_response(typ, dat, 'FETCH') + + + def subscribe(self, mailbox): + """Subscribe to new mailbox. 
+ + (typ, [data]) = .subscribe(mailbox) + """ + return self._simple_command('SUBSCRIBE', mailbox) + + + def thread(self, threading_algorithm, charset, *search_criteria): + """IMAPrev1 extension THREAD command. + + (type, [data]) = .thread(threading_algorithm, charset, search_criteria, ...) + """ + name = 'THREAD' + typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria) + return self._untagged_response(typ, dat, name) + + + def uid(self, command, *args): + """Execute "command arg ..." with messages identified by UID, + rather than message number. + + (typ, [data]) = .uid(command, arg1, arg2, ...) + + Returns response appropriate to 'command'. + """ + command = command.upper() + if not command in Commands: + raise self.error("Unknown IMAP4 UID command: %s" % command) + if self.state not in Commands[command]: + raise self.error("command %s illegal in state %s, " + "only allowed in states %s" % + (command, self.state, + ', '.join(Commands[command]))) + name = 'UID' + typ, dat = self._simple_command(name, command, *args) + if command in ('SEARCH', 'SORT', 'THREAD'): + name = command + else: + name = 'FETCH' + return self._untagged_response(typ, dat, name) + + + def unsubscribe(self, mailbox): + """Unsubscribe from old mailbox. + + (typ, [data]) = .unsubscribe(mailbox) + """ + return self._simple_command('UNSUBSCRIBE', mailbox) + + + def unselect(self): + """Free server's resources associated with the selected mailbox + and returns the server to the authenticated state. + This command performs the same actions as CLOSE, except + that no messages are permanently removed from the currently + selected mailbox. + + (typ, [data]) = .unselect() + """ + try: + typ, data = self._simple_command('UNSELECT') + finally: + self.state = 'AUTH' + return typ, data + + + def xatom(self, name, *args): + """Allow simple extension commands + notified by server in CAPABILITY response. + + Assumes command is legal in current state. + + (typ, [data]) = .xatom(name, arg, ...) + + Returns response appropriate to extension command `name'. + """ + name = name.upper() + #if not name in self.capabilities: # Let the server decide! 
+ # raise self.error('unknown extension command: %s' % name) + if not name in Commands: + Commands[name] = (self.state,) + return self._simple_command(name, *args) + + + + # Private methods + + + def _append_untagged(self, typ, dat): + if dat is None: + dat = b'' + ur = self.untagged_responses + if __debug__: + if self.debug >= 5: + self._mesg('untagged_responses[%s] %s += ["%r"]' % + (typ, len(ur.get(typ,'')), dat)) + if typ in ur: + ur[typ].append(dat) + else: + ur[typ] = [dat] + + + def _check_bye(self): + bye = self.untagged_responses.get('BYE') + if bye: + raise self.abort(bye[-1].decode(self._encoding, 'replace')) + + + def _command(self, name, *args): + + if self.state not in Commands[name]: + self.literal = None + raise self.error("command %s illegal in state %s, " + "only allowed in states %s" % + (name, self.state, + ', '.join(Commands[name]))) + + for typ in ('OK', 'NO', 'BAD'): + if typ in self.untagged_responses: + del self.untagged_responses[typ] + + if 'READ-ONLY' in self.untagged_responses \ + and not self.is_readonly: + raise self.readonly('mailbox status changed to READ-ONLY') + + tag = self._new_tag() + name = bytes(name, self._encoding) + data = tag + b' ' + name + for arg in args: + if arg is None: continue + if isinstance(arg, str): + arg = bytes(arg, self._encoding) + data = data + b' ' + arg + + literal = self.literal + if literal is not None: + self.literal = None + if type(literal) is type(self._command): + literator = literal + else: + literator = None + data = data + bytes(' {%s}' % len(literal), self._encoding) + + if __debug__: + if self.debug >= 4: + self._mesg('> %r' % data) + else: + self._log('> %r' % data) + + try: + self.send(data + CRLF) + except OSError as val: + raise self.abort('socket error: %s' % val) + + if literal is None: + return tag + + while 1: + # Wait for continuation response + + while self._get_response(): + if self.tagged_commands[tag]: # BAD/NO? + return tag + + # Send literal + + if literator: + literal = literator(self.continuation_response) + + if __debug__: + if self.debug >= 4: + self._mesg('write literal size %s' % len(literal)) + + try: + self.send(literal) + self.send(CRLF) + except OSError as val: + raise self.abort('socket error: %s' % val) + + if not literator: + break + + return tag + + + def _command_complete(self, name, tag): + logout = (name == 'LOGOUT') + # BYE is expected after LOGOUT + if not logout: + self._check_bye() + try: + typ, data = self._get_tagged_response(tag, expect_bye=logout) + except self.abort as val: + raise self.abort('command: %s => %s' % (name, val)) + except self.error as val: + raise self.error('command: %s => %s' % (name, val)) + if not logout: + self._check_bye() + if typ == 'BAD': + raise self.error('%s command error: %s %s' % (name, typ, data)) + return typ, data + + + def _get_capabilities(self): + typ, dat = self.capability() + if dat == [None]: + raise self.error('no CAPABILITY response from server') + dat = str(dat[-1], self._encoding) + dat = dat.upper() + self.capabilities = tuple(dat.split()) + + + def _get_response(self): + + # Read response and store. + # + # Returns None for continuation responses, + # otherwise first response line received. + + resp = self._get_line() + + # Command completion response? 
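+        # (Illustrative wire format, not executed: a tagged completion looks
+        # like b'KLMN3 OK FETCH completed', an untagged response like
+        # b'* 22 EXISTS', and a continuation request like b'+ go ahead'.)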
+ + if self._match(self.tagre, resp): + tag = self.mo.group('tag') + if not tag in self.tagged_commands: + raise self.abort('unexpected tagged response: %r' % resp) + + typ = self.mo.group('type') + typ = str(typ, self._encoding) + dat = self.mo.group('data') + self.tagged_commands[tag] = (typ, [dat]) + else: + dat2 = None + + # '*' (untagged) responses? + + if not self._match(Untagged_response, resp): + if self._match(self.Untagged_status, resp): + dat2 = self.mo.group('data2') + + if self.mo is None: + # Only other possibility is '+' (continuation) response... + + if self._match(Continuation, resp): + self.continuation_response = self.mo.group('data') + return None # NB: indicates continuation + + raise self.abort("unexpected response: %r" % resp) + + typ = self.mo.group('type') + typ = str(typ, self._encoding) + dat = self.mo.group('data') + if dat is None: dat = b'' # Null untagged response + if dat2: dat = dat + b' ' + dat2 + + # Is there a literal to come? + + while self._match(self.Literal, dat): + + # Read literal direct from connection. + + size = int(self.mo.group('size')) + if __debug__: + if self.debug >= 4: + self._mesg('read literal size %s' % size) + data = self.read(size) + + # Store response with literal as tuple + + self._append_untagged(typ, (dat, data)) + + # Read trailer - possibly containing another literal + + dat = self._get_line() + + self._append_untagged(typ, dat) + + # Bracketed response information? + + if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat): + typ = self.mo.group('type') + typ = str(typ, self._encoding) + self._append_untagged(typ, self.mo.group('data')) + + if __debug__: + if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'): + self._mesg('%s response: %r' % (typ, dat)) + + return resp + + + def _get_tagged_response(self, tag, expect_bye=False): + + while 1: + result = self.tagged_commands[tag] + if result is not None: + del self.tagged_commands[tag] + return result + + if expect_bye: + typ = 'BYE' + bye = self.untagged_responses.pop(typ, None) + if bye is not None: + # Server replies to the "LOGOUT" command with "BYE" + return (typ, bye) + + # If we've seen a BYE at this point, the socket will be + # closed, so report the BYE now. + self._check_bye() + + # Some have reported "unexpected response" exceptions. + # Note that ignoring them here causes loops. + # Instead, send me details of the unexpected response and + # I'll update the code in `_get_response()'. + + try: + self._get_response() + except self.abort as val: + if __debug__: + if self.debug >= 1: + self.print_log() + raise + + + def _get_line(self): + + line = self.readline() + if not line: + raise self.abort('socket error: EOF') + + # Protocol mandates all lines terminated by CRLF + if not line.endswith(b'\r\n'): + raise self.abort('socket error: unterminated line: %r' % line) + + line = line[:-2] + if __debug__: + if self.debug >= 4: + self._mesg('< %r' % line) + else: + self._log('< %r' % line) + return line + + + def _match(self, cre, s): + + # Run compiled regular expression match method on 's'. + # Save result, return success. 
+ + self.mo = cre.match(s) + if __debug__: + if self.mo is not None and self.debug >= 5: + self._mesg("\tmatched %r => %r" % (cre.pattern, self.mo.groups())) + return self.mo is not None + + + def _new_tag(self): + + tag = self.tagpre + bytes(str(self.tagnum), self._encoding) + self.tagnum = self.tagnum + 1 + self.tagged_commands[tag] = None + return tag + + + def _quote(self, arg): + + arg = arg.replace('\\', '\\\\') + arg = arg.replace('"', '\\"') + + return '"' + arg + '"' + + + def _simple_command(self, name, *args): + + return self._command_complete(name, self._command(name, *args)) + + + def _untagged_response(self, typ, dat, name): + if typ == 'NO': + return typ, dat + if not name in self.untagged_responses: + return typ, [None] + data = self.untagged_responses.pop(name) + if __debug__: + if self.debug >= 5: + self._mesg('untagged_responses[%s] => %s' % (name, data)) + return typ, data + + + if __debug__: + + def _mesg(self, s, secs=None): + if secs is None: + secs = time.time() + tm = time.strftime('%M:%S', time.localtime(secs)) + sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s)) + sys.stderr.flush() + + def _dump_ur(self, untagged_resp_dict): + if not untagged_resp_dict: + return + items = (f'{key}: {value!r}' + for key, value in untagged_resp_dict.items()) + self._mesg('untagged responses dump:' + '\n\t\t'.join(items)) + + def _log(self, line): + # Keep log of last `_cmd_log_len' interactions for debugging. + self._cmd_log[self._cmd_log_idx] = (line, time.time()) + self._cmd_log_idx += 1 + if self._cmd_log_idx >= self._cmd_log_len: + self._cmd_log_idx = 0 + + def print_log(self): + self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log)) + i, n = self._cmd_log_idx, self._cmd_log_len + while n: + try: + self._mesg(*self._cmd_log[i]) + except: + pass + i += 1 + if i >= self._cmd_log_len: + i = 0 + n -= 1 + + +if HAVE_SSL: + + class IMAP4_SSL(IMAP4): + + """IMAP4 client class over SSL connection + + Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile[, ssl_context[, timeout=None]]]]]]) + + host - host's name (default: localhost); + port - port number (default: standard IMAP4 SSL port); + keyfile - PEM formatted file that contains your private key (default: None); + certfile - PEM formatted certificate chain file (default: None); + ssl_context - a SSLContext object that contains your certificate chain + and private key (default: None) + Note: if ssl_context is provided, then parameters keyfile or + certfile should not be set otherwise ValueError is raised. + timeout - socket timeout (default: None) If timeout is not given or is None, + the global default socket timeout is used + + for more documentation see the docstring of the parent class IMAP4. 
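+
+    Typical use (hypothetical host and credentials)::
+
+        with IMAP4_SSL('imap.example.org') as M:
+            M.login('user', 'password')
+            M.select()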
+ """ + + + def __init__(self, host='', port=IMAP4_SSL_PORT, keyfile=None, + certfile=None, ssl_context=None, timeout=None): + if ssl_context is not None and keyfile is not None: + raise ValueError("ssl_context and keyfile arguments are mutually " + "exclusive") + if ssl_context is not None and certfile is not None: + raise ValueError("ssl_context and certfile arguments are mutually " + "exclusive") + if keyfile is not None or certfile is not None: + import warnings + warnings.warn("keyfile and certfile are deprecated, use a " + "custom ssl_context instead", DeprecationWarning, 2) + self.keyfile = keyfile + self.certfile = certfile + if ssl_context is None: + ssl_context = ssl._create_stdlib_context(certfile=certfile, + keyfile=keyfile) + self.ssl_context = ssl_context + IMAP4.__init__(self, host, port, timeout) + + def _create_socket(self, timeout): + sock = IMAP4._create_socket(self, timeout) + return self.ssl_context.wrap_socket(sock, + server_hostname=self.host) + + def open(self, host='', port=IMAP4_SSL_PORT, timeout=None): + """Setup connection to remote server on "host:port". + (default: localhost:standard IMAP4 SSL port). + This connection will be used by the routines: + read, readline, send, shutdown. + """ + IMAP4.open(self, host, port, timeout) + + __all__.append("IMAP4_SSL") + + +class IMAP4_stream(IMAP4): + + """IMAP4 client class over a stream + + Instantiate with: IMAP4_stream(command) + + "command" - a string that can be passed to subprocess.Popen() + + for more documentation see the docstring of the parent class IMAP4. + """ + + + def __init__(self, command): + self.command = command + IMAP4.__init__(self) + + + def open(self, host=None, port=None, timeout=None): + """Setup a stream connection. + This connection will be used by the routines: + read, readline, send, shutdown. + """ + self.host = None # For compatibility with parent class + self.port = None + self.sock = None + self.file = None + self.process = subprocess.Popen(self.command, + bufsize=DEFAULT_BUFFER_SIZE, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + shell=True, close_fds=True) + self.writefile = self.process.stdin + self.readfile = self.process.stdout + + def read(self, size): + """Read 'size' bytes from remote.""" + return self.readfile.read(size) + + + def readline(self): + """Read line from remote.""" + return self.readfile.readline() + + + def send(self, data): + """Send data to remote.""" + self.writefile.write(data) + self.writefile.flush() + + + def shutdown(self): + """Close I/O established in "open".""" + self.readfile.close() + self.writefile.close() + self.process.wait() + + + +class _Authenticator: + + """Private class to provide en/decoding + for base64-based authentication conversation. + """ + + def __init__(self, mechinst): + self.mech = mechinst # Callable object to provide/process data + + def process(self, data): + ret = self.mech(self.decode(data)) + if ret is None: + return b'*' # Abort conversation + return self.encode(ret) + + def encode(self, inp): + # + # Invoke binascii.b2a_base64 iteratively with + # short even length buffers, strip the trailing + # line feed from the result and append. "Even" + # means a number that factors to both 6 and 8, + # so when it gets to the end of the 8-bit input + # there's no partial 6-bit output. 
+ # + oup = b'' + if isinstance(inp, str): + inp = inp.encode('utf-8') + while inp: + if len(inp) > 48: + t = inp[:48] + inp = inp[48:] + else: + t = inp + inp = b'' + e = binascii.b2a_base64(t) + if e: + oup = oup + e[:-1] + return oup + + def decode(self, inp): + if not inp: + return b'' + return binascii.a2b_base64(inp) + +Months = ' Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split(' ') +Mon2num = {s.encode():n+1 for n, s in enumerate(Months[1:])} + +def Internaldate2tuple(resp): + """Parse an IMAP4 INTERNALDATE string. + + Return corresponding local time. The return value is a + time.struct_time tuple or None if the string has wrong format. + """ + + mo = InternalDate.match(resp) + if not mo: + return None + + mon = Mon2num[mo.group('mon')] + zonen = mo.group('zonen') + + day = int(mo.group('day')) + year = int(mo.group('year')) + hour = int(mo.group('hour')) + min = int(mo.group('min')) + sec = int(mo.group('sec')) + zoneh = int(mo.group('zoneh')) + zonem = int(mo.group('zonem')) + + # INTERNALDATE timezone must be subtracted to get UT + + zone = (zoneh*60 + zonem)*60 + if zonen == b'-': + zone = -zone + + tt = (year, mon, day, hour, min, sec, -1, -1, -1) + utc = calendar.timegm(tt) - zone + + return time.localtime(utc) + + + +def Int2AP(num): + + """Convert integer to A-P string representation.""" + + val = b''; AP = b'ABCDEFGHIJKLMNOP' + num = int(abs(num)) + while num: + num, mod = divmod(num, 16) + val = AP[mod:mod+1] + val + return val + + + +def ParseFlags(resp): + + """Convert IMAP4 flags response to python tuple.""" + + mo = Flags.match(resp) + if not mo: + return () + + return tuple(mo.group('flags').split()) + + +def Time2Internaldate(date_time): + + """Convert date_time to IMAP4 INTERNALDATE representation. + + Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The + date_time argument can be a number (int or float) representing + seconds since epoch (as returned by time.time()), a 9-tuple + representing local time, an instance of time.struct_time (as + returned by time.localtime()), an aware datetime instance or a + double-quoted string. In the last case, it is assumed to already + be in the correct format. 
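+
+    Example (illustrative; the zone suffix reflects the local timezone)::
+
+        Time2Internaldate(0)    # e.g. '"01-Jan-1970 00:00:00 +0000"'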
+ """ + if isinstance(date_time, (int, float)): + dt = datetime.fromtimestamp(date_time, + timezone.utc).astimezone() + elif isinstance(date_time, tuple): + try: + gmtoff = date_time.tm_gmtoff + except AttributeError: + if time.daylight: + dst = date_time[8] + if dst == -1: + dst = time.localtime(time.mktime(date_time))[8] + gmtoff = -(time.timezone, time.altzone)[dst] + else: + gmtoff = -time.timezone + delta = timedelta(seconds=gmtoff) + dt = datetime(*date_time[:6], tzinfo=timezone(delta)) + elif isinstance(date_time, datetime): + if date_time.tzinfo is None: + raise ValueError("date_time must be aware") + dt = date_time + elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'): + return date_time # Assume in correct format + else: + raise ValueError("date_time not of a known type") + fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month]) + return dt.strftime(fmt) + + + +if __name__ == '__main__': + + # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]' + # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"' + # to test the IMAP4_stream class + + import getopt, getpass + + try: + optlist, args = getopt.getopt(sys.argv[1:], 'd:s:') + except getopt.error as val: + optlist, args = (), () + + stream_command = None + for opt,val in optlist: + if opt == '-d': + Debug = int(val) + elif opt == '-s': + stream_command = val + if not args: args = (stream_command,) + + if not args: args = ('',) + + host = args[0] + + USER = getpass.getuser() + PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost")) + + test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'} + test_seq1 = ( + ('login', (USER, PASSWD)), + ('create', ('/tmp/xxx 1',)), + ('rename', ('/tmp/xxx 1', '/tmp/yyy')), + ('CREATE', ('/tmp/yyz 2',)), + ('append', ('/tmp/yyz 2', None, None, test_mesg)), + ('list', ('/tmp', 'yy*')), + ('select', ('/tmp/yyz 2',)), + ('search', (None, 'SUBJECT', 'test')), + ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')), + ('store', ('1', 'FLAGS', r'(\Deleted)')), + ('namespace', ()), + ('expunge', ()), + ('recent', ()), + ('close', ()), + ) + + test_seq2 = ( + ('select', ()), + ('response',('UIDVALIDITY',)), + ('uid', ('SEARCH', 'ALL')), + ('response', ('EXISTS',)), + ('append', (None, None, None, test_mesg)), + ('recent', ()), + ('logout', ()), + ) + + def run(cmd, args): + M._mesg('%s %s' % (cmd, args)) + typ, dat = getattr(M, cmd)(*args) + M._mesg('%s => %s %s' % (cmd, typ, dat)) + if typ == 'NO': raise dat[0] + return dat + + try: + if stream_command: + M = IMAP4_stream(stream_command) + else: + M = IMAP4(host) + if M.state == 'AUTH': + test_seq1 = test_seq1[1:] # Login not needed + M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION) + M._mesg('CAPABILITIES = %r' % (M.capabilities,)) + + for cmd,args in test_seq1: + run(cmd, args) + + for ml in run('list', ('/tmp/', 'yy%')): + mo = re.match(r'.*"([^"]+)"$', ml) + if mo: path = mo.group(1) + else: path = ml.split()[-1] + run('delete', (path,)) + + for cmd,args in test_seq2: + dat = run(cmd, args) + + if (cmd,args) != ('uid', ('SEARCH', 'ALL')): + continue + + uid = dat[-1].split() + if not uid: continue + run('uid', ('FETCH', '%s' % uid[-1], + '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)')) + + print('\nAll tests OK.') + + except: + print('\nTests failed.') + + if not Debug: + print(''' +If you would like to see debugging output, +try: %s -d5 +''' % sys.argv[0]) + + raise diff --git 
a/pllava/lib/python3.10/io.py b/pllava/lib/python3.10/io.py new file mode 100644 index 0000000000000000000000000000000000000000..2a6140c3dd50940f771268aafc20fe40efa01302 --- /dev/null +++ b/pllava/lib/python3.10/io.py @@ -0,0 +1,114 @@ +"""The io module provides the Python interfaces to stream handling. The +builtin open function is defined in this module. + +At the top of the I/O hierarchy is the abstract base class IOBase. It +defines the basic interface to a stream. Note, however, that there is no +separation between reading and writing to streams; implementations are +allowed to raise an OSError if they do not support a given operation. + +Extending IOBase is RawIOBase which deals simply with the reading and +writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide +an interface to OS files. + +BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its +subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer +streams that are readable, writable, and both respectively. +BufferedRandom provides a buffered interface to random access +streams. BytesIO is a simple stream of in-memory bytes. + +Another IOBase subclass, TextIOBase, deals with the encoding and decoding +of streams into text. TextIOWrapper, which extends it, is a buffered text +interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO +is an in-memory stream for text. + +Argument names are not part of the specification, and only the arguments +of open() are intended to be used as keyword arguments. + +data: + +DEFAULT_BUFFER_SIZE + + An int containing the default buffer size used by the module's buffered + I/O classes. open() uses the file's blksize (as obtained by os.stat) if + possible. +""" +# New I/O library conforming to PEP 3116. + +__author__ = ("Guido van Rossum , " + "Mike Verdone , " + "Mark Russell , " + "Antoine Pitrou , " + "Amaury Forgeot d'Arc , " + "Benjamin Peterson ") + +__all__ = ["BlockingIOError", "open", "open_code", "IOBase", "RawIOBase", + "FileIO", "BytesIO", "StringIO", "BufferedIOBase", + "BufferedReader", "BufferedWriter", "BufferedRWPair", + "BufferedRandom", "TextIOBase", "TextIOWrapper", + "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"] + + +import _io +import abc + +from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, + open, open_code, FileIO, BytesIO, StringIO, BufferedReader, + BufferedWriter, BufferedRWPair, BufferedRandom, + IncrementalNewlineDecoder, text_encoding, TextIOWrapper) + + +def __getattr__(name): + if name == "OpenWrapper": + # bpo-43680: Until Python 3.9, _pyio.open was not a static method and + # builtins.open was set to OpenWrapper to not become a bound method + # when set to a class variable. _io.open is a built-in function whereas + # _pyio.open is a Python function. In Python 3.10, _pyio.open() is now + # a static method, and builtins.open() is now io.open(). + import warnings + warnings.warn('OpenWrapper is deprecated, use open instead', + DeprecationWarning, stacklevel=2) + global OpenWrapper + OpenWrapper = open + return OpenWrapper + raise AttributeError(name) + + +# Pretend this exception was created here. +UnsupportedOperation.__module__ = "io" + +# for seek() +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Declaring ABCs in C is tricky so we do it here. +# Method descriptions and default implementations are inherited from the C +# version however. 
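+# (Illustrative: once the registrations below have run,
+# issubclass(BytesIO, BufferedIOBase) and issubclass(StringIO, TextIOBase)
+# both report True, although neither class inherits from the ABC.)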
+class IOBase(_io._IOBase, metaclass=abc.ABCMeta): + __doc__ = _io._IOBase.__doc__ + +class RawIOBase(_io._RawIOBase, IOBase): + __doc__ = _io._RawIOBase.__doc__ + +class BufferedIOBase(_io._BufferedIOBase, IOBase): + __doc__ = _io._BufferedIOBase.__doc__ + +class TextIOBase(_io._TextIOBase, IOBase): + __doc__ = _io._TextIOBase.__doc__ + +RawIOBase.register(FileIO) + +for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, + BufferedRWPair): + BufferedIOBase.register(klass) + +for klass in (StringIO, TextIOWrapper): + TextIOBase.register(klass) +del klass + +try: + from _io import _WindowsConsoleIO +except ImportError: + pass +else: + RawIOBase.register(_WindowsConsoleIO) diff --git a/pllava/lib/python3.10/keyword.py b/pllava/lib/python3.10/keyword.py new file mode 100644 index 0000000000000000000000000000000000000000..cc2b46b7229d53a4df953ca730397cfe10da9eb1 --- /dev/null +++ b/pllava/lib/python3.10/keyword.py @@ -0,0 +1,63 @@ +"""Keywords (from "Grammar/python.gram") + +This file is automatically generated; please don't muck it up! + +To update the symbols in this file, 'cd' to the top directory of +the python source tree and run: + + PYTHONPATH=Tools/peg_generator python3 -m pegen.keywordgen \ + Grammar/python.gram \ + Grammar/Tokens \ + Lib/keyword.py + +Alternatively, you can run 'make regen-keyword'. +""" + +__all__ = ["iskeyword", "issoftkeyword", "kwlist", "softkwlist"] + +kwlist = [ + 'False', + 'None', + 'True', + 'and', + 'as', + 'assert', + 'async', + 'await', + 'break', + 'class', + 'continue', + 'def', + 'del', + 'elif', + 'else', + 'except', + 'finally', + 'for', + 'from', + 'global', + 'if', + 'import', + 'in', + 'is', + 'lambda', + 'nonlocal', + 'not', + 'or', + 'pass', + 'raise', + 'return', + 'try', + 'while', + 'with', + 'yield' +] + +softkwlist = [ + '_', + 'case', + 'match' +] + +iskeyword = frozenset(kwlist).__contains__ +issoftkeyword = frozenset(softkwlist).__contains__ diff --git a/pllava/lib/python3.10/linecache.py b/pllava/lib/python3.10/linecache.py new file mode 100644 index 0000000000000000000000000000000000000000..97644a8e3794e17151a415bb92af22cda8d595bb --- /dev/null +++ b/pllava/lib/python3.10/linecache.py @@ -0,0 +1,182 @@ +"""Cache lines from Python source files. + +This is intended to read lines from modules imported -- hence if a filename +is not found, it will look down the module search path for a file by +that name. +""" + +import functools +import sys +import os +import tokenize + +__all__ = ["getline", "clearcache", "checkcache", "lazycache"] + + +# The cache. Maps filenames to either a thunk which will provide source code, +# or a tuple (size, mtime, lines, fullname) once loaded. +cache = {} + + +def clearcache(): + """Clear the cache entirely.""" + cache.clear() + + +def getline(filename, lineno, module_globals=None): + """Get a line for a Python source file from the cache. + Update the cache if it doesn't contain an entry for this file already.""" + + lines = getlines(filename, module_globals) + if 1 <= lineno <= len(lines): + return lines[lineno - 1] + return '' + + +def getlines(filename, module_globals=None): + """Get the lines for a Python source file from the cache. 
+ Update the cache if it doesn't contain an entry for this file already.""" + + if filename in cache: + entry = cache[filename] + if len(entry) != 1: + return cache[filename][2] + + try: + return updatecache(filename, module_globals) + except MemoryError: + clearcache() + return [] + + +def checkcache(filename=None): + """Discard cache entries that are out of date. + (This is not checked upon each call!)""" + + if filename is None: + filenames = list(cache.keys()) + elif filename in cache: + filenames = [filename] + else: + return + + for filename in filenames: + entry = cache[filename] + if len(entry) == 1: + # lazy cache entry, leave it lazy. + continue + size, mtime, lines, fullname = entry + if mtime is None: + continue # no-op for files loaded via a __loader__ + try: + stat = os.stat(fullname) + except OSError: + cache.pop(filename, None) + continue + if size != stat.st_size or mtime != stat.st_mtime: + cache.pop(filename, None) + + +def updatecache(filename, module_globals=None): + """Update a cache entry and return its list of lines. + If something's wrong, print a message, discard the cache entry, + and return an empty list.""" + + if filename in cache: + if len(cache[filename]) != 1: + cache.pop(filename, None) + if not filename or (filename.startswith('<') and filename.endswith('>')): + return [] + + fullname = filename + try: + stat = os.stat(fullname) + except OSError: + basename = filename + + # Realise a lazy loader based lookup if there is one + # otherwise try to lookup right now. + if lazycache(filename, module_globals): + try: + data = cache[filename][0]() + except (ImportError, OSError): + pass + else: + if data is None: + # No luck, the PEP302 loader cannot find the source + # for this module. + return [] + cache[filename] = ( + len(data), + None, + [line + '\n' for line in data.splitlines()], + fullname + ) + return cache[filename][2] + + # Try looking through the module search path, which is only useful + # when handling a relative filename. + if os.path.isabs(filename): + return [] + + for dirname in sys.path: + try: + fullname = os.path.join(dirname, basename) + except (TypeError, AttributeError): + # Not sufficiently string-like to do anything useful with. + continue + try: + stat = os.stat(fullname) + break + except OSError: + pass + else: + return [] + try: + with tokenize.open(fullname) as fp: + lines = fp.readlines() + except (OSError, UnicodeDecodeError, SyntaxError): + return [] + if lines and not lines[-1].endswith('\n'): + lines[-1] += '\n' + size, mtime = stat.st_size, stat.st_mtime + cache[filename] = size, mtime, lines, fullname + return lines + + +def lazycache(filename, module_globals): + """Seed the cache for filename with module_globals. + + The module loader will be asked for the source only when getlines is + called, not immediately. + + If there is an entry in the cache already, it is not altered. + + :return: True if a lazy load is registered in the cache, + otherwise False. To register such a load a module loader with a + get_source method must be found, the filename must be a cacheable + filename, and the filename must not be already cached. 
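+
+    A minimal sketch (names are illustrative)::
+
+        lazycache(filename, mod.__dict__)   # True if a get_source-capable
+                                            # loader was registered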
+ """ + if filename in cache: + if len(cache[filename]) == 1: + return True + else: + return False + if not filename or (filename.startswith('<') and filename.endswith('>')): + return False + # Try for a __loader__, if available + if module_globals and '__name__' in module_globals: + name = module_globals['__name__'] + if (loader := module_globals.get('__loader__')) is None: + if spec := module_globals.get('__spec__'): + try: + loader = spec.loader + except AttributeError: + pass + get_source = getattr(loader, 'get_source', None) + + if name and get_source: + get_lines = functools.partial(get_source, name) + cache[filename] = (get_lines,) + return True + return False diff --git a/pllava/lib/python3.10/locale.py b/pllava/lib/python3.10/locale.py new file mode 100644 index 0000000000000000000000000000000000000000..6d4f51929923f6f865dd774ddc5c7d601f540816 --- /dev/null +++ b/pllava/lib/python3.10/locale.py @@ -0,0 +1,1761 @@ +"""Locale support module. + +The module provides low-level access to the C lib's locale APIs and adds high +level number formatting APIs as well as a locale aliasing engine to complement +these. + +The aliasing engine includes support for many commonly used locale names and +maps them to values suitable for passing to the C lib's setlocale() function. It +also includes default encodings for all supported locale names. + +""" + +import sys +import encodings +import encodings.aliases +import re +import _collections_abc +from builtins import str as _builtin_str +import functools + +# Try importing the _locale module. +# +# If this fails, fall back on a basic 'C' locale emulation. + +# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before +# trying the import. So __all__ is also fiddled at the end of the file. +__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error", + "setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm", + "str", "atof", "atoi", "format", "format_string", "currency", + "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY", + "LC_NUMERIC", "LC_ALL", "CHAR_MAX"] + +def _strcoll(a,b): + """ strcoll(string,string) -> int. + Compares two strings according to the locale. + """ + return (a > b) - (a < b) + +def _strxfrm(s): + """ strxfrm(string) -> string. + Returns a string that behaves for cmp locale-aware. + """ + return s + +try: + + from _locale import * + +except ImportError: + + # Locale emulation + + CHAR_MAX = 127 + LC_ALL = 6 + LC_COLLATE = 3 + LC_CTYPE = 0 + LC_MESSAGES = 5 + LC_MONETARY = 4 + LC_NUMERIC = 1 + LC_TIME = 2 + Error = ValueError + + def localeconv(): + """ localeconv() -> dict. + Returns numeric and monetary locale-specific parameters. + """ + # 'C' locale default values + return {'grouping': [127], + 'currency_symbol': '', + 'n_sign_posn': 127, + 'p_cs_precedes': 127, + 'n_cs_precedes': 127, + 'mon_grouping': [], + 'n_sep_by_space': 127, + 'decimal_point': '.', + 'negative_sign': '', + 'positive_sign': '', + 'p_sep_by_space': 127, + 'int_curr_symbol': '', + 'p_sign_posn': 127, + 'thousands_sep': '', + 'mon_thousands_sep': '', + 'frac_digits': 127, + 'mon_decimal_point': '', + 'int_frac_digits': 127} + + def setlocale(category, value=None): + """ setlocale(integer,string=None) -> string. + Activates/queries locale processing. + """ + if value not in (None, '', 'C'): + raise Error('_locale emulation only supports "C" locale') + return 'C' + +# These may or may not exist in _locale, so be sure to set them. 
+if 'strxfrm' not in globals():
+    strxfrm = _strxfrm
+if 'strcoll' not in globals():
+    strcoll = _strcoll
+
+
+_localeconv = localeconv
+
+# With this dict, you can override some items of localeconv's return value.
+# This is useful for testing purposes.
+_override_localeconv = {}
+
+@functools.wraps(_localeconv)
+def localeconv():
+    d = _localeconv()
+    if _override_localeconv:
+        d.update(_override_localeconv)
+    return d
+
+
+### Number formatting APIs
+
+# Author: Martin von Loewis
+# improved by Georg Brandl
+
+# Iterate over grouping intervals
+def _grouping_intervals(grouping):
+    last_interval = None
+    for interval in grouping:
+        # if grouping is -1, we are done
+        if interval == CHAR_MAX:
+            return
+        # 0: re-use last group ad infinitum
+        if interval == 0:
+            if last_interval is None:
+                raise ValueError("invalid grouping")
+            while True:
+                yield last_interval
+        yield interval
+        last_interval = interval
+
+#perform the grouping from right to left
+def _group(s, monetary=False):
+    conv = localeconv()
+    thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
+    grouping = conv[monetary and 'mon_grouping' or 'grouping']
+    if not grouping:
+        return (s, 0)
+    if s[-1] == ' ':
+        stripped = s.rstrip()
+        right_spaces = s[len(stripped):]
+        s = stripped
+    else:
+        right_spaces = ''
+    left_spaces = ''
+    groups = []
+    for interval in _grouping_intervals(grouping):
+        if not s or s[-1] not in "0123456789":
+            # only non-digit characters remain (sign, spaces)
+            left_spaces = s
+            s = ''
+            break
+        groups.append(s[-interval:])
+        s = s[:-interval]
+    if s:
+        groups.append(s)
+    groups.reverse()
+    return (
+        left_spaces + thousands_sep.join(groups) + right_spaces,
+        len(thousands_sep) * (len(groups) - 1)
+    )
+
+# Strip a given amount of excess padding from the given string
+def _strip_padding(s, amount):
+    lpos = 0
+    while amount and s[lpos] == ' ':
+        lpos += 1
+        amount -= 1
+    rpos = len(s) - 1
+    while amount and s[rpos] == ' ':
+        rpos -= 1
+        amount -= 1
+    return s[lpos:rpos+1]
+
+_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
+                         r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
+
+def _format(percent, value, grouping=False, monetary=False, *additional):
+    if additional:
+        formatted = percent % ((value,) + additional)
+    else:
+        formatted = percent % value
+    if percent[-1] in 'eEfFgGdiu':
+        formatted = _localize(formatted, grouping, monetary)
+    return formatted
+
+# Transform formatted as locale number according to the locale settings
+def _localize(formatted, grouping=False, monetary=False):
+    # floats and decimal ints need special action!
+    if '.' in formatted:
+        seps = 0
+        parts = formatted.split('.')
+        if grouping:
+            parts[0], seps = _group(parts[0], monetary=monetary)
+        decimal_point = localeconv()[monetary and 'mon_decimal_point'
+                                              or 'decimal_point']
+        formatted = decimal_point.join(parts)
+        if seps:
+            formatted = _strip_padding(formatted, seps)
+    else:
+        seps = 0
+        if grouping:
+            formatted, seps = _group(formatted, monetary=monetary)
+        if seps:
+            formatted = _strip_padding(formatted, seps)
+    return formatted
+
+def format_string(f, val, grouping=False, monetary=False):
+    """Formats a string in the same way that the % formatting would use,
+    but takes the current locale into account.
+
+    Grouping is applied if the third parameter is true.
+ Conversion uses monetary thousands separator and grouping strings if + forth parameter monetary is true.""" + percents = list(_percent_re.finditer(f)) + new_f = _percent_re.sub('%s', f) + + if isinstance(val, _collections_abc.Mapping): + new_val = [] + for perc in percents: + if perc.group()[-1]=='%': + new_val.append('%') + else: + new_val.append(_format(perc.group(), val, grouping, monetary)) + else: + if not isinstance(val, tuple): + val = (val,) + new_val = [] + i = 0 + for perc in percents: + if perc.group()[-1]=='%': + new_val.append('%') + else: + starcount = perc.group('modifiers').count('*') + new_val.append(_format(perc.group(), + val[i], + grouping, + monetary, + *val[i+1:i+1+starcount])) + i += (1 + starcount) + val = tuple(new_val) + + return new_f % val + +def format(percent, value, grouping=False, monetary=False, *additional): + """Deprecated, use format_string instead.""" + import warnings + warnings.warn( + "This method will be removed in a future version of Python. " + "Use 'locale.format_string()' instead.", + DeprecationWarning, stacklevel=2 + ) + + match = _percent_re.match(percent) + if not match or len(match.group())!= len(percent): + raise ValueError(("format() must be given exactly one %%char " + "format specifier, %s not valid") % repr(percent)) + return _format(percent, value, grouping, monetary, *additional) + +def currency(val, symbol=True, grouping=False, international=False): + """Formats val according to the currency settings + in the current locale.""" + conv = localeconv() + + # check for illegal values + digits = conv[international and 'int_frac_digits' or 'frac_digits'] + if digits == 127: + raise ValueError("Currency formatting is not possible using " + "the 'C' locale.") + + s = _localize(f'{abs(val):.{digits}f}', grouping, monetary=True) + # '<' and '>' are markers if the sign must be inserted between symbol and value + s = '<' + s + '>' + + if symbol: + smb = conv[international and 'int_curr_symbol' or 'currency_symbol'] + precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes'] + separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space'] + + if precedes: + s = smb + (separated and ' ' or '') + s + else: + if international and smb[-1] == ' ': + smb = smb[:-1] + s = s + (separated and ' ' or '') + smb + + sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn'] + sign = conv[val<0 and 'negative_sign' or 'positive_sign'] + + if sign_pos == 0: + s = '(' + s + ')' + elif sign_pos == 1: + s = sign + s + elif sign_pos == 2: + s = s + sign + elif sign_pos == 3: + s = s.replace('<', sign) + elif sign_pos == 4: + s = s.replace('>', sign) + else: + # the default if nothing specified; + # this should be the most fitting sign position + s = sign + s + + return s.replace('<', '').replace('>', '') + +def str(val): + """Convert float to string, taking the locale into account.""" + return _format("%.12g", val) + +def delocalize(string): + "Parses a string as a normalized number according to the locale settings." + + conv = localeconv() + + #First, get rid of the grouping + ts = conv['thousands_sep'] + if ts: + string = string.replace(ts, '') + + #next, replace the decimal point with a dot + dd = conv['decimal_point'] + if dd: + string = string.replace(dd, '.') + return string + +def localize(string, grouping=False, monetary=False): + """Parses a string as locale number according to the locale settings.""" + return _localize(string, grouping, monetary) + +def atof(string, func=float): + "Parses a string as a float according to the locale settings." 
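+    # (Illustrative: in a de_DE locale atof('1.234,5') -> 1234.5; in the
+    # C locale the string must already use '.' as the decimal point.)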
+ return func(delocalize(string)) + +def atoi(string): + "Converts a string to an integer according to the locale settings." + return int(delocalize(string)) + +def _test(): + setlocale(LC_ALL, "") + #do grouping + s1 = format_string("%d", 123456789,1) + print(s1, "is", atoi(s1)) + #standard formatting + s1 = str(3.14) + print(s1, "is", atof(s1)) + +### Locale name aliasing engine + +# Author: Marc-Andre Lemburg, mal@lemburg.com +# Various tweaks by Fredrik Lundh + +# store away the low-level version of setlocale (it's +# overridden below) +_setlocale = setlocale + +def _replace_encoding(code, encoding): + if '.' in code: + langname = code[:code.index('.')] + else: + langname = code + # Convert the encoding to a C lib compatible encoding string + norm_encoding = encodings.normalize_encoding(encoding) + #print('norm encoding: %r' % norm_encoding) + norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(), + norm_encoding) + #print('aliased encoding: %r' % norm_encoding) + encoding = norm_encoding + norm_encoding = norm_encoding.lower() + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + else: + norm_encoding = norm_encoding.replace('_', '') + norm_encoding = norm_encoding.replace('-', '') + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + #print('found encoding %r' % encoding) + return langname + '.' + encoding + +def _append_modifier(code, modifier): + if modifier == 'euro': + if '.' not in code: + return code + '.ISO8859-15' + _, _, encoding = code.partition('.') + if encoding in ('ISO8859-15', 'UTF-8'): + return code + if encoding == 'ISO8859-1': + return _replace_encoding(code, 'ISO8859-15') + return code + '@' + modifier + +def normalize(localename): + + """ Returns a normalized locale code for the given locale + name. + + The returned locale code is formatted for use with + setlocale(). + + If normalization fails, the original name is returned + unchanged. + + If the given encoding is not known, the function defaults to + the default encoding for the locale code just like setlocale() + does. + + """ + # Normalize the locale name and extract the encoding and modifier + code = localename.lower() + if ':' in code: + # ':' is sometimes used as encoding delimiter. + code = code.replace(':', '.') + if '@' in code: + code, modifier = code.split('@', 1) + else: + modifier = '' + if '.' in code: + langname, encoding = code.split('.')[:2] + else: + langname = code + encoding = '' + + # First lookup: fullname (possibly with encoding and modifier) + lang_enc = langname + if encoding: + norm_encoding = encoding.replace('-', '') + norm_encoding = norm_encoding.replace('_', '') + lang_enc += '.' 
+ norm_encoding + lookup_name = lang_enc + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + return code + #print('first lookup failed') + + if modifier: + # Second try: fullname without modifier (possibly with encoding) + code = locale_alias.get(lang_enc, None) + if code is not None: + #print('lookup without modifier succeeded') + if '@' not in code: + return _append_modifier(code, modifier) + if code.split('@', 1)[1].lower() == modifier: + return code + #print('second lookup failed') + + if encoding: + # Third try: langname (without encoding, possibly with modifier) + lookup_name = langname + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + #print('lookup without encoding succeeded') + if '@' not in code: + return _replace_encoding(code, encoding) + code, modifier = code.split('@', 1) + return _replace_encoding(code, encoding) + '@' + modifier + + if modifier: + # Fourth try: langname (without encoding and modifier) + code = locale_alias.get(langname, None) + if code is not None: + #print('lookup without modifier and encoding succeeded') + if '@' not in code: + code = _replace_encoding(code, encoding) + return _append_modifier(code, modifier) + code, defmod = code.split('@', 1) + if defmod.lower() == modifier: + return _replace_encoding(code, encoding) + '@' + defmod + + return localename + +def _parse_localename(localename): + + """ Parses the locale code for localename and returns the + result as tuple (language code, encoding). + + The localename is normalized and passed through the locale + alias engine. A ValueError is raised in case the locale name + cannot be parsed. + + The language code corresponds to RFC 1766. code and encoding + can be None in case the values cannot be determined or are + unknown to this implementation. + + """ + code = normalize(localename) + if '@' in code: + # Deal with locale modifiers + code, modifier = code.split('@', 1) + if modifier == 'euro' and '.' not in code: + # Assume Latin-9 for @euro locales. This is bogus, + # since some systems may use other encodings for these + # locales. Also, we ignore other modifiers. + return code, 'iso-8859-15' + + if '.' in code: + return tuple(code.split('.')[:2]) + elif code == 'C': + return None, None + elif code == 'UTF-8': + # On macOS "LC_CTYPE=UTF-8" is a valid locale setting + # for getting UTF-8 handling for text. + return None, 'UTF-8' + raise ValueError('unknown locale: %s' % localename) + +def _build_localename(localetuple): + + """ Builds a locale code from the given tuple (language code, + encoding). + + No aliasing or normalizing takes place. + + """ + try: + language, encoding = localetuple + + if language is None: + language = 'C' + if encoding is None: + return language + else: + return language + '.' + encoding + except (TypeError, ValueError): + raise TypeError('Locale must be None, a string, or an iterable of ' + 'two strings -- language code, encoding.') from None + +def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): + + """ Tries to determine the default locale settings and returns + them as tuple (language code, encoding). + + According to POSIX, a program which has not called + setlocale(LC_ALL, "") runs using the portable 'C' locale. + Calling setlocale(LC_ALL, "") lets it use the default locale as + defined by the LANG variable. 
Since we don't want to interfere + with the current locale setting we thus emulate the behavior + in the way described above. + + To maintain compatibility with other platforms, not only the + LANG variable is tested, but a list of variables given as + envvars parameter. The first found to be defined will be + used. envvars defaults to the search path used in GNU gettext; + it must always contain the variable name 'LANG'. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + + try: + # check if it's supported by the _locale module + import _locale + code, encoding = _locale._getdefaultlocale() + except (ImportError, AttributeError): + pass + else: + # make sure the code/encoding values are valid + if sys.platform == "win32" and code and code[:2] == "0x": + # map windows language identifier to language name + code = windows_locale.get(int(code, 0)) + # ...add other platform-specific processing here, if + # necessary... + return code, encoding + + # fall back on POSIX behaviour + import os + lookup = os.environ.get + for variable in envvars: + localename = lookup(variable,None) + if localename: + if variable == 'LANGUAGE': + localename = localename.split(':')[0] + break + else: + localename = 'C' + return _parse_localename(localename) + + +def getlocale(category=LC_CTYPE): + + """ Returns the current setting for the given locale category as + tuple (language code, encoding). + + category may be one of the LC_* value except LC_ALL. It + defaults to LC_CTYPE. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + localename = _setlocale(category) + if category == LC_ALL and ';' in localename: + raise TypeError('category LC_ALL is not supported') + return _parse_localename(localename) + +def setlocale(category, locale=None): + + """ Set the locale for the given category. The locale can be + a string, an iterable of two strings (language code and encoding), + or None. + + Iterables are converted to strings using the locale aliasing + engine. Locale strings are passed directly to the C lib. + + category may be given as one of the LC_* values. + + """ + if locale and not isinstance(locale, _builtin_str): + # convert to string + locale = normalize(_build_localename(locale)) + return _setlocale(category, locale) + +def resetlocale(category=LC_ALL): + + """ Sets the locale for category to the default setting. + + The default setting is determined by calling + getdefaultlocale(). category defaults to LC_ALL. + + """ + _setlocale(category, _build_localename(getdefaultlocale())) + + +try: + from _locale import _get_locale_encoding +except ImportError: + def _get_locale_encoding(): + if hasattr(sys, 'getandroidapilevel'): + # On Android langinfo.h and CODESET are missing, and UTF-8 is + # always used in mbstowcs() and wcstombs(). + return 'UTF-8' + if sys.flags.utf8_mode: + return 'UTF-8' + encoding = getdefaultlocale()[1] + if encoding is None: + # LANG not set, default conservatively to ASCII + encoding = 'ascii' + return encoding + +try: + CODESET +except NameError: + def getpreferredencoding(do_setlocale=True): + """Return the charset that the user is likely using.""" + return _get_locale_encoding() +else: + # On Unix, if CODESET is available, use that. 
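+    # Example (values are illustrative and assume a Unix host whose
+    # LC_CTYPE resolves to en_US.UTF-8; other hosts will report other
+    # names and codesets):
+    #
+    #   >>> import locale
+    #   >>> locale.setlocale(locale.LC_CTYPE, "")
+    #   'en_US.UTF-8'
+    #   >>> locale.nl_langinfo(locale.CODESET)
+    #   'UTF-8'
+    #   >>> locale.getpreferredencoding()
+    #   'UTF-8'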
+    def getpreferredencoding(do_setlocale=True):
+        """Return the charset that the user is likely using,
+        according to the system configuration."""
+        if sys.flags.utf8_mode:
+            return 'UTF-8'
+
+        if not do_setlocale:
+            return _get_locale_encoding()
+
+        old_loc = setlocale(LC_CTYPE)
+        try:
+            try:
+                setlocale(LC_CTYPE, "")
+            except Error:
+                pass
+            return _get_locale_encoding()
+        finally:
+            setlocale(LC_CTYPE, old_loc)
+
+
+### Database
+#
+# The following data was extracted from the locale.alias file which
+# comes with X11 and then hand edited removing the explicit encoding
+# definitions and adding some more aliases. The file is usually
+# available as /usr/lib/X11/locale/locale.alias.
+#
+
+#
+# The locale_encoding_alias table maps lowercase encoding alias names
+# to C locale encoding names (case-sensitive). Note that normalize()
+# first looks up the encoding in the encodings.aliases dictionary and
+# then applies this mapping to find the correct C lib name for the
+# encoding.
+#
+locale_encoding_alias = {
+
+    # Mappings for non-standard encoding names used in locale names
+    '437': 'C',
+    'c': 'C',
+    'en': 'ISO8859-1',
+    'jis': 'JIS7',
+    'jis7': 'JIS7',
+    'ajec': 'eucJP',
+    'koi8c': 'KOI8-C',
+    'microsoftcp1251': 'CP1251',
+    'microsoftcp1255': 'CP1255',
+    'microsoftcp1256': 'CP1256',
+    '88591': 'ISO8859-1',
+    '88592': 'ISO8859-2',
+    '88595': 'ISO8859-5',
+    '885915': 'ISO8859-15',
+
+    # Mappings from Python codec names to C lib encoding names
+    'ascii': 'ISO8859-1',
+    'latin_1': 'ISO8859-1',
+    'iso8859_1': 'ISO8859-1',
+    'iso8859_10': 'ISO8859-10',
+    'iso8859_11': 'ISO8859-11',
+    'iso8859_13': 'ISO8859-13',
+    'iso8859_14': 'ISO8859-14',
+    'iso8859_15': 'ISO8859-15',
+    'iso8859_16': 'ISO8859-16',
+    'iso8859_2': 'ISO8859-2',
+    'iso8859_3': 'ISO8859-3',
+    'iso8859_4': 'ISO8859-4',
+    'iso8859_5': 'ISO8859-5',
+    'iso8859_6': 'ISO8859-6',
+    'iso8859_7': 'ISO8859-7',
+    'iso8859_8': 'ISO8859-8',
+    'iso8859_9': 'ISO8859-9',
+    'iso2022_jp': 'JIS7',
+    'shift_jis': 'SJIS',
+    'tactis': 'TACTIS',
+    'euc_jp': 'eucJP',
+    'euc_kr': 'eucKR',
+    'utf_8': 'UTF-8',
+    'koi8_r': 'KOI8-R',
+    'koi8_t': 'KOI8-T',
+    'koi8_u': 'KOI8-U',
+    'kz1048': 'RK1048',
+    'cp1251': 'CP1251',
+    'cp1255': 'CP1255',
+    'cp1256': 'CP1256',
+
+    # XXX This list is still incomplete. If you know more
+    # mappings, please file a bug report. Thanks.
+}
+
+for k, v in sorted(locale_encoding_alias.items()):
+    k = k.replace('_', '')
+    locale_encoding_alias.setdefault(k, v)
+
+#
+# The locale_alias table maps lowercase alias names to C locale names
+# (case-sensitive). Encodings are always separated from the locale
+# name using a dot ('.'); they should only be given in case the
+# language name is needed to interpret the given encoding alias
+# correctly (CJK codes often have this need).
+#
+# Note that the normalize() function which uses this table
+# removes '_' and '-' characters from the encoding part of the
+# locale name before doing the lookup. This saves a lot of
+# space in the table.
+#
+# MAL 2004-12-10:
+# Updated alias mapping to most recent locale.alias file
+# from X.org distribution using makelocalealias.py.
+# +# These are the differences compared to the old mapping (Python 2.4 +# and older): +# +# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1' +# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP' +# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13' +# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13' +# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11' +# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312' +# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# +# MAL 2008-05-30: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 2.5 +# and older): +# +# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2' +# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2' +# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# +# AP 2010-04-12: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. 
+# +# These are the differences compared to the old mapping (Python 2.6.5 +# and older): +# +# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# +# SS 2013-12-20: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 3.3.3 +# and older): +# +# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8' +# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# +# SS 2014-10-01: +# Updated alias mapping with glibc 2.19 supported locales. +# +# SS 2018-05-05: +# Updated alias mapping with glibc 2.27 supported locales. 
+# +# These are the differences compared to the old mapping (Python 3.6.5 +# and older): +# +# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia' +# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154' +# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R' + +locale_alias = { + 'a3': 'az_AZ.KOI8-C', + 'a3_az': 'az_AZ.KOI8-C', + 'a3_az.koic': 'az_AZ.KOI8-C', + 'aa_dj': 'aa_DJ.ISO8859-1', + 'aa_er': 'aa_ER.UTF-8', + 'aa_et': 'aa_ET.UTF-8', + 'af': 'af_ZA.ISO8859-1', + 'af_za': 'af_ZA.ISO8859-1', + 'agr_pe': 'agr_PE.UTF-8', + 'ak_gh': 'ak_GH.UTF-8', + 'am': 'am_ET.UTF-8', + 'am_et': 'am_ET.UTF-8', + 'american': 'en_US.ISO8859-1', + 'an_es': 'an_ES.ISO8859-15', + 'anp_in': 'anp_IN.UTF-8', + 'ar': 'ar_AA.ISO8859-6', + 'ar_aa': 'ar_AA.ISO8859-6', + 'ar_ae': 'ar_AE.ISO8859-6', + 'ar_bh': 'ar_BH.ISO8859-6', + 'ar_dz': 'ar_DZ.ISO8859-6', + 'ar_eg': 'ar_EG.ISO8859-6', + 'ar_in': 'ar_IN.UTF-8', + 'ar_iq': 'ar_IQ.ISO8859-6', + 'ar_jo': 'ar_JO.ISO8859-6', + 'ar_kw': 'ar_KW.ISO8859-6', + 'ar_lb': 'ar_LB.ISO8859-6', + 'ar_ly': 'ar_LY.ISO8859-6', + 'ar_ma': 'ar_MA.ISO8859-6', + 'ar_om': 'ar_OM.ISO8859-6', + 'ar_qa': 'ar_QA.ISO8859-6', + 'ar_sa': 'ar_SA.ISO8859-6', + 'ar_sd': 'ar_SD.ISO8859-6', + 'ar_ss': 'ar_SS.UTF-8', + 'ar_sy': 'ar_SY.ISO8859-6', + 'ar_tn': 'ar_TN.ISO8859-6', + 'ar_ye': 'ar_YE.ISO8859-6', + 'arabic': 'ar_AA.ISO8859-6', + 'as': 'as_IN.UTF-8', + 'as_in': 'as_IN.UTF-8', + 'ast_es': 'ast_ES.ISO8859-15', + 'ayc_pe': 'ayc_PE.UTF-8', + 'az': 'az_AZ.ISO8859-9E', + 'az_az': 'az_AZ.ISO8859-9E', + 'az_az.iso88599e': 'az_AZ.ISO8859-9E', + 'az_ir': 'az_IR.UTF-8', + 'be': 'be_BY.CP1251', + 'be@latin': 'be_BY.UTF-8@latin', + 'be_bg.utf8': 'bg_BG.UTF-8', + 'be_by': 'be_BY.CP1251', + 'be_by@latin': 'be_BY.UTF-8@latin', + 'bem_zm': 'bem_ZM.UTF-8', + 'ber_dz': 'ber_DZ.UTF-8', + 'ber_ma': 'ber_MA.UTF-8', + 'bg': 'bg_BG.CP1251', + 'bg_bg': 'bg_BG.CP1251', + 'bhb_in.utf8': 'bhb_IN.UTF-8', + 'bho_in': 'bho_IN.UTF-8', + 'bho_np': 'bho_NP.UTF-8', + 'bi_vu': 'bi_VU.UTF-8', + 'bn_bd': 'bn_BD.UTF-8', + 'bn_in': 'bn_IN.UTF-8', + 'bo_cn': 'bo_CN.UTF-8', + 'bo_in': 'bo_IN.UTF-8', + 'bokmal': 'nb_NO.ISO8859-1', + 'bokm\xe5l': 'nb_NO.ISO8859-1', + 'br': 'br_FR.ISO8859-1', + 'br_fr': 'br_FR.ISO8859-1', + 'brx_in': 'brx_IN.UTF-8', + 'bs': 'bs_BA.ISO8859-2', + 'bs_ba': 'bs_BA.ISO8859-2', + 'bulgarian': 'bg_BG.CP1251', + 'byn_er': 'byn_ER.UTF-8', + 'c': 'C', + 'c-french': 'fr_CA.ISO8859-1', + 'c.ascii': 'C', + 'c.en': 'C', + 'c.iso88591': 'en_US.ISO8859-1', + 'c.utf8': 'en_US.UTF-8', + 'c_c': 'C', + 'c_c.c': 'C', + 'ca': 'ca_ES.ISO8859-1', + 'ca_ad': 'ca_AD.ISO8859-1', + 'ca_es': 'ca_ES.ISO8859-1', + 'ca_es@valencia': 'ca_ES.UTF-8@valencia', + 'ca_fr': 'ca_FR.ISO8859-1', + 'ca_it': 'ca_IT.ISO8859-1', + 'catalan': 'ca_ES.ISO8859-1', + 'ce_ru': 'ce_RU.UTF-8', + 'cextend': 'en_US.ISO8859-1', + 'chinese-s': 'zh_CN.eucCN', + 'chinese-t': 'zh_TW.eucTW', + 'chr_us': 'chr_US.UTF-8', + 'ckb_iq': 'ckb_IQ.UTF-8', + 'cmn_tw': 'cmn_TW.UTF-8', + 'crh_ua': 'crh_UA.UTF-8', + 'croatian': 'hr_HR.ISO8859-2', + 'cs': 'cs_CZ.ISO8859-2', + 'cs_cs': 'cs_CZ.ISO8859-2', + 'cs_cz': 'cs_CZ.ISO8859-2', + 'csb_pl': 'csb_PL.UTF-8', + 'cv_ru': 'cv_RU.UTF-8', + 'cy': 'cy_GB.ISO8859-1', + 'cy_gb': 'cy_GB.ISO8859-1', + 'cz': 'cs_CZ.ISO8859-2', + 'cz_cz': 'cs_CZ.ISO8859-2', + 'czech': 'cs_CZ.ISO8859-2', + 'da': 'da_DK.ISO8859-1', + 'da_dk': 'da_DK.ISO8859-1', + 'danish': 'da_DK.ISO8859-1', + 'dansk': 'da_DK.ISO8859-1', + 'de': 'de_DE.ISO8859-1', + 'de_at': 'de_AT.ISO8859-1', + 'de_be': 'de_BE.ISO8859-1', + 'de_ch': 
'de_CH.ISO8859-1', + 'de_de': 'de_DE.ISO8859-1', + 'de_it': 'de_IT.ISO8859-1', + 'de_li.utf8': 'de_LI.UTF-8', + 'de_lu': 'de_LU.ISO8859-1', + 'deutsch': 'de_DE.ISO8859-1', + 'doi_in': 'doi_IN.UTF-8', + 'dutch': 'nl_NL.ISO8859-1', + 'dutch.iso88591': 'nl_BE.ISO8859-1', + 'dv_mv': 'dv_MV.UTF-8', + 'dz_bt': 'dz_BT.UTF-8', + 'ee': 'ee_EE.ISO8859-4', + 'ee_ee': 'ee_EE.ISO8859-4', + 'eesti': 'et_EE.ISO8859-1', + 'el': 'el_GR.ISO8859-7', + 'el_cy': 'el_CY.ISO8859-7', + 'el_gr': 'el_GR.ISO8859-7', + 'el_gr@euro': 'el_GR.ISO8859-15', + 'en': 'en_US.ISO8859-1', + 'en_ag': 'en_AG.UTF-8', + 'en_au': 'en_AU.ISO8859-1', + 'en_be': 'en_BE.ISO8859-1', + 'en_bw': 'en_BW.ISO8859-1', + 'en_ca': 'en_CA.ISO8859-1', + 'en_dk': 'en_DK.ISO8859-1', + 'en_dl.utf8': 'en_DL.UTF-8', + 'en_gb': 'en_GB.ISO8859-1', + 'en_hk': 'en_HK.ISO8859-1', + 'en_ie': 'en_IE.ISO8859-1', + 'en_il': 'en_IL.UTF-8', + 'en_in': 'en_IN.ISO8859-1', + 'en_ng': 'en_NG.UTF-8', + 'en_nz': 'en_NZ.ISO8859-1', + 'en_ph': 'en_PH.ISO8859-1', + 'en_sc.utf8': 'en_SC.UTF-8', + 'en_sg': 'en_SG.ISO8859-1', + 'en_uk': 'en_GB.ISO8859-1', + 'en_us': 'en_US.ISO8859-1', + 'en_us@euro@euro': 'en_US.ISO8859-15', + 'en_za': 'en_ZA.ISO8859-1', + 'en_zm': 'en_ZM.UTF-8', + 'en_zw': 'en_ZW.ISO8859-1', + 'en_zw.utf8': 'en_ZS.UTF-8', + 'eng_gb': 'en_GB.ISO8859-1', + 'english': 'en_EN.ISO8859-1', + 'english.iso88591': 'en_US.ISO8859-1', + 'english_uk': 'en_GB.ISO8859-1', + 'english_united-states': 'en_US.ISO8859-1', + 'english_united-states.437': 'C', + 'english_us': 'en_US.ISO8859-1', + 'eo': 'eo_XX.ISO8859-3', + 'eo.utf8': 'eo.UTF-8', + 'eo_eo': 'eo_EO.ISO8859-3', + 'eo_us.utf8': 'eo_US.UTF-8', + 'eo_xx': 'eo_XX.ISO8859-3', + 'es': 'es_ES.ISO8859-1', + 'es_ar': 'es_AR.ISO8859-1', + 'es_bo': 'es_BO.ISO8859-1', + 'es_cl': 'es_CL.ISO8859-1', + 'es_co': 'es_CO.ISO8859-1', + 'es_cr': 'es_CR.ISO8859-1', + 'es_cu': 'es_CU.UTF-8', + 'es_do': 'es_DO.ISO8859-1', + 'es_ec': 'es_EC.ISO8859-1', + 'es_es': 'es_ES.ISO8859-1', + 'es_gt': 'es_GT.ISO8859-1', + 'es_hn': 'es_HN.ISO8859-1', + 'es_mx': 'es_MX.ISO8859-1', + 'es_ni': 'es_NI.ISO8859-1', + 'es_pa': 'es_PA.ISO8859-1', + 'es_pe': 'es_PE.ISO8859-1', + 'es_pr': 'es_PR.ISO8859-1', + 'es_py': 'es_PY.ISO8859-1', + 'es_sv': 'es_SV.ISO8859-1', + 'es_us': 'es_US.ISO8859-1', + 'es_uy': 'es_UY.ISO8859-1', + 'es_ve': 'es_VE.ISO8859-1', + 'estonian': 'et_EE.ISO8859-1', + 'et': 'et_EE.ISO8859-15', + 'et_ee': 'et_EE.ISO8859-15', + 'eu': 'eu_ES.ISO8859-1', + 'eu_es': 'eu_ES.ISO8859-1', + 'eu_fr': 'eu_FR.ISO8859-1', + 'fa': 'fa_IR.UTF-8', + 'fa_ir': 'fa_IR.UTF-8', + 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', + 'ff_sn': 'ff_SN.UTF-8', + 'fi': 'fi_FI.ISO8859-15', + 'fi_fi': 'fi_FI.ISO8859-15', + 'fil_ph': 'fil_PH.UTF-8', + 'finnish': 'fi_FI.ISO8859-1', + 'fo': 'fo_FO.ISO8859-1', + 'fo_fo': 'fo_FO.ISO8859-1', + 'fr': 'fr_FR.ISO8859-1', + 'fr_be': 'fr_BE.ISO8859-1', + 'fr_ca': 'fr_CA.ISO8859-1', + 'fr_ch': 'fr_CH.ISO8859-1', + 'fr_fr': 'fr_FR.ISO8859-1', + 'fr_lu': 'fr_LU.ISO8859-1', + 'fran\xe7ais': 'fr_FR.ISO8859-1', + 'fre_fr': 'fr_FR.ISO8859-1', + 'french': 'fr_FR.ISO8859-1', + 'french.iso88591': 'fr_CH.ISO8859-1', + 'french_france': 'fr_FR.ISO8859-1', + 'fur_it': 'fur_IT.UTF-8', + 'fy_de': 'fy_DE.UTF-8', + 'fy_nl': 'fy_NL.UTF-8', + 'ga': 'ga_IE.ISO8859-1', + 'ga_ie': 'ga_IE.ISO8859-1', + 'galego': 'gl_ES.ISO8859-1', + 'galician': 'gl_ES.ISO8859-1', + 'gd': 'gd_GB.ISO8859-1', + 'gd_gb': 'gd_GB.ISO8859-1', + 'ger_de': 'de_DE.ISO8859-1', + 'german': 'de_DE.ISO8859-1', + 'german.iso88591': 'de_CH.ISO8859-1', + 'german_germany': 'de_DE.ISO8859-1', + 
'gez_er': 'gez_ER.UTF-8', + 'gez_et': 'gez_ET.UTF-8', + 'gl': 'gl_ES.ISO8859-1', + 'gl_es': 'gl_ES.ISO8859-1', + 'greek': 'el_GR.ISO8859-7', + 'gu_in': 'gu_IN.UTF-8', + 'gv': 'gv_GB.ISO8859-1', + 'gv_gb': 'gv_GB.ISO8859-1', + 'ha_ng': 'ha_NG.UTF-8', + 'hak_tw': 'hak_TW.UTF-8', + 'he': 'he_IL.ISO8859-8', + 'he_il': 'he_IL.ISO8859-8', + 'hebrew': 'he_IL.ISO8859-8', + 'hi': 'hi_IN.ISCII-DEV', + 'hi_in': 'hi_IN.ISCII-DEV', + 'hi_in.isciidev': 'hi_IN.ISCII-DEV', + 'hif_fj': 'hif_FJ.UTF-8', + 'hne': 'hne_IN.UTF-8', + 'hne_in': 'hne_IN.UTF-8', + 'hr': 'hr_HR.ISO8859-2', + 'hr_hr': 'hr_HR.ISO8859-2', + 'hrvatski': 'hr_HR.ISO8859-2', + 'hsb_de': 'hsb_DE.ISO8859-2', + 'ht_ht': 'ht_HT.UTF-8', + 'hu': 'hu_HU.ISO8859-2', + 'hu_hu': 'hu_HU.ISO8859-2', + 'hungarian': 'hu_HU.ISO8859-2', + 'hy_am': 'hy_AM.UTF-8', + 'hy_am.armscii8': 'hy_AM.ARMSCII_8', + 'ia': 'ia.UTF-8', + 'ia_fr': 'ia_FR.UTF-8', + 'icelandic': 'is_IS.ISO8859-1', + 'id': 'id_ID.ISO8859-1', + 'id_id': 'id_ID.ISO8859-1', + 'ig_ng': 'ig_NG.UTF-8', + 'ik_ca': 'ik_CA.UTF-8', + 'in': 'id_ID.ISO8859-1', + 'in_id': 'id_ID.ISO8859-1', + 'is': 'is_IS.ISO8859-1', + 'is_is': 'is_IS.ISO8859-1', + 'iso-8859-1': 'en_US.ISO8859-1', + 'iso-8859-15': 'en_US.ISO8859-15', + 'iso8859-1': 'en_US.ISO8859-1', + 'iso8859-15': 'en_US.ISO8859-15', + 'iso_8859_1': 'en_US.ISO8859-1', + 'iso_8859_15': 'en_US.ISO8859-15', + 'it': 'it_IT.ISO8859-1', + 'it_ch': 'it_CH.ISO8859-1', + 'it_it': 'it_IT.ISO8859-1', + 'italian': 'it_IT.ISO8859-1', + 'iu': 'iu_CA.NUNACOM-8', + 'iu_ca': 'iu_CA.NUNACOM-8', + 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', + 'iw': 'he_IL.ISO8859-8', + 'iw_il': 'he_IL.ISO8859-8', + 'iw_il.utf8': 'iw_IL.UTF-8', + 'ja': 'ja_JP.eucJP', + 'ja_jp': 'ja_JP.eucJP', + 'ja_jp.euc': 'ja_JP.eucJP', + 'ja_jp.mscode': 'ja_JP.SJIS', + 'ja_jp.pck': 'ja_JP.SJIS', + 'japan': 'ja_JP.eucJP', + 'japanese': 'ja_JP.eucJP', + 'japanese-euc': 'ja_JP.eucJP', + 'japanese.euc': 'ja_JP.eucJP', + 'jp_jp': 'ja_JP.eucJP', + 'ka': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', + 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', + 'kab_dz': 'kab_DZ.UTF-8', + 'kk_kz': 'kk_KZ.ptcp154', + 'kl': 'kl_GL.ISO8859-1', + 'kl_gl': 'kl_GL.ISO8859-1', + 'km_kh': 'km_KH.UTF-8', + 'kn': 'kn_IN.UTF-8', + 'kn_in': 'kn_IN.UTF-8', + 'ko': 'ko_KR.eucKR', + 'ko_kr': 'ko_KR.eucKR', + 'ko_kr.euc': 'ko_KR.eucKR', + 'kok_in': 'kok_IN.UTF-8', + 'korean': 'ko_KR.eucKR', + 'korean.euc': 'ko_KR.eucKR', + 'ks': 'ks_IN.UTF-8', + 'ks_in': 'ks_IN.UTF-8', + 'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari', + 'ku_tr': 'ku_TR.ISO8859-9', + 'kw': 'kw_GB.ISO8859-1', + 'kw_gb': 'kw_GB.ISO8859-1', + 'ky': 'ky_KG.UTF-8', + 'ky_kg': 'ky_KG.UTF-8', + 'lb_lu': 'lb_LU.UTF-8', + 'lg_ug': 'lg_UG.ISO8859-10', + 'li_be': 'li_BE.UTF-8', + 'li_nl': 'li_NL.UTF-8', + 'lij_it': 'lij_IT.UTF-8', + 'lithuanian': 'lt_LT.ISO8859-13', + 'ln_cd': 'ln_CD.UTF-8', + 'lo': 'lo_LA.MULELAO-1', + 'lo_la': 'lo_LA.MULELAO-1', + 'lo_la.cp1133': 'lo_LA.IBM-CP1133', + 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', + 'lo_la.mulelao1': 'lo_LA.MULELAO-1', + 'lt': 'lt_LT.ISO8859-13', + 'lt_lt': 'lt_LT.ISO8859-13', + 'lv': 'lv_LV.ISO8859-13', + 'lv_lv': 'lv_LV.ISO8859-13', + 'lzh_tw': 'lzh_TW.UTF-8', + 'mag_in': 'mag_IN.UTF-8', + 'mai': 'mai_IN.UTF-8', + 'mai_in': 'mai_IN.UTF-8', + 'mai_np': 'mai_NP.UTF-8', + 'mfe_mu': 'mfe_MU.UTF-8', + 'mg_mg': 'mg_MG.ISO8859-15', + 'mhr_ru': 'mhr_RU.UTF-8', + 'mi': 'mi_NZ.ISO8859-1', + 'mi_nz': 'mi_NZ.ISO8859-1', + 'miq_ni': 
'miq_NI.UTF-8', + 'mjw_in': 'mjw_IN.UTF-8', + 'mk': 'mk_MK.ISO8859-5', + 'mk_mk': 'mk_MK.ISO8859-5', + 'ml': 'ml_IN.UTF-8', + 'ml_in': 'ml_IN.UTF-8', + 'mn_mn': 'mn_MN.UTF-8', + 'mni_in': 'mni_IN.UTF-8', + 'mr': 'mr_IN.UTF-8', + 'mr_in': 'mr_IN.UTF-8', + 'ms': 'ms_MY.ISO8859-1', + 'ms_my': 'ms_MY.ISO8859-1', + 'mt': 'mt_MT.ISO8859-3', + 'mt_mt': 'mt_MT.ISO8859-3', + 'my_mm': 'my_MM.UTF-8', + 'nan_tw': 'nan_TW.UTF-8', + 'nb': 'nb_NO.ISO8859-1', + 'nb_no': 'nb_NO.ISO8859-1', + 'nds_de': 'nds_DE.UTF-8', + 'nds_nl': 'nds_NL.UTF-8', + 'ne_np': 'ne_NP.UTF-8', + 'nhn_mx': 'nhn_MX.UTF-8', + 'niu_nu': 'niu_NU.UTF-8', + 'niu_nz': 'niu_NZ.UTF-8', + 'nl': 'nl_NL.ISO8859-1', + 'nl_aw': 'nl_AW.UTF-8', + 'nl_be': 'nl_BE.ISO8859-1', + 'nl_nl': 'nl_NL.ISO8859-1', + 'nn': 'nn_NO.ISO8859-1', + 'nn_no': 'nn_NO.ISO8859-1', + 'no': 'no_NO.ISO8859-1', + 'no@nynorsk': 'ny_NO.ISO8859-1', + 'no_no': 'no_NO.ISO8859-1', + 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', + 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', + 'norwegian': 'no_NO.ISO8859-1', + 'nr': 'nr_ZA.ISO8859-1', + 'nr_za': 'nr_ZA.ISO8859-1', + 'nso': 'nso_ZA.ISO8859-15', + 'nso_za': 'nso_ZA.ISO8859-15', + 'ny': 'ny_NO.ISO8859-1', + 'ny_no': 'ny_NO.ISO8859-1', + 'nynorsk': 'nn_NO.ISO8859-1', + 'oc': 'oc_FR.ISO8859-1', + 'oc_fr': 'oc_FR.ISO8859-1', + 'om_et': 'om_ET.UTF-8', + 'om_ke': 'om_KE.ISO8859-1', + 'or': 'or_IN.UTF-8', + 'or_in': 'or_IN.UTF-8', + 'os_ru': 'os_RU.UTF-8', + 'pa': 'pa_IN.UTF-8', + 'pa_in': 'pa_IN.UTF-8', + 'pa_pk': 'pa_PK.UTF-8', + 'pap_an': 'pap_AN.UTF-8', + 'pap_aw': 'pap_AW.UTF-8', + 'pap_cw': 'pap_CW.UTF-8', + 'pd': 'pd_US.ISO8859-1', + 'pd_de': 'pd_DE.ISO8859-1', + 'pd_us': 'pd_US.ISO8859-1', + 'ph': 'ph_PH.ISO8859-1', + 'ph_ph': 'ph_PH.ISO8859-1', + 'pl': 'pl_PL.ISO8859-2', + 'pl_pl': 'pl_PL.ISO8859-2', + 'polish': 'pl_PL.ISO8859-2', + 'portuguese': 'pt_PT.ISO8859-1', + 'portuguese_brazil': 'pt_BR.ISO8859-1', + 'posix': 'C', + 'posix-utf2': 'C', + 'pp': 'pp_AN.ISO8859-1', + 'pp_an': 'pp_AN.ISO8859-1', + 'ps_af': 'ps_AF.UTF-8', + 'pt': 'pt_PT.ISO8859-1', + 'pt_br': 'pt_BR.ISO8859-1', + 'pt_pt': 'pt_PT.ISO8859-1', + 'quz_pe': 'quz_PE.UTF-8', + 'raj_in': 'raj_IN.UTF-8', + 'ro': 'ro_RO.ISO8859-2', + 'ro_ro': 'ro_RO.ISO8859-2', + 'romanian': 'ro_RO.ISO8859-2', + 'ru': 'ru_RU.UTF-8', + 'ru_ru': 'ru_RU.UTF-8', + 'ru_ua': 'ru_UA.KOI8-U', + 'rumanian': 'ro_RO.ISO8859-2', + 'russian': 'ru_RU.KOI8-R', + 'rw': 'rw_RW.ISO8859-1', + 'rw_rw': 'rw_RW.ISO8859-1', + 'sa_in': 'sa_IN.UTF-8', + 'sat_in': 'sat_IN.UTF-8', + 'sc_it': 'sc_IT.UTF-8', + 'sd': 'sd_IN.UTF-8', + 'sd_in': 'sd_IN.UTF-8', + 'sd_in@devanagari.utf8': 'sd_IN.UTF-8@devanagari', + 'sd_pk': 'sd_PK.UTF-8', + 'se_no': 'se_NO.UTF-8', + 'serbocroatian': 'sr_RS.UTF-8@latin', + 'sgs_lt': 'sgs_LT.UTF-8', + 'sh': 'sr_RS.UTF-8@latin', + 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', + 'sh_hr': 'sh_HR.ISO8859-2', + 'sh_hr.iso88592': 'hr_HR.ISO8859-2', + 'sh_sp': 'sr_CS.ISO8859-2', + 'sh_yu': 'sr_RS.UTF-8@latin', + 'shn_mm': 'shn_MM.UTF-8', + 'shs_ca': 'shs_CA.UTF-8', + 'si': 'si_LK.UTF-8', + 'si_lk': 'si_LK.UTF-8', + 'sid_et': 'sid_ET.UTF-8', + 'sinhala': 'si_LK.UTF-8', + 'sk': 'sk_SK.ISO8859-2', + 'sk_sk': 'sk_SK.ISO8859-2', + 'sl': 'sl_SI.ISO8859-2', + 'sl_cs': 'sl_CS.ISO8859-2', + 'sl_si': 'sl_SI.ISO8859-2', + 'slovak': 'sk_SK.ISO8859-2', + 'slovene': 'sl_SI.ISO8859-2', + 'slovenian': 'sl_SI.ISO8859-2', + 'sm_ws': 'sm_WS.UTF-8', + 'so_dj': 'so_DJ.ISO8859-1', + 'so_et': 'so_ET.UTF-8', + 'so_ke': 'so_KE.ISO8859-1', + 'so_so': 'so_SO.ISO8859-1', + 'sp': 'sr_CS.ISO8859-5', + 'sp_yu': 'sr_CS.ISO8859-5', 
+ 'spanish': 'es_ES.ISO8859-1', + 'spanish_spain': 'es_ES.ISO8859-1', + 'sq': 'sq_AL.ISO8859-2', + 'sq_al': 'sq_AL.ISO8859-2', + 'sq_mk': 'sq_MK.UTF-8', + 'sr': 'sr_RS.UTF-8', + 'sr@cyrillic': 'sr_RS.UTF-8', + 'sr@latn': 'sr_CS.UTF-8@latin', + 'sr_cs': 'sr_CS.UTF-8', + 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2', + 'sr_cs@latn': 'sr_CS.UTF-8@latin', + 'sr_me': 'sr_ME.UTF-8', + 'sr_rs': 'sr_RS.UTF-8', + 'sr_rs@latn': 'sr_RS.UTF-8@latin', + 'sr_sp': 'sr_CS.ISO8859-2', + 'sr_yu': 'sr_RS.UTF-8@latin', + 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.iso88592': 'sr_CS.ISO8859-2', + 'sr_yu.iso88595': 'sr_CS.ISO8859-5', + 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', + 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.utf8': 'sr_RS.UTF-8', + 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', + 'sr_yu@cyrillic': 'sr_RS.UTF-8', + 'ss': 'ss_ZA.ISO8859-1', + 'ss_za': 'ss_ZA.ISO8859-1', + 'st': 'st_ZA.ISO8859-1', + 'st_za': 'st_ZA.ISO8859-1', + 'sv': 'sv_SE.ISO8859-1', + 'sv_fi': 'sv_FI.ISO8859-1', + 'sv_se': 'sv_SE.ISO8859-1', + 'sw_ke': 'sw_KE.UTF-8', + 'sw_tz': 'sw_TZ.UTF-8', + 'swedish': 'sv_SE.ISO8859-1', + 'szl_pl': 'szl_PL.UTF-8', + 'ta': 'ta_IN.TSCII-0', + 'ta_in': 'ta_IN.TSCII-0', + 'ta_in.tscii': 'ta_IN.TSCII-0', + 'ta_in.tscii0': 'ta_IN.TSCII-0', + 'ta_lk': 'ta_LK.UTF-8', + 'tcy_in.utf8': 'tcy_IN.UTF-8', + 'te': 'te_IN.UTF-8', + 'te_in': 'te_IN.UTF-8', + 'tg': 'tg_TJ.KOI8-C', + 'tg_tj': 'tg_TJ.KOI8-C', + 'th': 'th_TH.ISO8859-11', + 'th_th': 'th_TH.ISO8859-11', + 'th_th.tactis': 'th_TH.TIS620', + 'th_th.tis620': 'th_TH.TIS620', + 'thai': 'th_TH.ISO8859-11', + 'the_np': 'the_NP.UTF-8', + 'ti_er': 'ti_ER.UTF-8', + 'ti_et': 'ti_ET.UTF-8', + 'tig_er': 'tig_ER.UTF-8', + 'tk_tm': 'tk_TM.UTF-8', + 'tl': 'tl_PH.ISO8859-1', + 'tl_ph': 'tl_PH.ISO8859-1', + 'tn': 'tn_ZA.ISO8859-15', + 'tn_za': 'tn_ZA.ISO8859-15', + 'to_to': 'to_TO.UTF-8', + 'tpi_pg': 'tpi_PG.UTF-8', + 'tr': 'tr_TR.ISO8859-9', + 'tr_cy': 'tr_CY.ISO8859-9', + 'tr_tr': 'tr_TR.ISO8859-9', + 'ts': 'ts_ZA.ISO8859-1', + 'ts_za': 'ts_ZA.ISO8859-1', + 'tt': 'tt_RU.TATAR-CYR', + 'tt_ru': 'tt_RU.TATAR-CYR', + 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', + 'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif', + 'turkish': 'tr_TR.ISO8859-9', + 'ug_cn': 'ug_CN.UTF-8', + 'uk': 'uk_UA.KOI8-U', + 'uk_ua': 'uk_UA.KOI8-U', + 'univ': 'en_US.utf', + 'universal': 'en_US.utf', + 'universal.utf8@ucs4': 'en_US.UTF-8', + 'unm_us': 'unm_US.UTF-8', + 'ur': 'ur_PK.CP1256', + 'ur_in': 'ur_IN.UTF-8', + 'ur_pk': 'ur_PK.CP1256', + 'uz': 'uz_UZ.UTF-8', + 'uz_uz': 'uz_UZ.UTF-8', + 'uz_uz@cyrillic': 'uz_UZ.UTF-8', + 've': 've_ZA.UTF-8', + 've_za': 've_ZA.UTF-8', + 'vi': 'vi_VN.TCVN', + 'vi_vn': 'vi_VN.TCVN', + 'vi_vn.tcvn': 'vi_VN.TCVN', + 'vi_vn.tcvn5712': 'vi_VN.TCVN', + 'vi_vn.viscii': 'vi_VN.VISCII', + 'vi_vn.viscii111': 'vi_VN.VISCII', + 'wa': 'wa_BE.ISO8859-1', + 'wa_be': 'wa_BE.ISO8859-1', + 'wae_ch': 'wae_CH.UTF-8', + 'wal_et': 'wal_ET.UTF-8', + 'wo_sn': 'wo_SN.UTF-8', + 'xh': 'xh_ZA.ISO8859-1', + 'xh_za': 'xh_ZA.ISO8859-1', + 'yi': 'yi_US.CP1255', + 'yi_us': 'yi_US.CP1255', + 'yo_ng': 'yo_NG.UTF-8', + 'yue_hk': 'yue_HK.UTF-8', + 'yuw_pg': 'yuw_PG.UTF-8', + 'zh': 'zh_CN.eucCN', + 'zh_cn': 'zh_CN.gb2312', + 'zh_cn.big5': 'zh_TW.big5', + 'zh_cn.euc': 'zh_CN.eucCN', + 'zh_hk': 'zh_HK.big5hkscs', + 'zh_hk.big5hk': 'zh_HK.big5hkscs', + 'zh_sg': 'zh_SG.GB2312', + 'zh_sg.gbk': 'zh_SG.GBK', + 'zh_tw': 'zh_TW.big5', + 'zh_tw.euc': 'zh_TW.eucTW', + 'zh_tw.euctw': 'zh_TW.eucTW', + 'zu': 'zu_ZA.ISO8859-1', + 'zu_za': 'zu_ZA.ISO8859-1', +} + +# +# This maps Windows language identifiers to locale 
strings.
+#
+# This list has been updated from
+# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
+# to include every locale up to Windows Vista.
+#
+# NOTE: this mapping is incomplete. If your language is missing, please
+# submit a bug report to the Python bug tracker at http://bugs.python.org/
+# Make sure you include the missing language identifier and the suggested
+# locale code.
+#
+
+windows_locale = {
+    0x0436: "af_ZA", # Afrikaans
+    0x041c: "sq_AL", # Albanian
+    0x0484: "gsw_FR",# Alsatian - France
+    0x045e: "am_ET", # Amharic - Ethiopia
+    0x0401: "ar_SA", # Arabic - Saudi Arabia
+    0x0801: "ar_IQ", # Arabic - Iraq
+    0x0c01: "ar_EG", # Arabic - Egypt
+    0x1001: "ar_LY", # Arabic - Libya
+    0x1401: "ar_DZ", # Arabic - Algeria
+    0x1801: "ar_MA", # Arabic - Morocco
+    0x1c01: "ar_TN", # Arabic - Tunisia
+    0x2001: "ar_OM", # Arabic - Oman
+    0x2401: "ar_YE", # Arabic - Yemen
+    0x2801: "ar_SY", # Arabic - Syria
+    0x2c01: "ar_JO", # Arabic - Jordan
+    0x3001: "ar_LB", # Arabic - Lebanon
+    0x3401: "ar_KW", # Arabic - Kuwait
+    0x3801: "ar_AE", # Arabic - United Arab Emirates
+    0x3c01: "ar_BH", # Arabic - Bahrain
+    0x4001: "ar_QA", # Arabic - Qatar
+    0x042b: "hy_AM", # Armenian
+    0x044d: "as_IN", # Assamese - India
+    0x042c: "az_AZ", # Azeri - Latin
+    0x082c: "az_AZ", # Azeri - Cyrillic
+    0x046d: "ba_RU", # Bashkir
+    0x042d: "eu_ES", # Basque - Spain
+    0x0423: "be_BY", # Belarusian
+    0x0445: "bn_IN", # Bengali - India
+    0x201a: "bs_BA", # Bosnian - Cyrillic
+    0x141a: "bs_BA", # Bosnian - Latin
+    0x047e: "br_FR", # Breton - France
+    0x0402: "bg_BG", # Bulgarian
+#    0x0455: "my_MM", # Burmese - Not supported
+    0x0403: "ca_ES", # Catalan
+    0x0004: "zh_CHS",# Chinese - Simplified
+    0x0404: "zh_TW", # Chinese - Taiwan
+    0x0804: "zh_CN", # Chinese - PRC
+    0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
+    0x1004: "zh_SG", # Chinese - Singapore
+    0x1404: "zh_MO", # Chinese - Macao S.A.R.
+ 0x7c04: "zh_CHT",# Chinese - Traditional + 0x0483: "co_FR", # Corsican - France + 0x041a: "hr_HR", # Croatian + 0x101a: "hr_BA", # Croatian - Bosnia + 0x0405: "cs_CZ", # Czech + 0x0406: "da_DK", # Danish + 0x048c: "gbz_AF",# Dari - Afghanistan + 0x0465: "div_MV",# Divehi - Maldives + 0x0413: "nl_NL", # Dutch - The Netherlands + 0x0813: "nl_BE", # Dutch - Belgium + 0x0409: "en_US", # English - United States + 0x0809: "en_GB", # English - United Kingdom + 0x0c09: "en_AU", # English - Australia + 0x1009: "en_CA", # English - Canada + 0x1409: "en_NZ", # English - New Zealand + 0x1809: "en_IE", # English - Ireland + 0x1c09: "en_ZA", # English - South Africa + 0x2009: "en_JA", # English - Jamaica + 0x2409: "en_CB", # English - Caribbean + 0x2809: "en_BZ", # English - Belize + 0x2c09: "en_TT", # English - Trinidad + 0x3009: "en_ZW", # English - Zimbabwe + 0x3409: "en_PH", # English - Philippines + 0x4009: "en_IN", # English - India + 0x4409: "en_MY", # English - Malaysia + 0x4809: "en_IN", # English - Singapore + 0x0425: "et_EE", # Estonian + 0x0438: "fo_FO", # Faroese + 0x0464: "fil_PH",# Filipino + 0x040b: "fi_FI", # Finnish + 0x040c: "fr_FR", # French - France + 0x080c: "fr_BE", # French - Belgium + 0x0c0c: "fr_CA", # French - Canada + 0x100c: "fr_CH", # French - Switzerland + 0x140c: "fr_LU", # French - Luxembourg + 0x180c: "fr_MC", # French - Monaco + 0x0462: "fy_NL", # Frisian - Netherlands + 0x0456: "gl_ES", # Galician + 0x0437: "ka_GE", # Georgian + 0x0407: "de_DE", # German - Germany + 0x0807: "de_CH", # German - Switzerland + 0x0c07: "de_AT", # German - Austria + 0x1007: "de_LU", # German - Luxembourg + 0x1407: "de_LI", # German - Liechtenstein + 0x0408: "el_GR", # Greek + 0x046f: "kl_GL", # Greenlandic - Greenland + 0x0447: "gu_IN", # Gujarati + 0x0468: "ha_NG", # Hausa - Latin + 0x040d: "he_IL", # Hebrew + 0x0439: "hi_IN", # Hindi + 0x040e: "hu_HU", # Hungarian + 0x040f: "is_IS", # Icelandic + 0x0421: "id_ID", # Indonesian + 0x045d: "iu_CA", # Inuktitut - Syllabics + 0x085d: "iu_CA", # Inuktitut - Latin + 0x083c: "ga_IE", # Irish - Ireland + 0x0410: "it_IT", # Italian - Italy + 0x0810: "it_CH", # Italian - Switzerland + 0x0411: "ja_JP", # Japanese + 0x044b: "kn_IN", # Kannada - India + 0x043f: "kk_KZ", # Kazakh + 0x0453: "kh_KH", # Khmer - Cambodia + 0x0486: "qut_GT",# K'iche - Guatemala + 0x0487: "rw_RW", # Kinyarwanda - Rwanda + 0x0457: "kok_IN",# Konkani + 0x0412: "ko_KR", # Korean + 0x0440: "ky_KG", # Kyrgyz + 0x0454: "lo_LA", # Lao - Lao PDR + 0x0426: "lv_LV", # Latvian + 0x0427: "lt_LT", # Lithuanian + 0x082e: "dsb_DE",# Lower Sorbian - Germany + 0x046e: "lb_LU", # Luxembourgish + 0x042f: "mk_MK", # FYROM Macedonian + 0x043e: "ms_MY", # Malay - Malaysia + 0x083e: "ms_BN", # Malay - Brunei Darussalam + 0x044c: "ml_IN", # Malayalam - India + 0x043a: "mt_MT", # Maltese + 0x0481: "mi_NZ", # Maori + 0x047a: "arn_CL",# Mapudungun + 0x044e: "mr_IN", # Marathi + 0x047c: "moh_CA",# Mohawk - Canada + 0x0450: "mn_MN", # Mongolian - Cyrillic + 0x0850: "mn_CN", # Mongolian - PRC + 0x0461: "ne_NP", # Nepali + 0x0414: "nb_NO", # Norwegian - Bokmal + 0x0814: "nn_NO", # Norwegian - Nynorsk + 0x0482: "oc_FR", # Occitan - France + 0x0448: "or_IN", # Oriya - India + 0x0463: "ps_AF", # Pashto - Afghanistan + 0x0429: "fa_IR", # Persian + 0x0415: "pl_PL", # Polish + 0x0416: "pt_BR", # Portuguese - Brazil + 0x0816: "pt_PT", # Portuguese - Portugal + 0x0446: "pa_IN", # Punjabi + 0x046b: "quz_BO",# Quechua (Bolivia) + 0x086b: "quz_EC",# Quechua (Ecuador) + 0x0c6b: "quz_PE",# Quechua (Peru) + 0x0418: 
"ro_RO", # Romanian - Romania + 0x0417: "rm_CH", # Romansh + 0x0419: "ru_RU", # Russian + 0x243b: "smn_FI",# Sami Finland + 0x103b: "smj_NO",# Sami Norway + 0x143b: "smj_SE",# Sami Sweden + 0x043b: "se_NO", # Sami Northern Norway + 0x083b: "se_SE", # Sami Northern Sweden + 0x0c3b: "se_FI", # Sami Northern Finland + 0x203b: "sms_FI",# Sami Skolt + 0x183b: "sma_NO",# Sami Southern Norway + 0x1c3b: "sma_SE",# Sami Southern Sweden + 0x044f: "sa_IN", # Sanskrit + 0x0c1a: "sr_SP", # Serbian - Cyrillic + 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic + 0x081a: "sr_SP", # Serbian - Latin + 0x181a: "sr_BA", # Serbian - Bosnia Latin + 0x045b: "si_LK", # Sinhala - Sri Lanka + 0x046c: "ns_ZA", # Northern Sotho + 0x0432: "tn_ZA", # Setswana - Southern Africa + 0x041b: "sk_SK", # Slovak + 0x0424: "sl_SI", # Slovenian + 0x040a: "es_ES", # Spanish - Spain + 0x080a: "es_MX", # Spanish - Mexico + 0x0c0a: "es_ES", # Spanish - Spain (Modern) + 0x100a: "es_GT", # Spanish - Guatemala + 0x140a: "es_CR", # Spanish - Costa Rica + 0x180a: "es_PA", # Spanish - Panama + 0x1c0a: "es_DO", # Spanish - Dominican Republic + 0x200a: "es_VE", # Spanish - Venezuela + 0x240a: "es_CO", # Spanish - Colombia + 0x280a: "es_PE", # Spanish - Peru + 0x2c0a: "es_AR", # Spanish - Argentina + 0x300a: "es_EC", # Spanish - Ecuador + 0x340a: "es_CL", # Spanish - Chile + 0x380a: "es_UR", # Spanish - Uruguay + 0x3c0a: "es_PY", # Spanish - Paraguay + 0x400a: "es_BO", # Spanish - Bolivia + 0x440a: "es_SV", # Spanish - El Salvador + 0x480a: "es_HN", # Spanish - Honduras + 0x4c0a: "es_NI", # Spanish - Nicaragua + 0x500a: "es_PR", # Spanish - Puerto Rico + 0x540a: "es_US", # Spanish - United States +# 0x0430: "", # Sutu - Not supported + 0x0441: "sw_KE", # Swahili + 0x041d: "sv_SE", # Swedish - Sweden + 0x081d: "sv_FI", # Swedish - Finland + 0x045a: "syr_SY",# Syriac + 0x0428: "tg_TJ", # Tajik - Cyrillic + 0x085f: "tmz_DZ",# Tamazight - Latin + 0x0449: "ta_IN", # Tamil + 0x0444: "tt_RU", # Tatar + 0x044a: "te_IN", # Telugu + 0x041e: "th_TH", # Thai + 0x0851: "bo_BT", # Tibetan - Bhutan + 0x0451: "bo_CN", # Tibetan - PRC + 0x041f: "tr_TR", # Turkish + 0x0442: "tk_TM", # Turkmen - Cyrillic + 0x0480: "ug_CN", # Uighur - Arabic + 0x0422: "uk_UA", # Ukrainian + 0x042e: "wen_DE",# Upper Sorbian - Germany + 0x0420: "ur_PK", # Urdu + 0x0820: "ur_IN", # Urdu - India + 0x0443: "uz_UZ", # Uzbek - Latin + 0x0843: "uz_UZ", # Uzbek - Cyrillic + 0x042a: "vi_VN", # Vietnamese + 0x0452: "cy_GB", # Welsh + 0x0488: "wo_SN", # Wolof - Senegal + 0x0434: "xh_ZA", # Xhosa - South Africa + 0x0485: "sah_RU",# Yakut - Cyrillic + 0x0478: "ii_CN", # Yi - PRC + 0x046a: "yo_NG", # Yoruba - Nigeria + 0x0435: "zu_ZA", # Zulu +} + +def _print_locale(): + + """ Test function. 
+ """ + categories = {} + def _init_categories(categories=categories): + for k,v in globals().items(): + if k[:3] == 'LC_': + categories[k] = v + _init_categories() + del categories['LC_ALL'] + + print('Locale defaults as determined by getdefaultlocale():') + print('-'*72) + lang, enc = getdefaultlocale() + print('Language: ', lang or '(undefined)') + print('Encoding: ', enc or '(undefined)') + print() + + print('Locale settings on startup:') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + + print() + print('Locale settings after calling resetlocale():') + print('-'*72) + resetlocale() + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + + try: + setlocale(LC_ALL, "") + except: + print('NOTE:') + print('setlocale(LC_ALL, "") does not support the default locale') + print('given in the OS environment variables.') + else: + print() + print('Locale settings after calling setlocale(LC_ALL, ""):') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + +### + +try: + LC_MESSAGES +except NameError: + pass +else: + __all__.append("LC_MESSAGES") + +if __name__=='__main__': + print('Locale aliasing:') + print() + _print_locale() + print() + print('Number formatting:') + print() + _test() diff --git a/pllava/lib/python3.10/mailbox.py b/pllava/lib/python3.10/mailbox.py new file mode 100644 index 0000000000000000000000000000000000000000..70da07ed2e9e8bc18dfc26ac1762f4102bcf4426 --- /dev/null +++ b/pllava/lib/python3.10/mailbox.py @@ -0,0 +1,2151 @@ +"""Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes.""" + +# Notes for authors of new mailbox subclasses: +# +# Remember to fsync() changes to disk before closing a modified file +# or returning from a flush() method. See functions _sync_flush() and +# _sync_close(). 
+ +import os +import time +import calendar +import socket +import errno +import copy +import warnings +import email +import email.message +import email.generator +import io +import contextlib +from types import GenericAlias +try: + import fcntl +except ImportError: + fcntl = None + +__all__ = ['Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF', + 'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage', + 'BabylMessage', 'MMDFMessage', 'Error', 'NoSuchMailboxError', + 'NotEmptyError', 'ExternalClashError', 'FormatError'] + +linesep = os.linesep.encode('ascii') + +class Mailbox: + """A group of messages in a particular place.""" + + def __init__(self, path, factory=None, create=True): + """Initialize a Mailbox instance.""" + self._path = os.path.abspath(os.path.expanduser(path)) + self._factory = factory + + def add(self, message): + """Add message and return assigned key.""" + raise NotImplementedError('Method must be implemented by subclass') + + def remove(self, key): + """Remove the keyed message; raise KeyError if it doesn't exist.""" + raise NotImplementedError('Method must be implemented by subclass') + + def __delitem__(self, key): + self.remove(key) + + def discard(self, key): + """If the keyed message exists, remove it.""" + try: + self.remove(key) + except KeyError: + pass + + def __setitem__(self, key, message): + """Replace the keyed message; raise KeyError if it doesn't exist.""" + raise NotImplementedError('Method must be implemented by subclass') + + def get(self, key, default=None): + """Return the keyed message, or default if it doesn't exist.""" + try: + return self.__getitem__(key) + except KeyError: + return default + + def __getitem__(self, key): + """Return the keyed message; raise KeyError if it doesn't exist.""" + if not self._factory: + return self.get_message(key) + else: + with contextlib.closing(self.get_file(key)) as file: + return self._factory(file) + + def get_message(self, key): + """Return a Message representation or raise a KeyError.""" + raise NotImplementedError('Method must be implemented by subclass') + + def get_string(self, key): + """Return a string representation or raise a KeyError. + + Uses email.message.Message to create a 7bit clean string + representation of the message.""" + return email.message_from_bytes(self.get_bytes(key)).as_string() + + def get_bytes(self, key): + """Return a byte string representation or raise a KeyError.""" + raise NotImplementedError('Method must be implemented by subclass') + + def get_file(self, key): + """Return a file-like representation or raise a KeyError.""" + raise NotImplementedError('Method must be implemented by subclass') + + def iterkeys(self): + """Return an iterator over keys.""" + raise NotImplementedError('Method must be implemented by subclass') + + def keys(self): + """Return a list of keys.""" + return list(self.iterkeys()) + + def itervalues(self): + """Return an iterator over all messages.""" + for key in self.iterkeys(): + try: + value = self[key] + except KeyError: + continue + yield value + + def __iter__(self): + return self.itervalues() + + def values(self): + """Return a list of messages. Memory intensive.""" + return list(self.itervalues()) + + def iteritems(self): + """Return an iterator over (key, message) tuples.""" + for key in self.iterkeys(): + try: + value = self[key] + except KeyError: + continue + yield (key, value) + + def items(self): + """Return a list of (key, message) tuples. 
Memory intensive.""" + return list(self.iteritems()) + + def __contains__(self, key): + """Return True if the keyed message exists, False otherwise.""" + raise NotImplementedError('Method must be implemented by subclass') + + def __len__(self): + """Return a count of messages in the mailbox.""" + raise NotImplementedError('Method must be implemented by subclass') + + def clear(self): + """Delete all messages.""" + for key in self.keys(): + self.discard(key) + + def pop(self, key, default=None): + """Delete the keyed message and return it, or default.""" + try: + result = self[key] + except KeyError: + return default + self.discard(key) + return result + + def popitem(self): + """Delete an arbitrary (key, message) pair and return it.""" + for key in self.iterkeys(): + return (key, self.pop(key)) # This is only run once. + else: + raise KeyError('No messages in mailbox') + + def update(self, arg=None): + """Change the messages that correspond to certain keys.""" + if hasattr(arg, 'iteritems'): + source = arg.iteritems() + elif hasattr(arg, 'items'): + source = arg.items() + else: + source = arg + bad_key = False + for key, message in source: + try: + self[key] = message + except KeyError: + bad_key = True + if bad_key: + raise KeyError('No message with key(s)') + + def flush(self): + """Write any pending changes to the disk.""" + raise NotImplementedError('Method must be implemented by subclass') + + def lock(self): + """Lock the mailbox.""" + raise NotImplementedError('Method must be implemented by subclass') + + def unlock(self): + """Unlock the mailbox if it is locked.""" + raise NotImplementedError('Method must be implemented by subclass') + + def close(self): + """Flush and close the mailbox.""" + raise NotImplementedError('Method must be implemented by subclass') + + def _string_to_bytes(self, message): + # If a message is not 7bit clean, we refuse to handle it since it + # likely came from reading invalid messages in text mode, and that way + # lies mojibake. + try: + return message.encode('ascii') + except UnicodeError: + raise ValueError("String input must be ASCII-only; " + "use bytes or a Message instead") + + # Whether each message must end in a newline + _append_newline = False + + def _dump_message(self, message, target, mangle_from_=False): + # This assumes the target file is open in binary mode. 
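+        # Input types handled below: email.message.Message (flattened with
+        # BytesGenerator), str or bytes (str must be ASCII-clean, see
+        # _string_to_bytes), and binary file-like objects (copied line by
+        # line with universal-newline translation).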
+ """Dump message contents to target file.""" + if isinstance(message, email.message.Message): + buffer = io.BytesIO() + gen = email.generator.BytesGenerator(buffer, mangle_from_, 0) + gen.flatten(message) + buffer.seek(0) + data = buffer.read() + data = data.replace(b'\n', linesep) + target.write(data) + if self._append_newline and not data.endswith(linesep): + # Make sure the message ends with a newline + target.write(linesep) + elif isinstance(message, (str, bytes, io.StringIO)): + if isinstance(message, io.StringIO): + warnings.warn("Use of StringIO input is deprecated, " + "use BytesIO instead", DeprecationWarning, 3) + message = message.getvalue() + if isinstance(message, str): + message = self._string_to_bytes(message) + if mangle_from_: + message = message.replace(b'\nFrom ', b'\n>From ') + message = message.replace(b'\n', linesep) + target.write(message) + if self._append_newline and not message.endswith(linesep): + # Make sure the message ends with a newline + target.write(linesep) + elif hasattr(message, 'read'): + if hasattr(message, 'buffer'): + warnings.warn("Use of text mode files is deprecated, " + "use a binary mode file instead", DeprecationWarning, 3) + message = message.buffer + lastline = None + while True: + line = message.readline() + # Universal newline support. + if line.endswith(b'\r\n'): + line = line[:-2] + b'\n' + elif line.endswith(b'\r'): + line = line[:-1] + b'\n' + if not line: + break + if mangle_from_ and line.startswith(b'From '): + line = b'>From ' + line[5:] + line = line.replace(b'\n', linesep) + target.write(line) + lastline = line + if self._append_newline and lastline and not lastline.endswith(linesep): + # Make sure the message ends with a newline + target.write(linesep) + else: + raise TypeError('Invalid message type: %s' % type(message)) + + __class_getitem__ = classmethod(GenericAlias) + + +class Maildir(Mailbox): + """A qmail-style Maildir mailbox.""" + + colon = ':' + + def __init__(self, dirname, factory=None, create=True): + """Initialize a Maildir instance.""" + Mailbox.__init__(self, dirname, factory, create) + self._paths = { + 'tmp': os.path.join(self._path, 'tmp'), + 'new': os.path.join(self._path, 'new'), + 'cur': os.path.join(self._path, 'cur'), + } + if not os.path.exists(self._path): + if create: + os.mkdir(self._path, 0o700) + for path in self._paths.values(): + os.mkdir(path, 0o700) + else: + raise NoSuchMailboxError(self._path) + self._toc = {} + self._toc_mtimes = {'cur': 0, 'new': 0} + self._last_read = 0 # Records last time we read cur/new + self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing + + def add(self, message): + """Add message and return assigned key.""" + tmp_file = self._create_tmp() + try: + self._dump_message(message, tmp_file) + except BaseException: + tmp_file.close() + os.remove(tmp_file.name) + raise + _sync_close(tmp_file) + if isinstance(message, MaildirMessage): + subdir = message.get_subdir() + suffix = self.colon + message.get_info() + if suffix == self.colon: + suffix = '' + else: + subdir = 'new' + suffix = '' + uniq = os.path.basename(tmp_file.name).split(self.colon)[0] + dest = os.path.join(self._path, subdir, uniq + suffix) + if isinstance(message, MaildirMessage): + os.utime(tmp_file.name, + (os.path.getatime(tmp_file.name), message.get_date())) + # No file modification should be done after the file is moved to its + # final position in order to prevent race conditions with changes + # from other programs + try: + try: + os.link(tmp_file.name, dest) + except (AttributeError, 
PermissionError): + os.rename(tmp_file.name, dest) + else: + os.remove(tmp_file.name) + except OSError as e: + os.remove(tmp_file.name) + if e.errno == errno.EEXIST: + raise ExternalClashError('Name clash with existing message: %s' + % dest) + else: + raise + return uniq + + def remove(self, key): + """Remove the keyed message; raise KeyError if it doesn't exist.""" + os.remove(os.path.join(self._path, self._lookup(key))) + + def discard(self, key): + """If the keyed message exists, remove it.""" + # This overrides an inapplicable implementation in the superclass. + try: + self.remove(key) + except (KeyError, FileNotFoundError): + pass + + def __setitem__(self, key, message): + """Replace the keyed message; raise KeyError if it doesn't exist.""" + old_subpath = self._lookup(key) + temp_key = self.add(message) + temp_subpath = self._lookup(temp_key) + if isinstance(message, MaildirMessage): + # temp's subdir and suffix were specified by message. + dominant_subpath = temp_subpath + else: + # temp's subdir and suffix were defaults from add(). + dominant_subpath = old_subpath + subdir = os.path.dirname(dominant_subpath) + if self.colon in dominant_subpath: + suffix = self.colon + dominant_subpath.split(self.colon)[-1] + else: + suffix = '' + self.discard(key) + tmp_path = os.path.join(self._path, temp_subpath) + new_path = os.path.join(self._path, subdir, key + suffix) + if isinstance(message, MaildirMessage): + os.utime(tmp_path, + (os.path.getatime(tmp_path), message.get_date())) + # No file modification should be done after the file is moved to its + # final position in order to prevent race conditions with changes + # from other programs + os.rename(tmp_path, new_path) + + def get_message(self, key): + """Return a Message representation or raise a KeyError.""" + subpath = self._lookup(key) + with open(os.path.join(self._path, subpath), 'rb') as f: + if self._factory: + msg = self._factory(f) + else: + msg = MaildirMessage(f) + subdir, name = os.path.split(subpath) + msg.set_subdir(subdir) + if self.colon in name: + msg.set_info(name.split(self.colon)[-1]) + msg.set_date(os.path.getmtime(os.path.join(self._path, subpath))) + return msg + + def get_bytes(self, key): + """Return a bytes representation or raise a KeyError.""" + with open(os.path.join(self._path, self._lookup(key)), 'rb') as f: + return f.read().replace(linesep, b'\n') + + def get_file(self, key): + """Return a file-like representation or raise a KeyError.""" + f = open(os.path.join(self._path, self._lookup(key)), 'rb') + return _ProxyFile(f) + + def iterkeys(self): + """Return an iterator over keys.""" + self._refresh() + for key in self._toc: + try: + self._lookup(key) + except KeyError: + continue + yield key + + def __contains__(self, key): + """Return True if the keyed message exists, False otherwise.""" + self._refresh() + return key in self._toc + + def __len__(self): + """Return a count of messages in the mailbox.""" + self._refresh() + return len(self._toc) + + def flush(self): + """Write any pending changes to disk.""" + # Maildir changes are always written immediately, so there's nothing + # to do. + pass + + def lock(self): + """Lock the mailbox.""" + return + + def unlock(self): + """Unlock the mailbox if it is locked.""" + return + + def close(self): + """Flush and close the mailbox.""" + return + + def list_folders(self): + """Return a list of folder names.""" + result = [] + for entry in os.listdir(self._path): + if len(entry) > 1 and entry[0] == '.' 
and \ + os.path.isdir(os.path.join(self._path, entry)): + result.append(entry[1:]) + return result + + def get_folder(self, folder): + """Return a Maildir instance for the named folder.""" + return Maildir(os.path.join(self._path, '.' + folder), + factory=self._factory, + create=False) + + def add_folder(self, folder): + """Create a folder and return a Maildir instance representing it.""" + path = os.path.join(self._path, '.' + folder) + result = Maildir(path, factory=self._factory) + maildirfolder_path = os.path.join(path, 'maildirfolder') + if not os.path.exists(maildirfolder_path): + os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY, + 0o666)) + return result + + def remove_folder(self, folder): + """Delete the named folder, which must be empty.""" + path = os.path.join(self._path, '.' + folder) + for entry in os.listdir(os.path.join(path, 'new')) + \ + os.listdir(os.path.join(path, 'cur')): + if len(entry) < 1 or entry[0] != '.': + raise NotEmptyError('Folder contains message(s): %s' % folder) + for entry in os.listdir(path): + if entry != 'new' and entry != 'cur' and entry != 'tmp' and \ + os.path.isdir(os.path.join(path, entry)): + raise NotEmptyError("Folder contains subdirectory '%s': %s" % + (folder, entry)) + for root, dirs, files in os.walk(path, topdown=False): + for entry in files: + os.remove(os.path.join(root, entry)) + for entry in dirs: + os.rmdir(os.path.join(root, entry)) + os.rmdir(path) + + def clean(self): + """Delete old files in "tmp".""" + now = time.time() + for entry in os.listdir(os.path.join(self._path, 'tmp')): + path = os.path.join(self._path, 'tmp', entry) + if now - os.path.getatime(path) > 129600: # 60 * 60 * 36 + os.remove(path) + + _count = 1 # This is used to generate unique file names. + + def _create_tmp(self): + """Create a file in the tmp subdirectory and open and return it.""" + now = time.time() + hostname = socket.gethostname() + if '/' in hostname: + hostname = hostname.replace('/', r'\057') + if ':' in hostname: + hostname = hostname.replace(':', r'\072') + uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(), + Maildir._count, hostname) + path = os.path.join(self._path, 'tmp', uniq) + try: + os.stat(path) + except FileNotFoundError: + Maildir._count += 1 + try: + return _create_carefully(path) + except FileExistsError: + pass + + # Fall through to here if stat succeeded or open raised EEXIST. + raise ExternalClashError('Name clash prevented file creation: %s' % + path) + + def _refresh(self): + """Update table of contents mapping.""" + # If it has been less than two seconds since the last _refresh() call, + # we have to unconditionally re-read the mailbox just in case it has + # been modified, because os.path.mtime() has a 2 sec resolution in the + # most common worst case (FAT) and a 1 sec resolution typically. This + # results in a few unnecessary re-reads when _refresh() is called + # multiple times in that interval, but once the clock ticks over, we + # will only re-read as needed. Because the filesystem might be being + # served by an independent system with its own clock, we record and + # compare with the mtimes from the filesystem. Because the other + # system's clock might be skewing relative to our clock, we add an + # extra delta to our wait. The default is one tenth second, but is an + # instance variable and so can be adjusted if dealing with a + # particularly skewed or irregular system. 
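+        # Worked example (illustrative): with the default _skewfactor of
+        # 0.1, a _refresh() call made 1.5s after the last read falls inside
+        # the 2.1s window and re-reads unconditionally, while a call made
+        # 3s later re-reads only if the recorded mtimes of 'new'/'cur'
+        # have advanced.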
+ if time.time() - self._last_read > 2 + self._skewfactor: + refresh = False + for subdir in self._toc_mtimes: + mtime = os.path.getmtime(self._paths[subdir]) + if mtime > self._toc_mtimes[subdir]: + refresh = True + self._toc_mtimes[subdir] = mtime + if not refresh: + return + # Refresh toc + self._toc = {} + for subdir in self._toc_mtimes: + path = self._paths[subdir] + for entry in os.listdir(path): + p = os.path.join(path, entry) + if os.path.isdir(p): + continue + uniq = entry.split(self.colon)[0] + self._toc[uniq] = os.path.join(subdir, entry) + self._last_read = time.time() + + def _lookup(self, key): + """Use TOC to return subpath for given key, or raise a KeyError.""" + try: + if os.path.exists(os.path.join(self._path, self._toc[key])): + return self._toc[key] + except KeyError: + pass + self._refresh() + try: + return self._toc[key] + except KeyError: + raise KeyError('No message with key: %s' % key) from None + + # This method is for backward compatibility only. + def next(self): + """Return the next message in a one-time iteration.""" + if not hasattr(self, '_onetime_keys'): + self._onetime_keys = self.iterkeys() + while True: + try: + return self[next(self._onetime_keys)] + except StopIteration: + return None + except KeyError: + continue + + +class _singlefileMailbox(Mailbox): + """A single-file mailbox.""" + + def __init__(self, path, factory=None, create=True): + """Initialize a single-file mailbox.""" + Mailbox.__init__(self, path, factory, create) + try: + f = open(self._path, 'rb+') + except OSError as e: + if e.errno == errno.ENOENT: + if create: + f = open(self._path, 'wb+') + else: + raise NoSuchMailboxError(self._path) + elif e.errno in (errno.EACCES, errno.EROFS): + f = open(self._path, 'rb') + else: + raise + self._file = f + self._toc = None + self._next_key = 0 + self._pending = False # No changes require rewriting the file. + self._pending_sync = False # No need to sync the file + self._locked = False + self._file_length = None # Used to record mailbox size + + def add(self, message): + """Add message and return assigned key.""" + self._lookup() + self._toc[self._next_key] = self._append_message(message) + self._next_key += 1 + # _append_message appends the message to the mailbox file. We + # don't need a full rewrite + rename, sync is enough. + self._pending_sync = True + return self._next_key - 1 + + def remove(self, key): + """Remove the keyed message; raise KeyError if it doesn't exist.""" + self._lookup(key) + del self._toc[key] + self._pending = True + + def __setitem__(self, key, message): + """Replace the keyed message; raise KeyError if it doesn't exist.""" + self._lookup(key) + self._toc[key] = self._append_message(message) + self._pending = True + + def iterkeys(self): + """Return an iterator over keys.""" + self._lookup() + yield from self._toc.keys() + + def __contains__(self, key): + """Return True if the keyed message exists, False otherwise.""" + self._lookup() + return key in self._toc + + def __len__(self): + """Return a count of messages in the mailbox.""" + self._lookup() + return len(self._toc) + + def lock(self): + """Lock the mailbox.""" + if not self._locked: + _lock_file(self._file) + self._locked = True + + def unlock(self): + """Unlock the mailbox if it is locked.""" + if self._locked: + _unlock_file(self._file) + self._locked = False + + def flush(self): + """Write any pending changes to disk.""" + if not self._pending: + if self._pending_sync: + # Messages have only been added, so syncing the file + # is enough. 
+ _sync_flush(self._file) + self._pending_sync = False + return + + # In order to be writing anything out at all, self._toc must + # already have been generated (and presumably has been modified + # by adding or deleting an item). + assert self._toc is not None + + # Check length of self._file; if it's changed, some other process + # has modified the mailbox since we scanned it. + self._file.seek(0, 2) + cur_len = self._file.tell() + if cur_len != self._file_length: + raise ExternalClashError('Size of mailbox file changed ' + '(expected %i, found %i)' % + (self._file_length, cur_len)) + + new_file = _create_temporary(self._path) + try: + new_toc = {} + self._pre_mailbox_hook(new_file) + for key in sorted(self._toc.keys()): + start, stop = self._toc[key] + self._file.seek(start) + self._pre_message_hook(new_file) + new_start = new_file.tell() + while True: + buffer = self._file.read(min(4096, + stop - self._file.tell())) + if not buffer: + break + new_file.write(buffer) + new_toc[key] = (new_start, new_file.tell()) + self._post_message_hook(new_file) + self._file_length = new_file.tell() + except: + new_file.close() + os.remove(new_file.name) + raise + _sync_close(new_file) + # self._file is about to get replaced, so no need to sync. + self._file.close() + # Make sure the new file's mode is the same as the old file's + mode = os.stat(self._path).st_mode + os.chmod(new_file.name, mode) + try: + os.rename(new_file.name, self._path) + except FileExistsError: + os.remove(self._path) + os.rename(new_file.name, self._path) + self._file = open(self._path, 'rb+') + self._toc = new_toc + self._pending = False + self._pending_sync = False + if self._locked: + _lock_file(self._file, dotlock=False) + + def _pre_mailbox_hook(self, f): + """Called before writing the mailbox to file f.""" + return + + def _pre_message_hook(self, f): + """Called before writing each message to file f.""" + return + + def _post_message_hook(self, f): + """Called after writing each message to file f.""" + return + + def close(self): + """Flush and close the mailbox.""" + try: + self.flush() + finally: + try: + if self._locked: + self.unlock() + finally: + self._file.close() # Sync has been done by self.flush() above. + + def _lookup(self, key=None): + """Return (start, stop) or raise KeyError.""" + if self._toc is None: + self._generate_toc() + if key is not None: + try: + return self._toc[key] + except KeyError: + raise KeyError('No message with key: %s' % key) from None + + def _append_message(self, message): + """Append message to mailbox and return (start, stop) offsets.""" + self._file.seek(0, 2) + before = self._file.tell() + if len(self._toc) == 0 and not self._pending: + # This is the first message, and the _pre_mailbox_hook + # hasn't yet been called. If self._pending is True, + # messages have been removed, so _pre_mailbox_hook must + # have been called already. 
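+ # Write the format-specific preamble, if any, at the top of the new
+ # mailbox file (Babyl's options section, for example); the base-class
+ # hook is a no-op.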
+ self._pre_mailbox_hook(self._file) + try: + self._pre_message_hook(self._file) + offsets = self._install_message(message) + self._post_message_hook(self._file) + except BaseException: + self._file.truncate(before) + raise + self._file.flush() + self._file_length = self._file.tell() # Record current length of mailbox + return offsets + + + +class _mboxMMDF(_singlefileMailbox): + """An mbox or MMDF mailbox.""" + + _mangle_from_ = True + + def get_message(self, key): + """Return a Message representation or raise a KeyError.""" + start, stop = self._lookup(key) + self._file.seek(start) + from_line = self._file.readline().replace(linesep, b'') + string = self._file.read(stop - self._file.tell()) + msg = self._message_factory(string.replace(linesep, b'\n')) + msg.set_from(from_line[5:].decode('ascii')) + return msg + + def get_string(self, key, from_=False): + """Return a string representation or raise a KeyError.""" + return email.message_from_bytes( + self.get_bytes(key, from_)).as_string(unixfrom=from_) + + def get_bytes(self, key, from_=False): + """Return a string representation or raise a KeyError.""" + start, stop = self._lookup(key) + self._file.seek(start) + if not from_: + self._file.readline() + string = self._file.read(stop - self._file.tell()) + return string.replace(linesep, b'\n') + + def get_file(self, key, from_=False): + """Return a file-like representation or raise a KeyError.""" + start, stop = self._lookup(key) + self._file.seek(start) + if not from_: + self._file.readline() + return _PartialFile(self._file, self._file.tell(), stop) + + def _install_message(self, message): + """Format a message and blindly write to self._file.""" + from_line = None + if isinstance(message, str): + message = self._string_to_bytes(message) + if isinstance(message, bytes) and message.startswith(b'From '): + newline = message.find(b'\n') + if newline != -1: + from_line = message[:newline] + message = message[newline + 1:] + else: + from_line = message + message = b'' + elif isinstance(message, _mboxMMDFMessage): + author = message.get_from().encode('ascii') + from_line = b'From ' + author + elif isinstance(message, email.message.Message): + from_line = message.get_unixfrom() # May be None. + if from_line is not None: + from_line = from_line.encode('ascii') + if from_line is None: + from_line = b'From MAILER-DAEMON ' + time.asctime(time.gmtime()).encode() + start = self._file.tell() + self._file.write(from_line + linesep) + self._dump_message(message, self._file, self._mangle_from_) + stop = self._file.tell() + return (start, stop) + + +class mbox(_mboxMMDF): + """A classic mbox mailbox.""" + + _mangle_from_ = True + + # All messages must end in a newline character, and + # _post_message_hooks outputs an empty line between messages. 
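+ # Illustrative framing of a stored message (address and header are
+ # hypothetical):
+ #   From alice@example.com Thu Jan  1 00:00:00 1970
+ #   Subject: hello
+ #
+ #   body
+ #   <empty line written by _post_message_hook before the next "From ">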
+ _append_newline = True + + def __init__(self, path, factory=None, create=True): + """Initialize an mbox mailbox.""" + self._message_factory = mboxMessage + _mboxMMDF.__init__(self, path, factory, create) + + def _post_message_hook(self, f): + """Called after writing each message to file f.""" + f.write(linesep) + + def _generate_toc(self): + """Generate key-to-(start, stop) table of contents.""" + starts, stops = [], [] + last_was_empty = False + self._file.seek(0) + while True: + line_pos = self._file.tell() + line = self._file.readline() + if line.startswith(b'From '): + if len(stops) < len(starts): + if last_was_empty: + stops.append(line_pos - len(linesep)) + else: + # The last line before the "From " line wasn't + # blank, but we consider it a start of a + # message anyway. + stops.append(line_pos) + starts.append(line_pos) + last_was_empty = False + elif not line: + if last_was_empty: + stops.append(line_pos - len(linesep)) + else: + stops.append(line_pos) + break + elif line == linesep: + last_was_empty = True + else: + last_was_empty = False + self._toc = dict(enumerate(zip(starts, stops))) + self._next_key = len(self._toc) + self._file_length = self._file.tell() + + +class MMDF(_mboxMMDF): + """An MMDF mailbox.""" + + def __init__(self, path, factory=None, create=True): + """Initialize an MMDF mailbox.""" + self._message_factory = MMDFMessage + _mboxMMDF.__init__(self, path, factory, create) + + def _pre_message_hook(self, f): + """Called before writing each message to file f.""" + f.write(b'\001\001\001\001' + linesep) + + def _post_message_hook(self, f): + """Called after writing each message to file f.""" + f.write(linesep + b'\001\001\001\001' + linesep) + + def _generate_toc(self): + """Generate key-to-(start, stop) table of contents.""" + starts, stops = [], [] + self._file.seek(0) + next_pos = 0 + while True: + line_pos = next_pos + line = self._file.readline() + next_pos = self._file.tell() + if line.startswith(b'\001\001\001\001' + linesep): + starts.append(next_pos) + while True: + line_pos = next_pos + line = self._file.readline() + next_pos = self._file.tell() + if line == b'\001\001\001\001' + linesep: + stops.append(line_pos - len(linesep)) + break + elif not line: + stops.append(line_pos) + break + elif not line: + break + self._toc = dict(enumerate(zip(starts, stops))) + self._next_key = len(self._toc) + self._file.seek(0, 2) + self._file_length = self._file.tell() + + +class MH(Mailbox): + """An MH mailbox.""" + + def __init__(self, path, factory=None, create=True): + """Initialize an MH instance.""" + Mailbox.__init__(self, path, factory, create) + if not os.path.exists(self._path): + if create: + os.mkdir(self._path, 0o700) + os.close(os.open(os.path.join(self._path, '.mh_sequences'), + os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)) + else: + raise NoSuchMailboxError(self._path) + self._locked = False + + def add(self, message): + """Add message and return assigned key.""" + keys = self.keys() + if len(keys) == 0: + new_key = 1 + else: + new_key = max(keys) + 1 + new_path = os.path.join(self._path, str(new_key)) + f = _create_carefully(new_path) + closed = False + try: + if self._locked: + _lock_file(f) + try: + try: + self._dump_message(message, f) + except BaseException: + # Unlock and close so it can be deleted on Windows + if self._locked: + _unlock_file(f) + _sync_close(f) + closed = True + os.remove(new_path) + raise + if isinstance(message, MHMessage): + self._dump_sequences(message, new_key) + finally: + if self._locked: + _unlock_file(f) + finally: + 
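+ # The error path above may already have closed and removed the file,
+ # in which case it must not be closed a second time.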
if not closed: + _sync_close(f) + return new_key + + def remove(self, key): + """Remove the keyed message; raise KeyError if it doesn't exist.""" + path = os.path.join(self._path, str(key)) + try: + f = open(path, 'rb+') + except OSError as e: + if e.errno == errno.ENOENT: + raise KeyError('No message with key: %s' % key) + else: + raise + else: + f.close() + os.remove(path) + + def __setitem__(self, key, message): + """Replace the keyed message; raise KeyError if it doesn't exist.""" + path = os.path.join(self._path, str(key)) + try: + f = open(path, 'rb+') + except OSError as e: + if e.errno == errno.ENOENT: + raise KeyError('No message with key: %s' % key) + else: + raise + try: + if self._locked: + _lock_file(f) + try: + os.close(os.open(path, os.O_WRONLY | os.O_TRUNC)) + self._dump_message(message, f) + if isinstance(message, MHMessage): + self._dump_sequences(message, key) + finally: + if self._locked: + _unlock_file(f) + finally: + _sync_close(f) + + def get_message(self, key): + """Return a Message representation or raise a KeyError.""" + try: + if self._locked: + f = open(os.path.join(self._path, str(key)), 'rb+') + else: + f = open(os.path.join(self._path, str(key)), 'rb') + except OSError as e: + if e.errno == errno.ENOENT: + raise KeyError('No message with key: %s' % key) + else: + raise + with f: + if self._locked: + _lock_file(f) + try: + msg = MHMessage(f) + finally: + if self._locked: + _unlock_file(f) + for name, key_list in self.get_sequences().items(): + if key in key_list: + msg.add_sequence(name) + return msg + + def get_bytes(self, key): + """Return a bytes representation or raise a KeyError.""" + try: + if self._locked: + f = open(os.path.join(self._path, str(key)), 'rb+') + else: + f = open(os.path.join(self._path, str(key)), 'rb') + except OSError as e: + if e.errno == errno.ENOENT: + raise KeyError('No message with key: %s' % key) + else: + raise + with f: + if self._locked: + _lock_file(f) + try: + return f.read().replace(linesep, b'\n') + finally: + if self._locked: + _unlock_file(f) + + def get_file(self, key): + """Return a file-like representation or raise a KeyError.""" + try: + f = open(os.path.join(self._path, str(key)), 'rb') + except OSError as e: + if e.errno == errno.ENOENT: + raise KeyError('No message with key: %s' % key) + else: + raise + return _ProxyFile(f) + + def iterkeys(self): + """Return an iterator over keys.""" + return iter(sorted(int(entry) for entry in os.listdir(self._path) + if entry.isdigit())) + + def __contains__(self, key): + """Return True if the keyed message exists, False otherwise.""" + return os.path.exists(os.path.join(self._path, str(key))) + + def __len__(self): + """Return a count of messages in the mailbox.""" + return len(list(self.iterkeys())) + + def lock(self): + """Lock the mailbox.""" + if not self._locked: + self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+') + _lock_file(self._file) + self._locked = True + + def unlock(self): + """Unlock the mailbox if it is locked.""" + if self._locked: + _unlock_file(self._file) + _sync_close(self._file) + del self._file + self._locked = False + + def flush(self): + """Write any pending changes to the disk.""" + return + + def close(self): + """Flush and close the mailbox.""" + if self._locked: + self.unlock() + + def list_folders(self): + """Return a list of folder names.""" + result = [] + for entry in os.listdir(self._path): + if os.path.isdir(os.path.join(self._path, entry)): + result.append(entry) + return result + + def get_folder(self, folder): + """Return 
an MH instance for the named folder.""" + return MH(os.path.join(self._path, folder), + factory=self._factory, create=False) + + def add_folder(self, folder): + """Create a folder and return an MH instance representing it.""" + return MH(os.path.join(self._path, folder), + factory=self._factory) + + def remove_folder(self, folder): + """Delete the named folder, which must be empty.""" + path = os.path.join(self._path, folder) + entries = os.listdir(path) + if entries == ['.mh_sequences']: + os.remove(os.path.join(path, '.mh_sequences')) + elif entries == []: + pass + else: + raise NotEmptyError('Folder not empty: %s' % self._path) + os.rmdir(path) + + def get_sequences(self): + """Return a name-to-key-list dictionary to define each sequence.""" + results = {} + with open(os.path.join(self._path, '.mh_sequences'), 'r', encoding='ASCII') as f: + all_keys = set(self.keys()) + for line in f: + try: + name, contents = line.split(':') + keys = set() + for spec in contents.split(): + if spec.isdigit(): + keys.add(int(spec)) + else: + start, stop = (int(x) for x in spec.split('-')) + keys.update(range(start, stop + 1)) + results[name] = [key for key in sorted(keys) \ + if key in all_keys] + if len(results[name]) == 0: + del results[name] + except ValueError: + raise FormatError('Invalid sequence specification: %s' % + line.rstrip()) + return results + + def set_sequences(self, sequences): + """Set sequences using the given name-to-key-list dictionary.""" + f = open(os.path.join(self._path, '.mh_sequences'), 'r+', encoding='ASCII') + try: + os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC)) + for name, keys in sequences.items(): + if len(keys) == 0: + continue + f.write(name + ':') + prev = None + completing = False + for key in sorted(set(keys)): + if key - 1 == prev: + if not completing: + completing = True + f.write('-') + elif completing: + completing = False + f.write('%s %s' % (prev, key)) + else: + f.write(' %s' % key) + prev = key + if completing: + f.write(str(prev) + '\n') + else: + f.write('\n') + finally: + _sync_close(f) + + def pack(self): + """Re-name messages to eliminate numbering gaps. 
Invalidates keys.""" + sequences = self.get_sequences() + prev = 0 + changes = [] + for key in self.iterkeys(): + if key - 1 != prev: + changes.append((key, prev + 1)) + try: + os.link(os.path.join(self._path, str(key)), + os.path.join(self._path, str(prev + 1))) + except (AttributeError, PermissionError): + os.rename(os.path.join(self._path, str(key)), + os.path.join(self._path, str(prev + 1))) + else: + os.unlink(os.path.join(self._path, str(key))) + prev += 1 + self._next_key = prev + 1 + if len(changes) == 0: + return + for name, key_list in sequences.items(): + for old, new in changes: + if old in key_list: + key_list[key_list.index(old)] = new + self.set_sequences(sequences) + + def _dump_sequences(self, message, key): + """Inspect a new MHMessage and update sequences appropriately.""" + pending_sequences = message.get_sequences() + all_sequences = self.get_sequences() + for name, key_list in all_sequences.items(): + if name in pending_sequences: + key_list.append(key) + elif key in key_list: + del key_list[key_list.index(key)] + for sequence in pending_sequences: + if sequence not in all_sequences: + all_sequences[sequence] = [key] + self.set_sequences(all_sequences) + + +class Babyl(_singlefileMailbox): + """An Rmail-style Babyl mailbox.""" + + _special_labels = frozenset({'unseen', 'deleted', 'filed', 'answered', + 'forwarded', 'edited', 'resent'}) + + def __init__(self, path, factory=None, create=True): + """Initialize a Babyl mailbox.""" + _singlefileMailbox.__init__(self, path, factory, create) + self._labels = {} + + def add(self, message): + """Add message and return assigned key.""" + key = _singlefileMailbox.add(self, message) + if isinstance(message, BabylMessage): + self._labels[key] = message.get_labels() + return key + + def remove(self, key): + """Remove the keyed message; raise KeyError if it doesn't exist.""" + _singlefileMailbox.remove(self, key) + if key in self._labels: + del self._labels[key] + + def __setitem__(self, key, message): + """Replace the keyed message; raise KeyError if it doesn't exist.""" + _singlefileMailbox.__setitem__(self, key, message) + if isinstance(message, BabylMessage): + self._labels[key] = message.get_labels() + + def get_message(self, key): + """Return a Message representation or raise a KeyError.""" + start, stop = self._lookup(key) + self._file.seek(start) + self._file.readline() # Skip b'1,' line specifying labels. + original_headers = io.BytesIO() + while True: + line = self._file.readline() + if line == b'*** EOOH ***' + linesep or not line: + break + original_headers.write(line.replace(linesep, b'\n')) + visible_headers = io.BytesIO() + while True: + line = self._file.readline() + if line == linesep or not line: + break + visible_headers.write(line.replace(linesep, b'\n')) + # Read up to the stop, or to the end + n = stop - self._file.tell() + assert n >= 0 + body = self._file.read(n) + body = body.replace(linesep, b'\n') + msg = BabylMessage(original_headers.getvalue() + body) + msg.set_visible(visible_headers.getvalue()) + if key in self._labels: + msg.set_labels(self._labels[key]) + return msg + + def get_bytes(self, key): + """Return a string representation or raise a KeyError.""" + start, stop = self._lookup(key) + self._file.seek(start) + self._file.readline() # Skip b'1,' line specifying labels. 
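+ # The original header block runs up to the b'*** EOOH ***' marker; the
+ # visible-header block that follows the marker is read but discarded.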
+ original_headers = io.BytesIO() + while True: + line = self._file.readline() + if line == b'*** EOOH ***' + linesep or not line: + break + original_headers.write(line.replace(linesep, b'\n')) + while True: + line = self._file.readline() + if line == linesep or not line: + break + headers = original_headers.getvalue() + n = stop - self._file.tell() + assert n >= 0 + data = self._file.read(n) + data = data.replace(linesep, b'\n') + return headers + data + + def get_file(self, key): + """Return a file-like representation or raise a KeyError.""" + return io.BytesIO(self.get_bytes(key).replace(b'\n', linesep)) + + def get_labels(self): + """Return a list of user-defined labels in the mailbox.""" + self._lookup() + labels = set() + for label_list in self._labels.values(): + labels.update(label_list) + labels.difference_update(self._special_labels) + return list(labels) + + def _generate_toc(self): + """Generate key-to-(start, stop) table of contents.""" + starts, stops = [], [] + self._file.seek(0) + next_pos = 0 + label_lists = [] + while True: + line_pos = next_pos + line = self._file.readline() + next_pos = self._file.tell() + if line == b'\037\014' + linesep: + if len(stops) < len(starts): + stops.append(line_pos - len(linesep)) + starts.append(next_pos) + labels = [label.strip() for label + in self._file.readline()[1:].split(b',') + if label.strip()] + label_lists.append(labels) + elif line == b'\037' or line == b'\037' + linesep: + if len(stops) < len(starts): + stops.append(line_pos - len(linesep)) + elif not line: + stops.append(line_pos - len(linesep)) + break + self._toc = dict(enumerate(zip(starts, stops))) + self._labels = dict(enumerate(label_lists)) + self._next_key = len(self._toc) + self._file.seek(0, 2) + self._file_length = self._file.tell() + + def _pre_mailbox_hook(self, f): + """Called before writing the mailbox to file f.""" + babyl = b'BABYL OPTIONS:' + linesep + babyl += b'Version: 5' + linesep + labels = self.get_labels() + labels = (label.encode() for label in labels) + babyl += b'Labels:' + b','.join(labels) + linesep + babyl += b'\037' + f.write(babyl) + + def _pre_message_hook(self, f): + """Called before writing each message to file f.""" + f.write(b'\014' + linesep) + + def _post_message_hook(self, f): + """Called after writing each message to file f.""" + f.write(linesep + b'\037') + + def _install_message(self, message): + """Write message contents and return (start, stop).""" + start = self._file.tell() + if isinstance(message, BabylMessage): + special_labels = [] + labels = [] + for label in message.get_labels(): + if label in self._special_labels: + special_labels.append(label) + else: + labels.append(label) + self._file.write(b'1') + for label in special_labels: + self._file.write(b', ' + label.encode()) + self._file.write(b',,') + for label in labels: + self._file.write(b' ' + label.encode() + b',') + self._file.write(linesep) + else: + self._file.write(b'1,,' + linesep) + if isinstance(message, email.message.Message): + orig_buffer = io.BytesIO() + orig_generator = email.generator.BytesGenerator(orig_buffer, False, 0) + orig_generator.flatten(message) + orig_buffer.seek(0) + while True: + line = orig_buffer.readline() + self._file.write(line.replace(b'\n', linesep)) + if line == b'\n' or not line: + break + self._file.write(b'*** EOOH ***' + linesep) + if isinstance(message, BabylMessage): + vis_buffer = io.BytesIO() + vis_generator = email.generator.BytesGenerator(vis_buffer, False, 0) + vis_generator.flatten(message.get_visible()) + while True: + line = 
vis_buffer.readline() + self._file.write(line.replace(b'\n', linesep)) + if line == b'\n' or not line: + break + else: + orig_buffer.seek(0) + while True: + line = orig_buffer.readline() + self._file.write(line.replace(b'\n', linesep)) + if line == b'\n' or not line: + break + while True: + buffer = orig_buffer.read(4096) # Buffer size is arbitrary. + if not buffer: + break + self._file.write(buffer.replace(b'\n', linesep)) + elif isinstance(message, (bytes, str, io.StringIO)): + if isinstance(message, io.StringIO): + warnings.warn("Use of StringIO input is deprecated, " + "use BytesIO instead", DeprecationWarning, 3) + message = message.getvalue() + if isinstance(message, str): + message = self._string_to_bytes(message) + body_start = message.find(b'\n\n') + 2 + if body_start - 2 != -1: + self._file.write(message[:body_start].replace(b'\n', linesep)) + self._file.write(b'*** EOOH ***' + linesep) + self._file.write(message[:body_start].replace(b'\n', linesep)) + self._file.write(message[body_start:].replace(b'\n', linesep)) + else: + self._file.write(b'*** EOOH ***' + linesep + linesep) + self._file.write(message.replace(b'\n', linesep)) + elif hasattr(message, 'readline'): + if hasattr(message, 'buffer'): + warnings.warn("Use of text mode files is deprecated, " + "use a binary mode file instead", DeprecationWarning, 3) + message = message.buffer + original_pos = message.tell() + first_pass = True + while True: + line = message.readline() + # Universal newline support. + if line.endswith(b'\r\n'): + line = line[:-2] + b'\n' + elif line.endswith(b'\r'): + line = line[:-1] + b'\n' + self._file.write(line.replace(b'\n', linesep)) + if line == b'\n' or not line: + if first_pass: + first_pass = False + self._file.write(b'*** EOOH ***' + linesep) + message.seek(original_pos) + else: + break + while True: + line = message.readline() + if not line: + break + # Universal newline support. + if line.endswith(b'\r\n'): + line = line[:-2] + linesep + elif line.endswith(b'\r'): + line = line[:-1] + linesep + elif line.endswith(b'\n'): + line = line[:-1] + linesep + self._file.write(line) + else: + raise TypeError('Invalid message type: %s' % type(message)) + stop = self._file.tell() + return (start, stop) + + +class Message(email.message.Message): + """Message with mailbox-format-specific properties.""" + + def __init__(self, message=None): + """Initialize a Message instance.""" + if isinstance(message, email.message.Message): + self._become_message(copy.deepcopy(message)) + if isinstance(message, Message): + message._explain_to(self) + elif isinstance(message, bytes): + self._become_message(email.message_from_bytes(message)) + elif isinstance(message, str): + self._become_message(email.message_from_string(message)) + elif isinstance(message, io.TextIOWrapper): + self._become_message(email.message_from_file(message)) + elif hasattr(message, "read"): + self._become_message(email.message_from_binary_file(message)) + elif message is None: + email.message.Message.__init__(self) + else: + raise TypeError('Invalid message type: %s' % type(message)) + + def _become_message(self, message): + """Assume the non-format-specific state of message.""" + type_specific = getattr(message, '_type_specific_attributes', []) + for name in message.__dict__: + if name not in type_specific: + self.__dict__[name] = message.__dict__[name] + + def _explain_to(self, message): + """Copy format-specific state to message insofar as possible.""" + if isinstance(message, Message): + return # There's nothing format-specific to explain. 
+ else: + raise TypeError('Cannot convert to specified type') + + +class MaildirMessage(Message): + """Message with Maildir-specific properties.""" + + _type_specific_attributes = ['_subdir', '_info', '_date'] + + def __init__(self, message=None): + """Initialize a MaildirMessage instance.""" + self._subdir = 'new' + self._info = '' + self._date = time.time() + Message.__init__(self, message) + + def get_subdir(self): + """Return 'new' or 'cur'.""" + return self._subdir + + def set_subdir(self, subdir): + """Set subdir to 'new' or 'cur'.""" + if subdir == 'new' or subdir == 'cur': + self._subdir = subdir + else: + raise ValueError("subdir must be 'new' or 'cur': %s" % subdir) + + def get_flags(self): + """Return as a string the flags that are set.""" + if self._info.startswith('2,'): + return self._info[2:] + else: + return '' + + def set_flags(self, flags): + """Set the given flags and unset all others.""" + self._info = '2,' + ''.join(sorted(flags)) + + def add_flag(self, flag): + """Set the given flag(s) without changing others.""" + self.set_flags(''.join(set(self.get_flags()) | set(flag))) + + def remove_flag(self, flag): + """Unset the given string flag(s) without changing others.""" + if self.get_flags(): + self.set_flags(''.join(set(self.get_flags()) - set(flag))) + + def get_date(self): + """Return delivery date of message, in seconds since the epoch.""" + return self._date + + def set_date(self, date): + """Set delivery date of message, in seconds since the epoch.""" + try: + self._date = float(date) + except ValueError: + raise TypeError("can't convert to float: %s" % date) from None + + def get_info(self): + """Get the message's "info" as a string.""" + return self._info + + def set_info(self, info): + """Set the message's "info" string.""" + if isinstance(info, str): + self._info = info + else: + raise TypeError('info must be a string: %s' % type(info)) + + def _explain_to(self, message): + """Copy Maildir-specific state to message insofar as possible.""" + if isinstance(message, MaildirMessage): + message.set_flags(self.get_flags()) + message.set_subdir(self.get_subdir()) + message.set_date(self.get_date()) + elif isinstance(message, _mboxMMDFMessage): + flags = set(self.get_flags()) + if 'S' in flags: + message.add_flag('R') + if self.get_subdir() == 'cur': + message.add_flag('O') + if 'T' in flags: + message.add_flag('D') + if 'F' in flags: + message.add_flag('F') + if 'R' in flags: + message.add_flag('A') + message.set_from('MAILER-DAEMON', time.gmtime(self.get_date())) + elif isinstance(message, MHMessage): + flags = set(self.get_flags()) + if 'S' not in flags: + message.add_sequence('unseen') + if 'R' in flags: + message.add_sequence('replied') + if 'F' in flags: + message.add_sequence('flagged') + elif isinstance(message, BabylMessage): + flags = set(self.get_flags()) + if 'S' not in flags: + message.add_label('unseen') + if 'T' in flags: + message.add_label('deleted') + if 'R' in flags: + message.add_label('answered') + if 'P' in flags: + message.add_label('forwarded') + elif isinstance(message, Message): + pass + else: + raise TypeError('Cannot convert to specified type: %s' % + type(message)) + + +class _mboxMMDFMessage(Message): + """Message with mbox- or MMDF-specific properties.""" + + _type_specific_attributes = ['_from'] + + def __init__(self, message=None): + """Initialize an mboxMMDFMessage instance.""" + self.set_from('MAILER-DAEMON', True) + if isinstance(message, email.message.Message): + unixfrom = message.get_unixfrom() + if unixfrom is not None and 
unixfrom.startswith('From '): + self.set_from(unixfrom[5:]) + Message.__init__(self, message) + + def get_from(self): + """Return contents of "From " line.""" + return self._from + + def set_from(self, from_, time_=None): + """Set "From " line, formatting and appending time_ if specified.""" + if time_ is not None: + if time_ is True: + time_ = time.gmtime() + from_ += ' ' + time.asctime(time_) + self._from = from_ + + def get_flags(self): + """Return as a string the flags that are set.""" + return self.get('Status', '') + self.get('X-Status', '') + + def set_flags(self, flags): + """Set the given flags and unset all others.""" + flags = set(flags) + status_flags, xstatus_flags = '', '' + for flag in ('R', 'O'): + if flag in flags: + status_flags += flag + flags.remove(flag) + for flag in ('D', 'F', 'A'): + if flag in flags: + xstatus_flags += flag + flags.remove(flag) + xstatus_flags += ''.join(sorted(flags)) + try: + self.replace_header('Status', status_flags) + except KeyError: + self.add_header('Status', status_flags) + try: + self.replace_header('X-Status', xstatus_flags) + except KeyError: + self.add_header('X-Status', xstatus_flags) + + def add_flag(self, flag): + """Set the given flag(s) without changing others.""" + self.set_flags(''.join(set(self.get_flags()) | set(flag))) + + def remove_flag(self, flag): + """Unset the given string flag(s) without changing others.""" + if 'Status' in self or 'X-Status' in self: + self.set_flags(''.join(set(self.get_flags()) - set(flag))) + + def _explain_to(self, message): + """Copy mbox- or MMDF-specific state to message insofar as possible.""" + if isinstance(message, MaildirMessage): + flags = set(self.get_flags()) + if 'O' in flags: + message.set_subdir('cur') + if 'F' in flags: + message.add_flag('F') + if 'A' in flags: + message.add_flag('R') + if 'R' in flags: + message.add_flag('S') + if 'D' in flags: + message.add_flag('T') + del message['status'] + del message['x-status'] + maybe_date = ' '.join(self.get_from().split()[-5:]) + try: + message.set_date(calendar.timegm(time.strptime(maybe_date, + '%a %b %d %H:%M:%S %Y'))) + except (ValueError, OverflowError): + pass + elif isinstance(message, _mboxMMDFMessage): + message.set_flags(self.get_flags()) + message.set_from(self.get_from()) + elif isinstance(message, MHMessage): + flags = set(self.get_flags()) + if 'R' not in flags: + message.add_sequence('unseen') + if 'A' in flags: + message.add_sequence('replied') + if 'F' in flags: + message.add_sequence('flagged') + del message['status'] + del message['x-status'] + elif isinstance(message, BabylMessage): + flags = set(self.get_flags()) + if 'R' not in flags: + message.add_label('unseen') + if 'D' in flags: + message.add_label('deleted') + if 'A' in flags: + message.add_label('answered') + del message['status'] + del message['x-status'] + elif isinstance(message, Message): + pass + else: + raise TypeError('Cannot convert to specified type: %s' % + type(message)) + + +class mboxMessage(_mboxMMDFMessage): + """Message with mbox-specific properties.""" + + +class MHMessage(Message): + """Message with MH-specific properties.""" + + _type_specific_attributes = ['_sequences'] + + def __init__(self, message=None): + """Initialize an MHMessage instance.""" + self._sequences = [] + Message.__init__(self, message) + + def get_sequences(self): + """Return a list of sequences that include the message.""" + return self._sequences[:] + + def set_sequences(self, sequences): + """Set the list of sequences that include the message.""" + self._sequences = 
list(sequences) + + def add_sequence(self, sequence): + """Add sequence to list of sequences including the message.""" + if isinstance(sequence, str): + if not sequence in self._sequences: + self._sequences.append(sequence) + else: + raise TypeError('sequence type must be str: %s' % type(sequence)) + + def remove_sequence(self, sequence): + """Remove sequence from the list of sequences including the message.""" + try: + self._sequences.remove(sequence) + except ValueError: + pass + + def _explain_to(self, message): + """Copy MH-specific state to message insofar as possible.""" + if isinstance(message, MaildirMessage): + sequences = set(self.get_sequences()) + if 'unseen' in sequences: + message.set_subdir('cur') + else: + message.set_subdir('cur') + message.add_flag('S') + if 'flagged' in sequences: + message.add_flag('F') + if 'replied' in sequences: + message.add_flag('R') + elif isinstance(message, _mboxMMDFMessage): + sequences = set(self.get_sequences()) + if 'unseen' not in sequences: + message.add_flag('RO') + else: + message.add_flag('O') + if 'flagged' in sequences: + message.add_flag('F') + if 'replied' in sequences: + message.add_flag('A') + elif isinstance(message, MHMessage): + for sequence in self.get_sequences(): + message.add_sequence(sequence) + elif isinstance(message, BabylMessage): + sequences = set(self.get_sequences()) + if 'unseen' in sequences: + message.add_label('unseen') + if 'replied' in sequences: + message.add_label('answered') + elif isinstance(message, Message): + pass + else: + raise TypeError('Cannot convert to specified type: %s' % + type(message)) + + +class BabylMessage(Message): + """Message with Babyl-specific properties.""" + + _type_specific_attributes = ['_labels', '_visible'] + + def __init__(self, message=None): + """Initialize a BabylMessage instance.""" + self._labels = [] + self._visible = Message() + Message.__init__(self, message) + + def get_labels(self): + """Return a list of labels on the message.""" + return self._labels[:] + + def set_labels(self, labels): + """Set the list of labels on the message.""" + self._labels = list(labels) + + def add_label(self, label): + """Add label to list of labels on the message.""" + if isinstance(label, str): + if label not in self._labels: + self._labels.append(label) + else: + raise TypeError('label must be a string: %s' % type(label)) + + def remove_label(self, label): + """Remove label from the list of labels on the message.""" + try: + self._labels.remove(label) + except ValueError: + pass + + def get_visible(self): + """Return a Message representation of visible headers.""" + return Message(self._visible) + + def set_visible(self, visible): + """Set the Message representation of visible headers.""" + self._visible = Message(visible) + + def update_visible(self): + """Update and/or sensibly generate a set of visible headers.""" + for header in self._visible.keys(): + if header in self: + self._visible.replace_header(header, self[header]) + else: + del self._visible[header] + for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'): + if header in self and header not in self._visible: + self._visible[header] = self[header] + + def _explain_to(self, message): + """Copy Babyl-specific state to message insofar as possible.""" + if isinstance(message, MaildirMessage): + labels = set(self.get_labels()) + if 'unseen' in labels: + message.set_subdir('cur') + else: + message.set_subdir('cur') + message.add_flag('S') + if 'forwarded' in labels or 'resent' in labels: + message.add_flag('P') + if 
'answered' in labels: + message.add_flag('R') + if 'deleted' in labels: + message.add_flag('T') + elif isinstance(message, _mboxMMDFMessage): + labels = set(self.get_labels()) + if 'unseen' not in labels: + message.add_flag('RO') + else: + message.add_flag('O') + if 'deleted' in labels: + message.add_flag('D') + if 'answered' in labels: + message.add_flag('A') + elif isinstance(message, MHMessage): + labels = set(self.get_labels()) + if 'unseen' in labels: + message.add_sequence('unseen') + if 'answered' in labels: + message.add_sequence('replied') + elif isinstance(message, BabylMessage): + message.set_visible(self.get_visible()) + for label in self.get_labels(): + message.add_label(label) + elif isinstance(message, Message): + pass + else: + raise TypeError('Cannot convert to specified type: %s' % + type(message)) + + +class MMDFMessage(_mboxMMDFMessage): + """Message with MMDF-specific properties.""" + + +class _ProxyFile: + """A read-only wrapper of a file.""" + + def __init__(self, f, pos=None): + """Initialize a _ProxyFile.""" + self._file = f + if pos is None: + self._pos = f.tell() + else: + self._pos = pos + + def read(self, size=None): + """Read bytes.""" + return self._read(size, self._file.read) + + def read1(self, size=None): + """Read bytes.""" + return self._read(size, self._file.read1) + + def readline(self, size=None): + """Read a line.""" + return self._read(size, self._file.readline) + + def readlines(self, sizehint=None): + """Read multiple lines.""" + result = [] + for line in self: + result.append(line) + if sizehint is not None: + sizehint -= len(line) + if sizehint <= 0: + break + return result + + def __iter__(self): + """Iterate over lines.""" + while True: + line = self.readline() + if not line: + return + yield line + + def tell(self): + """Return the position.""" + return self._pos + + def seek(self, offset, whence=0): + """Change position.""" + if whence == 1: + self._file.seek(self._pos) + self._file.seek(offset, whence) + self._pos = self._file.tell() + + def close(self): + """Close the file.""" + if hasattr(self, '_file'): + try: + if hasattr(self._file, 'close'): + self._file.close() + finally: + del self._file + + def _read(self, size, read_method): + """Read size bytes using read_method.""" + if size is None: + size = -1 + self._file.seek(self._pos) + result = read_method(size) + self._pos = self._file.tell() + return result + + def __enter__(self): + """Context management protocol support.""" + return self + + def __exit__(self, *exc): + self.close() + + def readable(self): + return self._file.readable() + + def writable(self): + return self._file.writable() + + def seekable(self): + return self._file.seekable() + + def flush(self): + return self._file.flush() + + @property + def closed(self): + if not hasattr(self, '_file'): + return True + if not hasattr(self._file, 'closed'): + return False + return self._file.closed + + __class_getitem__ = classmethod(GenericAlias) + + +class _PartialFile(_ProxyFile): + """A read-only wrapper of part of a file.""" + + def __init__(self, f, start=None, stop=None): + """Initialize a _PartialFile.""" + _ProxyFile.__init__(self, f, start) + self._start = start + self._stop = stop + + def tell(self): + """Return the position with respect to start.""" + return _ProxyFile.tell(self) - self._start + + def seek(self, offset, whence=0): + """Change position, possibly with respect to start or stop.""" + if whence == 0: + self._pos = self._start + whence = 1 + elif whence == 2: + self._pos = self._stop + whence = 1 + 
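+ # Here whence is always 1 (0 and 2 were rewritten above), so the
+ # delegated seek applies the offset relative to self._pos (start,
+ # stop, or the current position).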
_ProxyFile.seek(self, offset, whence) + + def _read(self, size, read_method): + """Read size bytes using read_method, honoring start and stop.""" + remaining = self._stop - self._pos + if remaining <= 0: + return b'' + if size is None or size < 0 or size > remaining: + size = remaining + return _ProxyFile._read(self, size, read_method) + + def close(self): + # do *not* close the underlying file object for partial files, + # since it's global to the mailbox object + if hasattr(self, '_file'): + del self._file + + +def _lock_file(f, dotlock=True): + """Lock file f using lockf and dot locking.""" + dotlock_done = False + try: + if fcntl: + try: + fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB) + except OSError as e: + if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS): + raise ExternalClashError('lockf: lock unavailable: %s' % + f.name) + else: + raise + if dotlock: + try: + pre_lock = _create_temporary(f.name + '.lock') + pre_lock.close() + except OSError as e: + if e.errno in (errno.EACCES, errno.EROFS): + return # Without write access, just skip dotlocking. + else: + raise + try: + try: + os.link(pre_lock.name, f.name + '.lock') + dotlock_done = True + except (AttributeError, PermissionError): + os.rename(pre_lock.name, f.name + '.lock') + dotlock_done = True + else: + os.unlink(pre_lock.name) + except FileExistsError: + os.remove(pre_lock.name) + raise ExternalClashError('dot lock unavailable: %s' % + f.name) + except: + if fcntl: + fcntl.lockf(f, fcntl.LOCK_UN) + if dotlock_done: + os.remove(f.name + '.lock') + raise + +def _unlock_file(f): + """Unlock file f using lockf and dot locking.""" + if fcntl: + fcntl.lockf(f, fcntl.LOCK_UN) + if os.path.exists(f.name + '.lock'): + os.remove(f.name + '.lock') + +def _create_carefully(path): + """Create a file if it doesn't exist and open for reading and writing.""" + fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o666) + try: + return open(path, 'rb+') + finally: + os.close(fd) + +def _create_temporary(path): + """Create a temp file based on path and open for reading and writing.""" + return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()), + socket.gethostname(), + os.getpid())) + +def _sync_flush(f): + """Ensure changes to file f are physically on disk.""" + f.flush() + if hasattr(os, 'fsync'): + os.fsync(f.fileno()) + +def _sync_close(f): + """Close file f, ensuring all changes are physically on disk.""" + _sync_flush(f) + f.close() + + +class Error(Exception): + """Raised for module-specific errors.""" + +class NoSuchMailboxError(Error): + """The specified mailbox does not exist and won't be created.""" + +class NotEmptyError(Error): + """The specified mailbox is not empty and deletion was requested.""" + +class ExternalClashError(Error): + """Another process caused an action to fail.""" + +class FormatError(Error): + """A file appears to have an invalid format.""" diff --git a/pllava/lib/python3.10/modulefinder.py b/pllava/lib/python3.10/modulefinder.py new file mode 100644 index 0000000000000000000000000000000000000000..cb455f40c4d7894ef73ab25bed6659e917565394 --- /dev/null +++ b/pllava/lib/python3.10/modulefinder.py @@ -0,0 +1,685 @@ +"""Find modules used by a script, using introspection.""" + +import dis +import importlib._bootstrap_external +import importlib.machinery +import marshal +import os +import io +import sys + + +LOAD_CONST = dis.opmap['LOAD_CONST'] +IMPORT_NAME = dis.opmap['IMPORT_NAME'] +STORE_NAME = dis.opmap['STORE_NAME'] +STORE_GLOBAL = dis.opmap['STORE_GLOBAL'] +STORE_OPS = STORE_NAME, STORE_GLOBAL 
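+ # These opcode constants (with EXTENDED_ARG just below) drive
+ # scan_opcodes(), which looks for LOAD_CONST/LOAD_CONST/IMPORT_NAME
+ # triples and for stores that create module-level names.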
+EXTENDED_ARG = dis.EXTENDED_ARG + +# Old imp constants: + +_SEARCH_ERROR = 0 +_PY_SOURCE = 1 +_PY_COMPILED = 2 +_C_EXTENSION = 3 +_PKG_DIRECTORY = 5 +_C_BUILTIN = 6 +_PY_FROZEN = 7 + +# Modulefinder does a good job at simulating Python's, but it can not +# handle __path__ modifications packages make at runtime. Therefore there +# is a mechanism whereby you can register extra paths in this map for a +# package, and it will be honored. + +# Note this is a mapping is lists of paths. +packagePathMap = {} + +# A Public interface +def AddPackagePath(packagename, path): + packagePathMap.setdefault(packagename, []).append(path) + +replacePackageMap = {} + +# This ReplacePackage mechanism allows modulefinder to work around +# situations in which a package injects itself under the name +# of another package into sys.modules at runtime by calling +# ReplacePackage("real_package_name", "faked_package_name") +# before running ModuleFinder. + +def ReplacePackage(oldname, newname): + replacePackageMap[oldname] = newname + + +def _find_module(name, path=None): + """An importlib reimplementation of imp.find_module (for our purposes).""" + + # It's necessary to clear the caches for our Finder first, in case any + # modules are being added/deleted/modified at runtime. In particular, + # test_modulefinder.py changes file tree contents in a cache-breaking way: + + importlib.machinery.PathFinder.invalidate_caches() + + spec = importlib.machinery.PathFinder.find_spec(name, path) + + if spec is None: + raise ImportError("No module named {name!r}".format(name=name), name=name) + + # Some special cases: + + if spec.loader is importlib.machinery.BuiltinImporter: + return None, None, ("", "", _C_BUILTIN) + + if spec.loader is importlib.machinery.FrozenImporter: + return None, None, ("", "", _PY_FROZEN) + + file_path = spec.origin + + if spec.loader.is_package(name): + return None, os.path.dirname(file_path), ("", "", _PKG_DIRECTORY) + + if isinstance(spec.loader, importlib.machinery.SourceFileLoader): + kind = _PY_SOURCE + + elif isinstance(spec.loader, importlib.machinery.ExtensionFileLoader): + kind = _C_EXTENSION + + elif isinstance(spec.loader, importlib.machinery.SourcelessFileLoader): + kind = _PY_COMPILED + + else: # Should never happen. + return None, None, ("", "", _SEARCH_ERROR) + + file = io.open_code(file_path) + suffix = os.path.splitext(file_path)[-1] + + return file, file_path, (suffix, "rb", kind) + + +class Module: + + def __init__(self, name, file=None, path=None): + self.__name__ = name + self.__file__ = file + self.__path__ = path + self.__code__ = None + # The set of global names that are assigned to in the module. + # This includes those names imported through starimports of + # Python modules. + self.globalnames = {} + # The set of starimports this module did that could not be + # resolved, ie. a starimport from a non-Python module. 
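+ # Like globalnames, this dict is used as a set: name -> 1.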
+ self.starimports = {} + + def __repr__(self): + s = "Module(%r" % (self.__name__,) + if self.__file__ is not None: + s = s + ", %r" % (self.__file__,) + if self.__path__ is not None: + s = s + ", %r" % (self.__path__,) + s = s + ")" + return s + +class ModuleFinder: + + def __init__(self, path=None, debug=0, excludes=None, replace_paths=None): + if path is None: + path = sys.path + self.path = path + self.modules = {} + self.badmodules = {} + self.debug = debug + self.indent = 0 + self.excludes = excludes if excludes is not None else [] + self.replace_paths = replace_paths if replace_paths is not None else [] + self.processed_paths = [] # Used in debugging only + + def msg(self, level, str, *args): + if level <= self.debug: + for i in range(self.indent): + print(" ", end=' ') + print(str, end=' ') + for arg in args: + print(repr(arg), end=' ') + print() + + def msgin(self, *args): + level = args[0] + if level <= self.debug: + self.indent = self.indent + 1 + self.msg(*args) + + def msgout(self, *args): + level = args[0] + if level <= self.debug: + self.indent = self.indent - 1 + self.msg(*args) + + def run_script(self, pathname): + self.msg(2, "run_script", pathname) + with io.open_code(pathname) as fp: + stuff = ("", "rb", _PY_SOURCE) + self.load_module('__main__', fp, pathname, stuff) + + def load_file(self, pathname): + dir, name = os.path.split(pathname) + name, ext = os.path.splitext(name) + with io.open_code(pathname) as fp: + stuff = (ext, "rb", _PY_SOURCE) + self.load_module(name, fp, pathname, stuff) + + def import_hook(self, name, caller=None, fromlist=None, level=-1): + self.msg(3, "import_hook", name, caller, fromlist, level) + parent = self.determine_parent(caller, level=level) + q, tail = self.find_head_package(parent, name) + m = self.load_tail(q, tail) + if not fromlist: + return q + if m.__path__: + self.ensure_fromlist(m, fromlist) + return None + + def determine_parent(self, caller, level=-1): + self.msgin(4, "determine_parent", caller, level) + if not caller or level == 0: + self.msgout(4, "determine_parent -> None") + return None + pname = caller.__name__ + if level >= 1: # relative import + if caller.__path__: + level -= 1 + if level == 0: + parent = self.modules[pname] + assert parent is caller + self.msgout(4, "determine_parent ->", parent) + return parent + if pname.count(".") < level: + raise ImportError("relative importpath too deep") + pname = ".".join(pname.split(".")[:-level]) + parent = self.modules[pname] + self.msgout(4, "determine_parent ->", parent) + return parent + if caller.__path__: + parent = self.modules[pname] + assert caller is parent + self.msgout(4, "determine_parent ->", parent) + return parent + if '.' in pname: + i = pname.rfind('.') + pname = pname[:i] + parent = self.modules[pname] + assert parent.__name__ == pname + self.msgout(4, "determine_parent ->", parent) + return parent + self.msgout(4, "determine_parent -> None") + return None + + def find_head_package(self, parent, name): + self.msgin(4, "find_head_package", parent, name) + if '.' 
in name: + i = name.find('.') + head = name[:i] + tail = name[i+1:] + else: + head = name + tail = "" + if parent: + qname = "%s.%s" % (parent.__name__, head) + else: + qname = head + q = self.import_module(head, qname, parent) + if q: + self.msgout(4, "find_head_package ->", (q, tail)) + return q, tail + if parent: + qname = head + parent = None + q = self.import_module(head, qname, parent) + if q: + self.msgout(4, "find_head_package ->", (q, tail)) + return q, tail + self.msgout(4, "raise ImportError: No module named", qname) + raise ImportError("No module named " + qname) + + def load_tail(self, q, tail): + self.msgin(4, "load_tail", q, tail) + m = q + while tail: + i = tail.find('.') + if i < 0: i = len(tail) + head, tail = tail[:i], tail[i+1:] + mname = "%s.%s" % (m.__name__, head) + m = self.import_module(head, mname, m) + if not m: + self.msgout(4, "raise ImportError: No module named", mname) + raise ImportError("No module named " + mname) + self.msgout(4, "load_tail ->", m) + return m + + def ensure_fromlist(self, m, fromlist, recursive=0): + self.msg(4, "ensure_fromlist", m, fromlist, recursive) + for sub in fromlist: + if sub == "*": + if not recursive: + all = self.find_all_submodules(m) + if all: + self.ensure_fromlist(m, all, 1) + elif not hasattr(m, sub): + subname = "%s.%s" % (m.__name__, sub) + submod = self.import_module(sub, subname, m) + if not submod: + raise ImportError("No module named " + subname) + + def find_all_submodules(self, m): + if not m.__path__: + return + modules = {} + # 'suffixes' used to be a list hardcoded to [".py", ".pyc"]. + # But we must also collect Python extension modules - although + # we cannot separate normal dlls from Python extensions. + suffixes = [] + suffixes += importlib.machinery.EXTENSION_SUFFIXES[:] + suffixes += importlib.machinery.SOURCE_SUFFIXES[:] + suffixes += importlib.machinery.BYTECODE_SUFFIXES[:] + for dir in m.__path__: + try: + names = os.listdir(dir) + except OSError: + self.msg(2, "can't list directory", dir) + continue + for name in names: + mod = None + for suff in suffixes: + n = len(suff) + if name[-n:] == suff: + mod = name[:-n] + break + if mod and mod != "__init__": + modules[mod] = mod + return modules.keys() + + def import_module(self, partname, fqname, parent): + self.msgin(3, "import_module", partname, fqname, parent) + try: + m = self.modules[fqname] + except KeyError: + pass + else: + self.msgout(3, "import_module ->", m) + return m + if fqname in self.badmodules: + self.msgout(3, "import_module -> None") + return None + if parent and parent.__path__ is None: + self.msgout(3, "import_module -> None") + return None + try: + fp, pathname, stuff = self.find_module(partname, + parent and parent.__path__, parent) + except ImportError: + self.msgout(3, "import_module ->", None) + return None + + try: + m = self.load_module(fqname, fp, pathname, stuff) + finally: + if fp: + fp.close() + if parent: + setattr(parent, partname, m) + self.msgout(3, "import_module ->", m) + return m + + def load_module(self, fqname, fp, pathname, file_info): + suffix, mode, type = file_info + self.msgin(2, "load_module", fqname, fp and "fp", pathname) + if type == _PKG_DIRECTORY: + m = self.load_package(fqname, pathname) + self.msgout(2, "load_module ->", m) + return m + if type == _PY_SOURCE: + co = compile(fp.read(), pathname, 'exec') + elif type == _PY_COMPILED: + try: + data = fp.read() + importlib._bootstrap_external._classify_pyc(data, fqname, {}) + except ImportError as exc: + self.msgout(2, "raise ImportError: " + str(exc), 
pathname) + raise + co = marshal.loads(memoryview(data)[16:]) + else: + co = None + m = self.add_module(fqname) + m.__file__ = pathname + if co: + if self.replace_paths: + co = self.replace_paths_in_code(co) + m.__code__ = co + self.scan_code(co, m) + self.msgout(2, "load_module ->", m) + return m + + def _add_badmodule(self, name, caller): + if name not in self.badmodules: + self.badmodules[name] = {} + if caller: + self.badmodules[name][caller.__name__] = 1 + else: + self.badmodules[name]["-"] = 1 + + def _safe_import_hook(self, name, caller, fromlist, level=-1): + # wrapper for self.import_hook() that won't raise ImportError + if name in self.badmodules: + self._add_badmodule(name, caller) + return + try: + self.import_hook(name, caller, level=level) + except ImportError as msg: + self.msg(2, "ImportError:", str(msg)) + self._add_badmodule(name, caller) + except SyntaxError as msg: + self.msg(2, "SyntaxError:", str(msg)) + self._add_badmodule(name, caller) + else: + if fromlist: + for sub in fromlist: + fullname = name + "." + sub + if fullname in self.badmodules: + self._add_badmodule(fullname, caller) + continue + try: + self.import_hook(name, caller, [sub], level=level) + except ImportError as msg: + self.msg(2, "ImportError:", str(msg)) + self._add_badmodule(fullname, caller) + + def scan_opcodes(self, co): + # Scan the code, and yield 'interesting' opcode combinations + code = co.co_code + names = co.co_names + consts = co.co_consts + opargs = [(op, arg) for _, op, arg in dis._unpack_opargs(code) + if op != EXTENDED_ARG] + for i, (op, oparg) in enumerate(opargs): + if op in STORE_OPS: + yield "store", (names[oparg],) + continue + if (op == IMPORT_NAME and i >= 2 + and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST): + level = consts[opargs[i-2][1]] + fromlist = consts[opargs[i-1][1]] + if level == 0: # absolute import + yield "absolute_import", (fromlist, names[oparg]) + else: # relative import + yield "relative_import", (level, fromlist, names[oparg]) + continue + + def scan_code(self, co, m): + code = co.co_code + scanner = self.scan_opcodes + for what, args in scanner(co): + if what == "store": + name, = args + m.globalnames[name] = 1 + elif what == "absolute_import": + fromlist, name = args + have_star = 0 + if fromlist is not None: + if "*" in fromlist: + have_star = 1 + fromlist = [f for f in fromlist if f != "*"] + self._safe_import_hook(name, m, fromlist, level=0) + if have_star: + # We've encountered an "import *". If it is a Python module, + # the code has already been parsed and we can suck out the + # global names. + mm = None + if m.__path__: + # At this point we don't know whether 'name' is a + # submodule of 'm' or a global module. Let's just try + # the full name first. + mm = self.modules.get(m.__name__ + "." + name) + if mm is None: + mm = self.modules.get(name) + if mm is not None: + m.globalnames.update(mm.globalnames) + m.starimports.update(mm.starimports) + if mm.__code__ is None: + m.starimports[name] = 1 + else: + m.starimports[name] = 1 + elif what == "relative_import": + level, fromlist, name = args + if name: + self._safe_import_hook(name, m, fromlist, level=level) + else: + parent = self.determine_parent(m, level=level) + self._safe_import_hook(parent.__name__, None, fromlist, level=0) + else: + # We don't expect anything else from the generator. 
+ raise RuntimeError(what) + + for c in co.co_consts: + if isinstance(c, type(co)): + self.scan_code(c, m) + + def load_package(self, fqname, pathname): + self.msgin(2, "load_package", fqname, pathname) + newname = replacePackageMap.get(fqname) + if newname: + fqname = newname + m = self.add_module(fqname) + m.__file__ = pathname + m.__path__ = [pathname] + + # As per comment at top of file, simulate runtime __path__ additions. + m.__path__ = m.__path__ + packagePathMap.get(fqname, []) + + fp, buf, stuff = self.find_module("__init__", m.__path__) + try: + self.load_module(fqname, fp, buf, stuff) + self.msgout(2, "load_package ->", m) + return m + finally: + if fp: + fp.close() + + def add_module(self, fqname): + if fqname in self.modules: + return self.modules[fqname] + self.modules[fqname] = m = Module(fqname) + return m + + def find_module(self, name, path, parent=None): + if parent is not None: + # assert path is not None + fullname = parent.__name__+'.'+name + else: + fullname = name + if fullname in self.excludes: + self.msgout(3, "find_module -> Excluded", fullname) + raise ImportError(name) + + if path is None: + if name in sys.builtin_module_names: + return (None, None, ("", "", _C_BUILTIN)) + + path = self.path + + return _find_module(name, path) + + def report(self): + """Print a report to stdout, listing the found modules with their + paths, as well as modules that are missing, or seem to be missing. + """ + print() + print(" %-25s %s" % ("Name", "File")) + print(" %-25s %s" % ("----", "----")) + # Print modules found + keys = sorted(self.modules.keys()) + for key in keys: + m = self.modules[key] + if m.__path__: + print("P", end=' ') + else: + print("m", end=' ') + print("%-25s" % key, m.__file__ or "") + + # Print missing modules + missing, maybe = self.any_missing_maybe() + if missing: + print() + print("Missing modules:") + for name in missing: + mods = sorted(self.badmodules[name].keys()) + print("?", name, "imported from", ', '.join(mods)) + # Print modules that may be missing, but then again, maybe not... + if maybe: + print() + print("Submodules that appear to be missing, but could also be", end=' ') + print("global names in the parent package:") + for name in maybe: + mods = sorted(self.badmodules[name].keys()) + print("?", name, "imported from", ', '.join(mods)) + + def any_missing(self): + """Return a list of modules that appear to be missing. Use + any_missing_maybe() if you want to know which modules are + certain to be missing, and which *may* be missing. + """ + missing, maybe = self.any_missing_maybe() + return missing + maybe + + def any_missing_maybe(self): + """Return two lists, one with modules that are certainly missing + and one with modules that *may* be missing. The latter names could + either be submodules *or* just global names in the package. + + The reason it can't always be determined is that it's impossible to + tell which names are imported when "from module import *" is done + with an extension module, short of actually importing it. + """ + missing = [] + maybe = [] + for name in self.badmodules: + if name in self.excludes: + continue + i = name.rfind(".") + if i < 0: + missing.append(name) + continue + subname = name[i+1:] + pkgname = name[:i] + pkg = self.modules.get(pkgname) + if pkg is not None: + if pkgname in self.badmodules[name]: + # The package tried to import this module itself and + # failed. It's definitely missing. + missing.append(name) + elif subname in pkg.globalnames: + # It's a global in the package: definitely not missing. 
+ pass + elif pkg.starimports: + # It could be missing, but the package did an "import *" + # from a non-Python module, so we simply can't be sure. + maybe.append(name) + else: + # It's not a global in the package, the package didn't + # do funny star imports, it's very likely to be missing. + # The symbol could be inserted into the package from the + # outside, but since that's not good style we simply list + # it missing. + missing.append(name) + else: + missing.append(name) + missing.sort() + maybe.sort() + return missing, maybe + + def replace_paths_in_code(self, co): + new_filename = original_filename = os.path.normpath(co.co_filename) + for f, r in self.replace_paths: + if original_filename.startswith(f): + new_filename = r + original_filename[len(f):] + break + + if self.debug and original_filename not in self.processed_paths: + if new_filename != original_filename: + self.msgout(2, "co_filename %r changed to %r" \ + % (original_filename,new_filename,)) + else: + self.msgout(2, "co_filename %r remains unchanged" \ + % (original_filename,)) + self.processed_paths.append(original_filename) + + consts = list(co.co_consts) + for i in range(len(consts)): + if isinstance(consts[i], type(co)): + consts[i] = self.replace_paths_in_code(consts[i]) + + return co.replace(co_consts=tuple(consts), co_filename=new_filename) + + +def test(): + # Parse command line + import getopt + try: + opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:") + except getopt.error as msg: + print(msg) + return + + # Process options + debug = 1 + domods = 0 + addpath = [] + exclude = [] + for o, a in opts: + if o == '-d': + debug = debug + 1 + if o == '-m': + domods = 1 + if o == '-p': + addpath = addpath + a.split(os.pathsep) + if o == '-q': + debug = 0 + if o == '-x': + exclude.append(a) + + # Provide default arguments + if not args: + script = "hello.py" + else: + script = args[0] + + # Set the path based on sys.path and the script directory + path = sys.path[:] + path[0] = os.path.dirname(script) + path = addpath + path + if debug > 1: + print("path:") + for item in path: + print(" ", repr(item)) + + # Create the module finder and turn its crank + mf = ModuleFinder(path, debug, exclude) + for arg in args[1:]: + if arg == '-m': + domods = 1 + continue + if domods: + if arg[-2:] == '.*': + mf.import_hook(arg[:-2], None, ["*"]) + else: + mf.import_hook(arg) + else: + mf.load_file(arg) + mf.run_script(script) + mf.report() + return mf # for -i debugging + + +if __name__ == '__main__': + try: + mf = test() + except KeyboardInterrupt: + print("\n[interrupted]") diff --git a/pllava/lib/python3.10/netrc.py b/pllava/lib/python3.10/netrc.py new file mode 100644 index 0000000000000000000000000000000000000000..734d94c8a628535274a58a84c272a9e4924434e5 --- /dev/null +++ b/pllava/lib/python3.10/netrc.py @@ -0,0 +1,143 @@ +"""An object-oriented interface to .netrc files.""" + +# Module and documentation by Eric S. 
Raymond, 21 Dec 1998 + +import os, shlex, stat + +__all__ = ["netrc", "NetrcParseError"] + + +class NetrcParseError(Exception): + """Exception raised on syntax errors in the .netrc file.""" + def __init__(self, msg, filename=None, lineno=None): + self.filename = filename + self.lineno = lineno + self.msg = msg + Exception.__init__(self, msg) + + def __str__(self): + return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno) + + +class netrc: + def __init__(self, file=None): + default_netrc = file is None + if file is None: + file = os.path.join(os.path.expanduser("~"), ".netrc") + self.hosts = {} + self.macros = {} + try: + with open(file, encoding="utf-8") as fp: + self._parse(file, fp, default_netrc) + except UnicodeDecodeError: + with open(file, encoding="locale") as fp: + self._parse(file, fp, default_netrc) + + def _parse(self, file, fp, default_netrc): + lexer = shlex.shlex(fp) + lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" + lexer.commenters = lexer.commenters.replace('#', '') + while 1: + # Look for a machine, default, or macdef top-level keyword + saved_lineno = lexer.lineno + toplevel = tt = lexer.get_token() + if not tt: + break + elif tt[0] == '#': + if lexer.lineno == saved_lineno and len(tt) == 1: + lexer.instream.readline() + continue + elif tt == 'machine': + entryname = lexer.get_token() + elif tt == 'default': + entryname = 'default' + elif tt == 'macdef': # Just skip to end of macdefs + entryname = lexer.get_token() + self.macros[entryname] = [] + lexer.whitespace = ' \t' + while 1: + line = lexer.instream.readline() + if not line or line == '\012': + lexer.whitespace = ' \t\r\n' + break + self.macros[entryname].append(line) + continue + else: + raise NetrcParseError( + "bad toplevel token %r" % tt, file, lexer.lineno) + + # We're looking at start of an entry for a named machine or default. 
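+            # Follower tokens (login/user, account, password) are collected
+            # below until the next top-level token, at which point the
+            # completed (login, account, password) triple is stored.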
+ login = '' + account = password = None + self.hosts[entryname] = {} + while 1: + tt = lexer.get_token() + if (tt.startswith('#') or + tt in {'', 'machine', 'default', 'macdef'}): + if password: + self.hosts[entryname] = (login, account, password) + lexer.push_token(tt) + break + else: + raise NetrcParseError( + "malformed %s entry %s terminated by %s" + % (toplevel, entryname, repr(tt)), + file, lexer.lineno) + elif tt == 'login' or tt == 'user': + login = lexer.get_token() + elif tt == 'account': + account = lexer.get_token() + elif tt == 'password': + if os.name == 'posix' and default_netrc: + prop = os.fstat(fp.fileno()) + if prop.st_uid != os.getuid(): + import pwd + try: + fowner = pwd.getpwuid(prop.st_uid)[0] + except KeyError: + fowner = 'uid %s' % prop.st_uid + try: + user = pwd.getpwuid(os.getuid())[0] + except KeyError: + user = 'uid %s' % os.getuid() + raise NetrcParseError( + ("~/.netrc file owner (%s) does not match" + " current user (%s)") % (fowner, user), + file, lexer.lineno) + if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)): + raise NetrcParseError( + "~/.netrc access too permissive: access" + " permissions must restrict access to only" + " the owner", file, lexer.lineno) + password = lexer.get_token() + else: + raise NetrcParseError("bad follower token %r" % tt, + file, lexer.lineno) + + def authenticators(self, host): + """Return a (user, account, password) tuple for given host.""" + if host in self.hosts: + return self.hosts[host] + elif 'default' in self.hosts: + return self.hosts['default'] + else: + return None + + def __repr__(self): + """Dump the class data in the format of a .netrc file.""" + rep = "" + for host in self.hosts.keys(): + attrs = self.hosts[host] + rep += f"machine {host}\n\tlogin {attrs[0]}\n" + if attrs[1]: + rep += f"\taccount {attrs[1]}\n" + rep += f"\tpassword {attrs[2]}\n" + for macro in self.macros.keys(): + rep += f"macdef {macro}\n" + for line in self.macros[macro]: + rep += line + rep += "\n" + return rep + +if __name__ == '__main__': + print(netrc()) diff --git a/pllava/lib/python3.10/ntpath.py b/pllava/lib/python3.10/ntpath.py new file mode 100644 index 0000000000000000000000000000000000000000..c14e5c7ceca5deb5a712145b6fe27354e4842b9f --- /dev/null +++ b/pllava/lib/python3.10/ntpath.py @@ -0,0 +1,838 @@ +# Module 'ntpath' -- common operations on WinNT/Win95 pathnames +"""Common pathname manipulations, WindowsNT/95 version. + +Instead of importing this module directly, import os and refer to this +module as os.path. +""" + +# strings representing various path-related bits and pieces +# These are primarily for export; internally, they are hardcoded. +# Should be set before imports for resolving cyclic dependency. +curdir = '.' +pardir = '..' +extsep = '.' +sep = '\\' +pathsep = ';' +altsep = '/' +defpath = '.;C:\\bin' +devnull = 'nul' + +import os +import sys +import stat +import genericpath +from genericpath import * + + +__all__ = ["normcase","isabs","join","splitdrive","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime", "islink","exists","lexists","isdir","isfile", + "ismount", "expanduser","expandvars","normpath","abspath", + "curdir","pardir","sep","pathsep","defpath","altsep", + "extsep","devnull","realpath","supports_unicode_filenames","relpath", + "samefile", "sameopenfile", "samestat", "commonpath"] + +def _get_bothseps(path): + if isinstance(path, bytes): + return b'\\/' + else: + return '\\/' + +# Normalize the case of a pathname and map slashes to backslashes. 
+# Other normalizations (such as optimizing '../' away) are not done +# (this is done by normpath). + +try: + from _winapi import ( + LCMapStringEx as _LCMapStringEx, + LOCALE_NAME_INVARIANT as _LOCALE_NAME_INVARIANT, + LCMAP_LOWERCASE as _LCMAP_LOWERCASE) + + def normcase(s): + """Normalize case of pathname. + + Makes all characters lowercase and all slashes into backslashes. + """ + s = os.fspath(s) + if not s: + return s + if isinstance(s, bytes): + encoding = sys.getfilesystemencoding() + s = s.decode(encoding, 'surrogateescape').replace('/', '\\') + s = _LCMapStringEx(_LOCALE_NAME_INVARIANT, + _LCMAP_LOWERCASE, s) + return s.encode(encoding, 'surrogateescape') + else: + return _LCMapStringEx(_LOCALE_NAME_INVARIANT, + _LCMAP_LOWERCASE, + s.replace('/', '\\')) +except ImportError: + def normcase(s): + """Normalize case of pathname. + + Makes all characters lowercase and all slashes into backslashes. + """ + s = os.fspath(s) + if isinstance(s, bytes): + return os.fsencode(os.fsdecode(s).replace('/', '\\').lower()) + return s.replace('/', '\\').lower() + + +# Return whether a path is absolute. +# Trivial in Posix, harder on Windows. +# For Windows it is absolute if it starts with a slash or backslash (current +# volume), or if a pathname after the volume-letter-and-colon or UNC-resource +# starts with a slash or backslash. + +def isabs(s): + """Test whether a path is absolute""" + s = os.fspath(s) + # Paths beginning with \\?\ are always absolute, but do not + # necessarily contain a drive. + if isinstance(s, bytes): + if s.replace(b'/', b'\\').startswith(b'\\\\?\\'): + return True + else: + if s.replace('/', '\\').startswith('\\\\?\\'): + return True + s = splitdrive(s)[1] + return len(s) > 0 and s[0] in _get_bothseps(s) + + +# Join two (or more) paths. +def join(path, *paths): + path = os.fspath(path) + if isinstance(path, bytes): + sep = b'\\' + seps = b'\\/' + colon = b':' + else: + sep = '\\' + seps = '\\/' + colon = ':' + try: + if not paths: + path[:0] + sep #23780: Ensure compatible data type even if p is null. + result_drive, result_path = splitdrive(path) + for p in map(os.fspath, paths): + p_drive, p_path = splitdrive(p) + if p_path and p_path[0] in seps: + # Second path is absolute + if p_drive or not result_drive: + result_drive = p_drive + result_path = p_path + continue + elif p_drive and p_drive != result_drive: + if p_drive.lower() != result_drive.lower(): + # Different drives => ignore the first path entirely + result_drive = p_drive + result_path = p_path + continue + # Same drive in different case + result_drive = p_drive + # Second path is relative to the first + if result_path and result_path[-1] not in seps: + result_path = result_path + sep + result_path = result_path + p_path + ## add separator between UNC and non-absolute path + if (result_path and result_path[0] not in seps and + result_drive and result_drive[-1:] != colon): + return result_drive + sep + result_path + return result_drive + result_path + except (TypeError, AttributeError, BytesWarning): + genericpath._check_arg_types('join', path, *paths) + raise + + +# Split a path in a drive specification (a drive letter followed by a +# colon) and the path specification. +# It is always true that drivespec + pathspec == p +def splitdrive(p): + """Split a pathname into drive/UNC sharepoint and relative path specifiers. + Returns a 2-tuple (drive_or_unc, path); either part may be empty. 
+ + If you assign + result = splitdrive(p) + It is always true that: + result[0] + result[1] == p + + If the path contained a drive letter, drive_or_unc will contain everything + up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir") + + If the path contained a UNC path, the drive_or_unc will contain the host name + and share up to but not including the fourth directory separator character. + e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir") + + Paths cannot contain both a drive letter and a UNC path. + + """ + p = os.fspath(p) + if len(p) >= 2: + if isinstance(p, bytes): + sep = b'\\' + altsep = b'/' + colon = b':' + else: + sep = '\\' + altsep = '/' + colon = ':' + normp = p.replace(altsep, sep) + if (normp[0:2] == sep*2) and (normp[2:3] != sep): + # is a UNC path: + # vvvvvvvvvvvvvvvvvvvv drive letter or UNC path + # \\machine\mountpoint\directory\etc\... + # directory ^^^^^^^^^^^^^^^ + index = normp.find(sep, 2) + if index == -1: + return p[:0], p + index2 = normp.find(sep, index + 1) + # a UNC path can't have two slashes in a row + # (after the initial two) + if index2 == index + 1: + return p[:0], p + if index2 == -1: + index2 = len(p) + return p[:index2], p[index2:] + if normp[1:2] == colon: + return p[:2], p[2:] + return p[:0], p + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). After the trailing '/' is stripped, the invariant +# join(head, tail) == p holds. +# The resulting head won't end in '/' unless it is the root. + +def split(p): + """Split a pathname. + + Return tuple (head, tail) where tail is everything after the final slash. + Either part may be empty.""" + p = os.fspath(p) + seps = _get_bothseps(p) + d, p = splitdrive(p) + # set i to index beyond p's last slash + i = len(p) + while i and p[i-1] not in seps: + i -= 1 + head, tail = p[:i], p[i:] # now tail has no slashes + # remove trailing slashes from head, unless it's all slashes + head = head.rstrip(seps) or head + return d + head, tail + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +def splitext(p): + p = os.fspath(p) + if isinstance(p, bytes): + return genericpath._splitext(p, b'\\', b'/', b'.') + else: + return genericpath._splitext(p, '\\', '/', '.') +splitext.__doc__ = genericpath._splitext.__doc__ + + +# Return the tail (basename) part of a path. + +def basename(p): + """Returns the final component of a pathname""" + return split(p)[1] + + +# Return the head (dirname) part of a path. + +def dirname(p): + """Returns the directory component of a pathname""" + return split(p)[0] + +# Is a path a symbolic link? +# This will always return false on systems where os.lstat doesn't exist. + +def islink(path): + """Test whether a path is a symbolic link. + This will always return false for Windows prior to 6.0. + """ + try: + st = os.lstat(path) + except (OSError, ValueError, AttributeError): + return False + return stat.S_ISLNK(st.st_mode) + +# Being true for dangling symbolic links is also useful. + +def lexists(path): + """Test whether a path exists. Returns True for broken symbolic links""" + try: + st = os.lstat(path) + except (OSError, ValueError): + return False + return True + +# Is a path a mount point? +# Any drive letter root (eg c:\) +# Any share UNC (eg \\server\share) +# Any volume mounted on a filesystem folder +# +# No one method detects all three situations. 
Historically we've lexically +# detected drive letter roots and share UNCs. The canonical approach to +# detecting mounted volumes (querying the reparse tag) fails for the most +# common case: drive letter roots. The alternative which uses GetVolumePathName +# fails if the drive letter is the result of a SUBST. +try: + from nt import _getvolumepathname +except ImportError: + _getvolumepathname = None +def ismount(path): + """Test whether a path is a mount point (a drive root, the root of a + share, or a mounted volume)""" + path = os.fspath(path) + seps = _get_bothseps(path) + path = abspath(path) + root, rest = splitdrive(path) + if root and root[0] in seps: + return (not rest) or (rest in seps) + if rest in seps: + return True + + if _getvolumepathname: + return path.rstrip(seps) == _getvolumepathname(path).rstrip(seps) + else: + return False + + +# Expand paths beginning with '~' or '~user'. +# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructs. + + If user or $HOME is unknown, do nothing.""" + path = os.fspath(path) + if isinstance(path, bytes): + tilde = b'~' + else: + tilde = '~' + if not path.startswith(tilde): + return path + i, n = 1, len(path) + while i < n and path[i] not in _get_bothseps(path): + i += 1 + + if 'USERPROFILE' in os.environ: + userhome = os.environ['USERPROFILE'] + elif not 'HOMEPATH' in os.environ: + return path + else: + try: + drive = os.environ['HOMEDRIVE'] + except KeyError: + drive = '' + userhome = join(drive, os.environ['HOMEPATH']) + + if i != 1: #~user + target_user = path[1:i] + if isinstance(target_user, bytes): + target_user = os.fsdecode(target_user) + current_user = os.environ.get('USERNAME') + + if target_user != current_user: + # Try to guess user home directory. By default all user + # profile directories are located in the same place and are + # named by corresponding usernames. If userhome isn't a + # normal profile directory, this guess is likely wrong, + # so we bail out. + if current_user != basename(userhome): + return path + userhome = join(dirname(userhome), target_user) + + if isinstance(path, bytes): + userhome = os.fsencode(userhome) + + return userhome + path[i:] + + +# Expand paths containing shell variable substitutions. +# The following rules apply: +# - no expansion within single quotes +# - '$$' is translated into '$' +# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2% +# - ${varname} is accepted. +# - $varname is accepted. +# - %varname% is accepted. +# - varnames can be made out of letters, digits and the characters '_-' +# (though is not verified in the ${varname} and %varname% cases) +# XXX With COMMAND.COM you can use any characters in a variable name, +# XXX except '^|<>='. + +def expandvars(path): + """Expand shell variables of the forms $var, ${var} and %var%. 
+ + Unknown variables are left unchanged.""" + path = os.fspath(path) + if isinstance(path, bytes): + if b'$' not in path and b'%' not in path: + return path + import string + varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii') + quote = b'\'' + percent = b'%' + brace = b'{' + rbrace = b'}' + dollar = b'$' + environ = getattr(os, 'environb', None) + else: + if '$' not in path and '%' not in path: + return path + import string + varchars = string.ascii_letters + string.digits + '_-' + quote = '\'' + percent = '%' + brace = '{' + rbrace = '}' + dollar = '$' + environ = os.environ + res = path[:0] + index = 0 + pathlen = len(path) + while index < pathlen: + c = path[index:index+1] + if c == quote: # no expansion within single quotes + path = path[index + 1:] + pathlen = len(path) + try: + index = path.index(c) + res += c + path[:index + 1] + except ValueError: + res += c + path + index = pathlen - 1 + elif c == percent: # variable or '%' + if path[index + 1:index + 2] == percent: + res += c + index += 1 + else: + path = path[index+1:] + pathlen = len(path) + try: + index = path.index(percent) + except ValueError: + res += percent + path + index = pathlen - 1 + else: + var = path[:index] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(var)]) + else: + value = environ[var] + except KeyError: + value = percent + var + percent + res += value + elif c == dollar: # variable or '$$' + if path[index + 1:index + 2] == dollar: + res += c + index += 1 + elif path[index + 1:index + 2] == brace: + path = path[index+2:] + pathlen = len(path) + try: + index = path.index(rbrace) + except ValueError: + res += dollar + brace + path + index = pathlen - 1 + else: + var = path[:index] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(var)]) + else: + value = environ[var] + except KeyError: + value = dollar + brace + var + rbrace + res += value + else: + var = path[:0] + index += 1 + c = path[index:index + 1] + while c and c in varchars: + var += c + index += 1 + c = path[index:index + 1] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(var)]) + else: + value = environ[var] + except KeyError: + value = dollar + var + res += value + if c: + index -= 1 + else: + res += c + index += 1 + return res + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B. +# Previously, this function also truncated pathnames to 8+3 format, +# but as this module is called "ntpath", that's obviously wrong! + +def normpath(path): + """Normalize path, eliminating double slashes, etc.""" + path = os.fspath(path) + if isinstance(path, bytes): + sep = b'\\' + altsep = b'/' + curdir = b'.' + pardir = b'..' + special_prefixes = (b'\\\\.\\', b'\\\\?\\') + else: + sep = '\\' + altsep = '/' + curdir = '.' + pardir = '..' 
+ special_prefixes = ('\\\\.\\', '\\\\?\\') + if path.startswith(special_prefixes): + # in the case of paths with these prefixes: + # \\.\ -> device names + # \\?\ -> literal paths + # do not do any normalization, but return the path + # unchanged apart from the call to os.fspath() + return path + path = path.replace(altsep, sep) + prefix, path = splitdrive(path) + + # collapse initial backslashes + if path.startswith(sep): + prefix += sep + path = path.lstrip(sep) + + comps = path.split(sep) + i = 0 + while i < len(comps): + if not comps[i] or comps[i] == curdir: + del comps[i] + elif comps[i] == pardir: + if i > 0 and comps[i-1] != pardir: + del comps[i-1:i+1] + i -= 1 + elif i == 0 and prefix.endswith(sep): + del comps[i] + else: + i += 1 + else: + i += 1 + # If the path is now empty, substitute '.' + if not prefix and not comps: + comps.append(curdir) + return prefix + sep.join(comps) + +def _abspath_fallback(path): + """Return the absolute version of a path as a fallback function in case + `nt._getfullpathname` is not available or raises OSError. See bpo-31047 for + more. + + """ + + path = os.fspath(path) + if not isabs(path): + if isinstance(path, bytes): + cwd = os.getcwdb() + else: + cwd = os.getcwd() + path = join(cwd, path) + return normpath(path) + +# Return an absolute path. +try: + from nt import _getfullpathname + +except ImportError: # not running on Windows - mock up something sensible + abspath = _abspath_fallback + +else: # use native Windows method on Windows + def abspath(path): + """Return the absolute version of a path.""" + try: + return normpath(_getfullpathname(path)) + except (OSError, ValueError): + return _abspath_fallback(path) + +try: + from nt import _getfinalpathname, readlink as _nt_readlink +except ImportError: + # realpath is a no-op on systems without _getfinalpathname support. + realpath = abspath +else: + def _readlink_deep(path): + # These error codes indicate that we should stop reading links and + # return the path we currently have. + # 1: ERROR_INVALID_FUNCTION + # 2: ERROR_FILE_NOT_FOUND + # 3: ERROR_DIRECTORY_NOT_FOUND + # 5: ERROR_ACCESS_DENIED + # 21: ERROR_NOT_READY (implies drive with no media) + # 32: ERROR_SHARING_VIOLATION (probably an NTFS paging file) + # 50: ERROR_NOT_SUPPORTED (implies no support for reparse points) + # 67: ERROR_BAD_NET_NAME (implies remote server unavailable) + # 87: ERROR_INVALID_PARAMETER + # 4390: ERROR_NOT_A_REPARSE_POINT + # 4392: ERROR_INVALID_REPARSE_DATA + # 4393: ERROR_REPARSE_TAG_INVALID + allowed_winerror = 1, 2, 3, 5, 21, 32, 50, 67, 87, 4390, 4392, 4393 + + seen = set() + while normcase(path) not in seen: + seen.add(normcase(path)) + try: + old_path = path + path = _nt_readlink(path) + # Links may be relative, so resolve them against their + # own location + if not isabs(path): + # If it's something other than a symlink, we don't know + # what it's actually going to be resolved against, so + # just return the old path. + if not islink(old_path): + path = old_path + break + path = normpath(join(dirname(old_path), path)) + except OSError as ex: + if ex.winerror in allowed_winerror: + break + raise + except ValueError: + # Stop on reparse points that are not symlinks + break + return path + + def _getfinalpathname_nonstrict(path): + # These error codes indicate that we should stop resolving the path + # and return the value we currently have. 
+ # 1: ERROR_INVALID_FUNCTION + # 2: ERROR_FILE_NOT_FOUND + # 3: ERROR_DIRECTORY_NOT_FOUND + # 5: ERROR_ACCESS_DENIED + # 21: ERROR_NOT_READY (implies drive with no media) + # 32: ERROR_SHARING_VIOLATION (probably an NTFS paging file) + # 50: ERROR_NOT_SUPPORTED + # 53: ERROR_BAD_NETPATH + # 65: ERROR_NETWORK_ACCESS_DENIED + # 67: ERROR_BAD_NET_NAME (implies remote server unavailable) + # 87: ERROR_INVALID_PARAMETER + # 123: ERROR_INVALID_NAME + # 161: ERROR_BAD_PATHNAME + # 1920: ERROR_CANT_ACCESS_FILE + # 1921: ERROR_CANT_RESOLVE_FILENAME (implies unfollowable symlink) + allowed_winerror = 1, 2, 3, 5, 21, 32, 50, 53, 65, 67, 87, 123, 161, 1920, 1921 + + # Non-strict algorithm is to find as much of the target directory + # as we can and join the rest. + tail = '' + while path: + try: + path = _getfinalpathname(path) + return join(path, tail) if tail else path + except OSError as ex: + if ex.winerror not in allowed_winerror: + raise + try: + # The OS could not resolve this path fully, so we attempt + # to follow the link ourselves. If we succeed, join the tail + # and return. + new_path = _readlink_deep(path) + if new_path != path: + return join(new_path, tail) if tail else new_path + except OSError: + # If we fail to readlink(), let's keep traversing + pass + path, name = split(path) + # TODO (bpo-38186): Request the real file name from the directory + # entry using FindFirstFileW. For now, we will return the path + # as best we have it + if path and not name: + return path + tail + tail = join(name, tail) if tail else name + return tail + + def realpath(path, *, strict=False): + path = normpath(path) + if isinstance(path, bytes): + prefix = b'\\\\?\\' + unc_prefix = b'\\\\?\\UNC\\' + new_unc_prefix = b'\\\\' + cwd = os.getcwdb() + # bpo-38081: Special case for realpath(b'nul') + if normcase(path) == normcase(os.fsencode(devnull)): + return b'\\\\.\\NUL' + else: + prefix = '\\\\?\\' + unc_prefix = '\\\\?\\UNC\\' + new_unc_prefix = '\\\\' + cwd = os.getcwd() + # bpo-38081: Special case for realpath('nul') + if normcase(path) == normcase(devnull): + return '\\\\.\\NUL' + had_prefix = path.startswith(prefix) + if not had_prefix and not isabs(path): + path = join(cwd, path) + try: + path = _getfinalpathname(path) + initial_winerror = 0 + except OSError as ex: + if strict: + raise + initial_winerror = ex.winerror + path = _getfinalpathname_nonstrict(path) + # The path returned by _getfinalpathname will always start with \\?\ - + # strip off that prefix unless it was already provided on the original + # path. + if not had_prefix and path.startswith(prefix): + # For UNC paths, the prefix will actually be \\?\UNC\ + # Handle that case as well. + if path.startswith(unc_prefix): + spath = new_unc_prefix + path[len(unc_prefix):] + else: + spath = path[len(prefix):] + # Ensure that the non-prefixed path resolves to the same path + try: + if _getfinalpathname(spath) == path: + path = spath + except OSError as ex: + # If the path does not exist and originally did not exist, then + # strip the prefix anyway. + if ex.winerror == initial_winerror: + path = spath + return path + + +# Win9x family and earlier have no Unicode filename support. +supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and + sys.getwindowsversion()[3] >= 2) + +def relpath(path, start=None): + """Return a relative version of a path""" + path = os.fspath(path) + if isinstance(path, bytes): + sep = b'\\' + curdir = b'.' + pardir = b'..' + else: + sep = '\\' + curdir = '.' + pardir = '..' 
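+    # For instance, relpath(r'C:\foo\bar', r'C:\foo') yields 'bar', while
+    # relpath(r'C:\foo', r'C:\foo\bar') yields '..'.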
+ + if start is None: + start = curdir + + if not path: + raise ValueError("no path specified") + + start = os.fspath(start) + try: + start_abs = abspath(normpath(start)) + path_abs = abspath(normpath(path)) + start_drive, start_rest = splitdrive(start_abs) + path_drive, path_rest = splitdrive(path_abs) + if normcase(start_drive) != normcase(path_drive): + raise ValueError("path is on mount %r, start on mount %r" % ( + path_drive, start_drive)) + + start_list = [x for x in start_rest.split(sep) if x] + path_list = [x for x in path_rest.split(sep) if x] + # Work out how much of the filepath is shared by start and path. + i = 0 + for e1, e2 in zip(start_list, path_list): + if normcase(e1) != normcase(e2): + break + i += 1 + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + except (TypeError, ValueError, AttributeError, BytesWarning, DeprecationWarning): + genericpath._check_arg_types('relpath', path, start) + raise + + +# Return the longest common sub-path of the sequence of paths given as input. +# The function is case-insensitive and 'separator-insensitive', i.e. if the +# only difference between two paths is the use of '\' versus '/' as separator, +# they are deemed to be equal. +# +# However, the returned path will have the standard '\' separator (even if the +# given paths had the alternative '/' separator) and will have the case of the +# first path given in the sequence. Additionally, any trailing separator is +# stripped from the returned path. + +def commonpath(paths): + """Given a sequence of path names, returns the longest common sub-path.""" + + if not paths: + raise ValueError('commonpath() arg is an empty sequence') + + paths = tuple(map(os.fspath, paths)) + if isinstance(paths[0], bytes): + sep = b'\\' + altsep = b'/' + curdir = b'.' + else: + sep = '\\' + altsep = '/' + curdir = '.' + + try: + drivesplits = [splitdrive(p.replace(altsep, sep).lower()) for p in paths] + split_paths = [p.split(sep) for d, p in drivesplits] + + try: + isabs, = set(p[:1] == sep for d, p in drivesplits) + except ValueError: + raise ValueError("Can't mix absolute and relative paths") from None + + # Check that all drive letters or UNC paths match. The check is made only + # now otherwise type errors for mixing strings and bytes would not be + # caught. + if len(set(d for d, p in drivesplits)) != 1: + raise ValueError("Paths don't have the same drive") + + drive, path = splitdrive(paths[0].replace(altsep, sep)) + common = path.split(sep) + common = [c for c in common if c and c != curdir] + + split_paths = [[c for c in s if c and c != curdir] for s in split_paths] + s1 = min(split_paths) + s2 = max(split_paths) + for i, c in enumerate(s1): + if c != s2[i]: + common = common[:i] + break + else: + common = common[:len(s1)] + + prefix = drive + sep if isabs else drive + return prefix + sep.join(common) + except (TypeError, AttributeError): + genericpath._check_arg_types('commonpath', *paths) + raise + + +try: + # The genericpath.isdir implementation uses os.stat and checks the mode + # attribute to tell whether or not the path is a directory. + # This is overkill on Windows - just pass the path to GetFileAttributes + # and check the attribute from there. + from nt import _isdir as isdir +except ImportError: + # Use genericpath.isdir as imported above. 
+ pass diff --git a/pllava/lib/python3.10/opcode.py b/pllava/lib/python3.10/opcode.py new file mode 100644 index 0000000000000000000000000000000000000000..37e88e92df70ecc58ea9f59bea40fa20a209f916 --- /dev/null +++ b/pllava/lib/python3.10/opcode.py @@ -0,0 +1,216 @@ + +""" +opcode module - potentially shared between dis and other modules which +operate on bytecodes (e.g. peephole optimizers). +""" + +__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs", + "haslocal", "hascompare", "hasfree", "opname", "opmap", + "HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"] + +# It's a chicken-and-egg I'm afraid: +# We're imported before _opcode's made. +# With exception unheeded +# (stack_effect is not needed) +# Both our chickens and eggs are allayed. +# --Larry Hastings, 2013/11/23 + +try: + from _opcode import stack_effect + __all__.append('stack_effect') +except ImportError: + pass + +cmp_op = ('<', '<=', '==', '!=', '>', '>=') + +hasconst = [] +hasname = [] +hasjrel = [] +hasjabs = [] +haslocal = [] +hascompare = [] +hasfree = [] +hasnargs = [] # unused + +opmap = {} +opname = ['<%r>' % (op,) for op in range(256)] + +def def_op(name, op): + opname[op] = name + opmap[name] = op + +def name_op(name, op): + def_op(name, op) + hasname.append(op) + +def jrel_op(name, op): + def_op(name, op) + hasjrel.append(op) + +def jabs_op(name, op): + def_op(name, op) + hasjabs.append(op) + +# Instruction opcodes for compiled code +# Blank lines correspond to available opcodes + +def_op('POP_TOP', 1) +def_op('ROT_TWO', 2) +def_op('ROT_THREE', 3) +def_op('DUP_TOP', 4) +def_op('DUP_TOP_TWO', 5) +def_op('ROT_FOUR', 6) + +def_op('NOP', 9) +def_op('UNARY_POSITIVE', 10) +def_op('UNARY_NEGATIVE', 11) +def_op('UNARY_NOT', 12) + +def_op('UNARY_INVERT', 15) +def_op('BINARY_MATRIX_MULTIPLY', 16) +def_op('INPLACE_MATRIX_MULTIPLY', 17) + +def_op('BINARY_POWER', 19) +def_op('BINARY_MULTIPLY', 20) + +def_op('BINARY_MODULO', 22) +def_op('BINARY_ADD', 23) +def_op('BINARY_SUBTRACT', 24) +def_op('BINARY_SUBSCR', 25) +def_op('BINARY_FLOOR_DIVIDE', 26) +def_op('BINARY_TRUE_DIVIDE', 27) +def_op('INPLACE_FLOOR_DIVIDE', 28) +def_op('INPLACE_TRUE_DIVIDE', 29) +def_op('GET_LEN', 30) +def_op('MATCH_MAPPING', 31) +def_op('MATCH_SEQUENCE', 32) +def_op('MATCH_KEYS', 33) +def_op('COPY_DICT_WITHOUT_KEYS', 34) + +def_op('WITH_EXCEPT_START', 49) +def_op('GET_AITER', 50) +def_op('GET_ANEXT', 51) +def_op('BEFORE_ASYNC_WITH', 52) + +def_op('END_ASYNC_FOR', 54) +def_op('INPLACE_ADD', 55) +def_op('INPLACE_SUBTRACT', 56) +def_op('INPLACE_MULTIPLY', 57) + +def_op('INPLACE_MODULO', 59) +def_op('STORE_SUBSCR', 60) +def_op('DELETE_SUBSCR', 61) +def_op('BINARY_LSHIFT', 62) +def_op('BINARY_RSHIFT', 63) +def_op('BINARY_AND', 64) +def_op('BINARY_XOR', 65) +def_op('BINARY_OR', 66) +def_op('INPLACE_POWER', 67) +def_op('GET_ITER', 68) +def_op('GET_YIELD_FROM_ITER', 69) +def_op('PRINT_EXPR', 70) +def_op('LOAD_BUILD_CLASS', 71) +def_op('YIELD_FROM', 72) +def_op('GET_AWAITABLE', 73) +def_op('LOAD_ASSERTION_ERROR', 74) +def_op('INPLACE_LSHIFT', 75) +def_op('INPLACE_RSHIFT', 76) +def_op('INPLACE_AND', 77) +def_op('INPLACE_XOR', 78) +def_op('INPLACE_OR', 79) + +def_op('LIST_TO_TUPLE', 82) +def_op('RETURN_VALUE', 83) +def_op('IMPORT_STAR', 84) +def_op('SETUP_ANNOTATIONS', 85) +def_op('YIELD_VALUE', 86) +def_op('POP_BLOCK', 87) + +def_op('POP_EXCEPT', 89) + +HAVE_ARGUMENT = 90 # Opcodes from here have an argument: + +name_op('STORE_NAME', 90) # Index in name list +name_op('DELETE_NAME', 91) # "" +def_op('UNPACK_SEQUENCE', 92) # Number of tuple items 
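+# (After the definitions below, e.g. opmap['FOR_ITER'] == 93 and
+# opname[93] == 'FOR_ITER', and 93 appears in hasjrel.)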
+jrel_op('FOR_ITER', 93) +def_op('UNPACK_EX', 94) +name_op('STORE_ATTR', 95) # Index in name list +name_op('DELETE_ATTR', 96) # "" +name_op('STORE_GLOBAL', 97) # "" +name_op('DELETE_GLOBAL', 98) # "" +def_op('ROT_N', 99) +def_op('LOAD_CONST', 100) # Index in const list +hasconst.append(100) +name_op('LOAD_NAME', 101) # Index in name list +def_op('BUILD_TUPLE', 102) # Number of tuple items +def_op('BUILD_LIST', 103) # Number of list items +def_op('BUILD_SET', 104) # Number of set items +def_op('BUILD_MAP', 105) # Number of dict entries +name_op('LOAD_ATTR', 106) # Index in name list +def_op('COMPARE_OP', 107) # Comparison operator +hascompare.append(107) +name_op('IMPORT_NAME', 108) # Index in name list +name_op('IMPORT_FROM', 109) # Index in name list +jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip +jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code +jabs_op('JUMP_IF_TRUE_OR_POP', 112) # "" +jabs_op('JUMP_ABSOLUTE', 113) # "" +jabs_op('POP_JUMP_IF_FALSE', 114) # "" +jabs_op('POP_JUMP_IF_TRUE', 115) # "" +name_op('LOAD_GLOBAL', 116) # Index in name list +def_op('IS_OP', 117) +def_op('CONTAINS_OP', 118) +def_op('RERAISE', 119) + +jabs_op('JUMP_IF_NOT_EXC_MATCH', 121) +jrel_op('SETUP_FINALLY', 122) # Distance to target address + +def_op('LOAD_FAST', 124) # Local variable number +haslocal.append(124) +def_op('STORE_FAST', 125) # Local variable number +haslocal.append(125) +def_op('DELETE_FAST', 126) # Local variable number +haslocal.append(126) + +def_op('GEN_START', 129) # Kind of generator/coroutine +def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3) +def_op('CALL_FUNCTION', 131) # #args +def_op('MAKE_FUNCTION', 132) # Flags +def_op('BUILD_SLICE', 133) # Number of items + +def_op('LOAD_CLOSURE', 135) +hasfree.append(135) +def_op('LOAD_DEREF', 136) +hasfree.append(136) +def_op('STORE_DEREF', 137) +hasfree.append(137) +def_op('DELETE_DEREF', 138) +hasfree.append(138) + +def_op('CALL_FUNCTION_KW', 141) # #args + #kwargs +def_op('CALL_FUNCTION_EX', 142) # Flags +jrel_op('SETUP_WITH', 143) +def_op('EXTENDED_ARG', 144) +EXTENDED_ARG = 144 +def_op('LIST_APPEND', 145) +def_op('SET_ADD', 146) +def_op('MAP_ADD', 147) +def_op('LOAD_CLASSDEREF', 148) +hasfree.append(148) + +def_op('MATCH_CLASS', 152) + +jrel_op('SETUP_ASYNC_WITH', 154) +def_op('FORMAT_VALUE', 155) +def_op('BUILD_CONST_KEY_MAP', 156) +def_op('BUILD_STRING', 157) + +name_op('LOAD_METHOD', 160) +def_op('CALL_METHOD', 161) +def_op('LIST_EXTEND', 162) +def_op('SET_UPDATE', 163) +def_op('DICT_MERGE', 164) +def_op('DICT_UPDATE', 165) + +del def_op, name_op, jrel_op, jabs_op diff --git a/pllava/lib/python3.10/operator.py b/pllava/lib/python3.10/operator.py new file mode 100644 index 0000000000000000000000000000000000000000..241fdbb679e7c136808336539cd97a458543662d --- /dev/null +++ b/pllava/lib/python3.10/operator.py @@ -0,0 +1,460 @@ +""" +Operator Interface + +This module exports a set of functions corresponding to the intrinsic +operators of Python. For example, operator.add(x, y) is equivalent +to the expression x+y. The function names are those used for special +methods; variants without leading and trailing '__' are also provided +for convenience. + +This is the pure Python implementation of the module. 
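+
+For example, after f = itemgetter(1), the call f(seq) returns seq[1].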
+""" + +__all__ = ['abs', 'add', 'and_', 'attrgetter', 'concat', 'contains', 'countOf', + 'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand', + 'iconcat', 'ifloordiv', 'ilshift', 'imatmul', 'imod', 'imul', + 'index', 'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift', + 'is_', 'is_not', 'isub', 'itemgetter', 'itruediv', 'ixor', 'le', + 'length_hint', 'lshift', 'lt', 'matmul', 'methodcaller', 'mod', + 'mul', 'ne', 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift', + 'setitem', 'sub', 'truediv', 'truth', 'xor'] + +from builtins import abs as _abs + + +# Comparison Operations *******************************************************# + +def lt(a, b): + "Same as a < b." + return a < b + +def le(a, b): + "Same as a <= b." + return a <= b + +def eq(a, b): + "Same as a == b." + return a == b + +def ne(a, b): + "Same as a != b." + return a != b + +def ge(a, b): + "Same as a >= b." + return a >= b + +def gt(a, b): + "Same as a > b." + return a > b + +# Logical Operations **********************************************************# + +def not_(a): + "Same as not a." + return not a + +def truth(a): + "Return True if a is true, False otherwise." + return True if a else False + +def is_(a, b): + "Same as a is b." + return a is b + +def is_not(a, b): + "Same as a is not b." + return a is not b + +# Mathematical/Bitwise Operations *********************************************# + +def abs(a): + "Same as abs(a)." + return _abs(a) + +def add(a, b): + "Same as a + b." + return a + b + +def and_(a, b): + "Same as a & b." + return a & b + +def floordiv(a, b): + "Same as a // b." + return a // b + +def index(a): + "Same as a.__index__()." + return a.__index__() + +def inv(a): + "Same as ~a." + return ~a +invert = inv + +def lshift(a, b): + "Same as a << b." + return a << b + +def mod(a, b): + "Same as a % b." + return a % b + +def mul(a, b): + "Same as a * b." + return a * b + +def matmul(a, b): + "Same as a @ b." + return a @ b + +def neg(a): + "Same as -a." + return -a + +def or_(a, b): + "Same as a | b." + return a | b + +def pos(a): + "Same as +a." + return +a + +def pow(a, b): + "Same as a ** b." + return a ** b + +def rshift(a, b): + "Same as a >> b." + return a >> b + +def sub(a, b): + "Same as a - b." + return a - b + +def truediv(a, b): + "Same as a / b." + return a / b + +def xor(a, b): + "Same as a ^ b." + return a ^ b + +# Sequence Operations *********************************************************# + +def concat(a, b): + "Same as a + b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + return a + b + +def contains(a, b): + "Same as b in a (note reversed operands)." + return b in a + +def countOf(a, b): + "Return the number of items in a which are, or which equal, b." + count = 0 + for i in a: + if i is b or i == b: + count += 1 + return count + +def delitem(a, b): + "Same as del a[b]." + del a[b] + +def getitem(a, b): + "Same as a[b]." + return a[b] + +def indexOf(a, b): + "Return the first index of b in a." + for i, j in enumerate(a): + if j is b or j == b: + return i + else: + raise ValueError('sequence.index(x): x not in sequence') + +def setitem(a, b, c): + "Same as a[b] = c." + a[b] = c + +def length_hint(obj, default=0): + """ + Return an estimate of the number of items in obj. + This is useful for presizing containers when building from an iterable. + + If the object supports len(), the result will be exact. Otherwise, it may + over- or under-estimate by an arbitrary amount. 
The result will be an + integer >= 0. + """ + if not isinstance(default, int): + msg = ("'%s' object cannot be interpreted as an integer" % + type(default).__name__) + raise TypeError(msg) + + try: + return len(obj) + except TypeError: + pass + + try: + hint = type(obj).__length_hint__ + except AttributeError: + return default + + try: + val = hint(obj) + except TypeError: + return default + if val is NotImplemented: + return default + if not isinstance(val, int): + msg = ('__length_hint__ must be integer, not %s' % + type(val).__name__) + raise TypeError(msg) + if val < 0: + msg = '__length_hint__() should return >= 0' + raise ValueError(msg) + return val + +# Generalized Lookup Objects **************************************************# + +class attrgetter: + """ + Return a callable object that fetches the given attribute(s) from its operand. + After f = attrgetter('name'), the call f(r) returns r.name. + After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). + After h = attrgetter('name.first', 'name.last'), the call h(r) returns + (r.name.first, r.name.last). + """ + __slots__ = ('_attrs', '_call') + + def __init__(self, attr, *attrs): + if not attrs: + if not isinstance(attr, str): + raise TypeError('attribute name must be a string') + self._attrs = (attr,) + names = attr.split('.') + def func(obj): + for name in names: + obj = getattr(obj, name) + return obj + self._call = func + else: + self._attrs = (attr,) + attrs + getters = tuple(map(attrgetter, self._attrs)) + def func(obj): + return tuple(getter(obj) for getter in getters) + self._call = func + + def __call__(self, obj): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__qualname__, + ', '.join(map(repr, self._attrs))) + + def __reduce__(self): + return self.__class__, self._attrs + +class itemgetter: + """ + Return a callable object that fetches the given item(s) from its operand. + After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + """ + __slots__ = ('_items', '_call') + + def __init__(self, item, *items): + if not items: + self._items = (item,) + def func(obj): + return obj[item] + self._call = func + else: + self._items = items = (item,) + items + def func(obj): + return tuple(obj[i] for i in items) + self._call = func + + def __call__(self, obj): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(map(repr, self._items))) + + def __reduce__(self): + return self.__class__, self._items + +class methodcaller: + """ + Return a callable object that calls the given method on its operand. + After f = methodcaller('name'), the call f(r) returns r.name(). + After g = methodcaller('name', 'date', foo=1), the call g(r) returns + r.name('date', foo=1). 
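+
+    For example, methodcaller('upper')('hello') returns 'HELLO'.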
+ """ + __slots__ = ('_name', '_args', '_kwargs') + + def __init__(self, name, /, *args, **kwargs): + self._name = name + if not isinstance(self._name, str): + raise TypeError('method name must be a string') + self._args = args + self._kwargs = kwargs + + def __call__(self, obj): + return getattr(obj, self._name)(*self._args, **self._kwargs) + + def __repr__(self): + args = [repr(self._name)] + args.extend(map(repr, self._args)) + args.extend('%s=%r' % (k, v) for k, v in self._kwargs.items()) + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(args)) + + def __reduce__(self): + if not self._kwargs: + return self.__class__, (self._name,) + self._args + else: + from functools import partial + return partial(self.__class__, self._name, **self._kwargs), self._args + + +# In-place Operations *********************************************************# + +def iadd(a, b): + "Same as a += b." + a += b + return a + +def iand(a, b): + "Same as a &= b." + a &= b + return a + +def iconcat(a, b): + "Same as a += b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + a += b + return a + +def ifloordiv(a, b): + "Same as a //= b." + a //= b + return a + +def ilshift(a, b): + "Same as a <<= b." + a <<= b + return a + +def imod(a, b): + "Same as a %= b." + a %= b + return a + +def imul(a, b): + "Same as a *= b." + a *= b + return a + +def imatmul(a, b): + "Same as a @= b." + a @= b + return a + +def ior(a, b): + "Same as a |= b." + a |= b + return a + +def ipow(a, b): + "Same as a **= b." + a **=b + return a + +def irshift(a, b): + "Same as a >>= b." + a >>= b + return a + +def isub(a, b): + "Same as a -= b." + a -= b + return a + +def itruediv(a, b): + "Same as a /= b." + a /= b + return a + +def ixor(a, b): + "Same as a ^= b." + a ^= b + return a + + +try: + from _operator import * +except ImportError: + pass +else: + from _operator import __doc__ + +# All of these "__func__ = func" assignments have to happen after importing +# from _operator to make sure they're set to the right function +__lt__ = lt +__le__ = le +__eq__ = eq +__ne__ = ne +__ge__ = ge +__gt__ = gt +__not__ = not_ +__abs__ = abs +__add__ = add +__and__ = and_ +__floordiv__ = floordiv +__index__ = index +__inv__ = inv +__invert__ = invert +__lshift__ = lshift +__mod__ = mod +__mul__ = mul +__matmul__ = matmul +__neg__ = neg +__or__ = or_ +__pos__ = pos +__pow__ = pow +__rshift__ = rshift +__sub__ = sub +__truediv__ = truediv +__xor__ = xor +__concat__ = concat +__contains__ = contains +__delitem__ = delitem +__getitem__ = getitem +__setitem__ = setitem +__iadd__ = iadd +__iand__ = iand +__iconcat__ = iconcat +__ifloordiv__ = ifloordiv +__ilshift__ = ilshift +__imod__ = imod +__imul__ = imul +__imatmul__ = imatmul +__ior__ = ior +__ipow__ = ipow +__irshift__ = irshift +__isub__ = isub +__itruediv__ = itruediv +__ixor__ = ixor diff --git a/pllava/lib/python3.10/os.py b/pllava/lib/python3.10/os.py new file mode 100644 index 0000000000000000000000000000000000000000..4f2ffceaaf383556921b8d18c05998563eaff4bb --- /dev/null +++ b/pllava/lib/python3.10/os.py @@ -0,0 +1,1123 @@ +r"""OS routines for NT or Posix depending on what system we're on. + +This exports: + - all functions from posix or nt, e.g. unlink, stat, etc. 
+ - os.path is either posixpath or ntpath + - os.name is either 'posix' or 'nt' + - os.curdir is a string representing the current directory (always '.') + - os.pardir is a string representing the parent directory (always '..') + - os.sep is the (or a most common) pathname separator ('/' or '\\') + - os.extsep is the extension separator (always '.') + - os.altsep is the alternate pathname separator (None or '/') + - os.pathsep is the component separator used in $PATH etc + - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') + - os.defpath is the default search path for executables + - os.devnull is the file path of the null device ('/dev/null', etc.) + +Programs that import and use 'os' stand a better chance of being +portable between different platforms. Of course, they must then +only use functions that are defined by all platforms (e.g., unlink +and opendir), and leave all pathname manipulation to os.path +(e.g., split and join). +""" + +#' +import abc +import sys +import stat as st + +from _collections_abc import _check_methods + +GenericAlias = type(list[int]) + +_names = sys.builtin_module_names + +# Note: more names are added to __all__ later. +__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", + "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", + "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", + "extsep"] + +def _exists(name): + return name in globals() + +def _get_exports_list(module): + try: + return list(module.__all__) + except AttributeError: + return [n for n in dir(module) if n[0] != '_'] + +# Any new dependencies of the os module and/or changes in path separator +# requires updating importlib as well. +if 'posix' in _names: + name = 'posix' + linesep = '\n' + from posix import * + try: + from posix import _exit + __all__.append('_exit') + except ImportError: + pass + import posixpath as path + + try: + from posix import _have_functions + except ImportError: + pass + + import posix + __all__.extend(_get_exports_list(posix)) + del posix + +elif 'nt' in _names: + name = 'nt' + linesep = '\r\n' + from nt import * + try: + from nt import _exit + __all__.append('_exit') + except ImportError: + pass + import ntpath as path + + import nt + __all__.extend(_get_exports_list(nt)) + del nt + + try: + from nt import _have_functions + except ImportError: + pass + +else: + raise ImportError('no os specific module found') + +sys.modules['os.path'] = path +from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, + devnull) + +del _names + + +if _exists("_have_functions"): + _globals = globals() + def _add(str, fn): + if (fn in _globals) and (str in _have_functions): + _set.add(_globals[fn]) + + _set = set() + _add("HAVE_FACCESSAT", "access") + _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_FUTIMESAT", "utime") + _add("HAVE_LINKAT", "link") + _add("HAVE_MKDIRAT", "mkdir") + _add("HAVE_MKFIFOAT", "mkfifo") + _add("HAVE_MKNODAT", "mknod") + _add("HAVE_OPENAT", "open") + _add("HAVE_READLINKAT", "readlink") + _add("HAVE_RENAMEAT", "rename") + _add("HAVE_SYMLINKAT", "symlink") + _add("HAVE_UNLINKAT", "unlink") + _add("HAVE_UNLINKAT", "rmdir") + _add("HAVE_UTIMENSAT", "utime") + supports_dir_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + supports_effective_ids = _set + + _set = set() + _add("HAVE_FCHDIR", "chdir") + _add("HAVE_FCHMOD", "chmod") + _add("HAVE_FCHOWN", "chown") + _add("HAVE_FDOPENDIR", "listdir") + _add("HAVE_FDOPENDIR", 
"scandir") + _add("HAVE_FEXECVE", "execve") + _set.add(stat) # fstat always works + _add("HAVE_FTRUNCATE", "truncate") + _add("HAVE_FUTIMENS", "utime") + _add("HAVE_FUTIMES", "utime") + _add("HAVE_FPATHCONF", "pathconf") + if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 + _add("HAVE_FSTATVFS", "statvfs") + supports_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + # Some platforms don't support lchmod(). Often the function exists + # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. + # (No, I don't know why that's a good design.) ./configure will detect + # this and reject it--so HAVE_LCHMOD still won't be defined on such + # platforms. This is Very Helpful. + # + # However, sometimes platforms without a working lchmod() *do* have + # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15, + # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes + # it behave like lchmod(). So in theory it would be a suitable + # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s + # flag doesn't work *either*. Sadly ./configure isn't sophisticated + # enough to detect this condition--it only determines whether or not + # fchmodat() minimally works. + # + # Therefore we simply ignore fchmodat() when deciding whether or not + # os.chmod supports follow_symlinks. Just checking lchmod() is + # sufficient. After all--if you have a working fchmodat(), your + # lchmod() almost certainly works too. + # + # _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_LCHFLAGS", "chflags") + _add("HAVE_LCHMOD", "chmod") + if _exists("lchown"): # mac os x10.3 + _add("HAVE_LCHOWN", "chown") + _add("HAVE_LINKAT", "link") + _add("HAVE_LUTIMES", "utime") + _add("HAVE_LSTAT", "stat") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_UTIMENSAT", "utime") + _add("MS_WINDOWS", "stat") + supports_follow_symlinks = _set + + del _set + del _have_functions + del _globals + del _add + + +# Python uses fixed values for the SEEK_ constants; they are mapped +# to native constants if necessary in posixmodule.c +# Other possible SEEK values are directly imported from posixmodule.c +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Super directory utilities. +# (Inspired by Eric Raymond; the doc strings are mostly his) + +def makedirs(name, mode=0o777, exist_ok=False): + """makedirs(name [, mode=0o777][, exist_ok=False]) + + Super-mkdir; create a leaf directory and all intermediate ones. Works like + mkdir, except that any intermediate path segment (not just the rightmost) + will be created if it does not exist. If the target directory already + exists, raise an OSError if exist_ok is False. Otherwise no exception is + raised. This is recursive. + + """ + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + if head and tail and not path.exists(head): + try: + makedirs(head, exist_ok=exist_ok) + except FileExistsError: + # Defeats race condition when another thread created the path + pass + cdir = curdir + if isinstance(tail, bytes): + cdir = bytes(curdir, 'ASCII') + if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists + return + try: + mkdir(name, mode) + except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS + if not exist_ok or not path.isdir(name): + raise + +def removedirs(name): + """removedirs(name) + + Super-rmdir; remove a leaf directory and all empty intermediate + ones. 
Works like rmdir except that, if the leaf directory is + successfully removed, directories corresponding to rightmost path + segments will be pruned away until either the whole path is + consumed or an error occurs. Errors during this latter phase are + ignored -- they generally mean that a directory was not empty. + + """ + rmdir(name) + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + while head and tail: + try: + rmdir(head) + except OSError: + break + head, tail = path.split(head) + +def renames(old, new): + """renames(old, new) + + Super-rename; create directories as necessary and delete any left + empty. Works like rename, except creation of any intermediate + directories needed to make the new pathname good is attempted + first. After the rename, directories corresponding to rightmost + path segments of the old name will be pruned until either the + whole path is consumed or a nonempty directory is found. + + Note: this function can fail with the new directory structure made + if you lack permissions needed to unlink the leaf directory or + file. + + """ + head, tail = path.split(new) + if head and tail and not path.exists(head): + makedirs(head) + rename(old, new) + head, tail = path.split(old) + if head and tail: + try: + removedirs(head) + except OSError: + pass + +__all__.extend(["makedirs", "removedirs", "renames"]) + +def walk(top, topdown=True, onerror=None, followlinks=False): + """Directory tree generator. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), yields a 3-tuple + + dirpath, dirnames, filenames + + dirpath is a string, the path to the directory. dirnames is a list of + the names of the subdirectories in dirpath (including symlinks to directories, + and excluding '.' and '..'). + filenames is a list of the names of the non-directory files in dirpath. + Note that the names in the lists are just names, with no path components. + To get a full path (which begins with top) to a file or directory in + dirpath, do os.path.join(dirpath, name). + + If optional arg 'topdown' is true or not specified, the triple for a + directory is generated before the triples for any of its subdirectories + (directories are generated top down). If topdown is false, the triple + for a directory is generated after the triples for all of its + subdirectories (directories are generated bottom up). + + When topdown is true, the caller can modify the dirnames list in-place + (e.g., via del or slice assignment), and walk will only recurse into the + subdirectories whose names remain in dirnames; this can be used to prune the + search, or to impose a specific order of visiting. Modifying dirnames when + topdown is false has no effect on the behavior of os.walk(), since the + directories in dirnames have already been generated by the time dirnames + itself is generated. No matter the value of topdown, the list of + subdirectories is retrieved before the tuples for the directory and its + subdirectories are generated. + + By default errors from the os.scandir() call are ignored. If + optional arg 'onerror' is specified, it should be a function; it + will be called with one argument, an OSError instance. It can + report the error to continue with the walk, or raise the exception + to abort the walk. Note that the filename is available as the + filename attribute of the exception object. + + By default, os.walk does not follow symbolic links to subdirectories on + systems that support them. 
In order to get this functionality, set the + optional argument 'followlinks' to true. + + Caution: if you pass a relative pathname for top, don't change the + current working directory between resumptions of walk. walk never + changes the current directory, and assumes that the client doesn't + either. + + Example: + + import os + from os.path import join, getsize + for root, dirs, files in os.walk('python/Lib/email'): + print(root, "consumes", end="") + print(sum(getsize(join(root, name)) for name in files), end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + + """ + sys.audit("os.walk", top, topdown, onerror, followlinks) + return _walk(fspath(top), topdown, onerror, followlinks) + +def _walk(top, topdown, onerror, followlinks): + dirs = [] + nondirs = [] + walk_dirs = [] + + # We may not have read permission for top, in which case we can't + # get a list of the files the directory contains. os.walk + # always suppressed the exception then, rather than blow up for a + # minor reason when (say) a thousand readable directories are still + # left to visit. That logic is copied here. + try: + # Note that scandir is global in this module due + # to earlier import-*. + scandir_it = scandir(top) + except OSError as error: + if onerror is not None: + onerror(error) + return + + with scandir_it: + while True: + try: + try: + entry = next(scandir_it) + except StopIteration: + break + except OSError as error: + if onerror is not None: + onerror(error) + return + + try: + is_dir = entry.is_dir() + except OSError: + # If is_dir() raises an OSError, consider that the entry is not + # a directory, same behaviour than os.path.isdir(). + is_dir = False + + if is_dir: + dirs.append(entry.name) + else: + nondirs.append(entry.name) + + if not topdown and is_dir: + # Bottom-up: recurse into sub-directory, but exclude symlinks to + # directories if followlinks is False + if followlinks: + walk_into = True + else: + try: + is_symlink = entry.is_symlink() + except OSError: + # If is_symlink() raises an OSError, consider that the + # entry is not a symbolic link, same behaviour than + # os.path.islink(). + is_symlink = False + walk_into = not is_symlink + + if walk_into: + walk_dirs.append(entry.path) + + # Yield before recursion if going top down + if topdown: + yield top, dirs, nondirs + + # Recurse into sub-directories + islink, join = path.islink, path.join + for dirname in dirs: + new_path = join(top, dirname) + # Issue #23605: os.path.islink() is used instead of caching + # entry.is_symlink() result during the loop on os.scandir() because + # the caller can replace the directory entry during the "yield" + # above. + if followlinks or not islink(new_path): + yield from _walk(new_path, topdown, onerror, followlinks) + else: + # Recurse into sub-directories + for new_path in walk_dirs: + yield from _walk(new_path, topdown, onerror, followlinks) + # Yield after recursion if going bottom up + yield top, dirs, nondirs + +__all__.append("walk") + +if {open, stat} <= supports_dir_fd and {scandir, stat} <= supports_fd: + + def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None): + """Directory tree generator. + + This behaves exactly like walk(), except that it yields a 4-tuple + + dirpath, dirnames, filenames, dirfd + + `dirpath`, `dirnames` and `filenames` are identical to walk() output, + and `dirfd` is a file descriptor referring to the directory `dirpath`. 
+ + The advantage of fwalk() over walk() is that it's safe against symlink + races (when follow_symlinks is False). + + If dir_fd is not None, it should be a file descriptor open to a directory, + and top should be relative; top will then be relative to that directory. + (dir_fd is always supported for fwalk.) + + Caution: + Since fwalk() yields file descriptors, those are only valid until the + next iteration step, so you should dup() them if you want to keep them + for a longer period. + + Example: + + import os + for root, dirs, files, rootfd in os.fwalk('python/Lib/email'): + print(root, "consumes", end="") + print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files), + end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + """ + sys.audit("os.fwalk", top, topdown, onerror, follow_symlinks, dir_fd) + if not isinstance(top, int) or not hasattr(top, '__index__'): + top = fspath(top) + # Note: To guard against symlink races, we use the standard + # lstat()/open()/fstat() trick. + if not follow_symlinks: + orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd) + topfd = open(top, O_RDONLY, dir_fd=dir_fd) + try: + if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and + path.samestat(orig_st, stat(topfd)))): + yield from _fwalk(topfd, top, isinstance(top, bytes), + topdown, onerror, follow_symlinks) + finally: + close(topfd) + + def _fwalk(topfd, toppath, isbytes, topdown, onerror, follow_symlinks): + # Note: This uses O(depth of the directory tree) file descriptors: if + # necessary, it can be adapted to only require O(1) FDs, see issue + # #13734. + + scandir_it = scandir(topfd) + dirs = [] + nondirs = [] + entries = None if topdown or follow_symlinks else [] + for entry in scandir_it: + name = entry.name + if isbytes: + name = fsencode(name) + try: + if entry.is_dir(): + dirs.append(name) + if entries is not None: + entries.append(entry) + else: + nondirs.append(name) + except OSError: + try: + # Add dangling symlinks, ignore disappeared files + if entry.is_symlink(): + nondirs.append(name) + except OSError: + pass + + if topdown: + yield toppath, dirs, nondirs, topfd + + for name in dirs if entries is None else zip(dirs, entries): + try: + if not follow_symlinks: + if topdown: + orig_st = stat(name, dir_fd=topfd, follow_symlinks=False) + else: + assert entries is not None + name, entry = name + orig_st = entry.stat(follow_symlinks=False) + dirfd = open(name, O_RDONLY, dir_fd=topfd) + except OSError as err: + if onerror is not None: + onerror(err) + continue + try: + if follow_symlinks or path.samestat(orig_st, stat(dirfd)): + dirpath = path.join(toppath, name) + yield from _fwalk(dirfd, dirpath, isbytes, + topdown, onerror, follow_symlinks) + finally: + close(dirfd) + + if not topdown: + yield toppath, dirs, nondirs, topfd + + __all__.append("fwalk") + +def execl(file, *args): + """execl(file, *args) + + Execute the executable file with argument list args, replacing the + current process. """ + execv(file, args) + +def execle(file, *args): + """execle(file, *args, env) + + Execute the executable file with argument list args and + environment env, replacing the current process. """ + env = args[-1] + execve(file, args[:-1], env) + +def execlp(file, *args): + """execlp(file, *args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. 
""" + execvp(file, args) + +def execlpe(file, *args): + """execlpe(file, *args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the current + process. """ + env = args[-1] + execvpe(file, args[:-1], env) + +def execvp(file, args): + """execvp(file, args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. + args may be a list or tuple of strings. """ + _execvpe(file, args) + +def execvpe(file, args, env): + """execvpe(file, args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the + current process. + args may be a list or tuple of strings. """ + _execvpe(file, args, env) + +__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) + +def _execvpe(file, args, env=None): + if env is not None: + exec_func = execve + argrest = (args, env) + else: + exec_func = execv + argrest = (args,) + env = environ + + if path.dirname(file): + exec_func(file, *argrest) + return + saved_exc = None + path_list = get_exec_path(env) + if name != 'nt': + file = fsencode(file) + path_list = map(fsencode, path_list) + for dir in path_list: + fullname = path.join(dir, file) + try: + exec_func(fullname, *argrest) + except (FileNotFoundError, NotADirectoryError) as e: + last_exc = e + except OSError as e: + last_exc = e + if saved_exc is None: + saved_exc = e + if saved_exc is not None: + raise saved_exc + raise last_exc + + +def get_exec_path(env=None): + """Returns the sequence of directories that will be searched for the + named executable (similar to a shell) when launching a process. + + *env* must be an environment variable dict or None. If *env* is None, + os.environ will be used. + """ + # Use a local import instead of a global import to limit the number of + # modules loaded at startup: the os module is always loaded at startup by + # Python. It may also avoid a bootstrap issue. 
+ import warnings + + if env is None: + env = environ + + # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a + # BytesWarning when using python -b or python -bb: ignore the warning + with warnings.catch_warnings(): + warnings.simplefilter("ignore", BytesWarning) + + try: + path_list = env.get('PATH') + except TypeError: + path_list = None + + if supports_bytes_environ: + try: + path_listb = env[b'PATH'] + except (KeyError, TypeError): + pass + else: + if path_list is not None: + raise ValueError( + "env cannot contain 'PATH' and b'PATH' keys") + path_list = path_listb + + if path_list is not None and isinstance(path_list, bytes): + path_list = fsdecode(path_list) + + if path_list is None: + path_list = defpath + return path_list.split(pathsep) + + +# Change environ to automatically call putenv() and unsetenv() +from _collections_abc import MutableMapping, Mapping + +class _Environ(MutableMapping): + def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue): + self.encodekey = encodekey + self.decodekey = decodekey + self.encodevalue = encodevalue + self.decodevalue = decodevalue + self._data = data + + def __getitem__(self, key): + try: + value = self._data[self.encodekey(key)] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + return self.decodevalue(value) + + def __setitem__(self, key, value): + key = self.encodekey(key) + value = self.encodevalue(value) + putenv(key, value) + self._data[key] = value + + def __delitem__(self, key): + encodedkey = self.encodekey(key) + unsetenv(encodedkey) + try: + del self._data[encodedkey] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + + def __iter__(self): + # list() from dict object is an atomic operation + keys = list(self._data) + for key in keys: + yield self.decodekey(key) + + def __len__(self): + return len(self._data) + + def __repr__(self): + return 'environ({{{}}})'.format(', '.join( + ('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value)) + for key, value in self._data.items()))) + + def copy(self): + return dict(self) + + def setdefault(self, key, value): + if key not in self: + self[key] = value + return self[key] + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + new = dict(self) + new.update(other) + return new + + def __ror__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + new = dict(other) + new.update(self) + return new + +def _createenviron(): + if name == 'nt': + # Where Env Var Names Must Be UPPERCASE + def check_str(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value + encode = check_str + decode = str + def encodekey(key): + return encode(key).upper() + data = {} + for key, value in environ.items(): + data[encodekey(key)] = value + else: + # Where Env Var Names Can Be Mixed Case + encoding = sys.getfilesystemencoding() + def encode(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value.encode(encoding, 'surrogateescape') + def decode(value): + return value.decode(encoding, 'surrogateescape') + encodekey = encode + data = environ + return _Environ(data, + encodekey, decode, + encode, decode) + +# unicode environ +environ = _createenviron() +del _createenviron + + +def getenv(key, default=None): + """Get an environment 
variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are str.""" + return environ.get(key, default) + +supports_bytes_environ = (name != 'nt') +__all__.extend(("getenv", "supports_bytes_environ")) + +if supports_bytes_environ: + def _check_bytes(value): + if not isinstance(value, bytes): + raise TypeError("bytes expected, not %s" % type(value).__name__) + return value + + # bytes environ + environb = _Environ(environ._data, + _check_bytes, bytes, + _check_bytes, bytes) + del _check_bytes + + def getenvb(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are bytes.""" + return environb.get(key, default) + + __all__.extend(("environb", "getenvb")) + +def _fscodec(): + encoding = sys.getfilesystemencoding() + errors = sys.getfilesystemencodeerrors() + + def fsencode(filename): + """Encode filename (an os.PathLike, bytes, or str) to the filesystem + encoding with 'surrogateescape' error handler, return bytes unchanged. + On Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + """ + filename = fspath(filename) # Does type-checking of `filename`. + if isinstance(filename, str): + return filename.encode(encoding, errors) + else: + return filename + + def fsdecode(filename): + """Decode filename (an os.PathLike, bytes, or str) from the filesystem + encoding with 'surrogateescape' error handler, return str unchanged. On + Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + """ + filename = fspath(filename) # Does type-checking of `filename`. + if isinstance(filename, bytes): + return filename.decode(encoding, errors) + else: + return filename + + return fsencode, fsdecode + +fsencode, fsdecode = _fscodec() +del _fscodec + +# Supply spawn*() (probably only for Unix) +if _exists("fork") and not _exists("spawnv") and _exists("execv"): + + P_WAIT = 0 + P_NOWAIT = P_NOWAITO = 1 + + __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"]) + + # XXX Should we support P_DETACH? I suppose it could fork()**2 + # and close the std I/O streams. Also, P_OVERLAY is the same + # as execv*()? + + def _spawnvef(mode, file, args, env, func): + # Internal helper; func is the exec*() function to use + if not isinstance(args, (tuple, list)): + raise TypeError('argv must be a tuple or a list') + if not args or not args[0]: + raise ValueError('argv first element cannot be empty') + pid = fork() + if not pid: + # Child + try: + if env is None: + func(file, args) + else: + func(file, args, env) + except: + _exit(127) + else: + # Parent + if mode == P_NOWAIT: + return pid # Caller is responsible for waiting! + while 1: + wpid, sts = waitpid(pid, 0) + if WIFSTOPPED(sts): + continue + + return waitstatus_to_exitcode(sts) + + def spawnv(mode, file, args): + """spawnv(mode, file, args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execv) + + def spawnve(mode, file, args, env): + """spawnve(mode, file, args, env) -> integer + +Execute file with arguments from args in a subprocess with the +specified environment. 
+If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execve) + + # Note: spawnvp[e] isn't currently supported on Windows + + def spawnvp(mode, file, args): + """spawnvp(mode, file, args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execvp) + + def spawnvpe(mode, file, args, env): + """spawnvpe(mode, file, args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execvpe) + + + __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"]) + + +if _exists("spawnv"): + # These aren't supplied by the basic Windows code + # but can be easily implemented in Python + + def spawnl(mode, file, *args): + """spawnl(mode, file, *args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnv(mode, file, args) + + def spawnle(mode, file, *args): + """spawnle(mode, file, *args, env) -> integer + +Execute file with arguments from args in a subprocess with the +supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnve(mode, file, args[:-1], env) + + + __all__.extend(["spawnl", "spawnle"]) + + +if _exists("spawnvp"): + # At the moment, Windows doesn't implement spawnvp[e], + # so it won't have spawnlp[e] either. + def spawnlp(mode, file, *args): + """spawnlp(mode, file, *args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnvp(mode, file, args) + + def spawnlpe(mode, file, *args): + """spawnlpe(mode, file, *args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnvpe(mode, file, args[:-1], env) + + + __all__.extend(["spawnlp", "spawnlpe"]) + +# VxWorks has no user space shell provided. As a result, running +# command in a shell can't be supported. 
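+# On platforms that do have a shell, an illustrative use of the popen()
+# defined below (editor's sketch, not upstream code):
+#   with os.popen('echo hi') as p:
+#       print(p.read())   # prints 'hi'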
+if sys.platform != 'vxworks': + # Supply os.popen() + def popen(cmd, mode="r", buffering=-1): + if not isinstance(cmd, str): + raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) + if mode not in ("r", "w"): + raise ValueError("invalid mode %r" % mode) + if buffering == 0 or buffering is None: + raise ValueError("popen() does not support unbuffered streams") + import subprocess, io + if mode == "r": + proc = subprocess.Popen(cmd, + shell=True, text=True, + stdout=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(proc.stdout, proc) + else: + proc = subprocess.Popen(cmd, + shell=True, text=True, + stdin=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(proc.stdin, proc) + + # Helper for popen() -- a proxy for a file whose close waits for the process + class _wrap_close: + def __init__(self, stream, proc): + self._stream = stream + self._proc = proc + def close(self): + self._stream.close() + returncode = self._proc.wait() + if returncode == 0: + return None + if name == 'nt': + return returncode + else: + return returncode << 8 # Shift left to match old behavior + def __enter__(self): + return self + def __exit__(self, *args): + self.close() + def __getattr__(self, name): + return getattr(self._stream, name) + def __iter__(self): + return iter(self._stream) + + __all__.append("popen") + +# Supply os.fdopen() +def fdopen(fd, mode="r", buffering=-1, encoding=None, *args, **kwargs): + if not isinstance(fd, int): + raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) + import io + if "b" not in mode: + encoding = io.text_encoding(encoding) + return io.open(fd, mode, buffering, encoding, *args, **kwargs) + + +# For testing purposes, make sure the function is available when the C +# implementation exists. +def _fspath(path): + """Return the path representation of a path-like object. + + If str or bytes is passed in, it is returned unchanged. Otherwise the + os.PathLike interface is used to get the path representation. If the + path representation is not str or bytes, TypeError is raised. If the + provided path is not str, bytes, or os.PathLike, TypeError is raised. + """ + if isinstance(path, (str, bytes)): + return path + + # Work from the object's type to match method resolution of other magic + # methods. + path_type = type(path) + try: + path_repr = path_type.__fspath__(path) + except AttributeError: + if hasattr(path_type, '__fspath__'): + raise + else: + raise TypeError("expected str, bytes or os.PathLike object, " + "not " + path_type.__name__) + if isinstance(path_repr, (str, bytes)): + return path_repr + else: + raise TypeError("expected {}.__fspath__() to return str or bytes, " + "not {}".format(path_type.__name__, + type(path_repr).__name__)) + +# If there is no C implementation, make the pure Python version the +# implementation as transparently as possible. 
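+# Illustrative behaviour of the pure Python fallback (editor's sketch):
+#   _fspath('a/b') -> 'a/b' (str and bytes pass through unchanged)
+#   _fspath(pathlib.PurePath('a/b')) -> 'a/b' (via PurePath.__fspath__())
+#   _fspath(3) -> TypeError (int is not path-like)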
+if not _exists('fspath'):
+    fspath = _fspath
+    fspath.__name__ = "fspath"
+
+
+class PathLike(abc.ABC):
+
+    """Abstract base class for implementing the file system path protocol."""
+
+    @abc.abstractmethod
+    def __fspath__(self):
+        """Return the file system path representation of the object."""
+        raise NotImplementedError
+
+    @classmethod
+    def __subclasshook__(cls, subclass):
+        if cls is PathLike:
+            return _check_methods(subclass, '__fspath__')
+        return NotImplemented
+
+    __class_getitem__ = classmethod(GenericAlias)
+
+
+if name == 'nt':
+    class _AddedDllDirectory:
+        def __init__(self, path, cookie, remove_dll_directory):
+            self.path = path
+            self._cookie = cookie
+            self._remove_dll_directory = remove_dll_directory
+        def close(self):
+            self._remove_dll_directory(self._cookie)
+            self.path = None
+        def __enter__(self):
+            return self
+        def __exit__(self, *args):
+            self.close()
+        def __repr__(self):
+            if self.path:
+                return "<AddedDllDirectory({!r})>".format(self.path)
+            return "<AddedDllDirectory()>"
+
+    def add_dll_directory(path):
+        """Add a path to the DLL search path.
+
+        This search path is used when resolving dependencies for imported
+        extension modules (the module itself is resolved through sys.path),
+        and also by ctypes.
+
+        Remove the directory by calling close() on the returned object or
+        using it in a with statement.
+        """
+        import nt
+        cookie = nt._add_dll_directory(path)
+        return _AddedDllDirectory(
+            path,
+            cookie,
+            nt._remove_dll_directory
+        )
diff --git a/pllava/lib/python3.10/pathlib.py b/pllava/lib/python3.10/pathlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..97b23ca45a3a19ccb7824c2b7232730f4860e4e8
--- /dev/null
+++ b/pllava/lib/python3.10/pathlib.py
@@ -0,0 +1,1461 @@
+import fnmatch
+import functools
+import io
+import ntpath
+import os
+import posixpath
+import re
+import sys
+import warnings
+from _collections_abc import Sequence
+from errno import EINVAL, ENOENT, ENOTDIR, EBADF, ELOOP
+from operator import attrgetter
+from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
+from urllib.parse import quote_from_bytes as urlquote_from_bytes
+
+
+__all__ = [
+    "PurePath", "PurePosixPath", "PureWindowsPath",
+    "Path", "PosixPath", "WindowsPath",
+    ]
+
+#
+# Internals
+#
+
+_WINERROR_NOT_READY = 21  # drive exists but is not accessible
+_WINERROR_INVALID_NAME = 123  # fix for bpo-35306
+_WINERROR_CANT_RESOLVE_FILENAME = 1921  # broken symlink pointing to itself
+
+# EBADF - guard against macOS `stat` throwing EBADF
+_IGNORED_ERROS = (ENOENT, ENOTDIR, EBADF, ELOOP)
+
+_IGNORED_WINERRORS = (
+    _WINERROR_NOT_READY,
+    _WINERROR_INVALID_NAME,
+    _WINERROR_CANT_RESOLVE_FILENAME)
+
+def _ignore_error(exception):
+    return (getattr(exception, 'errno', None) in _IGNORED_ERROS or
+            getattr(exception, 'winerror', None) in _IGNORED_WINERRORS)
+
+
+def _is_wildcard_pattern(pat):
+    # Whether this pattern needs actual matching using fnmatch, or can
+    # be looked up directly as a file.
+    return "*" in pat or "?"
in pat or "[" in pat + + +class _Flavour(object): + """A flavour implements a particular (platform-specific) set of path + semantics.""" + + def __init__(self): + self.join = self.sep.join + + def parse_parts(self, parts): + parsed = [] + sep = self.sep + altsep = self.altsep + drv = root = '' + it = reversed(parts) + for part in it: + if not part: + continue + if altsep: + part = part.replace(altsep, sep) + drv, root, rel = self.splitroot(part) + if sep in rel: + for x in reversed(rel.split(sep)): + if x and x != '.': + parsed.append(sys.intern(x)) + else: + if rel and rel != '.': + parsed.append(sys.intern(rel)) + if drv or root: + if not drv: + # If no drive is present, try to find one in the previous + # parts. This makes the result of parsing e.g. + # ("C:", "/", "a") reasonably intuitive. + for part in it: + if not part: + continue + if altsep: + part = part.replace(altsep, sep) + drv = self.splitroot(part)[0] + if drv: + break + break + if drv or root: + parsed.append(drv + root) + parsed.reverse() + return drv, root, parsed + + def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2): + """ + Join the two paths represented by the respective + (drive, root, parts) tuples. Return a new (drive, root, parts) tuple. + """ + if root2: + if not drv2 and drv: + return drv, root2, [drv + root2] + parts2[1:] + elif drv2: + if drv2 == drv or self.casefold(drv2) == self.casefold(drv): + # Same drive => second path is relative to the first + return drv, root, parts + parts2[1:] + else: + # Second path is non-anchored (common case) + return drv, root, parts + parts2 + return drv2, root2, parts2 + + +class _WindowsFlavour(_Flavour): + # Reference for Windows paths can be found at + # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx + + sep = '\\' + altsep = '/' + has_drv = True + pathmod = ntpath + + is_supported = (os.name == 'nt') + + drive_letters = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') + ext_namespace_prefix = '\\\\?\\' + + reserved_names = ( + {'CON', 'PRN', 'AUX', 'NUL', 'CONIN$', 'CONOUT$'} | + {'COM%s' % c for c in '123456789\xb9\xb2\xb3'} | + {'LPT%s' % c for c in '123456789\xb9\xb2\xb3'} + ) + + # Interesting findings about extended paths: + # * '\\?\c:\a' is an extended path, which bypasses normal Windows API + # path processing. Thus relative paths are not resolved and slash is not + # translated to backslash. It has the native NT path limit of 32767 + # characters, but a bit less after resolving device symbolic links, + # such as '\??\C:' => '\Device\HarddiskVolume2'. + # * '\\?\c:/a' looks for a device named 'C:/a' because slash is a + # regular name character in the object namespace. + # * '\\?\c:\foo/bar' is invalid because '/' is illegal in NT filesystems. + # The only path separator at the filesystem level is backslash. + # * '//?/c:\a' and '//?/c:/a' are effectively equivalent to '\\.\c:\a' and + # thus limited to MAX_PATH. + # * Prior to Windows 8, ANSI API bytes paths are limited to MAX_PATH, + # even with the '\\?\' prefix. + + def splitroot(self, part, sep=sep): + first = part[0:1] + second = part[1:2] + if (second == sep and first == sep): + # XXX extended paths should also disable the collapsing of "." + # components (according to MSDN docs). + prefix, part = self._split_extended_path(part) + first = part[0:1] + second = part[1:2] + else: + prefix = '' + third = part[2:3] + if (second == sep and first == sep and third != sep): + # is a UNC path: + # vvvvvvvvvvvvvvvvvvvvv root + # \\machine\mountpoint\directory\etc\... 
+ # directory ^^^^^^^^^^^^^^ + index = part.find(sep, 2) + if index != -1: + index2 = part.find(sep, index + 1) + # a UNC path can't have two slashes in a row + # (after the initial two) + if index2 != index + 1: + if index2 == -1: + index2 = len(part) + if prefix: + return prefix + part[1:index2], sep, part[index2+1:] + else: + return part[:index2], sep, part[index2+1:] + drv = root = '' + if second == ':' and first in self.drive_letters: + drv = part[:2] + part = part[2:] + first = third + if first == sep: + root = first + part = part.lstrip(sep) + return prefix + drv, root, part + + def casefold(self, s): + return s.lower() + + def casefold_parts(self, parts): + return [p.lower() for p in parts] + + def compile_pattern(self, pattern): + return re.compile(fnmatch.translate(pattern), re.IGNORECASE).fullmatch + + def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix): + prefix = '' + if s.startswith(ext_prefix): + prefix = s[:4] + s = s[4:] + if s.startswith('UNC\\'): + prefix += s[:3] + s = '\\' + s[3:] + return prefix, s + + def is_reserved(self, parts): + # NOTE: the rules for reserved names seem somewhat complicated + # (e.g. r"..\NUL" is reserved but not r"foo\NUL" if "foo" does not + # exist). We err on the side of caution and return True for paths + # which are not considered reserved by Windows. + if not parts: + return False + if parts[0].startswith('\\\\'): + # UNC paths are never reserved + return False + name = parts[-1].partition('.')[0].partition(':')[0].rstrip(' ') + return name.upper() in self.reserved_names + + def make_uri(self, path): + # Under Windows, file URIs use the UTF-8 encoding. + drive = path.drive + if len(drive) == 2 and drive[1] == ':': + # It's a path on a local drive => 'file:///c:/a/b' + rest = path.as_posix()[2:].lstrip('/') + return 'file:///%s/%s' % ( + drive, urlquote_from_bytes(rest.encode('utf-8'))) + else: + # It's a path on a network drive => 'file://host/share/a/b' + return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8')) + + +class _PosixFlavour(_Flavour): + sep = '/' + altsep = '' + has_drv = False + pathmod = posixpath + + is_supported = (os.name != 'nt') + + def splitroot(self, part, sep=sep): + if part and part[0] == sep: + stripped_part = part.lstrip(sep) + # According to POSIX path resolution: + # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11 + # "A pathname that begins with two successive slashes may be + # interpreted in an implementation-defined manner, although more + # than two leading slashes shall be treated as a single slash". + if len(part) - len(stripped_part) == 2: + return '', sep * 2, stripped_part + else: + return '', sep, stripped_part + else: + return '', '', part + + def casefold(self, s): + return s + + def casefold_parts(self, parts): + return parts + + def compile_pattern(self, pattern): + return re.compile(fnmatch.translate(pattern)).fullmatch + + def is_reserved(self, parts): + return False + + def make_uri(self, path): + # We represent the path using the local filesystem encoding, + # for portability to other applications. 
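+        # Illustrative example (editor's sketch): PosixPath('/a b').as_uri()
+        # yields 'file:///a%20b' once the encoded bytes are percent-quoted
+        # below.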
+ bpath = bytes(path) + return 'file://' + urlquote_from_bytes(bpath) + + +_windows_flavour = _WindowsFlavour() +_posix_flavour = _PosixFlavour() + + +class _Accessor: + """An accessor implements a particular (system-specific or not) way of + accessing paths on the filesystem.""" + + +class _NormalAccessor(_Accessor): + + stat = os.stat + + open = io.open + + listdir = os.listdir + + scandir = os.scandir + + chmod = os.chmod + + mkdir = os.mkdir + + unlink = os.unlink + + if hasattr(os, "link"): + link = os.link + else: + def link(self, src, dst): + raise NotImplementedError("os.link() not available on this system") + + rmdir = os.rmdir + + rename = os.rename + + replace = os.replace + + if hasattr(os, "symlink"): + symlink = os.symlink + else: + def symlink(self, src, dst, target_is_directory=False): + raise NotImplementedError("os.symlink() not available on this system") + + def touch(self, path, mode=0o666, exist_ok=True): + if exist_ok: + # First try to bump modification time + # Implementation note: GNU touch uses the UTIME_NOW option of + # the utimensat() / futimens() functions. + try: + os.utime(path, None) + except OSError: + # Avoid exception chaining + pass + else: + return + flags = os.O_CREAT | os.O_WRONLY + if not exist_ok: + flags |= os.O_EXCL + fd = os.open(path, flags, mode) + os.close(fd) + + if hasattr(os, "readlink"): + readlink = os.readlink + else: + def readlink(self, path): + raise NotImplementedError("os.readlink() not available on this system") + + def owner(self, path): + try: + import pwd + return pwd.getpwuid(self.stat(path).st_uid).pw_name + except ImportError: + raise NotImplementedError("Path.owner() is unsupported on this system") + + def group(self, path): + try: + import grp + return grp.getgrgid(self.stat(path).st_gid).gr_name + except ImportError: + raise NotImplementedError("Path.group() is unsupported on this system") + + getcwd = os.getcwd + + expanduser = staticmethod(os.path.expanduser) + + realpath = staticmethod(os.path.realpath) + + +_normal_accessor = _NormalAccessor() + + +# +# Globbing helpers +# + +def _make_selector(pattern_parts, flavour): + pat = pattern_parts[0] + child_parts = pattern_parts[1:] + if pat == '**': + cls = _RecursiveWildcardSelector + elif '**' in pat: + raise ValueError("Invalid pattern: '**' can only be an entire path component") + elif _is_wildcard_pattern(pat): + cls = _WildcardSelector + else: + cls = _PreciseSelector + return cls(pat, child_parts, flavour) + +if hasattr(functools, "lru_cache"): + _make_selector = functools.lru_cache()(_make_selector) + + +class _Selector: + """A selector matches a specific glob pattern part against the children + of a given path.""" + + def __init__(self, child_parts, flavour): + self.child_parts = child_parts + if child_parts: + self.successor = _make_selector(child_parts, flavour) + self.dironly = True + else: + self.successor = _TerminatingSelector() + self.dironly = False + + def select_from(self, parent_path): + """Iterate over all child paths of `parent_path` matched by this + selector. 
This can contain parent_path itself.""" + path_cls = type(parent_path) + is_dir = path_cls.is_dir + exists = path_cls.exists + scandir = parent_path._accessor.scandir + if not is_dir(parent_path): + return iter([]) + return self._select_from(parent_path, is_dir, exists, scandir) + + +class _TerminatingSelector: + + def _select_from(self, parent_path, is_dir, exists, scandir): + yield parent_path + + +class _PreciseSelector(_Selector): + + def __init__(self, name, child_parts, flavour): + self.name = name + _Selector.__init__(self, child_parts, flavour) + + def _select_from(self, parent_path, is_dir, exists, scandir): + try: + path = parent_path._make_child_relpath(self.name) + if (is_dir if self.dironly else exists)(path): + for p in self.successor._select_from(path, is_dir, exists, scandir): + yield p + except PermissionError: + return + + +class _WildcardSelector(_Selector): + + def __init__(self, pat, child_parts, flavour): + self.match = flavour.compile_pattern(pat) + _Selector.__init__(self, child_parts, flavour) + + def _select_from(self, parent_path, is_dir, exists, scandir): + try: + with scandir(parent_path) as scandir_it: + entries = list(scandir_it) + for entry in entries: + if self.dironly: + try: + # "entry.is_dir()" can raise PermissionError + # in some cases (see bpo-38894), which is not + # among the errors ignored by _ignore_error() + if not entry.is_dir(): + continue + except OSError as e: + if not _ignore_error(e): + raise + continue + name = entry.name + if self.match(name): + path = parent_path._make_child_relpath(name) + for p in self.successor._select_from(path, is_dir, exists, scandir): + yield p + except PermissionError: + return + + +class _RecursiveWildcardSelector(_Selector): + + def __init__(self, pat, child_parts, flavour): + _Selector.__init__(self, child_parts, flavour) + + def _iterate_directories(self, parent_path, is_dir, scandir): + yield parent_path + try: + with scandir(parent_path) as scandir_it: + entries = list(scandir_it) + for entry in entries: + entry_is_dir = False + try: + entry_is_dir = entry.is_dir() + except OSError as e: + if not _ignore_error(e): + raise + if entry_is_dir and not entry.is_symlink(): + path = parent_path._make_child_relpath(entry.name) + for p in self._iterate_directories(path, is_dir, scandir): + yield p + except PermissionError: + return + + def _select_from(self, parent_path, is_dir, exists, scandir): + try: + yielded = set() + try: + successor_select = self.successor._select_from + for starting_point in self._iterate_directories(parent_path, is_dir, scandir): + for p in successor_select(starting_point, is_dir, exists, scandir): + if p not in yielded: + yield p + yielded.add(p) + finally: + yielded.clear() + except PermissionError: + return + + +# +# Public API +# + +class _PathParents(Sequence): + """This object provides sequence-like access to the logical ancestors + of a path. 
Don't try to construct it yourself.""" + __slots__ = ('_pathcls', '_drv', '_root', '_parts') + + def __init__(self, path): + # We don't store the instance to avoid reference cycles + self._pathcls = type(path) + self._drv = path._drv + self._root = path._root + self._parts = path._parts + + def __len__(self): + if self._drv or self._root: + return len(self._parts) - 1 + else: + return len(self._parts) + + def __getitem__(self, idx): + if isinstance(idx, slice): + return tuple(self[i] for i in range(*idx.indices(len(self)))) + + if idx >= len(self) or idx < -len(self): + raise IndexError(idx) + if idx < 0: + idx += len(self) + return self._pathcls._from_parsed_parts(self._drv, self._root, + self._parts[:-idx - 1]) + + def __repr__(self): + return "<{}.parents>".format(self._pathcls.__name__) + + +class PurePath(object): + """Base class for manipulating paths without I/O. + + PurePath represents a filesystem path and offers operations which + don't imply any actual filesystem I/O. Depending on your system, + instantiating a PurePath will return either a PurePosixPath or a + PureWindowsPath object. You can also instantiate either of these classes + directly, regardless of your system. + """ + __slots__ = ( + '_drv', '_root', '_parts', + '_str', '_hash', '_pparts', '_cached_cparts', + ) + + def __new__(cls, *args): + """Construct a PurePath from one or several strings and or existing + PurePath objects. The strings and path objects are combined so as + to yield a canonicalized path, which is incorporated into the + new PurePath object. + """ + if cls is PurePath: + cls = PureWindowsPath if os.name == 'nt' else PurePosixPath + return cls._from_parts(args) + + def __reduce__(self): + # Using the parts tuple helps share interned path parts + # when pickling related paths. + return (self.__class__, tuple(self._parts)) + + @classmethod + def _parse_args(cls, args): + # This is useful when you don't want to create an instance, just + # canonicalize some constructor arguments. + parts = [] + for a in args: + if isinstance(a, PurePath): + parts += a._parts + else: + a = os.fspath(a) + if isinstance(a, str): + # Force-cast str subclasses to str (issue #21127) + parts.append(str(a)) + else: + raise TypeError( + "argument should be a str object or an os.PathLike " + "object returning str, not %r" + % type(a)) + return cls._flavour.parse_parts(parts) + + @classmethod + def _from_parts(cls, args): + # We need to call _parse_args on the instance, so as to get the + # right flavour. + self = object.__new__(cls) + drv, root, parts = self._parse_args(args) + self._drv = drv + self._root = root + self._parts = parts + return self + + @classmethod + def _from_parsed_parts(cls, drv, root, parts): + self = object.__new__(cls) + self._drv = drv + self._root = root + self._parts = parts + return self + + @classmethod + def _format_parsed_parts(cls, drv, root, parts): + if drv or root: + return drv + root + cls._flavour.join(parts[1:]) + else: + return cls._flavour.join(parts) + + def _make_child(self, args): + drv, root, parts = self._parse_args(args) + drv, root, parts = self._flavour.join_parsed_parts( + self._drv, self._root, self._parts, drv, root, parts) + return self._from_parsed_parts(drv, root, parts) + + def __str__(self): + """Return the string representation of the path, suitable for + passing to system calls.""" + try: + return self._str + except AttributeError: + self._str = self._format_parsed_parts(self._drv, self._root, + self._parts) or '.' 
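+            # Illustrative example: an empty PurePosixPath() has no parts,
+            # so the formatted result is '' and the fallback '.' is used.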
+ return self._str + + def __fspath__(self): + return str(self) + + def as_posix(self): + """Return the string representation of the path with forward (/) + slashes.""" + f = self._flavour + return str(self).replace(f.sep, '/') + + def __bytes__(self): + """Return the bytes representation of the path. This is only + recommended to use under Unix.""" + return os.fsencode(self) + + def __repr__(self): + return "{}({!r})".format(self.__class__.__name__, self.as_posix()) + + def as_uri(self): + """Return the path as a 'file' URI.""" + if not self.is_absolute(): + raise ValueError("relative path can't be expressed as a file URI") + return self._flavour.make_uri(self) + + @property + def _cparts(self): + # Cached casefolded parts, for hashing and comparison + try: + return self._cached_cparts + except AttributeError: + self._cached_cparts = self._flavour.casefold_parts(self._parts) + return self._cached_cparts + + def __eq__(self, other): + if not isinstance(other, PurePath): + return NotImplemented + return self._cparts == other._cparts and self._flavour is other._flavour + + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(tuple(self._cparts)) + return self._hash + + def __lt__(self, other): + if not isinstance(other, PurePath) or self._flavour is not other._flavour: + return NotImplemented + return self._cparts < other._cparts + + def __le__(self, other): + if not isinstance(other, PurePath) or self._flavour is not other._flavour: + return NotImplemented + return self._cparts <= other._cparts + + def __gt__(self, other): + if not isinstance(other, PurePath) or self._flavour is not other._flavour: + return NotImplemented + return self._cparts > other._cparts + + def __ge__(self, other): + if not isinstance(other, PurePath) or self._flavour is not other._flavour: + return NotImplemented + return self._cparts >= other._cparts + + def __class_getitem__(cls, type): + return cls + + drive = property(attrgetter('_drv'), + doc="""The drive prefix (letter or UNC path), if any.""") + + root = property(attrgetter('_root'), + doc="""The root of the path, if any.""") + + @property + def anchor(self): + """The concatenation of the drive and root, or ''.""" + anchor = self._drv + self._root + return anchor + + @property + def name(self): + """The final path component, if any.""" + parts = self._parts + if len(parts) == (1 if (self._drv or self._root) else 0): + return '' + return parts[-1] + + @property + def suffix(self): + """ + The final component's last suffix, if any. + + This includes the leading period. For example: '.txt' + """ + name = self.name + i = name.rfind('.') + if 0 < i < len(name) - 1: + return name[i:] + else: + return '' + + @property + def suffixes(self): + """ + A list of the final component's suffixes, if any. + + These include the leading periods. For example: ['.tar', '.gz'] + """ + name = self.name + if name.endswith('.'): + return [] + name = name.lstrip('.') + return ['.' 
+ suffix for suffix in name.split('.')[1:]] + + @property + def stem(self): + """The final path component, minus its last suffix.""" + name = self.name + i = name.rfind('.') + if 0 < i < len(name) - 1: + return name[:i] + else: + return name + + def with_name(self, name): + """Return a new path with the file name changed.""" + if not self.name: + raise ValueError("%r has an empty name" % (self,)) + drv, root, parts = self._flavour.parse_parts((name,)) + if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep] + or drv or root or len(parts) != 1): + raise ValueError("Invalid name %r" % (name)) + return self._from_parsed_parts(self._drv, self._root, + self._parts[:-1] + [name]) + + def with_stem(self, stem): + """Return a new path with the stem changed.""" + return self.with_name(stem + self.suffix) + + def with_suffix(self, suffix): + """Return a new path with the file suffix changed. If the path + has no suffix, add given suffix. If the given suffix is an empty + string, remove the suffix from the path. + """ + f = self._flavour + if f.sep in suffix or f.altsep and f.altsep in suffix: + raise ValueError("Invalid suffix %r" % (suffix,)) + if suffix and not suffix.startswith('.') or suffix == '.': + raise ValueError("Invalid suffix %r" % (suffix)) + name = self.name + if not name: + raise ValueError("%r has an empty name" % (self,)) + old_suffix = self.suffix + if not old_suffix: + name = name + suffix + else: + name = name[:-len(old_suffix)] + suffix + return self._from_parsed_parts(self._drv, self._root, + self._parts[:-1] + [name]) + + def relative_to(self, *other): + """Return the relative path to another path identified by the passed + arguments. If the operation is not possible (because this is not + a subpath of the other path), raise ValueError. + """ + # For the purpose of this method, drive and root are considered + # separate parts, i.e.: + # Path('c:/').relative_to('c:') gives Path('/') + # Path('c:/').relative_to('/') raise ValueError + if not other: + raise TypeError("need at least one argument") + parts = self._parts + drv = self._drv + root = self._root + if root: + abs_parts = [drv, root] + parts[1:] + else: + abs_parts = parts + to_drv, to_root, to_parts = self._parse_args(other) + if to_root: + to_abs_parts = [to_drv, to_root] + to_parts[1:] + else: + to_abs_parts = to_parts + n = len(to_abs_parts) + cf = self._flavour.casefold_parts + if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts): + formatted = self._format_parsed_parts(to_drv, to_root, to_parts) + raise ValueError("{!r} is not in the subpath of {!r}" + " OR one path is relative and the other is absolute." + .format(str(self), str(formatted))) + return self._from_parsed_parts('', root if n == 1 else '', + abs_parts[n:]) + + def is_relative_to(self, *other): + """Return True if the path is relative to another path or False. + """ + try: + self.relative_to(*other) + return True + except ValueError: + return False + + @property + def parts(self): + """An object providing sequence-like access to the + components in the filesystem path.""" + # We cache the tuple to avoid building a new one each time .parts + # is accessed. XXX is this necessary? 
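+        # Illustrative example (editor's sketch):
+        #   PureWindowsPath('c:/Program Files/x').parts
+        #   -> ('c:\\', 'Program Files', 'x')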
+ try: + return self._pparts + except AttributeError: + self._pparts = tuple(self._parts) + return self._pparts + + def joinpath(self, *args): + """Combine this path with one or several arguments, and return a + new path representing either a subpath (if all arguments are relative + paths) or a totally different path (if one of the arguments is + anchored). + """ + return self._make_child(args) + + def __truediv__(self, key): + try: + return self._make_child((key,)) + except TypeError: + return NotImplemented + + def __rtruediv__(self, key): + try: + return self._from_parts([key] + self._parts) + except TypeError: + return NotImplemented + + @property + def parent(self): + """The logical parent of the path.""" + drv = self._drv + root = self._root + parts = self._parts + if len(parts) == 1 and (drv or root): + return self + return self._from_parsed_parts(drv, root, parts[:-1]) + + @property + def parents(self): + """A sequence of this path's logical parents.""" + return _PathParents(self) + + def is_absolute(self): + """True if the path is absolute (has both a root and, if applicable, + a drive).""" + if not self._root: + return False + return not self._flavour.has_drv or bool(self._drv) + + def is_reserved(self): + """Return True if the path contains one of the special names reserved + by the system, if any.""" + return self._flavour.is_reserved(self._parts) + + def match(self, path_pattern): + """ + Return True if this path matches the given pattern. + """ + cf = self._flavour.casefold + path_pattern = cf(path_pattern) + drv, root, pat_parts = self._flavour.parse_parts((path_pattern,)) + if not pat_parts: + raise ValueError("empty pattern") + if drv and drv != cf(self._drv): + return False + if root and root != cf(self._root): + return False + parts = self._cparts + if drv or root: + if len(pat_parts) != len(parts): + return False + pat_parts = pat_parts[1:] + elif len(pat_parts) > len(parts): + return False + for part, pat in zip(reversed(parts), reversed(pat_parts)): + if not fnmatch.fnmatchcase(part, pat): + return False + return True + +# Can't subclass os.PathLike from PurePath and keep the constructor +# optimizations in PurePath._parse_args(). +os.PathLike.register(PurePath) + + +class PurePosixPath(PurePath): + """PurePath subclass for non-Windows systems. + + On a POSIX system, instantiating a PurePath should return this object. + However, you can also instantiate it directly on any system. + """ + _flavour = _posix_flavour + __slots__ = () + + +class PureWindowsPath(PurePath): + """PurePath subclass for Windows systems. + + On a Windows system, instantiating a PurePath should return this object. + However, you can also instantiate it directly on any system. + """ + _flavour = _windows_flavour + __slots__ = () + + +# Filesystem-accessing classes + + +class Path(PurePath): + """PurePath subclass that can make system calls. + + Path represents a filesystem path but unlike PurePath, also offers + methods to do system calls on path objects. Depending on your system, + instantiating a Path will return either a PosixPath or a WindowsPath + object. You can also instantiate a PosixPath or WindowsPath directly, + but cannot instantiate a WindowsPath on a POSIX system or vice versa. 
+ """ + _accessor = _normal_accessor + __slots__ = () + + def __new__(cls, *args, **kwargs): + if cls is Path: + cls = WindowsPath if os.name == 'nt' else PosixPath + self = cls._from_parts(args) + if not self._flavour.is_supported: + raise NotImplementedError("cannot instantiate %r on your system" + % (cls.__name__,)) + return self + + def _make_child_relpath(self, part): + # This is an optimization used for dir walking. `part` must be + # a single part relative to this path. + parts = self._parts + [part] + return self._from_parsed_parts(self._drv, self._root, parts) + + def __enter__(self): + return self + + def __exit__(self, t, v, tb): + # https://bugs.python.org/issue39682 + # In previous versions of pathlib, this method marked this path as + # closed; subsequent attempts to perform I/O would raise an IOError. + # This functionality was never documented, and had the effect of + # making Path objects mutable, contrary to PEP 428. In Python 3.9 the + # _closed attribute was removed, and this method made a no-op. + # This method and __enter__()/__exit__() should be deprecated and + # removed in the future. + pass + + # Public API + + @classmethod + def cwd(cls): + """Return a new path pointing to the current working directory + (as returned by os.getcwd()). + """ + return cls(cls._accessor.getcwd()) + + @classmethod + def home(cls): + """Return a new path pointing to the user's home directory (as + returned by os.path.expanduser('~')). + """ + return cls("~").expanduser() + + def samefile(self, other_path): + """Return whether other_path is the same or not as this file + (as returned by os.path.samefile()). + """ + st = self.stat() + try: + other_st = other_path.stat() + except AttributeError: + other_st = self._accessor.stat(other_path) + return os.path.samestat(st, other_st) + + def iterdir(self): + """Iterate over the files in this directory. Does not yield any + result for the special paths '.' and '..'. + """ + for name in self._accessor.listdir(self): + if name in {'.', '..'}: + # Yielding a path object for these makes little sense + continue + yield self._make_child_relpath(name) + + def glob(self, pattern): + """Iterate over this subtree and yield all existing files (of any + kind, including directories) matching the given relative pattern. + """ + sys.audit("pathlib.Path.glob", self, pattern) + if not pattern: + raise ValueError("Unacceptable pattern: {!r}".format(pattern)) + drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) + if drv or root: + raise NotImplementedError("Non-relative patterns are unsupported") + selector = _make_selector(tuple(pattern_parts), self._flavour) + for p in selector.select_from(self): + yield p + + def rglob(self, pattern): + """Recursively yield all existing files (of any kind, including + directories) matching the given relative pattern, anywhere in + this subtree. + """ + sys.audit("pathlib.Path.rglob", self, pattern) + drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) + if drv or root: + raise NotImplementedError("Non-relative patterns are unsupported") + selector = _make_selector(("**",) + tuple(pattern_parts), self._flavour) + for p in selector.select_from(self): + yield p + + def absolute(self): + """Return an absolute version of this path. This function works + even if the path doesn't point to anything. + + No normalization is done, i.e. all '.' and '..' will be kept along. + Use resolve() to get the canonical path to a file. + """ + # XXX untested yet! 
+ if self.is_absolute(): + return self + # FIXME this must defer to the specific flavour (and, under Windows, + # use nt._getfullpathname()) + return self._from_parts([self._accessor.getcwd()] + self._parts) + + def resolve(self, strict=False): + """ + Make the path absolute, resolving all symlinks on the way and also + normalizing it (for example turning slashes into backslashes under + Windows). + """ + + def check_eloop(e): + winerror = getattr(e, 'winerror', 0) + if e.errno == ELOOP or winerror == _WINERROR_CANT_RESOLVE_FILENAME: + raise RuntimeError("Symlink loop from %r" % e.filename) + + try: + s = self._accessor.realpath(self, strict=strict) + except OSError as e: + check_eloop(e) + raise + p = self._from_parts((s,)) + + # In non-strict mode, realpath() doesn't raise on symlink loops. + # Ensure we get an exception by calling stat() + if not strict: + try: + p.stat() + except OSError as e: + check_eloop(e) + return p + + def stat(self, *, follow_symlinks=True): + """ + Return the result of the stat() system call on this path, like + os.stat() does. + """ + return self._accessor.stat(self, follow_symlinks=follow_symlinks) + + def owner(self): + """ + Return the login name of the file owner. + """ + return self._accessor.owner(self) + + def group(self): + """ + Return the group name of the file gid. + """ + return self._accessor.group(self) + + def open(self, mode='r', buffering=-1, encoding=None, + errors=None, newline=None): + """ + Open the file pointed by this path and return a file object, as + the built-in open() function does. + """ + if "b" not in mode: + encoding = io.text_encoding(encoding) + return self._accessor.open(self, mode, buffering, encoding, errors, + newline) + + def read_bytes(self): + """ + Open the file in bytes mode, read it, and close the file. + """ + with self.open(mode='rb') as f: + return f.read() + + def read_text(self, encoding=None, errors=None): + """ + Open the file in text mode, read it, and close the file. + """ + encoding = io.text_encoding(encoding) + with self.open(mode='r', encoding=encoding, errors=errors) as f: + return f.read() + + def write_bytes(self, data): + """ + Open the file in bytes mode, write to it, and close the file. + """ + # type-check for the buffer interface before truncating the file + view = memoryview(data) + with self.open(mode='wb') as f: + return f.write(view) + + def write_text(self, data, encoding=None, errors=None, newline=None): + """ + Open the file in text mode, write to it, and close the file. + """ + if not isinstance(data, str): + raise TypeError('data must be str, not %s' % + data.__class__.__name__) + encoding = io.text_encoding(encoding) + with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f: + return f.write(data) + + def readlink(self): + """ + Return the path to which the symbolic link points. + """ + path = self._accessor.readlink(self) + return self._from_parts((path,)) + + def touch(self, mode=0o666, exist_ok=True): + """ + Create this file with the given access mode, if it doesn't exist. + """ + self._accessor.touch(self, mode, exist_ok) + + def mkdir(self, mode=0o777, parents=False, exist_ok=False): + """ + Create a new directory at this given path. 
+ """ + try: + self._accessor.mkdir(self, mode) + except FileNotFoundError: + if not parents or self.parent == self: + raise + self.parent.mkdir(parents=True, exist_ok=True) + self.mkdir(mode, parents=False, exist_ok=exist_ok) + except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS + if not exist_ok or not self.is_dir(): + raise + + def chmod(self, mode, *, follow_symlinks=True): + """ + Change the permissions of the path, like os.chmod(). + """ + self._accessor.chmod(self, mode, follow_symlinks=follow_symlinks) + + def lchmod(self, mode): + """ + Like chmod(), except if the path points to a symlink, the symlink's + permissions are changed, rather than its target's. + """ + self.chmod(mode, follow_symlinks=False) + + def unlink(self, missing_ok=False): + """ + Remove this file or link. + If the path is a directory, use rmdir() instead. + """ + try: + self._accessor.unlink(self) + except FileNotFoundError: + if not missing_ok: + raise + + def rmdir(self): + """ + Remove this directory. The directory must be empty. + """ + self._accessor.rmdir(self) + + def lstat(self): + """ + Like stat(), except if the path points to a symlink, the symlink's + status information is returned, rather than its target's. + """ + return self.stat(follow_symlinks=False) + + def rename(self, target): + """ + Rename this path to the target path. + + The target path may be absolute or relative. Relative paths are + interpreted relative to the current working directory, *not* the + directory of the Path object. + + Returns the new Path instance pointing to the target path. + """ + self._accessor.rename(self, target) + return self.__class__(target) + + def replace(self, target): + """ + Rename this path to the target path, overwriting if that path exists. + + The target path may be absolute or relative. Relative paths are + interpreted relative to the current working directory, *not* the + directory of the Path object. + + Returns the new Path instance pointing to the target path. + """ + self._accessor.replace(self, target) + return self.__class__(target) + + def symlink_to(self, target, target_is_directory=False): + """ + Make this path a symlink pointing to the target path. + Note the order of arguments (link, target) is the reverse of os.symlink. + """ + self._accessor.symlink(target, self, target_is_directory) + + def hardlink_to(self, target): + """ + Make this path a hard link pointing to the same file as *target*. + + Note the order of arguments (self, target) is the reverse of os.link's. + """ + self._accessor.link(target, self) + + def link_to(self, target): + """ + Make the target path a hard link pointing to this path. + + Note this function does not make this path a hard link to *target*, + despite the implication of the function and argument names. The order + of arguments (target, link) is the reverse of Path.symlink_to, but + matches that of os.link. + + Deprecated since Python 3.10 and scheduled for removal in Python 3.12. + Use `hardlink_to()` instead. + """ + warnings.warn("pathlib.Path.link_to() is deprecated and is scheduled " + "for removal in Python 3.12. " + "Use pathlib.Path.hardlink_to() instead.", + DeprecationWarning, stacklevel=2) + self._accessor.link(self, target) + + # Convenience functions for querying the stat results + + def exists(self): + """ + Whether this path exists. 
+ """ + try: + self.stat() + except OSError as e: + if not _ignore_error(e): + raise + return False + except ValueError: + # Non-encodable path + return False + return True + + def is_dir(self): + """ + Whether this path is a directory. + """ + try: + return S_ISDIR(self.stat().st_mode) + except OSError as e: + if not _ignore_error(e): + raise + # Path doesn't exist or is a broken symlink + # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) + return False + except ValueError: + # Non-encodable path + return False + + def is_file(self): + """ + Whether this path is a regular file (also True for symlinks pointing + to regular files). + """ + try: + return S_ISREG(self.stat().st_mode) + except OSError as e: + if not _ignore_error(e): + raise + # Path doesn't exist or is a broken symlink + # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) + return False + except ValueError: + # Non-encodable path + return False + + def is_mount(self): + """ + Check if this path is a POSIX mount point + """ + # Need to exist and be a dir + if not self.exists() or not self.is_dir(): + return False + + try: + parent_dev = self.parent.stat().st_dev + except OSError: + return False + + dev = self.stat().st_dev + if dev != parent_dev: + return True + ino = self.stat().st_ino + parent_ino = self.parent.stat().st_ino + return ino == parent_ino + + def is_symlink(self): + """ + Whether this path is a symbolic link. + """ + try: + return S_ISLNK(self.lstat().st_mode) + except OSError as e: + if not _ignore_error(e): + raise + # Path doesn't exist + return False + except ValueError: + # Non-encodable path + return False + + def is_block_device(self): + """ + Whether this path is a block device. + """ + try: + return S_ISBLK(self.stat().st_mode) + except OSError as e: + if not _ignore_error(e): + raise + # Path doesn't exist or is a broken symlink + # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) + return False + except ValueError: + # Non-encodable path + return False + + def is_char_device(self): + """ + Whether this path is a character device. + """ + try: + return S_ISCHR(self.stat().st_mode) + except OSError as e: + if not _ignore_error(e): + raise + # Path doesn't exist or is a broken symlink + # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) + return False + except ValueError: + # Non-encodable path + return False + + def is_fifo(self): + """ + Whether this path is a FIFO. + """ + try: + return S_ISFIFO(self.stat().st_mode) + except OSError as e: + if not _ignore_error(e): + raise + # Path doesn't exist or is a broken symlink + # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) + return False + except ValueError: + # Non-encodable path + return False + + def is_socket(self): + """ + Whether this path is a socket. 
+ """ + try: + return S_ISSOCK(self.stat().st_mode) + except OSError as e: + if not _ignore_error(e): + raise + # Path doesn't exist or is a broken symlink + # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) + return False + except ValueError: + # Non-encodable path + return False + + def expanduser(self): + """ Return a new path with expanded ~ and ~user constructs + (as returned by os.path.expanduser) + """ + if (not (self._drv or self._root) and + self._parts and self._parts[0][:1] == '~'): + homedir = self._accessor.expanduser(self._parts[0]) + if homedir[:1] == "~": + raise RuntimeError("Could not determine home directory.") + return self._from_parts([homedir] + self._parts[1:]) + + return self + + +class PosixPath(Path, PurePosixPath): + """Path subclass for non-Windows systems. + + On a POSIX system, instantiating a Path should return this object. + """ + __slots__ = () + +class WindowsPath(Path, PureWindowsPath): + """Path subclass for Windows systems. + + On a Windows system, instantiating a Path should return this object. + """ + __slots__ = () + + def is_mount(self): + raise NotImplementedError("Path.is_mount() is unsupported on this system") diff --git a/pllava/lib/python3.10/pickle.py b/pllava/lib/python3.10/pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..f027e0432045b762f9661a90d380ebb9f8c1d8d8 --- /dev/null +++ b/pllava/lib/python3.10/pickle.py @@ -0,0 +1,1820 @@ +"""Create portable serialized representations of Python objects. + +See module copyreg for a mechanism for registering custom picklers. +See module pickletools source for extensive comments. + +Classes: + + Pickler + Unpickler + +Functions: + + dump(object, file) + dumps(object) -> string + load(file) -> object + loads(bytes) -> object + +Misc variables: + + __version__ + format_version + compatible_formats + +""" + +from types import FunctionType +from copyreg import dispatch_table +from copyreg import _extension_registry, _inverted_registry, _extension_cache +from itertools import islice +from functools import partial +import sys +from sys import maxsize +from struct import pack, unpack +import re +import io +import codecs +import _compat_pickle + +__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler", + "Unpickler", "dump", "dumps", "load", "loads"] + +try: + from _pickle import PickleBuffer + __all__.append("PickleBuffer") + _HAVE_PICKLE_BUFFER = True +except ImportError: + _HAVE_PICKLE_BUFFER = False + + +# Shortcut for use in isinstance testing +bytes_types = (bytes, bytearray) + +# These are purely informational; no code uses these. +format_version = "4.0" # File format version we write +compatible_formats = ["1.0", # Original protocol 0 + "1.1", # Protocol 0 with INST added + "1.2", # Original protocol 1 + "1.3", # Protocol 1 with BINFLOAT added + "2.0", # Protocol 2 + "3.0", # Protocol 3 + "4.0", # Protocol 4 + "5.0", # Protocol 5 + ] # Old format versions we can read + +# This is the highest protocol number we know how to read. +HIGHEST_PROTOCOL = 5 + +# The protocol we write by default. May be less than HIGHEST_PROTOCOL. +# Only bump this if the oldest still supported version of Python already +# includes it. +DEFAULT_PROTOCOL = 4 + +class PickleError(Exception): + """A common base class for the other pickling exceptions.""" + pass + +class PicklingError(PickleError): + """This exception is raised when an unpicklable object is passed to the + dump() method. 
+ + """ + pass + +class UnpicklingError(PickleError): + """This exception is raised when there is a problem unpickling an object, + such as a security violation. + + Note that other exceptions may also be raised during unpickling, including + (but not necessarily limited to) AttributeError, EOFError, ImportError, + and IndexError. + + """ + pass + +# An instance of _Stop is raised by Unpickler.load_stop() in response to +# the STOP opcode, passing the object that is the result of unpickling. +class _Stop(Exception): + def __init__(self, value): + self.value = value + +# Jython has PyStringMap; it's a dict subclass with string keys +try: + from org.python.core import PyStringMap +except ImportError: + PyStringMap = None + +# Pickle opcodes. See pickletools.py for extensive docs. The listing +# here is in kind-of alphabetical order of 1-character pickle code. +# pickletools groups them by purpose. + +MARK = b'(' # push special markobject on stack +STOP = b'.' # every pickle ends with STOP +POP = b'0' # discard topmost stack item +POP_MARK = b'1' # discard stack top through topmost markobject +DUP = b'2' # duplicate top stack item +FLOAT = b'F' # push float object; decimal string argument +INT = b'I' # push integer or bool; decimal string argument +BININT = b'J' # push four-byte signed int +BININT1 = b'K' # push 1-byte unsigned int +LONG = b'L' # push long; decimal string argument +BININT2 = b'M' # push 2-byte unsigned int +NONE = b'N' # push None +PERSID = b'P' # push persistent object; id is taken from string arg +BINPERSID = b'Q' # " " " ; " " " " stack +REDUCE = b'R' # apply callable to argtuple, both on stack +STRING = b'S' # push string; NL-terminated string argument +BINSTRING = b'T' # push string; counted binary string argument +SHORT_BINSTRING= b'U' # " " ; " " " " < 256 bytes +UNICODE = b'V' # push Unicode string; raw-unicode-escaped'd argument +BINUNICODE = b'X' # " " " ; counted UTF-8 string argument +APPEND = b'a' # append stack top to list below it +BUILD = b'b' # call __setstate__ or __dict__.update() +GLOBAL = b'c' # push self.find_class(modname, name); 2 string args +DICT = b'd' # build a dict from stack items +EMPTY_DICT = b'}' # push empty dict +APPENDS = b'e' # extend list on stack by topmost stack slice +GET = b'g' # push item from memo on stack; index is string arg +BINGET = b'h' # " " " " " " ; " " 1-byte arg +INST = b'i' # build & push class instance +LONG_BINGET = b'j' # push item from memo on stack; index is 4-byte arg +LIST = b'l' # build list from topmost stack items +EMPTY_LIST = b']' # push empty list +OBJ = b'o' # build & push class instance +PUT = b'p' # store stack top in memo; index is string arg +BINPUT = b'q' # " " " " " ; " " 1-byte arg +LONG_BINPUT = b'r' # " " " " " ; " " 4-byte arg +SETITEM = b's' # add key+value pair to dict +TUPLE = b't' # build tuple from topmost stack items +EMPTY_TUPLE = b')' # push empty tuple +SETITEMS = b'u' # modify dict by adding topmost key+value pairs +BINFLOAT = b'G' # push float; arg is 8-byte float encoding + +TRUE = b'I01\n' # not an opcode; see INT docs in pickletools.py +FALSE = b'I00\n' # not an opcode; see INT docs in pickletools.py + +# Protocol 2 + +PROTO = b'\x80' # identify pickle protocol +NEWOBJ = b'\x81' # build object by applying cls.__new__ to argtuple +EXT1 = b'\x82' # push object from extension registry; 1-byte index +EXT2 = b'\x83' # ditto, but 2-byte index +EXT4 = b'\x84' # ditto, but 4-byte index +TUPLE1 = b'\x85' # build 1-tuple from stack top +TUPLE2 = b'\x86' # build 2-tuple from two topmost stack 
items
+TUPLE3         = b'\x87'  # build 3-tuple from three topmost stack items
+NEWTRUE        = b'\x88'  # push True
+NEWFALSE       = b'\x89'  # push False
+LONG1          = b'\x8a'  # push long from < 256 bytes
+LONG4          = b'\x8b'  # push really big long
+
+_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
+
+# Protocol 3 (Python 3.x)
+
+BINBYTES       = b'B'   # push bytes; counted binary string argument
+SHORT_BINBYTES = b'C'   #  "     "   ;    "      "       "      " < 256 bytes
+
+# Protocol 4
+
+SHORT_BINUNICODE = b'\x8c'  # push short string; UTF-8 length < 256 bytes
+BINUNICODE8      = b'\x8d'  # push very long string
+BINBYTES8        = b'\x8e'  # push very long bytes string
+EMPTY_SET        = b'\x8f'  # push empty set on the stack
+ADDITEMS         = b'\x90'  # modify set by adding topmost stack items
+FROZENSET        = b'\x91'  # build frozenset from topmost stack items
+NEWOBJ_EX        = b'\x92'  # like NEWOBJ but work with keyword only arguments
+STACK_GLOBAL     = b'\x93'  # same as GLOBAL but using names on the stacks
+MEMOIZE          = b'\x94'  # store top of the stack in memo
+FRAME            = b'\x95'  # indicate the beginning of a new frame
+
+# Protocol 5
+
+BYTEARRAY8       = b'\x96'  # push bytearray
+NEXT_BUFFER      = b'\x97'  # push next out-of-band buffer
+READONLY_BUFFER  = b'\x98'  # make top of stack readonly
+
+__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$", x)])
+
+
+class _Framer:
+
+    _FRAME_SIZE_MIN = 4
+    _FRAME_SIZE_TARGET = 64 * 1024
+
+    def __init__(self, file_write):
+        self.file_write = file_write
+        self.current_frame = None
+
+    def start_framing(self):
+        self.current_frame = io.BytesIO()
+
+    def end_framing(self):
+        if self.current_frame and self.current_frame.tell() > 0:
+            self.commit_frame(force=True)
+            self.current_frame = None
+
+    def commit_frame(self, force=False):
+        if self.current_frame:
+            f = self.current_frame
+            if f.tell() >= self._FRAME_SIZE_TARGET or force:
+                data = f.getbuffer()
+                write = self.file_write
+                if len(data) >= self._FRAME_SIZE_MIN:
+                    # Issue a single call to the write method of the underlying
+                    # file object for the frame opcode with the size of the
+                    # frame. The concatenation is expected to be less expensive
+                    # than issuing an additional call to write.
+                    write(FRAME + pack("<Q", len(data)))
+
+                # Issue a separate call to write to append the frame
+                # contents without concatenation to the above
+                write(data)
+
+                # Start the new frame
+                self.current_frame = io.BytesIO()
+
+    def write(self, data):
+        if self.current_frame:
+            return self.current_frame.write(data)
+        else:
+            return self.file_write(data)
+
+    def write_large_bytes(self, header, payload):
+        write = self.file_write
+        if self.current_frame:
+            # Terminate the current frame and flush it to the file.
+            self.commit_frame(force=True)
+
+        # Perform direct write of the header and payload of the large binary
+        # object. Be careful not to concatenate the header and the payload
+        # prior to calling 'write' as we do not want to allocate a large
+        # temporary bytes object.
+        # We intentionally do not insert a protocol 4 frame opcode to make
+        # it possible to optimize file.read calls in the loader.
+        write(header)
+        write(payload)
+
+
+class _Unframer:
+
+    def __init__(self, file_read, file_readline, file_tell=None):
+        self.file_read = file_read
+        self.file_readline = file_readline
+        self.current_frame = None
+
+    def readinto(self, buf):
+        if self.current_frame:
+            n = self.current_frame.readinto(buf)
+            if n == 0 and len(buf) != 0:
+                self.current_frame = None
+                n = len(buf)
+                buf[:] = self.file_read(n)
+                return n
+            if n < len(buf):
+                raise UnpicklingError(
+                    "pickle exhausted before end of frame")
+            return n
+        else:
+            n = len(buf)
+            buf[:] = self.file_read(n)
+            return n
+
+    def read(self, n):
+        if self.current_frame:
+            data = self.current_frame.read(n)
+            if not data and n != 0:
+                self.current_frame = None
+                return self.file_read(n)
+            if len(data) < n:
+                raise UnpicklingError(
+                    "pickle exhausted before end of frame")
+            return data
+        else:
+            return self.file_read(n)
+
+    def readline(self):
+        if self.current_frame:
+            data = self.current_frame.readline()
+            if not data:
+                self.current_frame = None
+                return self.file_readline()
+            if data[-1] != b'\n'[0]:
+                raise UnpicklingError(
+                    "pickle exhausted before end of frame")
+            return data
+        else:
+            return self.file_readline()
+
+    def load_frame(self, frame_size):
+        if self.current_frame and self.current_frame.read() != b'':
+            raise UnpicklingError(
+                "beginning of a new frame before end of current frame")
+        self.current_frame = io.BytesIO(self.file_read(frame_size))
+
+
+# Tools used for pickling.
+
+def _getattribute(obj, name):
+    for subpath in name.split('.'):
+        if subpath == '<locals>':
+            raise AttributeError("Can't get local attribute {!r} on {!r}"
+                                 .format(name, obj))
+        try:
+            parent = obj
+            obj = getattr(obj, subpath)
+        except AttributeError:
+            raise AttributeError("Can't get attribute {!r} on {!r}"
+                                 .format(name, obj)) from None
+    return obj, parent
+
+def whichmodule(obj, name):
+    """Find the module an object belongs to."""
+    module_name = getattr(obj, '__module__', None)
+    if module_name is not None:
+        return module_name
+    # Protect the iteration by using a list copy of sys.modules against dynamic
+    # modules that trigger imports of other modules upon calls to getattr.
+    for module_name, module in sys.modules.copy().items():
+        if (module_name == '__main__'
+            or module_name == '__mp_main__'  # bpo-42406
+            or module is None):
+            continue
+        try:
+            if _getattribute(module, name)[0] is obj:
+                return module_name
+        except AttributeError:
+            pass
+    return '__main__'
+
+def encode_long(x):
+    r"""Encode a long to a two's complement little-endian binary string.
+    Note that 0 is a special case, returning an empty string, to save a
+    byte in the LONG1 pickling context.
+ + >>> encode_long(0) + b'' + >>> encode_long(255) + b'\xff\x00' + >>> encode_long(32767) + b'\xff\x7f' + >>> encode_long(-256) + b'\x00\xff' + >>> encode_long(-32768) + b'\x00\x80' + >>> encode_long(-128) + b'\x80' + >>> encode_long(127) + b'\x7f' + >>> + """ + if x == 0: + return b'' + nbytes = (x.bit_length() >> 3) + 1 + result = x.to_bytes(nbytes, byteorder='little', signed=True) + if x < 0 and nbytes > 1: + if result[-1] == 0xff and (result[-2] & 0x80) != 0: + result = result[:-1] + return result + +def decode_long(data): + r"""Decode a long from a two's complement little-endian binary string. + + >>> decode_long(b'') + 0 + >>> decode_long(b"\xff\x00") + 255 + >>> decode_long(b"\xff\x7f") + 32767 + >>> decode_long(b"\x00\xff") + -256 + >>> decode_long(b"\x00\x80") + -32768 + >>> decode_long(b"\x80") + -128 + >>> decode_long(b"\x7f") + 127 + """ + return int.from_bytes(data, byteorder='little', signed=True) + + +# Pickling machinery + +class _Pickler: + + def __init__(self, file, protocol=None, *, fix_imports=True, + buffer_callback=None): + """This takes a binary file for writing a pickle data stream. + + The optional *protocol* argument tells the pickler to use the + given protocol; supported protocols are 0, 1, 2, 3, 4 and 5. + The default protocol is 4. It was introduced in Python 3.4, and + is incompatible with previous versions. + + Specifying a negative protocol version selects the highest + protocol version supported. The higher the protocol used, the + more recent the version of Python needed to read the pickle + produced. + + The *file* argument must have a write() method that accepts a + single bytes argument. It can thus be a file object opened for + binary writing, an io.BytesIO instance, or any other custom + object that meets this interface. + + If *fix_imports* is True and *protocol* is less than 3, pickle + will try to map the new Python 3 names to the old module names + used in Python 2, so that the pickle data stream is readable + with Python 2. + + If *buffer_callback* is None (the default), buffer views are + serialized into *file* as part of the pickle stream. + + If *buffer_callback* is not None, then it can be called any number + of times with a buffer view. If the callback returns a false value + (such as None), the given buffer is out-of-band; otherwise the + buffer is serialized in-band, i.e. inside the pickle stream. + + It is an error if *buffer_callback* is not None and *protocol* + is None or smaller than 5. + """ + if protocol is None: + protocol = DEFAULT_PROTOCOL + if protocol < 0: + protocol = HIGHEST_PROTOCOL + elif not 0 <= protocol <= HIGHEST_PROTOCOL: + raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL) + if buffer_callback is not None and protocol < 5: + raise ValueError("buffer_callback needs protocol >= 5") + self._buffer_callback = buffer_callback + try: + self._file_write = file.write + except AttributeError: + raise TypeError("file must have a 'write' attribute") + self.framer = _Framer(self._file_write) + self.write = self.framer.write + self._write_large_bytes = self.framer.write_large_bytes + self.memo = {} + self.proto = int(protocol) + self.bin = protocol >= 1 + self.fast = 0 + self.fix_imports = fix_imports and protocol < 3 + + def clear_memo(self): + """Clears the pickler's "memo". + + The memo is the data structure that remembers which objects the + pickler has already seen, so that shared or recursive objects + are pickled by reference and not by value. This method is + useful when re-using picklers. 
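+
+        For example, a Pickler writing several independent pickles to one
+        stream can call clear_memo() between dump() calls so that objects
+        from an earlier pickle are written out in full again instead of
+        being referenced through a stale memo entry.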
+        """
+        self.memo.clear()
+
+    def dump(self, obj):
+        """Write a pickled representation of obj to the open file."""
+        # Check whether Pickler was initialized correctly. This is
+        # only needed to mimic the behavior of _pickle.Pickler.dump().
+        if not hasattr(self, "_file_write"):
+            raise PicklingError("Pickler.__init__() was not called by "
+                                "%s.__init__()" % (self.__class__.__name__,))
+        if self.proto >= 2:
+            self.write(PROTO + pack("<B", self.proto))
+        if self.proto >= 4:
+            self.framer.start_framing()
+        self.save(obj)
+        self.write(STOP)
+        self.framer.end_framing()
+
+    def memoize(self, obj):
+        """Store an object in the memo."""
+
+        # The Pickler memo is a dictionary mapping object ids to 2-tuples
+        # that contain the Unpickler memo key and the object being memoized.
+        # The memo key is written to the pickle and will become
+        # the key in the Unpickler's memo.  The object is stored in the
+        # Pickler memo so that transient objects are kept alive during
+        # pickling.
+
+        # The use of the Unpickler memo length as the memo key is just a
+        # convention.  The only requirement is that the memo values be unique.
+        # But there appears no advantage to any other scheme, and this
+        # scheme allows the Unpickler memo to be implemented as a plain (but
+        # growable) array, indexed by memo key.
+        if self.fast:
+            return
+        assert id(obj) not in self.memo
+        idx = len(self.memo)
+        self.write(self.put(idx))
+        self.memo[id(obj)] = idx, obj
+
+    # Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
+    def put(self, idx):
+        if self.proto >= 4:
+            return MEMOIZE
+        elif self.bin:
+            if idx < 256:
+                return BINPUT + pack("<B", idx)
+            else:
+                return LONG_BINPUT + pack("<I", idx)
+        else:
+            return PUT + repr(idx).encode("ascii") + b'\n'
+
+    # Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
+    def get(self, i):
+        if self.bin:
+            if i < 256:
+                return BINGET + pack("<B", i)
+            else:
+                return LONG_BINGET + pack("<I", i)
+
+        return GET + repr(i).encode("ascii") + b'\n'
+
+    def save(self, obj, save_persistent_id=True):
+        self.framer.commit_frame()
+
+        # Check for persistent id (defined by a subclass)
+        if save_persistent_id:
+            pid = self.persistent_id(obj)
+            if pid is not None:
+                self.save_pers(pid)
+                return
+
+        # Check the memo
+        x = self.memo.get(id(obj))
+        if x is not None:
+            self.write(self.get(x[0]))
+            return
+
+        rv = NotImplemented
+        reduce = getattr(self, 'reducer_override', None)
+        if reduce is not None:
+            rv = reduce(obj)
+
+        if rv is NotImplemented:
+            # Check the type dispatch table
+            t = type(obj)
+            f = self.dispatch.get(t)
+            if f is not None:
+                f(self, obj)  # Call unbound method with explicit self
+                return
+
+            # Check private dispatch table if any, or else
+            # copyreg.dispatch_table
+            reduce = getattr(self, 'dispatch_table', dispatch_table).get(t)
+            if reduce is not None:
+                rv = reduce(obj)
+            else:
+                # Check for a class with a custom metaclass; treat as regular
+                # class
+                if issubclass(t, type):
+                    self.save_global(obj)
+                    return
+
+                # Check for instance __reduce_ex__ method first
+                reduce = getattr(obj, "__reduce_ex__", None)
+                if reduce is not None:
+                    rv = reduce(self.proto)
+                else:
+                    # Check for a __reduce__ method
+                    reduce = getattr(obj, "__reduce__", None)
+                    if reduce is not None:
+                        rv = reduce()
+                    else:
+                        raise PicklingError("Can't pickle %r object: %r" %
+                                            (t.__name__, obj))
+
+        # Check for string returned by reduce(), meaning "save as global"
+        if isinstance(rv, str):
+            self.save_global(obj, rv)
+            return
+
+        # Assert that reduce() returned a tuple
+        if not isinstance(rv, tuple):
+            raise PicklingError("%s must return string or tuple" % reduce)
+
+        # Assert that it returned an appropriately sized tuple
+        l = len(rv)
+        if not (2 <= l <= 6):
+            raise PicklingError("Tuple returned by %s must have "
+                                "two to six elements" % reduce)
+
+        # Save the reduce() output and finally memoize the object
+        self.save_reduce(obj=obj, *rv)
+
+    def persistent_id(self, obj):
+        # This exists so a subclass can override it
+        return None
+
+    def save_pers(self, pid):
+        # Save a persistent id reference
+        if self.bin:
+            self.save(pid, save_persistent_id=False)
+            self.write(BINPERSID)
+        else:
+            self.write(PERSID + str(pid).encode("ascii") + b'\n')
+
+    def save_reduce(self, func, args, state=None, listitems=None,
+                    dictitems=None, state_setter=None, *, obj=None):
+        # This API is called by some subclasses
+
+        if not isinstance(args, tuple):
+            raise PicklingError("args from save_reduce() must be a tuple")
+        if not callable(func):
+            raise PicklingError("func from save_reduce() must be callable")
+
+        save = self.save
+        write = self.write
+
+        func_name = getattr(func, "__name__", "")
+        if self.proto >= 2 and func_name == "__newobj_ex__":
+            cls, args, kwargs = args
+            if not hasattr(cls, "__new__"):
+                raise PicklingError("args[0] from {} args has no __new__"
+                                    .format(func_name))
+            if obj is not None and cls is not obj.__class__:
+                raise PicklingError("args[0] from {} args has the wrong class"
+                                    .format(func_name))
+            if self.proto >= 4:
+                save(cls)
+                save(args)
+                save(kwargs)
+                write(NEWOBJ_EX)
+            else:
+                func = partial(cls.__new__, cls, *args, **kwargs)
+                save(func)
+                save(())
+                write(REDUCE)
+        elif self.proto >= 2 and func_name == "__newobj__":
+            # A __reduce__ implementation can direct protocol 2 or newer to
+            # use the more efficient NEWOBJ opcode, while still
+            # allowing protocol 0 and 1 to work normally.  For this to
+            # work, the function returned by __reduce__ should be
+            # called __newobj__, and its first argument should be a
+            # class.  The implementation for __newobj__
+            # should be as follows, although pickle has no way to
+            # verify this:
+            #
+            # def __newobj__(cls, *args):
+            #     return cls.__new__(cls, *args)
+            #
+            # Protocols 0 and 1 will pickle a reference to __newobj__,
+            # while protocol 2 (and above) will pickle a reference to
+            # cls, the remaining args tuple, and the NEWOBJ code,
+            # which calls cls.__new__(cls, *args) at unpickling time
+            # (see load_newobj below).  If __reduce__ returns a
+            # three-tuple, the state from the third tuple item will be
+            # pickled regardless of the protocol, calling __setstate__
+            # at unpickling time (see load_build below).
+            #
+            # Note that no standard __newobj__ implementation exists;
+            # you have to provide your own.  This is to enforce
+            # compatibility with Python 2.2 (pickles written using
+            # protocol 0 or 1 in Python 2.3 should be unpicklable by
+            # Python 2.2).
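+            #
+            # A hypothetical illustration (not part of this module): with a
+            # module-level __newobj__ as above, a class can opt in via
+            #
+            #     class Point:
+            #         def __new__(cls, x=0.0, y=0.0):
+            #             self = super().__new__(cls)
+            #             self.x, self.y = x, y
+            #             return self
+            #         def __reduce__(self):
+            #             return (__newobj__, (Point, self.x, self.y))
+            #
+            # so protocol 2+ emits NEWOBJ for Point while protocols 0 and 1
+            # fall back to pickling __newobj__ by reference.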
+ cls = args[0] + if not hasattr(cls, "__new__"): + raise PicklingError( + "args[0] from __newobj__ args has no __new__") + if obj is not None and cls is not obj.__class__: + raise PicklingError( + "args[0] from __newobj__ args has the wrong class") + args = args[1:] + save(cls) + save(args) + write(NEWOBJ) + else: + save(func) + save(args) + write(REDUCE) + + if obj is not None: + # If the object is already in the memo, this means it is + # recursive. In this case, throw away everything we put on the + # stack, and fetch the object back from the memo. + if id(obj) in self.memo: + write(POP + self.get(self.memo[id(obj)][0])) + else: + self.memoize(obj) + + # More new special cases (that work with older protocols as + # well): when __reduce__ returns a tuple with 4 or 5 items, + # the 4th and 5th item should be iterators that provide list + # items and dict items (as (key, value) tuples), or None. + + if listitems is not None: + self._batch_appends(listitems) + + if dictitems is not None: + self._batch_setitems(dictitems) + + if state is not None: + if state_setter is None: + save(state) + write(BUILD) + else: + # If a state_setter is specified, call it instead of load_build + # to update obj's with its previous state. + # First, push state_setter and its tuple of expected arguments + # (obj, state) onto the stack. + save(state_setter) + save(obj) # simple BINGET opcode as obj is already memoized. + save(state) + write(TUPLE2) + # Trigger a state_setter(obj, state) function call. + write(REDUCE) + # The purpose of state_setter is to carry-out an + # inplace modification of obj. We do not care about what the + # method might return, so its output is eventually removed from + # the stack. + write(POP) + + # Methods below this point are dispatched through the dispatch table + + dispatch = {} + + def save_none(self, obj): + self.write(NONE) + dispatch[type(None)] = save_none + + def save_bool(self, obj): + if self.proto >= 2: + self.write(NEWTRUE if obj else NEWFALSE) + else: + self.write(TRUE if obj else FALSE) + dispatch[bool] = save_bool + + def save_long(self, obj): + if self.bin: + # If the int is small enough to fit in a signed 4-byte 2's-comp + # format, we can store it more efficiently than the general + # case. 
+            # First one- and two-byte unsigned ints:
+            if obj >= 0:
+                if obj <= 0xff:
+                    self.write(BININT1 + pack("<B", obj))
+                    return
+                if obj <= 0xffff:
+                    self.write(BININT2 + pack("<H", obj))
+                    return
+            # Next check for 4-byte signed ints:
+            if -0x80000000 <= obj <= 0x7fffffff:
+                self.write(BININT + pack("<i", obj))
+                return
+        if self.proto >= 2:
+            encoded = encode_long(obj)
+            n = len(encoded)
+            if n < 256:
+                self.write(LONG1 + pack("<B", n) + encoded)
+            else:
+                self.write(LONG4 + pack("<i", n) + encoded)
+            return
+        if -0x80000000 <= obj <= 0x7fffffff:
+            self.write(INT + repr(obj).encode("ascii") + b'\n')
+        else:
+            self.write(LONG + repr(obj).encode("ascii") + b'L\n')
+    dispatch[int] = save_long
+
+    def save_float(self, obj):
+        if self.bin:
+            self.write(BINFLOAT + pack('>d', obj))
+        else:
+            self.write(FLOAT + repr(obj).encode("ascii") + b'\n')
+    dispatch[float] = save_float
+
+    def save_bytes(self, obj):
+        if self.proto < 3:
+            if not obj: # bytes object is empty
+                self.save_reduce(bytes, (), obj=obj)
+            else:
+                self.save_reduce(codecs.encode,
+                                 (str(obj, 'latin1'), 'latin1'), obj=obj)
+            return
+        n = len(obj)
+        if n <= 0xff:
+            self.write(SHORT_BINBYTES + pack("<B", n) + obj)
+        elif n > 0xffffffff and self.proto >= 4:
+            self._write_large_bytes(BINBYTES8 + pack("<Q", n), obj)
+        elif n >= self.framer._FRAME_SIZE_TARGET:
+            self._write_large_bytes(BINBYTES + pack("<I", n), obj)
+        else:
+            self.write(BINBYTES + pack("<I", n) + obj)
+    dispatch[bytes] = save_bytes
+
+    def save_bytearray(self, obj):
+        if self.proto < 5:
+            if not obj:  # bytearray is empty
+                self.save_reduce(bytearray, (), obj=obj)
+            else:
+                self.save_reduce(bytearray, (bytes(obj),), obj=obj)
+            return
+        n = len(obj)
+        if n >= self.framer._FRAME_SIZE_TARGET:
+            self._write_large_bytes(BYTEARRAY8 + pack("<Q", n), obj)
+        else:
+            self.write(BYTEARRAY8 + pack("<Q", n) + obj)
+    dispatch[bytearray] = save_bytearray
+
+    if _HAVE_PICKLE_BUFFER:
+        def save_picklebuffer(self, obj):
+            if self.proto < 5:
+                raise PicklingError("PickleBuffer can only be pickled with "
+                                    "protocol >= 5")
+            with obj.raw() as m:
+                if not m.contiguous:
+                    raise PicklingError("PickleBuffer can not be pickled when "
+                                        "pointing to a non-contiguous buffer")
+                in_band = True
+                if self._buffer_callback is not None:
+                    in_band = bool(self._buffer_callback(obj))
+                if in_band:
+                    # Write data in-band
+                    # XXX The C implementation avoids a copy here
+                    if m.readonly:
+                        self.save_bytes(m.tobytes())
+                    else:
+                        self.save_bytearray(m.tobytes())
+                else:
+                    # Write data out-of-band
+                    self.write(NEXT_BUFFER)
+                    if m.readonly:
+                        self.write(READONLY_BUFFER)
+
+        dispatch[PickleBuffer] = save_picklebuffer
+
+    def save_str(self, obj):
+        if self.bin:
+            encoded = obj.encode('utf-8', 'surrogatepass')
+            n = len(encoded)
+            if n <= 0xff and self.proto >= 4:
+                self.write(SHORT_BINUNICODE + pack("<B", n) + encoded)
+            elif n > 0xffffffff and self.proto >= 4:
+                self._write_large_bytes(BINUNICODE8 + pack("<Q", n), encoded)
+            elif n >= self.framer._FRAME_SIZE_TARGET:
+                self._write_large_bytes(BINUNICODE + pack("<I", n), encoded)
+            else:
+                self.write(BINUNICODE + pack("<I", n) + encoded)
+        else:
+            obj = obj.replace("\\", "\\u005c")
+            obj = obj.replace("\0", "\\u0000")
+            obj = obj.replace("\n", "\\u000a")
+            obj = obj.replace("\r", "\\u000d")
+            obj = obj.replace("\x1a", "\\u001a")  # EOF on DOS
+            self.write(UNICODE + obj.encode('raw-unicode-escape') +
+                       b'\n')
+        self.memoize(obj)
+    dispatch[str] = save_str
+
+    def save_tuple(self, obj):
+        if not obj: # tuple is empty
+            if self.bin:
+                self.write(EMPTY_TUPLE)
+            else:
+                self.write(MARK + TUPLE)
+            return
+
+        n = len(obj)
+        save = self.save
+        memo = self.memo
+        if n <= 3 and self.proto >= 2:
+            for element in obj:
+                save(element)
+            # Subtle.  Same as in the big comment below.
+            if id(obj) in memo:
+                get = self.get(memo[id(obj)][0])
+                self.write(POP * n + get)
+            else:
+                self.write(_tuplesize2code[n])
+                self.memoize(obj)
+            return
+
+        # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
+        # has more than 3 elements.
+        write = self.write
+        write(MARK)
+        for element in obj:
+            save(element)
+
+        if id(obj) in memo:
+            # Subtle.  d was not in memo when we entered save_tuple(), so
+            # the process of saving the tuple's elements must have saved
+            # the tuple itself:  the tuple is recursive.  The proper action
+            # now is to throw away everything we put on the stack, and
+            # simply GET the tuple (it's already constructed).  This check
+            # could have been done in the "for element" loop instead, but
+            # recursive tuples are a rare thing.
+            get = self.get(memo[id(obj)][0])
+            if self.bin:
+                write(POP_MARK + get)
+            else:   # proto 0 -- POP_MARK not available
+                write(POP * (n+1) + get)
+            return
+
+        # No recursion.
+        write(TUPLE)
+        self.memoize(obj)
+
+    dispatch[tuple] = save_tuple
+
+    def save_list(self, obj):
+        if self.bin:
+            self.write(EMPTY_LIST)
+        else:   # proto 0 -- can't use EMPTY_LIST
+            self.write(MARK + LIST)
+
+        self.memoize(obj)
+        self._batch_appends(obj)
+
+    dispatch[list] = save_list
+
+    _BATCHSIZE = 1000
+
+    def _batch_appends(self, items):
+        # Helper to batch up APPENDS sequences
+        save = self.save
+        write = self.write
+
+        if not self.bin:
+            for x in items:
+                save(x)
+                write(APPEND)
+            return
+
+        it = iter(items)
+        while True:
+            tmp = list(islice(it, self._BATCHSIZE))
+            n = len(tmp)
+            if n > 1:
+                write(MARK)
+                for x in tmp:
+                    save(x)
+                write(APPENDS)
+            elif n:
+                save(tmp[0])
+                write(APPEND)
+            # else tmp is empty, and we're done
+            if n < self._BATCHSIZE:
+                return
+
+    def save_dict(self, obj):
+        if self.bin:
+            self.write(EMPTY_DICT)
+        else:   # proto 0 -- can't use EMPTY_DICT
+            self.write(MARK + DICT)
+
+        self.memoize(obj)
+        self._batch_setitems(obj.items())
+
+    dispatch[dict] = save_dict
+    if PyStringMap is not None:
+        dispatch[PyStringMap] = save_dict
+
+    def _batch_setitems(self, items):
+        # Helper to batch up SETITEMS sequences; proto >= 1 only
+        save = self.save
+        write = self.write
+
+        if not self.bin:
+            for k, v in items:
+                save(k)
+                save(v)
+                write(SETITEM)
+            return
+
+        it = iter(items)
+        while True:
+            tmp = list(islice(it, self._BATCHSIZE))
+            n = len(tmp)
+            if n > 1:
+                write(MARK)
+                for k, v in tmp:
+                    save(k)
+                    save(v)
+                write(SETITEMS)
+            elif n:
+                k, v = tmp[0]
+                save(k)
+                save(v)
+                write(SETITEM)
+            # else tmp is empty, and we're done
+            if n < self._BATCHSIZE:
+                return
+
+    def save_set(self, obj):
+        save = self.save
+        write = self.write
+
+        if self.proto < 4:
+            self.save_reduce(set, (list(obj),), obj=obj)
+            return
+
+        write(EMPTY_SET)
+        self.memoize(obj)
+
+        it = iter(obj)
+        while True:
+            batch = list(islice(it, self._BATCHSIZE))
+            n = len(batch)
+            if n > 0:
+                write(MARK)
+                for item in batch:
+                    save(item)
+                write(ADDITEMS)
+            if n < self._BATCHSIZE:
+                return
+    dispatch[set] = save_set
+
+    def save_frozenset(self, obj):
+        save = self.save
+        write = self.write
+
+        if self.proto < 4:
+            self.save_reduce(frozenset, (list(obj),), obj=obj)
+            return
+
+        write(MARK)
+        for item in obj:
+            save(item)
+
+        if id(obj) in self.memo:
+            # If the object is already in the memo, this means it is
+            # recursive. In this case, throw away everything we put on the
+            # stack, and fetch the object back from the memo.
+            write(POP_MARK + self.get(self.memo[id(obj)][0]))
+            return
+
+        write(FROZENSET)
+        self.memoize(obj)
+    dispatch[frozenset] = save_frozenset
+
+    def save_global(self, obj, name=None):
+        write = self.write
+        memo = self.memo
+
+        if name is None:
+            name = getattr(obj, '__qualname__', None)
+        if name is None:
+            name = obj.__name__
+
+        module_name = whichmodule(obj, name)
+        try:
+            __import__(module_name, level=0)
+            module = sys.modules[module_name]
+            obj2, parent = _getattribute(module, name)
+        except (ImportError, KeyError, AttributeError):
+            raise PicklingError(
+                "Can't pickle %r: it's not found as %s.%s" %
+                (obj, module_name, name)) from None
+        else:
+            if obj2 is not obj:
+                raise PicklingError(
+                    "Can't pickle %r: it's not the same object as %s.%s" %
+                    (obj, module_name, name))
+
+        if self.proto >= 2:
+            code = _extension_registry.get((module_name, name))
+            if code:
+                assert code > 0
+                if code <= 0xff:
+                    write(EXT1 + pack("<B", code))
+                elif code <= 0xffff:
+                    write(EXT2 + pack("<H", code))
+                else:
+                    write(EXT4 + pack("<i", code))
+                return
+        lastname = name.rpartition('.')[2]
+        if parent is module:
+            name = lastname
+        # Non-ASCII identifiers are supported only with protocols >= 3.
+        if self.proto >= 4:
+            self.save(module_name)
+            self.save(name)
+            write(STACK_GLOBAL)
+        elif parent is not module:
+            self.save_reduce(getattr, (parent, lastname))
+        elif self.proto >= 3:
+            write(GLOBAL + bytes(module_name, "utf-8") + b'\n' +
+                  bytes(name, "utf-8") + b'\n')
+        else:
+            if self.fix_imports:
+                r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
+                r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
+                if (module_name, name) in r_name_mapping:
+                    module_name, name = r_name_mapping[(module_name, name)]
+                elif module_name in r_import_mapping:
+                    module_name = r_import_mapping[module_name]
+            try:
+                write(GLOBAL + bytes(module_name, "ascii") + b'\n' +
+                      bytes(name, "ascii") + b'\n')
+            except UnicodeEncodeError:
+                raise PicklingError(
+                    "can't pickle global identifier '%s.%s' using "
+                    "pickle protocol %i" % (module, name, self.proto)) from None
+
+        self.memoize(obj)
+
+    def save_type(self, obj):
+        if obj is type(None):
+            return self.save_reduce(type, (None,), obj=obj)
+        elif obj is type(NotImplemented):
+            return self.save_reduce(type, (NotImplemented,), obj=obj)
+        elif obj is type(...):
+            return self.save_reduce(type, (...,), obj=obj)
+        return self.save_global(obj)
+
+    dispatch[FunctionType] = save_global
+    dispatch[type] = save_type
+
+
+# Unpickling machinery
+
+class _Unpickler:
+
+    def __init__(self, file, *, fix_imports=True,
+                 encoding="ASCII", errors="strict", buffers=None):
+        """This takes a binary file for reading a pickle data stream.
+
+        The protocol version of the pickle is detected automatically, so
+        no proto argument is needed.
+
+        The argument *file* must have two methods, a read() method that
+        takes an integer argument, and a readline() method that requires
+        no arguments.  Both methods should return bytes.  Thus *file*
+        can be a binary file object opened for reading, an io.BytesIO
+        object, or any other custom object that meets this interface.
+
+        If *buffers* is not None, it should be an iterable of buffer-enabled
+        objects that is consumed each time the pickle stream references
+        an out-of-band buffer view.  Such buffers have been given in order
+        to the *buffer_callback* of a Pickler object.
+
+        If *buffers* is None (the default), then the buffers are taken
+        from the pickle stream, assuming they are serialized there.
+        It is an error for *buffers* to be None if the pickle stream
+        was produced with a non-None *buffer_callback*.
+
+        Other optional arguments are *fix_imports*, *encoding* and
+        *errors*, which are used to control compatibility support for
+        pickle stream generated by Python 2.  If *fix_imports* is True,
+        pickle will try to map the old Python 2 names to the new names
+        used in Python 3.  The *encoding* and *errors* tell pickle how
+        to decode 8-bit string instances pickled by Python 2; these
+        default to 'ASCII' and 'strict', respectively. *encoding* can be
+        'bytes' to read these 8-bit string instances as bytes objects.
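+
+        For example, Unpickler(f, encoding='latin1') is a common way to
+        load Python 2 pickles containing binary 8-bit strings, since
+        latin-1 maps every byte value to a code point.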
+        """
+        self._buffers = iter(buffers) if buffers is not None else None
+        self._file_readline = file.readline
+        self._file_read = file.read
+        self.memo = {}
+        self.encoding = encoding
+        self.errors = errors
+        self.proto = 0
+        self.fix_imports = fix_imports
+
+    def load(self):
+        """Read a pickled object representation from the open file.
+
+        Return the reconstituted object hierarchy specified in the file.
+        """
+        # Check whether Unpickler was initialized correctly. This is
+        # only needed to mimic the behavior of _pickle.Unpickler.load().
+        if not hasattr(self, "_file_read"):
+            raise UnpicklingError("Unpickler.__init__() was not called by "
+                                  "%s.__init__()" % (self.__class__.__name__,))
+        self._unframer = _Unframer(self._file_read, self._file_readline)
+        self.read = self._unframer.read
+        self.readinto = self._unframer.readinto
+        self.readline = self._unframer.readline
+        self.metastack = []
+        self.stack = []
+        self.append = self.stack.append
+        self.proto = 0
+        read = self.read
+        dispatch = self.dispatch
+        try:
+            while True:
+                key = read(1)
+                if not key:
+                    raise EOFError
+                assert isinstance(key, bytes_types)
+                dispatch[key[0]](self)
+        except _Stop as stopinst:
+            return stopinst.value
+
+    # Return a list of items pushed in the stack after last MARK instruction.
+    def pop_mark(self):
+        items = self.stack
+        self.stack = self.metastack.pop()
+        self.append = self.stack.append
+        return items
+
+    def persistent_load(self, pid):
+        raise UnpicklingError("unsupported persistent id encountered")
+
+    dispatch = {}
+
+    def load_proto(self):
+        proto = self.read(1)[0]
+        if not 0 <= proto <= HIGHEST_PROTOCOL:
+            raise ValueError("unsupported pickle protocol: %d" % proto)
+        self.proto = proto
+    dispatch[PROTO[0]] = load_proto
+
+    def load_frame(self):
+        frame_size, = unpack('<Q', self.read(8))
+        if frame_size > sys.maxsize:
+            raise ValueError("frame size > sys.maxsize: %d" % frame_size)
+        self._unframer.load_frame(frame_size)
+    dispatch[FRAME[0]] = load_frame
+
+    def load_persid(self):
+        try:
+            pid = self.readline()[:-1].decode("ascii")
+        except UnicodeDecodeError:
+            raise UnpicklingError(
+                "persistent IDs in protocol 0 must be ASCII strings")
+        self.append(self.persistent_load(pid))
+    dispatch[PERSID[0]] = load_persid
+
+    def load_binpersid(self):
+        pid = self.stack.pop()
+        self.append(self.persistent_load(pid))
+    dispatch[BINPERSID[0]] = load_binpersid
+
+    def load_none(self):
+        self.append(None)
+    dispatch[NONE[0]] = load_none
+
+    def load_false(self):
+        self.append(False)
+    dispatch[NEWFALSE[0]] = load_false
+
+    def load_true(self):
+        self.append(True)
+    dispatch[NEWTRUE[0]] = load_true
+
+    def load_int(self):
+        data = self.readline()
+        if data == FALSE[1:]:
+            val = False
+        elif data == TRUE[1:]:
+            val = True
+        else:
+            val = int(data, 0)
+        self.append(val)
+    dispatch[INT[0]] = load_int
+
+    def load_binint(self):
+        self.append(unpack('<i', self.read(4))[0])
+    dispatch[BININT[0]] = load_binint
+
+    def load_binint1(self):
+        self.append(self.read(1)[0])
+    dispatch[BININT1[0]] = load_binint1
+
+    def load_binint2(self):
+        self.append(unpack('<H', self.read(2))[0])
+    dispatch[BININT2[0]] = load_binint2
+
+    def load_long(self):
+        val = self.readline()[:-1]
+        if val and val[-1] == b'L'[0]:
+            val = val[:-1]
+        self.append(int(val, 0))
+    dispatch[LONG[0]] = load_long
+
+    def load_long1(self):
+        n = self.read(1)[0]
+        data = self.read(n)
+        self.append(decode_long(data))
+    dispatch[LONG1[0]] = load_long1
+
+    def load_long4(self):
+        n, = unpack('<i', self.read(4))
+        if n < 0:
+            # Corrupt or hostile pickle -- we never write one like this
+            raise UnpicklingError("LONG pickle has negative byte count")
+        data = self.read(n)
+        self.append(decode_long(data))
+    dispatch[LONG4[0]] = load_long4
+
+    def load_float(self):
+        self.append(float(self.readline()[:-1]))
+    dispatch[FLOAT[0]] = load_float
+
+    def load_binfloat(self):
+        self.append(unpack('>d', self.read(8))[0])
+    dispatch[BINFLOAT[0]] = load_binfloat
+
+    def _decode_string(self, value):
+        # Used to allow strings from Python 2 to be decoded either as
+        # bytes or Unicode strings.  This should be used only with the
+        # STRING, BINSTRING and SHORT_BINSTRING opcodes.
+        if self.encoding == "bytes":
+            return value
+        else:
+            return value.decode(self.encoding, self.errors)
+
+    def load_string(self):
+        data = self.readline()[:-1]
+        # Strip outermost quotes
+        if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
+            data = data[1:-1]
+        else:
+            raise UnpicklingError("the STRING opcode argument must be quoted")
+        self.append(self._decode_string(codecs.escape_decode(data)[0]))
+    dispatch[STRING[0]] = load_string
+
+    def load_binstring(self):
+        # Deprecated BINSTRING uses signed 32-bit length
+        len, = unpack('<i', self.read(4))
+        if len < 0:
+            raise UnpicklingError("BINSTRING pickle has negative byte count")
+        data = self.read(len)
+        self.append(self._decode_string(data))
+    dispatch[BINSTRING[0]] = load_binstring
+
+    def load_binbytes(self):
+        len, = unpack('<I', self.read(4))
+        if len > maxsize:
+            raise UnpicklingError("BINBYTES exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(self.read(len))
+    dispatch[BINBYTES[0]] = load_binbytes
+
+    def load_unicode(self):
+        self.append(str(self.readline()[:-1], 'raw-unicode-escape'))
+    dispatch[UNICODE[0]] = load_unicode
+
+    def load_binunicode(self):
+        len, = unpack('<I', self.read(4))
+        if len > maxsize:
+            raise UnpicklingError("BINUNICODE exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+    dispatch[BINUNICODE[0]] = load_binunicode
+
+    def load_binunicode8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+            raise UnpicklingError("BINUNICODE8 exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+    dispatch[BINUNICODE8[0]] = load_binunicode8
+
+    def load_binbytes8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+            raise UnpicklingError("BINBYTES8 exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(self.read(len))
+    dispatch[BINBYTES8[0]] = load_binbytes8
+
+    def load_bytearray8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+            raise UnpicklingError("BYTEARRAY8 exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        b = bytearray(len)
+        self.readinto(b)
+        self.append(b)
+    dispatch[BYTEARRAY8[0]] = load_bytearray8
+
+    def load_next_buffer(self):
+        if self._buffers is None:
+            raise UnpicklingError("pickle stream refers to out-of-band data "
+                                  "but no *buffers* argument was given")
+        try:
+            buf = next(self._buffers)
+        except StopIteration:
+            raise UnpicklingError("not enough out-of-band buffers")
+        self.append(buf)
+    dispatch[NEXT_BUFFER[0]] = load_next_buffer
+
+    def load_readonly_buffer(self):
+        buf = self.stack[-1]
+        with memoryview(buf) as m:
+            if not m.readonly:
+                self.stack[-1] = m.toreadonly()
+    dispatch[READONLY_BUFFER[0]] = load_readonly_buffer
+
+    def load_short_binstring(self):
+        len = self.read(1)[0]
+        data = self.read(len)
+        self.append(self._decode_string(data))
+    dispatch[SHORT_BINSTRING[0]] = load_short_binstring
+
+    def load_short_binbytes(self):
+        len = self.read(1)[0]
+        self.append(self.read(len))
+    dispatch[SHORT_BINBYTES[0]] = load_short_binbytes
+
+    def load_short_binunicode(self):
+        len = self.read(1)[0]
+        self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+    dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode
+
+    def load_tuple(self):
+        items = self.pop_mark()
+        self.append(tuple(items))
+    dispatch[TUPLE[0]] = load_tuple
+
+    def load_empty_tuple(self):
+        self.append(())
+    dispatch[EMPTY_TUPLE[0]] = load_empty_tuple
+
+    def load_tuple1(self):
+        self.stack[-1] = (self.stack[-1],)
+    dispatch[TUPLE1[0]] = load_tuple1
+
+    def load_tuple2(self):
+        self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE2[0]] = load_tuple2
+
+    def load_tuple3(self):
+        self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE3[0]] = load_tuple3
+
+    def load_empty_list(self):
+        self.append([])
+    dispatch[EMPTY_LIST[0]] = load_empty_list
+
+    def load_empty_dictionary(self):
+        self.append({})
+    dispatch[EMPTY_DICT[0]] = load_empty_dictionary
+
+    def load_empty_set(self):
+        self.append(set())
+    dispatch[EMPTY_SET[0]] = load_empty_set
+
+    def load_frozenset(self):
+        items = self.pop_mark()
+        self.append(frozenset(items))
+    dispatch[FROZENSET[0]] = load_frozenset
+
+    def load_list(self):
+        items = self.pop_mark()
+        self.append(items)
+    dispatch[LIST[0]] = load_list
+
+    def load_dict(self):
+        items = self.pop_mark()
+        d = {items[i]: items[i+1]
+             for i in range(0, len(items), 2)}
+        self.append(d)
+    dispatch[DICT[0]] = load_dict
+
+    # INST and OBJ differ only in how they get a class object.  It's not
+    # only sensible to do the rest in a common routine, the two routines
+    # previously diverged and grew different bugs.
+    # klass is the class to instantiate, and k points to the topmost mark
+    # object, following which are the arguments for klass.__init__.
+    def _instantiate(self, klass, args):
+        if (args or not isinstance(klass, type) or
+            hasattr(klass, "__getinitargs__")):
+            try:
+                value = klass(*args)
+            except TypeError as err:
+                raise TypeError("in constructor for %s: %s" %
+                                (klass.__name__, str(err)), sys.exc_info()[2])
+        else:
+            value = klass.__new__(klass)
+        self.append(value)
+
+    def load_inst(self):
+        module = self.readline()[:-1].decode("ascii")
+        name = self.readline()[:-1].decode("ascii")
+        klass = self.find_class(module, name)
+        self._instantiate(klass, self.pop_mark())
+    dispatch[INST[0]] = load_inst
+
+    def load_obj(self):
+        # Stack is ... markobject classobject arg1 arg2 ...
+        args = self.pop_mark()
+        cls = args.pop(0)
+        self._instantiate(cls, args)
+    dispatch[OBJ[0]] = load_obj
+
+    def load_newobj(self):
+        args = self.stack.pop()
+        cls = self.stack.pop()
+        obj = cls.__new__(cls, *args)
+        self.append(obj)
+    dispatch[NEWOBJ[0]] = load_newobj
+
+    def load_newobj_ex(self):
+        kwargs = self.stack.pop()
+        args = self.stack.pop()
+        cls = self.stack.pop()
+        obj = cls.__new__(cls, *args, **kwargs)
+        self.append(obj)
+    dispatch[NEWOBJ_EX[0]] = load_newobj_ex
+
+    def load_global(self):
+        module = self.readline()[:-1].decode("utf-8")
+        name = self.readline()[:-1].decode("utf-8")
+        klass = self.find_class(module, name)
+        self.append(klass)
+    dispatch[GLOBAL[0]] = load_global
+
+    def load_stack_global(self):
+        name = self.stack.pop()
+        module = self.stack.pop()
+        if type(name) is not str or type(module) is not str:
+            raise UnpicklingError("STACK_GLOBAL requires str")
+        self.append(self.find_class(module, name))
+    dispatch[STACK_GLOBAL[0]] = load_stack_global
+
+    def load_ext1(self):
+        code = self.read(1)[0]
+        self.get_extension(code)
+    dispatch[EXT1[0]] = load_ext1
+
+    def load_ext2(self):
+        code, = unpack('<H', self.read(2))
+        self.get_extension(code)
+    dispatch[EXT2[0]] = load_ext2
+
+    def load_ext4(self):
+        code, = unpack('<i', self.read(4))
+        self.get_extension(code)
+    dispatch[EXT4[0]] = load_ext4
+
+    def get_extension(self, code):
+        nil = []
+        obj = _extension_cache.get(code, nil)
+        if obj is not nil:
+            self.append(obj)
+            return
+        key = _inverted_registry.get(code)
+        if not key:
+            if code <= 0: # note that 0 is forbidden
+                # Corrupt or hostile pickle.
+                raise UnpicklingError("EXT specifies code <= 0")
+            raise ValueError("unregistered extension code %d" % code)
+        obj = self.find_class(*key)
+        _extension_cache[code] = obj
+        self.append(obj)
+
+    def find_class(self, module, name):
+        # Subclasses may override this.
+        sys.audit('pickle.find_class', module, name)
+        if self.proto < 3 and self.fix_imports:
+            if (module, name) in _compat_pickle.NAME_MAPPING:
+                module, name = _compat_pickle.NAME_MAPPING[(module, name)]
+            elif module in _compat_pickle.IMPORT_MAPPING:
+                module = _compat_pickle.IMPORT_MAPPING[module]
+        __import__(module, level=0)
+        if self.proto >= 4:
+            return _getattribute(sys.modules[module], name)[0]
+        else:
+            return getattr(sys.modules[module], name)
+
+    def load_reduce(self):
+        stack = self.stack
+        args = stack.pop()
+        func = stack[-1]
+        stack[-1] = func(*args)
+    dispatch[REDUCE[0]] = load_reduce
+
+    def load_pop(self):
+        if self.stack:
+            del self.stack[-1]
+        else:
+            self.pop_mark()
+    dispatch[POP[0]] = load_pop
+
+    def load_pop_mark(self):
+        self.pop_mark()
+    dispatch[POP_MARK[0]] = load_pop_mark
+
+    def load_dup(self):
+        self.append(self.stack[-1])
+    dispatch[DUP[0]] = load_dup
+
+    def load_get(self):
+        i = int(self.readline()[:-1])
+        try:
+            self.append(self.memo[i])
+        except KeyError:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[GET[0]] = load_get
+
+    def load_binget(self):
+        i = self.read(1)[0]
+        try:
+            self.append(self.memo[i])
+        except KeyError as exc:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[BINGET[0]] = load_binget
+
+    def load_long_binget(self):
+        i, = unpack('<I', self.read(4))
+        try:
+            self.append(self.memo[i])
+        except KeyError as exc:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[LONG_BINGET[0]] = load_long_binget
+
+    def load_put(self):
+        i = int(self.readline()[:-1])
+        if i < 0:
+            raise ValueError("negative PUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[PUT[0]] = load_put
+
+    def load_binput(self):
+        i = self.read(1)[0]
+        if i < 0:
+            raise ValueError("negative BINPUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[BINPUT[0]] = load_binput
+
+    def load_long_binput(self):
+        i, = unpack('<I', self.read(4))
+        if i > maxsize:
+            raise ValueError("negative LONG_BINPUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[LONG_BINPUT[0]] = load_long_binput
+
+    def load_memoize(self):
+        memo = self.memo
+        memo[len(memo)] = self.stack[-1]
+    dispatch[MEMOIZE[0]] = load_memoize
+
+    def load_append(self):
+        stack = self.stack
+        value = stack.pop()
+        list = stack[-1]
+        list.append(value)
+    dispatch[APPEND[0]] = load_append
+
+    def load_appends(self):
+        items = self.pop_mark()
+        list_obj = self.stack[-1]
+        try:
+            extend = list_obj.extend
+        except AttributeError:
+            pass
+        else:
+            extend(items)
+            return
+        # Even if the PEP 307 requires extend() and append() methods,
+        # fall back on append() if the object has no extend() method
+        # for backward compatibility.
+        append = list_obj.append
+        for item in items:
+            append(item)
+    dispatch[APPENDS[0]] = load_appends
+
+    def load_setitem(self):
+        stack = self.stack
+        value = stack.pop()
+        key = stack.pop()
+        dict = stack[-1]
+        dict[key] = value
+    dispatch[SETITEM[0]] = load_setitem
+
+    def load_setitems(self):
+        items = self.pop_mark()
+        dict = self.stack[-1]
+        for i in range(0, len(items), 2):
+            dict[items[i]] = items[i + 1]
+    dispatch[SETITEMS[0]] = load_setitems
+
+    def load_additems(self):
+        items = self.pop_mark()
+        set_obj = self.stack[-1]
+        if isinstance(set_obj, set):
+            set_obj.update(items)
+        else:
+            add = set_obj.add
+            for item in items:
+                add(item)
+    dispatch[ADDITEMS[0]] = load_additems
+
+    def load_build(self):
+        stack = self.stack
+        state = stack.pop()
+        inst = stack[-1]
+        setstate = getattr(inst, "__setstate__", None)
+        if setstate is not None:
+            setstate(state)
+            return
+        slotstate = None
+        if isinstance(state, tuple) and len(state) == 2:
+            state, slotstate = state
+        if state:
+            inst_dict = inst.__dict__
+            intern = sys.intern
+            for k, v in state.items():
+                if type(k) is str:
+                    inst_dict[intern(k)] = v
+                else:
+                    inst_dict[k] = v
+        if slotstate:
+            for k, v in slotstate.items():
+                setattr(inst, k, v)
+    dispatch[BUILD[0]] = load_build
+
+    def load_mark(self):
+        self.metastack.append(self.stack)
+        self.stack = []
+        self.append = self.stack.append
+    dispatch[MARK[0]] = load_mark
+
+    def load_stop(self):
+        value = self.stack.pop()
+        raise _Stop(value)
+    dispatch[STOP[0]] = load_stop
+
+
+# Shorthands
+
+def _dump(obj, file, protocol=None, *, fix_imports=True, buffer_callback=None):
+    _Pickler(file, protocol, fix_imports=fix_imports,
+             buffer_callback=buffer_callback).dump(obj)
+
+def _dumps(obj, protocol=None, *, fix_imports=True, buffer_callback=None):
+    f = io.BytesIO()
+    _Pickler(f, protocol, fix_imports=fix_imports,
+             buffer_callback=buffer_callback).dump(obj)
+    res = f.getvalue()
+    assert isinstance(res, bytes_types)
+    return res
+
+def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict",
+          buffers=None):
+    return _Unpickler(file, fix_imports=fix_imports, buffers=buffers,
+                      encoding=encoding, errors=errors).load()
+
+def _loads(s, /, *, fix_imports=True, encoding="ASCII", errors="strict",
+           buffers=None):
+    if isinstance(s, str):
+        raise TypeError("Can't load pickle from unicode string")
+    file = io.BytesIO(s)
+    return _Unpickler(file, fix_imports=fix_imports, buffers=buffers,
+                      encoding=encoding, errors=errors).load()
+
+# Use the faster _pickle if possible
+try:
+    from _pickle import (
+        PickleError,
+        PicklingError,
+
UnpicklingError, + Pickler, + Unpickler, + dump, + dumps, + load, + loads + ) +except ImportError: + Pickler, Unpickler = _Pickler, _Unpickler + dump, dumps, load, loads = _dump, _dumps, _load, _loads + +# Doctest +def _test(): + import doctest + return doctest.testmod() + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser( + description='display contents of the pickle files') + parser.add_argument( + 'pickle_file', type=argparse.FileType('br'), + nargs='*', help='the pickle file') + parser.add_argument( + '-t', '--test', action='store_true', + help='run self-test suite') + parser.add_argument( + '-v', action='store_true', + help='run verbosely; only affects self-test run') + args = parser.parse_args() + if args.test: + _test() + else: + if not args.pickle_file: + parser.print_help() + else: + import pprint + for f in args.pickle_file: + obj = load(f) + pprint.pprint(obj) diff --git a/pllava/lib/python3.10/pipes.py b/pllava/lib/python3.10/pipes.py new file mode 100644 index 0000000000000000000000000000000000000000..8cc74b0f1f781b15a095f33ce0d2bdd88ccfe008 --- /dev/null +++ b/pllava/lib/python3.10/pipes.py @@ -0,0 +1,247 @@ +"""Conversion pipeline templates. + +The problem: +------------ + +Suppose you have some data that you want to convert to another format, +such as from GIF image format to PPM image format. Maybe the +conversion involves several steps (e.g. piping it through compress or +uuencode). Some of the conversion steps may require that their input +is a disk file, others may be able to read standard input; similar for +their output. The input to the entire conversion may also be read +from a disk file or from an open file, and similar for its output. + +The module lets you construct a pipeline template by sticking one or +more conversion steps together. It will take care of creating and +removing temporary files if they are necessary to hold intermediate +data. You can then use the template to do conversions from many +different sources to many different destinations. The temporary +file names used are different each time the template is used. + +The templates are objects so you can create templates for many +different conversion steps and store them in a dictionary, for +instance. + + +Directions: +----------- + +To create a template: + t = Template() + +To add a conversion step to a template: + t.append(command, kind) +where kind is a string of two characters: the first is '-' if the +command reads its standard input or 'f' if it requires a file; the +second likewise for the output. The command must be valid /bin/sh +syntax. If input or output files are required, they are passed as +$IN and $OUT; otherwise, it must be possible to use the command in +a pipeline. + +To add a conversion step at the beginning: + t.prepend(command, kind) + +To convert a file to another file using a template: + sts = t.copy(infile, outfile) +If infile or outfile are the empty string, standard input is read or +standard output is written, respectively. The return value is the +exit status of the conversion pipeline. + +To open a file for reading or writing through a conversion pipeline: + fp = t.open(file, mode) +where mode is 'r' to read the file, or 'w' to write it -- just like +for the built-in function open() or for os.popen(). 
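+
+For example, a template with a single stdin-to-stdout step that upper-cases
+its input (this mirrors the example in the library reference):
+    t = Template()
+    t.append('tr a-z A-Z', '--')
+    f = t.open('pipefile', 'w')
+    f.write('hello world')
+    f.close()
+    open('pipefile').read()        # -> 'HELLO WORLD'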
+ +To create a new template object initialized to a given one: + t2 = t.clone() +""" # ' + + +import re +import os +import tempfile +# we import the quote function rather than the module for backward compat +# (quote used to be an undocumented but used function in pipes) +from shlex import quote + +__all__ = ["Template"] + +# Conversion step kinds + +FILEIN_FILEOUT = 'ff' # Must read & write real files +STDIN_FILEOUT = '-f' # Must write a real file +FILEIN_STDOUT = 'f-' # Must read a real file +STDIN_STDOUT = '--' # Normal pipeline element +SOURCE = '.-' # Must be first, writes stdout +SINK = '-.' # Must be last, reads stdin + +stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \ + SOURCE, SINK] + + +class Template: + """Class representing a pipeline template.""" + + def __init__(self): + """Template() returns a fresh pipeline template.""" + self.debugging = 0 + self.reset() + + def __repr__(self): + """t.__repr__() implements repr(t).""" + return '