diff --git a/evalkit_cambrian/lib/python3.10/_aix_support.py b/evalkit_cambrian/lib/python3.10/_aix_support.py new file mode 100644 index 0000000000000000000000000000000000000000..45504934063df87f8e77e790bdd8ae40c4d4c2aa --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/_aix_support.py @@ -0,0 +1,89 @@ +"""Shared AIX support functions.""" + +import sys +import sysconfig + +try: + import subprocess +except ImportError: # pragma: no cover + # _aix_support is used in distutils by setup.py to build C extensions, + # before subprocess dependencies like _posixsubprocess are available. + import _bootsubprocess as subprocess + + +def _aix_tag(vrtl, bd): + # type: (List[int], int) -> str + # Infer the ABI bitwidth from maxsize (assuming 64 bit as the default) + _sz = 32 if sys.maxsize == (2**31-1) else 64 + # vrtl[version, release, technology_level] + return "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(vrtl[0], vrtl[1], vrtl[2], bd, _sz) + + +# extract version, release and technology level from a VRMF string +def _aix_vrtl(vrmf): + # type: (str) -> List[int] + v, r, tl = vrmf.split(".")[:3] + return [int(v[-1]), int(r), int(tl)] + + +def _aix_bosmp64(): + # type: () -> Tuple[str, int] + """ + Return a Tuple[str, int] e.g., ['7.1.4.34', 1806] + The fileset bos.mp64 is the AIX kernel. It's VRMF and builddate + reflect the current ABI levels of the runtime environment. + """ + # We expect all AIX systems to have lslpp installed in this location + out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.mp64"]) + out = out.decode("utf-8") + out = out.strip().split(":") # type: ignore + # Use str() and int() to help mypy see types + return (str(out[2]), int(out[-1])) + + +def aix_platform(): + # type: () -> str + """ + AIX filesets are identified by four decimal values: V.R.M.F. + V (version) and R (release) can be retreived using ``uname`` + Since 2007, starting with AIX 5.3 TL7, the M value has been + included with the fileset bos.mp64 and represents the Technology + Level (TL) of AIX. The F (Fix) value also increases, but is not + relevant for comparing releases and binary compatibility. + For binary compatibility the so-called builddate is needed. + Again, the builddate of an AIX release is associated with bos.mp64. + AIX ABI compatibility is described as guaranteed at: https://www.ibm.com/\ + support/knowledgecenter/en/ssw_aix_72/install/binary_compatability.html + + For pep425 purposes the AIX platform tag becomes: + "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(v, r, tl, builddate, bitsize) + e.g., "aix-6107-1415-32" for AIX 6.1 TL7 bd 1415, 32-bit + and, "aix-6107-1415-64" for AIX 6.1 TL7 bd 1415, 64-bit + """ + vrmf, bd = _aix_bosmp64() + return _aix_tag(_aix_vrtl(vrmf), bd) + + +# extract vrtl from the BUILD_GNU_TYPE as an int +def _aix_bgt(): + # type: () -> List[int] + gnu_type = sysconfig.get_config_var("BUILD_GNU_TYPE") + if not gnu_type: + raise ValueError("BUILD_GNU_TYPE is not defined") + return _aix_vrtl(vrmf=gnu_type) + + +def aix_buildtag(): + # type: () -> str + """ + Return the platform_tag of the system Python was built on. 
+ """ + # AIX_BUILDDATE is defined by configure with: + # lslpp -Lcq bos.mp64 | awk -F: '{ print $NF }' + build_date = sysconfig.get_config_var("AIX_BUILDDATE") + try: + build_date = int(build_date) + except (ValueError, TypeError): + raise ValueError(f"AIX_BUILDDATE is not defined or invalid: " + f"{build_date!r}") + return _aix_tag(_aix_bgt(), build_date) diff --git a/evalkit_cambrian/lib/python3.10/_compat_pickle.py b/evalkit_cambrian/lib/python3.10/_compat_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..f68496ae639f5f880ae5f4ca0a220a29d2e354be --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/_compat_pickle.py @@ -0,0 +1,251 @@ +# This module is used to map the old Python 2 names to the new names used in +# Python 3 for the pickle module. This needed to make pickle streams +# generated with Python 2 loadable by Python 3. + +# This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import +# lib2to3 and use the mapping defined there, because lib2to3 uses pickle. +# Thus, this could cause the module to be imported recursively. +IMPORT_MAPPING = { + '__builtin__' : 'builtins', + 'copy_reg': 'copyreg', + 'Queue': 'queue', + 'SocketServer': 'socketserver', + 'ConfigParser': 'configparser', + 'repr': 'reprlib', + 'tkFileDialog': 'tkinter.filedialog', + 'tkSimpleDialog': 'tkinter.simpledialog', + 'tkColorChooser': 'tkinter.colorchooser', + 'tkCommonDialog': 'tkinter.commondialog', + 'Dialog': 'tkinter.dialog', + 'Tkdnd': 'tkinter.dnd', + 'tkFont': 'tkinter.font', + 'tkMessageBox': 'tkinter.messagebox', + 'ScrolledText': 'tkinter.scrolledtext', + 'Tkconstants': 'tkinter.constants', + 'Tix': 'tkinter.tix', + 'ttk': 'tkinter.ttk', + 'Tkinter': 'tkinter', + 'markupbase': '_markupbase', + '_winreg': 'winreg', + 'thread': '_thread', + 'dummy_thread': '_dummy_thread', + 'dbhash': 'dbm.bsd', + 'dumbdbm': 'dbm.dumb', + 'dbm': 'dbm.ndbm', + 'gdbm': 'dbm.gnu', + 'xmlrpclib': 'xmlrpc.client', + 'SimpleXMLRPCServer': 'xmlrpc.server', + 'httplib': 'http.client', + 'htmlentitydefs' : 'html.entities', + 'HTMLParser' : 'html.parser', + 'Cookie': 'http.cookies', + 'cookielib': 'http.cookiejar', + 'BaseHTTPServer': 'http.server', + 'test.test_support': 'test.support', + 'commands': 'subprocess', + 'urlparse' : 'urllib.parse', + 'robotparser' : 'urllib.robotparser', + 'urllib2': 'urllib.request', + 'anydbm': 'dbm', + '_abcoll' : 'collections.abc', +} + + +# This contains rename rules that are easy to handle. We ignore the more +# complex stuff (e.g. mapping the names in the urllib and types modules). +# These rules should be run before import names are fixed. 
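+#
+# A rough sketch of how these tables are consumed (see Unpickler.find_class
+# in pickle.py): for protocol < 3 streams loaded with fix_imports=True, the
+# (module, name) pair is looked up in NAME_MAPPING first and, failing that,
+# the module alone in IMPORT_MAPPING, roughly:
+#
+#     if (module, name) in NAME_MAPPING:
+#         module, name = NAME_MAPPING[(module, name)]
+#     elif module in IMPORT_MAPPING:
+#         module = IMPORT_MAPPING[module]
+#
+# e.g. the Python 2 global ('__builtin__', 'unicode') resolves to
+# builtins.str.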
+NAME_MAPPING = { + ('__builtin__', 'xrange'): ('builtins', 'range'), + ('__builtin__', 'reduce'): ('functools', 'reduce'), + ('__builtin__', 'intern'): ('sys', 'intern'), + ('__builtin__', 'unichr'): ('builtins', 'chr'), + ('__builtin__', 'unicode'): ('builtins', 'str'), + ('__builtin__', 'long'): ('builtins', 'int'), + ('itertools', 'izip'): ('builtins', 'zip'), + ('itertools', 'imap'): ('builtins', 'map'), + ('itertools', 'ifilter'): ('builtins', 'filter'), + ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'), + ('itertools', 'izip_longest'): ('itertools', 'zip_longest'), + ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'), + ('UserList', 'UserList'): ('collections', 'UserList'), + ('UserString', 'UserString'): ('collections', 'UserString'), + ('whichdb', 'whichdb'): ('dbm', 'whichdb'), + ('_socket', 'fromfd'): ('socket', 'fromfd'), + ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'), + ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'), + ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'), + ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'), + ('urllib', 'getproxies'): ('urllib.request', 'getproxies'), + ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'), + ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'), + ('urllib', 'quote'): ('urllib.parse', 'quote'), + ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'), + ('urllib', 'unquote'): ('urllib.parse', 'unquote'), + ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'), + ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'), + ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'), + ('urllib', 'urlopen'): ('urllib.request', 'urlopen'), + ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'), + ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'), + ('urllib2', 'URLError'): ('urllib.error', 'URLError'), +} + +PYTHON2_EXCEPTIONS = ( + "ArithmeticError", + "AssertionError", + "AttributeError", + "BaseException", + "BufferError", + "BytesWarning", + "DeprecationWarning", + "EOFError", + "EnvironmentError", + "Exception", + "FloatingPointError", + "FutureWarning", + "GeneratorExit", + "IOError", + "ImportError", + "ImportWarning", + "IndentationError", + "IndexError", + "KeyError", + "KeyboardInterrupt", + "LookupError", + "MemoryError", + "NameError", + "NotImplementedError", + "OSError", + "OverflowError", + "PendingDeprecationWarning", + "ReferenceError", + "RuntimeError", + "RuntimeWarning", + # StandardError is gone in Python 3, so we map it to Exception + "StopIteration", + "SyntaxError", + "SyntaxWarning", + "SystemError", + "SystemExit", + "TabError", + "TypeError", + "UnboundLocalError", + "UnicodeDecodeError", + "UnicodeEncodeError", + "UnicodeError", + "UnicodeTranslateError", + "UnicodeWarning", + "UserWarning", + "ValueError", + "Warning", + "ZeroDivisionError", +) + +try: + WindowsError +except NameError: + pass +else: + PYTHON2_EXCEPTIONS += ("WindowsError",) + +for excname in PYTHON2_EXCEPTIONS: + NAME_MAPPING[("exceptions", excname)] = ("builtins", excname) + +MULTIPROCESSING_EXCEPTIONS = ( + 'AuthenticationError', + 'BufferTooShort', + 'ProcessError', + 'TimeoutError', +) + +for excname in MULTIPROCESSING_EXCEPTIONS: + NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname) + +# Same, but for 3.x to 2.x +REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items()) +assert len(REVERSE_IMPORT_MAPPING) 
== len(IMPORT_MAPPING) +REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items()) +assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING) + +# Non-mutual mappings. + +IMPORT_MAPPING.update({ + 'cPickle': 'pickle', + '_elementtree': 'xml.etree.ElementTree', + 'FileDialog': 'tkinter.filedialog', + 'SimpleDialog': 'tkinter.simpledialog', + 'DocXMLRPCServer': 'xmlrpc.server', + 'SimpleHTTPServer': 'http.server', + 'CGIHTTPServer': 'http.server', + # For compatibility with broken pickles saved in old Python 3 versions + 'UserDict': 'collections', + 'UserList': 'collections', + 'UserString': 'collections', + 'whichdb': 'dbm', + 'StringIO': 'io', + 'cStringIO': 'io', +}) + +REVERSE_IMPORT_MAPPING.update({ + '_bz2': 'bz2', + '_dbm': 'dbm', + '_functools': 'functools', + '_gdbm': 'gdbm', + '_pickle': 'pickle', +}) + +NAME_MAPPING.update({ + ('__builtin__', 'basestring'): ('builtins', 'str'), + ('exceptions', 'StandardError'): ('builtins', 'Exception'), + ('UserDict', 'UserDict'): ('collections', 'UserDict'), + ('socket', '_socketobject'): ('socket', 'SocketType'), +}) + +REVERSE_NAME_MAPPING.update({ + ('_functools', 'reduce'): ('__builtin__', 'reduce'), + ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'), + ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'), + ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'), + ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'), + ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'), + ('xmlrpc.server', 'XMLRPCDocGenerator'): + ('DocXMLRPCServer', 'XMLRPCDocGenerator'), + ('xmlrpc.server', 'DocXMLRPCRequestHandler'): + ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'), + ('xmlrpc.server', 'DocXMLRPCServer'): + ('DocXMLRPCServer', 'DocXMLRPCServer'), + ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'): + ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'), + ('http.server', 'SimpleHTTPRequestHandler'): + ('SimpleHTTPServer', 'SimpleHTTPRequestHandler'), + ('http.server', 'CGIHTTPRequestHandler'): + ('CGIHTTPServer', 'CGIHTTPRequestHandler'), + ('_socket', 'socket'): ('socket', '_socketobject'), +}) + +PYTHON3_OSERROR_EXCEPTIONS = ( + 'BrokenPipeError', + 'ChildProcessError', + 'ConnectionAbortedError', + 'ConnectionError', + 'ConnectionRefusedError', + 'ConnectionResetError', + 'FileExistsError', + 'FileNotFoundError', + 'InterruptedError', + 'IsADirectoryError', + 'NotADirectoryError', + 'PermissionError', + 'ProcessLookupError', + 'TimeoutError', +) + +for excname in PYTHON3_OSERROR_EXCEPTIONS: + REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError') + +PYTHON3_IMPORTERROR_EXCEPTIONS = ( + 'ModuleNotFoundError', +) + +for excname in PYTHON3_IMPORTERROR_EXCEPTIONS: + REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError') diff --git a/evalkit_cambrian/lib/python3.10/_osx_support.py b/evalkit_cambrian/lib/python3.10/_osx_support.py new file mode 100644 index 0000000000000000000000000000000000000000..aa66c8b9f4189f8b688f26469def701b821e049c --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/_osx_support.py @@ -0,0 +1,574 @@ +"""Shared OS X support functions.""" + +import os +import re +import sys + +__all__ = [ + 'compiler_fixup', + 'customize_config_vars', + 'customize_compiler', + 'get_platform_osx', +] + +# configuration variables that may contain universal build flags, +# like "-arch" or "-isdkroot", that may need customization for +# the user environment +_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 
'CPPFLAGS', 'BASECFLAGS',
+                           'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
+                           'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
+                           'PY_CORE_CFLAGS', 'PY_CORE_LDFLAGS')
+
+# configuration variables that may contain compiler calls
+_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
+
+# prefix added to original configuration variable names
+_INITPRE = '_OSX_SUPPORT_INITIAL_'
+
+
+def _find_executable(executable, path=None):
+    """Tries to find 'executable' in the directories listed in 'path'.
+
+    A string listing directories separated by 'os.pathsep'; defaults to
+    os.environ['PATH'].  Returns the complete filename or None if not found.
+    """
+    if path is None:
+        path = os.environ['PATH']
+
+    paths = path.split(os.pathsep)
+    base, ext = os.path.splitext(executable)
+
+    if (sys.platform == 'win32') and (ext != '.exe'):
+        executable = executable + '.exe'
+
+    if not os.path.isfile(executable):
+        for p in paths:
+            f = os.path.join(p, executable)
+            if os.path.isfile(f):
+                # the file exists, we have a shot at spawn working
+                return f
+        return None
+    else:
+        return executable
+
+
+def _read_output(commandstring, capture_stderr=False):
+    """Output from successful command execution or None"""
+    # Similar to os.popen(commandstring, "r").read(),
+    # but without actually using os.popen because that
+    # function is not usable during python bootstrap.
+    # tempfile is also not available then.
+    import contextlib
+    try:
+        import tempfile
+        fp = tempfile.NamedTemporaryFile()
+    except ImportError:
+        fp = open("/tmp/_osx_support.%s"%(
+            os.getpid(),), "w+b")
+
+    with contextlib.closing(fp) as fp:
+        if capture_stderr:
+            cmd = "%s >'%s' 2>&1" % (commandstring, fp.name)
+        else:
+            cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
+        return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
+
+
+def _find_build_tool(toolname):
+    """Find a build tool on current path or using xcrun"""
+    return (_find_executable(toolname)
+                or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
+                or ''
+            )
+
+_SYSTEM_VERSION = None
+
+def _get_system_version():
+    """Return the OS X system version as a string"""
+    # Reading this plist is a documented way to get the system
+    # version (see the documentation for the Gestalt Manager)
+    # We avoid using platform.mac_ver to avoid possible bootstrap issues during
+    # the build of Python itself (distutils is used to build standard library
+    # extensions).
+
+    global _SYSTEM_VERSION
+
+    if _SYSTEM_VERSION is None:
+        _SYSTEM_VERSION = ''
+        try:
+            f = open('/System/Library/CoreServices/SystemVersion.plist', encoding="utf-8")
+        except OSError:
+            # We're on a plain darwin box, fall back to the default
+            # behaviour.
+            pass
+        else:
+            try:
+                m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
+                              r'<string>(.*?)</string>', f.read())
+            finally:
+                f.close()
+            if m is not None:
+                _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
+            # else: fall back to the default behaviour
+
+    return _SYSTEM_VERSION
+
+_SYSTEM_VERSION_TUPLE = None
+def _get_system_version_tuple():
+    """
+    Return the macOS system version as a tuple
+
+    The return value is safe to use to compare
+    two version numbers.
+    """
+    global _SYSTEM_VERSION_TUPLE
+    if _SYSTEM_VERSION_TUPLE is None:
+        osx_version = _get_system_version()
+        if osx_version:
+            try:
+                _SYSTEM_VERSION_TUPLE = tuple(int(i) for i in osx_version.split('.'))
+            except ValueError:
+                _SYSTEM_VERSION_TUPLE = ()
+
+    return _SYSTEM_VERSION_TUPLE
+
+
+def _remove_original_values(_config_vars):
+    """Remove original unmodified values for testing"""
+    # This is needed for higher-level cross-platform tests of get_platform.
+    for k in list(_config_vars):
+        if k.startswith(_INITPRE):
+            del _config_vars[k]
+
+def _save_modified_value(_config_vars, cv, newvalue):
+    """Save modified and original unmodified value of configuration var"""
+
+    oldvalue = _config_vars.get(cv, '')
+    if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
+        _config_vars[_INITPRE + cv] = oldvalue
+    _config_vars[cv] = newvalue
+
+
+_cache_default_sysroot = None
+def _default_sysroot(cc):
+    """ Returns the root of the default SDK for this system, or '/' """
+    global _cache_default_sysroot
+
+    if _cache_default_sysroot is not None:
+        return _cache_default_sysroot
+
+    contents = _read_output('%s -c -E -v - </dev/null' % (cc,))
+    in_incdirs = False
+    for line in contents.splitlines():
+        if line.startswith("#include <...>"):
+            in_incdirs = True
+        elif line.startswith("End of search list"):
+            in_incdirs = False
+        elif in_incdirs:
+            line = line.strip()
+            if line == '/usr/include':
+                _cache_default_sysroot = '/'
+            elif line.endswith(".sdk/usr/include"):
+                _cache_default_sysroot = line[:-12]
+    if _cache_default_sysroot is None:
+        _cache_default_sysroot = '/'
+
+    return _cache_default_sysroot
+
+def _supports_universal_builds():
+    """Returns True if universal builds are supported on this system"""
+    # As an approximation, we assume that if we are running on 10.4 or above,
+    # then we are running with an Xcode environment that supports universal
+    # builds, in particular -isysroot and -arch arguments to the compiler. This
+    # is in support of allowing 10.4 universal builds to run on 10.3.x systems.
+
+    osx_version = _get_system_version_tuple()
+    return bool(osx_version >= (10, 4)) if osx_version else False
+
+def _supports_arm64_builds():
+    """Returns True if arm64 builds are supported on this system"""
+    # There are two sets of systems supporting macOS/arm64 builds:
+    # 1. macOS 11 and later, unconditionally
+    # 2. macOS 10.15 with Xcode 12.2 or later
+    # For now the second category is ignored.
+    osx_version = _get_system_version_tuple()
+    return osx_version >= (11, 0) if osx_version else False
+
+
+def _find_appropriate_compiler(_config_vars):
+    """Find appropriate C compiler for extension module builds"""
+
+    # Issue #13590:
+    #    The OSX location for the compiler varies between OSX
+    #    (or rather Xcode) releases. With older releases (up-to 10.5)
+    #    the compiler is in /usr/bin, with newer releases the compiler
+    #    can only be found inside Xcode.app if the "Command Line Tools"
+    #    are not installed.
+    #
+    #    Furthermore, the compiler that can be used varies between
+    #    Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
+    #    as the compiler, after that 'clang' should be used because
+    #    gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
+    #    miscompiles Python.
+
+    # skip checks if the compiler was overridden with a CC env variable
+    if 'CC' in os.environ:
+        return _config_vars
+
+    # The CC config var might contain additional arguments.
+    # Ignore them while searching.
+    cc = oldcc = _config_vars['CC'].split()[0]
+    if not _find_executable(cc):
+        # Compiler is not found on the shell search PATH.
+ # Now search for clang, first on PATH (if the Command LIne + # Tools have been installed in / or if the user has provided + # another location via CC). If not found, try using xcrun + # to find an uninstalled clang (within a selected Xcode). + + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself (and os.popen is + # implemented on top of subprocess and is therefore not + # usable as well) + + cc = _find_build_tool('clang') + + elif os.path.basename(cc).startswith('gcc'): + # Compiler is GCC, check if it is LLVM-GCC + data = _read_output("'%s' --version" + % (cc.replace("'", "'\"'\"'"),)) + if data and 'llvm-gcc' in data: + # Found LLVM-GCC, fall back to clang + cc = _find_build_tool('clang') + + if not cc: + raise SystemError( + "Cannot locate working compiler") + + if cc != oldcc: + # Found a replacement compiler. + # Modify config vars using new compiler, if not already explicitly + # overridden by an env variable, preserving additional arguments. + for cv in _COMPILER_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + cv_split = _config_vars[cv].split() + cv_split[0] = cc if cv != 'CXX' else cc + '++' + _save_modified_value(_config_vars, cv, ' '.join(cv_split)) + + return _config_vars + + +def _remove_universal_flags(_config_vars): + """Remove all universal build arguments from config vars""" + + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overridden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII) + flags = re.sub(r'-isysroot\s*\S+', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _remove_unsupported_archs(_config_vars): + """Remove any unsupported archs from config vars""" + # Different Xcode releases support different sets for '-arch' + # flags. In particular, Xcode 4.x no longer supports the + # PPC architectures. + # + # This code automatically removes '-arch ppc' and '-arch ppc64' + # when these are not supported. That makes it possible to + # build extensions on OSX 10.7 and later with the prebuilt + # 32-bit installer on the python.org website. + + # skip checks if the compiler was overridden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None: + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself + status = os.system( + """echo 'int main{};' | """ + """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null""" + %(_config_vars['CC'].replace("'", "'\"'\"'"),)) + if status: + # The compile failed for some reason. Because of differences + # across Xcode and compiler versions, there is no reliable way + # to be sure why it failed. Assume here it was due to lack of + # PPC support and remove the related '-arch' flags from each + # config variables not explicitly overridden by an environment + # variable. If the error was for some other reason, we hope the + # failure will show up again when trying to compile an extension + # module. 
+ for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _override_all_archs(_config_vars): + """Allow override of all archs with ARCHFLAGS env var""" + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and '-arch' in _config_vars[cv]: + flags = _config_vars[cv] + flags = re.sub(r'-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _check_for_unavailable_sdk(_config_vars): + """Remove references to any SDKs not available""" + # If we're on OSX 10.5 or later and the user tries to + # compile an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. This is particularly important with + # the standalone Command Line Tools alternative to a + # full-blown Xcode install since the CLT packages do not + # provide SDKs. If the SDK is not present, it is assumed + # that the header files and dev libs have been installed + # to /usr and /System/Library by either a standalone CLT + # package or the CLT component within Xcode. + cflags = _config_vars.get('CFLAGS', '') + m = re.search(r'-isysroot\s*(\S+)', cflags) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overridden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub(r'-isysroot\s*\S+(?:\s|$)', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def compiler_fixup(compiler_so, cc_args): + """ + This function will strip '-isysroot PATH' and '-arch ARCH' from the + compile flags if the user has specified one them in extra_compile_flags. + + This is needed because '-arch ARCH' adds another architecture to the + build, without a way to remove an architecture. Furthermore GCC will + barf if multiple '-isysroot' arguments are present. + """ + stripArch = stripSysroot = False + + compiler_so = list(compiler_so) + + if not _supports_universal_builds(): + # OSX before 10.4.0, these don't support -arch and -isysroot at + # all. 
+ stripArch = stripSysroot = True + else: + stripArch = '-arch' in cc_args + stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot')) + + if stripArch or 'ARCHFLAGS' in os.environ: + while True: + try: + index = compiler_so.index('-arch') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + elif not _supports_arm64_builds(): + # Look for "-arch arm64" and drop that + for idx in reversed(range(len(compiler_so))): + if compiler_so[idx] == '-arch' and compiler_so[idx+1] == "arm64": + del compiler_so[idx:idx+2] + + if 'ARCHFLAGS' in os.environ and not stripArch: + # User specified different -arch flags in the environ, + # see also distutils.sysconfig + compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() + + if stripSysroot: + while True: + indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')] + if not indices: + break + index = indices[0] + if compiler_so[index] == '-isysroot': + # Strip this argument and the next one: + del compiler_so[index:index+2] + else: + # It's '-isysroot/some/path' in one arg + del compiler_so[index:index+1] + + # Check if the SDK that is used during compilation actually exists, + # the universal build requires the usage of a universal SDK and not all + # users have that installed by default. + sysroot = None + argvar = cc_args + indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')] + if not indices: + argvar = compiler_so + indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')] + + for idx in indices: + if argvar[idx] == '-isysroot': + sysroot = argvar[idx+1] + break + else: + sysroot = argvar[idx][len('-isysroot'):] + break + + if sysroot and not os.path.isdir(sysroot): + sys.stderr.write(f"Compiling with an SDK that doesn't seem to exist: {sysroot}\n") + sys.stderr.write("Please check your Xcode installation\n") + sys.stderr.flush() + + return compiler_so + + +def customize_config_vars(_config_vars): + """Customize Python build configuration variables. + + Called internally from sysconfig with a mutable mapping + containing name/value pairs parsed from the configured + makefile used to build this interpreter. Returns + the mapping updated as needed to reflect the environment + in which the interpreter is running; in the case of + a Python from a binary installer, the installed + environment may be very different from the build + environment, i.e. different OS levels, different + built tools, different available CPU architectures. + + This customization is performed whenever + distutils.sysconfig.get_config_vars() is first + called. It may be used in environments where no + compilers are present, i.e. when installing pure + Python dists. Customization of compiler paths + and detection of unavailable archs is deferred + until the first extension module build is + requested (in distutils.sysconfig.customize_compiler). + + Currently called from distutils.sysconfig + """ + + if not _supports_universal_builds(): + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. 
+ _remove_universal_flags(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + # Remove references to sdks that are not found + _check_for_unavailable_sdk(_config_vars) + + return _config_vars + + +def customize_compiler(_config_vars): + """Customize compiler path and configuration variables. + + This customization is performed when the first + extension module build is requested + in distutils.sysconfig.customize_compiler. + """ + + # Find a compiler to use for extension module builds + _find_appropriate_compiler(_config_vars) + + # Remove ppc arch flags if not supported here + _remove_unsupported_archs(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + return _config_vars + + +def get_platform_osx(_config_vars, osname, release, machine): + """Filter values for get_platform()""" + # called from get_platform() in sysconfig and distutils.util + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. + + macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '') + macrelease = _get_system_version() or macver + macver = macver or macrelease + + if macver: + release = macver + osname = "macosx" + + # Use the original CFLAGS value, if available, so that we + # return the same machine type for the platform string. + # Otherwise, distutils may consider this a cross-compiling + # case and disallow installs. + cflags = _config_vars.get(_INITPRE+'CFLAGS', + _config_vars.get('CFLAGS', '')) + if macrelease: + try: + macrelease = tuple(int(i) for i in macrelease.split('.')[0:2]) + except ValueError: + macrelease = (10, 3) + else: + # assume no universal support + macrelease = (10, 3) + + if (macrelease >= (10, 4)) and '-arch' in cflags.strip(): + # The universal build will build fat binaries, but not on + # systems before 10.4 + + machine = 'fat' + + archs = re.findall(r'-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('arm64', 'x86_64'): + machine = 'universal2' + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxsize >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxsize >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return (osname, release, machine) diff --git a/evalkit_cambrian/lib/python3.10/_strptime.py b/evalkit_cambrian/lib/python3.10/_strptime.py new file mode 100644 index 0000000000000000000000000000000000000000..b97dfcce1e8e4d7dfe2eb8c3a22f2f9b7536c78b --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/_strptime.py @@ -0,0 +1,579 @@ +"""Strptime-related classes and functions. 
+ +CLASSES: + LocaleTime -- Discovers and stores locale-specific time information + TimeRE -- Creates regexes for pattern matching a string of text containing + time information + +FUNCTIONS: + _getlang -- Figure out what language is being used for the locale + strptime -- Calculates the time struct represented by the passed-in string + +""" +import time +import locale +import calendar +from re import compile as re_compile +from re import IGNORECASE +from re import escape as re_escape +from datetime import (date as datetime_date, + timedelta as datetime_timedelta, + timezone as datetime_timezone) +from _thread import allocate_lock as _thread_allocate_lock + +__all__ = [] + +def _getlang(): + # Figure out what the current language is set to. + return locale.getlocale(locale.LC_TIME) + +class LocaleTime(object): + """Stores and handles locale-specific information related to time. + + ATTRIBUTES: + f_weekday -- full weekday names (7-item list) + a_weekday -- abbreviated weekday names (7-item list) + f_month -- full month names (13-item list; dummy value in [0], which + is added by code) + a_month -- abbreviated month names (13-item list, dummy value in + [0], which is added by code) + am_pm -- AM/PM representation (2-item list) + LC_date_time -- format string for date/time representation (string) + LC_date -- format string for date representation (string) + LC_time -- format string for time representation (string) + timezone -- daylight- and non-daylight-savings timezone representation + (2-item list of sets) + lang -- Language used by instance (2-item tuple) + """ + + def __init__(self): + """Set all attributes. + + Order of methods called matters for dependency reasons. + + The locale language is set at the offset and then checked again before + exiting. This is to make sure that the attributes were not set with a + mix of information from more than one locale. This would most likely + happen when using threads where one thread calls a locale-dependent + function while another thread changes the locale while the function in + the other thread is still running. Proper coding would call for + locks to prevent changing the locale while locale-dependent code is + running. The check here is done in case someone does not think about + doing this. + + Only other possible issue is if someone changed the timezone and did + not call tz.tzset . That is an issue for the programmer, though, + since changing the timezone is worthless without that call. + + """ + self.lang = _getlang() + self.__calc_weekday() + self.__calc_month() + self.__calc_am_pm() + self.__calc_timezone() + self.__calc_date_time() + if _getlang() != self.lang: + raise ValueError("locale changed during initialization") + if time.tzname != self.tzname or time.daylight != self.daylight: + raise ValueError("timezone changed during initialization") + + def __calc_weekday(self): + # Set self.a_weekday and self.f_weekday using the calendar + # module. + a_weekday = [calendar.day_abbr[i].lower() for i in range(7)] + f_weekday = [calendar.day_name[i].lower() for i in range(7)] + self.a_weekday = a_weekday + self.f_weekday = f_weekday + + def __calc_month(self): + # Set self.f_month and self.a_month using the calendar module. + a_month = [calendar.month_abbr[i].lower() for i in range(13)] + f_month = [calendar.month_name[i].lower() for i in range(13)] + self.a_month = a_month + self.f_month = f_month + + def __calc_am_pm(self): + # Set self.am_pm by using time.strftime(). 
+ + # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that + # magical; just happened to have used it everywhere else where a + # static date was needed. + am_pm = [] + for hour in (1, 22): + time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0)) + am_pm.append(time.strftime("%p", time_tuple).lower()) + self.am_pm = am_pm + + def __calc_date_time(self): + # Set self.date_time, self.date, & self.time by using + # time.strftime(). + + # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of + # overloaded numbers is minimized. The order in which searches for + # values within the format string is very important; it eliminates + # possible ambiguity for what something represents. + time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0)) + date_time = [None, None, None] + date_time[0] = time.strftime("%c", time_tuple).lower() + date_time[1] = time.strftime("%x", time_tuple).lower() + date_time[2] = time.strftime("%X", time_tuple).lower() + replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'), + (self.f_month[3], '%B'), (self.a_weekday[2], '%a'), + (self.a_month[3], '%b'), (self.am_pm[1], '%p'), + ('1999', '%Y'), ('99', '%y'), ('22', '%H'), + ('44', '%M'), ('55', '%S'), ('76', '%j'), + ('17', '%d'), ('03', '%m'), ('3', '%m'), + # '3' needed for when no leading zero. + ('2', '%w'), ('10', '%I')] + replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone + for tz in tz_values]) + for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')): + current_format = date_time[offset] + for old, new in replacement_pairs: + # Must deal with possible lack of locale info + # manifesting itself as the empty string (e.g., Swedish's + # lack of AM/PM info) or a platform returning a tuple of empty + # strings (e.g., MacOS 9 having timezone as ('','')). + if old: + current_format = current_format.replace(old, new) + # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since + # 2005-01-03 occurs before the first Monday of the year. Otherwise + # %U is used. + time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0)) + if '00' in time.strftime(directive, time_tuple): + U_W = '%W' + else: + U_W = '%U' + date_time[offset] = current_format.replace('11', U_W) + self.LC_date_time = date_time[0] + self.LC_date = date_time[1] + self.LC_time = date_time[2] + + def __calc_timezone(self): + # Set self.timezone by using time.tzname. + # Do not worry about possibility of time.tzname[0] == time.tzname[1] + # and time.daylight; handle that in strptime. + try: + time.tzset() + except AttributeError: + pass + self.tzname = time.tzname + self.daylight = time.daylight + no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()}) + if self.daylight: + has_saving = frozenset({self.tzname[1].lower()}) + else: + has_saving = frozenset() + self.timezone = (no_saving, has_saving) + + +class TimeRE(dict): + """Handle conversion from format directives to regexes.""" + + def __init__(self, locale_time=None): + """Create keys/values. + + Order of execution is important for dependency reasons. 
+        """
+        if locale_time:
+            self.locale_time = locale_time
+        else:
+            self.locale_time = LocaleTime()
+        base = super()
+        base.__init__({
+            # The " [1-9]" part of the regex is to make %c from ANSI C work
+            'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
+            'f': r"(?P<f>[0-9]{1,6})",
+            'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
+            'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
+            'G': r"(?P<G>\d\d\d\d)",
+            'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
+            'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
+            'M': r"(?P<M>[0-5]\d|\d)",
+            'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
+            'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
+            'w': r"(?P<w>[0-6])",
+            'u': r"(?P<u>[1-7])",
+            'V': r"(?P<V>5[0-3]|0[1-9]|[1-4]\d|\d)",
+            # W is set below by using 'U'
+            'y': r"(?P<y>\d\d)",
+            #XXX: Does 'Y' need to worry about having less or more than
+            #     4 digits?
+            'Y': r"(?P<Y>\d\d\d\d)",
+            'z': r"(?P<z>[+-]\d\d:?[0-5]\d(:?[0-5]\d(\.\d{1,6})?)?|(?-i:Z))",
+            'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
+            'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
+            'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
+            'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
+            'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
+            'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
+                                        for tz in tz_names),
+                                'Z'),
+            '%': '%'})
+        base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
+        base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
+        base.__setitem__('x', self.pattern(self.locale_time.LC_date))
+        base.__setitem__('X', self.pattern(self.locale_time.LC_time))
+
+    def __seqToRE(self, to_convert, directive):
+        """Convert a list to a regex string for matching a directive.
+
+        Want possible matching values to be from longest to shortest.  This
+        prevents the possibility of a match occurring for a value that also
+        a substring of a larger value that should have matched (e.g., 'abc'
+        matching when 'abcdef' should have been the match).
+
+        """
+        to_convert = sorted(to_convert, key=len, reverse=True)
+        for value in to_convert:
+            if value != '':
+                break
+        else:
+            return ''
+        regex = '|'.join(re_escape(stuff) for stuff in to_convert)
+        regex = '(?P<%s>%s' % (directive, regex)
+        return '%s)' % regex
+
+    def pattern(self, format):
+        """Return regex pattern for the format string.
+
+        Need to make sure that any characters that might be interpreted as
+        regex syntax are escaped.
+
+        """
+        processed_format = ''
+        # The sub() call escapes all characters that might be misconstrued
+        # as regex syntax.  Cannot use re.escape since we have to deal with
+        # format directives (%m, etc.).
+        regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
+        format = regex_chars.sub(r"\\\1", format)
+        whitespace_replacement = re_compile(r'\s+')
+        format = whitespace_replacement.sub(r'\\s+', format)
+        while '%' in format:
+            directive_index = format.index('%')+1
+            processed_format = "%s%s%s" % (processed_format,
+                                           format[:directive_index-1],
+                                           self[format[directive_index]])
+            format = format[directive_index+1:]
+        return "%s%s" % (processed_format, format)
+
+    def compile(self, format):
+        """Return a compiled re object for the format string."""
+        return re_compile(self.pattern(format), IGNORECASE)
+
+_cache_lock = _thread_allocate_lock()
+# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
+# first!
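+#
+# Rough illustration of what TimeRE produces: TimeRE().pattern("%Y-%m-%d")
+# expands to something like
+#
+#     (?P<Y>\d\d\d\d)-(?P<m>1[0-2]|0[1-9]|[1-9])-(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])
+#
+# and TimeRE().compile() wraps such a pattern with re.IGNORECASE.
+# _strptime() below matches the compiled regex against its input and reads
+# the parsed values back out of the named groups; the shared TimeRE instance
+# and the per-format compiled regexes are cached in the globals that follow,
+# guarded by _cache_lock.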
+_TimeRE_cache = TimeRE() +_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache +_regex_cache = {} + +def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon): + """Calculate the Julian day based on the year, week of the year, and day of + the week, with week_start_day representing whether the week of the year + assumes the week starts on Sunday or Monday (6 or 0).""" + first_weekday = datetime_date(year, 1, 1).weekday() + # If we are dealing with the %U directive (week starts on Sunday), it's + # easier to just shift the view to Sunday being the first day of the + # week. + if not week_starts_Mon: + first_weekday = (first_weekday + 1) % 7 + day_of_week = (day_of_week + 1) % 7 + # Need to watch out for a week 0 (when the first day of the year is not + # the same as that specified by %U or %W). + week_0_length = (7 - first_weekday) % 7 + if week_of_year == 0: + return 1 + day_of_week - first_weekday + else: + days_to_week = week_0_length + (7 * (week_of_year - 1)) + return 1 + days_to_week + day_of_week + + +def _calc_julian_from_V(iso_year, iso_week, iso_weekday): + """Calculate the Julian day based on the ISO 8601 year, week, and weekday. + ISO weeks start on Mondays, with week 01 being the week containing 4 Jan. + ISO week days range from 1 (Monday) to 7 (Sunday). + """ + correction = datetime_date(iso_year, 1, 4).isoweekday() + 3 + ordinal = (iso_week * 7) + iso_weekday - correction + # ordinal may be negative or 0 now, which means the date is in the previous + # calendar year + if ordinal < 1: + ordinal += datetime_date(iso_year, 1, 1).toordinal() + iso_year -= 1 + ordinal -= datetime_date(iso_year, 1, 1).toordinal() + return iso_year, ordinal + + +def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): + """Return a 2-tuple consisting of a time struct and an int containing + the number of microseconds based on the input string and the + format string.""" + + for index, arg in enumerate([data_string, format]): + if not isinstance(arg, str): + msg = "strptime() argument {} must be str, not {}" + raise TypeError(msg.format(index, type(arg))) + + global _TimeRE_cache, _regex_cache + with _cache_lock: + locale_time = _TimeRE_cache.locale_time + if (_getlang() != locale_time.lang or + time.tzname != locale_time.tzname or + time.daylight != locale_time.daylight): + _TimeRE_cache = TimeRE() + _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time + if len(_regex_cache) > _CACHE_MAX_SIZE: + _regex_cache.clear() + format_regex = _regex_cache.get(format) + if not format_regex: + try: + format_regex = _TimeRE_cache.compile(format) + # KeyError raised when a bad format is found; can be specified as + # \\, in which case it was a stray % but with a space after it + except KeyError as err: + bad_directive = err.args[0] + if bad_directive == "\\": + bad_directive = "%" + del err + raise ValueError("'%s' is a bad directive in format '%s'" % + (bad_directive, format)) from None + # IndexError only occurs when the format string is "%" + except IndexError: + raise ValueError("stray %% in format '%s'" % format) from None + _regex_cache[format] = format_regex + found = format_regex.match(data_string) + if not found: + raise ValueError("time data %r does not match format %r" % + (data_string, format)) + if len(data_string) != found.end(): + raise ValueError("unconverted data remains: %s" % + data_string[found.end():]) + + iso_year = year = None + month = day = 1 + hour = minute = second = fraction = 0 + tz = -1 + gmtoff = None + gmtoff_fraction = 0 + # Default 
to -1 to signify that values not known; not critical to have, + # though + iso_week = week_of_year = None + week_of_year_start = None + # weekday and julian defaulted to None so as to signal need to calculate + # values + weekday = julian = None + found_dict = found.groupdict() + for group_key in found_dict.keys(): + # Directives not explicitly handled below: + # c, x, X + # handled by making out of other directives + # U, W + # worthless without day of the week + if group_key == 'y': + year = int(found_dict['y']) + # Open Group specification for strptime() states that a %y + #value in the range of [00, 68] is in the century 2000, while + #[69,99] is in the century 1900 + if year <= 68: + year += 2000 + else: + year += 1900 + elif group_key == 'Y': + year = int(found_dict['Y']) + elif group_key == 'G': + iso_year = int(found_dict['G']) + elif group_key == 'm': + month = int(found_dict['m']) + elif group_key == 'B': + month = locale_time.f_month.index(found_dict['B'].lower()) + elif group_key == 'b': + month = locale_time.a_month.index(found_dict['b'].lower()) + elif group_key == 'd': + day = int(found_dict['d']) + elif group_key == 'H': + hour = int(found_dict['H']) + elif group_key == 'I': + hour = int(found_dict['I']) + ampm = found_dict.get('p', '').lower() + # If there was no AM/PM indicator, we'll treat this like AM + if ampm in ('', locale_time.am_pm[0]): + # We're in AM so the hour is correct unless we're + # looking at 12 midnight. + # 12 midnight == 12 AM == hour 0 + if hour == 12: + hour = 0 + elif ampm == locale_time.am_pm[1]: + # We're in PM so we need to add 12 to the hour unless + # we're looking at 12 noon. + # 12 noon == 12 PM == hour 12 + if hour != 12: + hour += 12 + elif group_key == 'M': + minute = int(found_dict['M']) + elif group_key == 'S': + second = int(found_dict['S']) + elif group_key == 'f': + s = found_dict['f'] + # Pad to always return microseconds. + s += "0" * (6 - len(s)) + fraction = int(s) + elif group_key == 'A': + weekday = locale_time.f_weekday.index(found_dict['A'].lower()) + elif group_key == 'a': + weekday = locale_time.a_weekday.index(found_dict['a'].lower()) + elif group_key == 'w': + weekday = int(found_dict['w']) + if weekday == 0: + weekday = 6 + else: + weekday -= 1 + elif group_key == 'u': + weekday = int(found_dict['u']) + weekday -= 1 + elif group_key == 'j': + julian = int(found_dict['j']) + elif group_key in ('U', 'W'): + week_of_year = int(found_dict[group_key]) + if group_key == 'U': + # U starts week on Sunday. + week_of_year_start = 6 + else: + # W starts week on Monday. + week_of_year_start = 0 + elif group_key == 'V': + iso_week = int(found_dict['V']) + elif group_key == 'z': + z = found_dict['z'] + if z == 'Z': + gmtoff = 0 + else: + if z[3] == ':': + z = z[:3] + z[4:] + if len(z) > 5: + if z[5] != ':': + msg = f"Inconsistent use of : in {found_dict['z']}" + raise ValueError(msg) + z = z[:5] + z[6:] + hours = int(z[1:3]) + minutes = int(z[3:5]) + seconds = int(z[5:7] or 0) + gmtoff = (hours * 60 * 60) + (minutes * 60) + seconds + gmtoff_remainder = z[8:] + # Pad to always return microseconds. + gmtoff_remainder_padding = "0" * (6 - len(gmtoff_remainder)) + gmtoff_fraction = int(gmtoff_remainder + gmtoff_remainder_padding) + if z.startswith("-"): + gmtoff = -gmtoff + gmtoff_fraction = -gmtoff_fraction + elif group_key == 'Z': + # Since -1 is default value only need to worry about setting tz if + # it can be something other than -1. 
+ found_zone = found_dict['Z'].lower() + for value, tz_values in enumerate(locale_time.timezone): + if found_zone in tz_values: + # Deal with bad locale setup where timezone names are the + # same and yet time.daylight is true; too ambiguous to + # be able to tell what timezone has daylight savings + if (time.tzname[0] == time.tzname[1] and + time.daylight and found_zone not in ("utc", "gmt")): + break + else: + tz = value + break + # Deal with the cases where ambiguities arize + # don't assume default values for ISO week/year + if year is None and iso_year is not None: + if iso_week is None or weekday is None: + raise ValueError("ISO year directive '%G' must be used with " + "the ISO week directive '%V' and a weekday " + "directive ('%A', '%a', '%w', or '%u').") + if julian is not None: + raise ValueError("Day of the year directive '%j' is not " + "compatible with ISO year directive '%G'. " + "Use '%Y' instead.") + elif week_of_year is None and iso_week is not None: + if weekday is None: + raise ValueError("ISO week directive '%V' must be used with " + "the ISO year directive '%G' and a weekday " + "directive ('%A', '%a', '%w', or '%u').") + else: + raise ValueError("ISO week directive '%V' is incompatible with " + "the year directive '%Y'. Use the ISO year '%G' " + "instead.") + + leap_year_fix = False + if year is None and month == 2 and day == 29: + year = 1904 # 1904 is first leap year of 20th century + leap_year_fix = True + elif year is None: + year = 1900 + + + # If we know the week of the year and what day of that week, we can figure + # out the Julian day of the year. + if julian is None and weekday is not None: + if week_of_year is not None: + week_starts_Mon = True if week_of_year_start == 0 else False + julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, + week_starts_Mon) + elif iso_year is not None and iso_week is not None: + year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1) + if julian is not None and julian <= 0: + year -= 1 + yday = 366 if calendar.isleap(year) else 365 + julian += yday + + if julian is None: + # Cannot pre-calculate datetime_date() since can change in Julian + # calculation and thus could have different value for the day of + # the week calculation. + # Need to add 1 to result since first day of the year is 1, not 0. + julian = datetime_date(year, month, day).toordinal() - \ + datetime_date(year, 1, 1).toordinal() + 1 + else: # Assume that if they bothered to include Julian day (or if it was + # calculated above with year/week/weekday) it will be accurate. + datetime_result = datetime_date.fromordinal( + (julian - 1) + + datetime_date(year, 1, 1).toordinal()) + year = datetime_result.year + month = datetime_result.month + day = datetime_result.day + if weekday is None: + weekday = datetime_date(year, month, day).weekday() + # Add timezone info + tzname = found_dict.get("Z") + + if leap_year_fix: + # the caller didn't supply a year but asked for Feb 29th. We couldn't + # use the default of 1900 for computations. We set it back to ensure + # that February 29th is smaller than March 1st. 
+ year = 1900 + + return (year, month, day, + hour, minute, second, + weekday, julian, tz, tzname, gmtoff), fraction, gmtoff_fraction + +def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"): + """Return a time struct based on the input string and the + format string.""" + tt = _strptime(data_string, format)[0] + return time.struct_time(tt[:time._STRUCT_TM_ITEMS]) + +def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"): + """Return a class cls instance based on the input string and the + format string.""" + tt, fraction, gmtoff_fraction = _strptime(data_string, format) + tzname, gmtoff = tt[-2:] + args = tt[:6] + (fraction,) + if gmtoff is not None: + tzdelta = datetime_timedelta(seconds=gmtoff, microseconds=gmtoff_fraction) + if tzname: + tz = datetime_timezone(tzdelta, tzname) + else: + tz = datetime_timezone(tzdelta) + args += (tz,) + + return cls(*args) diff --git a/evalkit_cambrian/lib/python3.10/_sysconfigdata__linux_x86_64-linux-gnu.py.orig b/evalkit_cambrian/lib/python3.10/_sysconfigdata__linux_x86_64-linux-gnu.py.orig new file mode 100644 index 0000000000000000000000000000000000000000..47e3ea63b887cc6acd12ae41bd38a1b48c4d7c1e --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/_sysconfigdata__linux_x86_64-linux-gnu.py.orig @@ -0,0 +1,986 @@ +# system configuration generated and used by the sysconfig module +build_time_vars = {'ABIFLAGS': '', + 'AC_APPLE_UNIVERSAL_BUILD': 0, + 'AIX_BUILDDATE': 0, + 'AIX_GENUINE_CPLUSPLUS': 0, + 'ALIGNOF_LONG': 8, + 'ALIGNOF_SIZE_T': 8, + 'ALT_SOABI': 0, + 'ANDROID_API_LEVEL': 0, + 'AR': 'x86_64-conda-linux-gnu-ar', + 'ARFLAGS': 'rcs', + 'BASECFLAGS': '-Wno-unused-result -Wsign-compare', + 'BASECPPFLAGS': '-IObjects -IInclude -IPython', + 'BASEMODLIBS': '', + 'BINDIR': '/root/envs/evalkit_cambrian/bin', + 'BINLIBDEST': '/root/envs/evalkit_cambrian/lib/python3.10', + 'BLDLIBRARY': 'libpython3.10.a', + 'BLDSHARED': 'x86_64-conda-linux-gnu-gcc -pthread -shared -Wl,-O2 ' + '-Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now ' + '-Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib', + 'BUILDEXE': '', + 'BUILDPYTHON': 'python', + 'BUILD_GNU_TYPE': 'x86_64-conda-linux-gnu', + 'BYTESTR_DEPS': '\\', + 'CC': 'x86_64-conda-linux-gnu-gcc -pthread', + 'CCSHARED': '-fPIC', + 'CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 -Wall ' + '-march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe ' + '-isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + '-march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe ' + '-isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ', + 'CFLAGSFORSHARED': '', + 'CFLAGS_ALIASING': '', + 'CONFIGFILES': 'configure 
configure.ac acconfig.h pyconfig.h.in ' + 'Makefile.pre.in', + 'CONFIGURE_CFLAGS': '-march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 ' + '-ffunction-sections -pipe -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + ' ', + 'CONFIGURE_CFLAGS_NODIST': '-fno-semantic-interposition ' + ' ' + ' -g -std=c99 -Wextra ' + '-Wno-unused-result -Wno-unused-parameter ' + '-Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration ' + '-fvisibility=hidden', + 'CONFIGURE_CPPFLAGS': '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include', + 'CONFIGURE_LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib', + 'CONFIGURE_LDFLAGS_NODIST': '-fno-semantic-interposition ' + ' ' + ' -g', + 'CONFIG_ARGS': "'--prefix=/root/envs/evalkit_cambrian' " + "'--build=x86_64-conda-linux-gnu' " + "'--host=x86_64-conda-linux-gnu' '--enable-ipv6' " + "'--with-ensurepip=no' " + "'--with-tzpath=/root/envs/evalkit_cambrian/share/zoneinfo' " + "'--with-computed-gotos' '--with-system-ffi' " + "'--enable-loadable-sqlite-extensions' " + "'--with-tcltk-includes=-I/root/envs/evalkit_cambrian/include' " + "'--with-tcltk-libs=-L/root/envs/evalkit_cambrian/lib " + "-ltcl8.6 -ltk8.6' '--with-platlibdir=lib' '--with-lto' " + "'--enable-optimizations' " + "'-oldincludedir=/croot/python-split_1733933809325/_build_env/x86_64-conda-linux-gnu/sysroot/usr/include' " + "'--disable-shared' 'PROFILE_TASK=-m test --pgo' " + "'build_alias=x86_64-conda-linux-gnu' " + "'host_alias=x86_64-conda-linux-gnu' 'MACHDEP=linux' " + "'CC=x86_64-conda-linux-gnu-gcc' 'CFLAGS=-march=nocona " + '-mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections ' + '-pipe -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + "' 'LDFLAGS=-Wl,-O2 -Wl,--sort-common -Wl,--as-needed " + '-Wl,-z,relro -Wl,-z,now -Wl,--disable-new-dtags ' + '-Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + "-L/root/envs/evalkit_cambrian/lib' " + "'CPPFLAGS=-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem " + '/root/envs/evalkit_cambrian/include ' + "-I/root/envs/evalkit_cambrian/include' " + "'CPP=/croot/python-split_1733933809325/_build_env/bin/x86_64-conda-linux-gnu-cpp' " + "'PKG_CONFIG_PATH=/root/envs/evalkit_cambrian/lib/pkgconfig'", + 'CONFINCLUDEDIR': '/root/envs/evalkit_cambrian/include', + 'CONFINCLUDEPY': '/root/envs/evalkit_cambrian/include/python3.10', + 'COREPYTHONPATH': '', + 'COVERAGE_INFO': '/croot/python-split_1733933809325/work/build-static/coverage.info', + 'COVERAGE_REPORT': '/croot/python-split_1733933809325/work/build-static/lcov-report', + 'COVERAGE_REPORT_OPTIONS': '--no-branch-coverage --title "CPython lcov ' + 'report"', + 'CPPFLAGS': '-IObjects -IInclude -IPython -I. 
' + '-I/croot/python-split_1733933809325/work/Include -DNDEBUG ' + '-D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include', + 'CXX': 'x86_64-conda-linux-gnu-c++ -pthread', + 'DESTDIRS': '/root/envs/evalkit_cambrian ' + '/root/envs/evalkit_cambrian/lib ' + '/root/envs/evalkit_cambrian/lib/python3.10 ' + '/root/envs/evalkit_cambrian/lib/python3.10/lib-dynload', + 'DESTLIB': '/root/envs/evalkit_cambrian/lib/python3.10', + 'DESTPATH': '', + 'DESTSHARED': '/root/envs/evalkit_cambrian/lib/python3.10/lib-dynload', + 'DFLAGS': '', + 'DIRMODE': 755, + 'DIST': 'README.rst ChangeLog configure configure.ac acconfig.h pyconfig.h.in ' + 'Makefile.pre.in Include Lib Misc Ext-dummy', + 'DISTDIRS': 'Include Lib Misc Ext-dummy', + 'DISTFILES': 'README.rst ChangeLog configure configure.ac acconfig.h ' + 'pyconfig.h.in Makefile.pre.in', + 'DLINCLDIR': '.', + 'DLLLIBRARY': '', + 'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0, + 'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0, + 'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1, + 'DTRACE': '', + 'DTRACE_DEPS': '\\', + 'DTRACE_HEADERS': '', + 'DTRACE_OBJS': '', + 'DYNLOADFILE': 'dynload_shlib.o', + 'ENABLE_IPV6': 1, + 'ENSUREPIP': 'no', + 'EXE': '', + 'EXEMODE': 755, + 'EXPERIMENTAL_ISOLATED_SUBINTERPRETERS': 0, + 'EXPORTSFROM': '', + 'EXPORTSYMS': '', + 'EXTRATESTOPTS': '', + 'EXT_SUFFIX': '.cpython-310-x86_64-linux-gnu.so', + 'FILEMODE': 644, + 'FLOAT_WORDS_BIGENDIAN': 0, + 'FLOCK_NEEDS_LIBBSD': 0, + 'GETPGRP_HAVE_ARG': 0, + 'GITBRANCH': '', + 'GITTAG': '', + 'GITVERSION': '', + 'GNULD': 'yes', + 'HAVE_ACCEPT4': 1, + 'HAVE_ACOSH': 1, + 'HAVE_ADDRINFO': 1, + 'HAVE_ALARM': 1, + 'HAVE_ALIGNED_REQUIRED': 0, + 'HAVE_ALLOCA_H': 1, + 'HAVE_ALTZONE': 0, + 'HAVE_ASINH': 1, + 'HAVE_ASM_TYPES_H': 1, + 'HAVE_ATANH': 1, + 'HAVE_BIND_TEXTDOMAIN_CODESET': 1, + 'HAVE_BLUETOOTH_BLUETOOTH_H': 0, + 'HAVE_BLUETOOTH_H': 0, + 'HAVE_BROKEN_MBSTOWCS': 0, + 'HAVE_BROKEN_NICE': 0, + 'HAVE_BROKEN_PIPE_BUF': 0, + 'HAVE_BROKEN_POLL': 0, + 'HAVE_BROKEN_POSIX_SEMAPHORES': 0, + 'HAVE_BROKEN_PTHREAD_SIGMASK': 0, + 'HAVE_BROKEN_SEM_GETVALUE': 0, + 'HAVE_BROKEN_UNSETENV': 0, + 'HAVE_BUILTIN_ATOMIC': 1, + 'HAVE_CHFLAGS': 0, + 'HAVE_CHOWN': 1, + 'HAVE_CHROOT': 1, + 'HAVE_CLOCK': 1, + 'HAVE_CLOCK_GETRES': 1, + 'HAVE_CLOCK_GETTIME': 1, + 'HAVE_CLOCK_SETTIME': 1, + 'HAVE_CLOSE_RANGE': 0, + 'HAVE_COMPUTED_GOTOS': 1, + 'HAVE_CONFSTR': 1, + 'HAVE_CONIO_H': 0, + 'HAVE_COPYSIGN': 1, + 'HAVE_COPY_FILE_RANGE': 0, + 'HAVE_CRYPT_H': 1, + 'HAVE_CRYPT_R': 1, + 'HAVE_CTERMID': 1, + 'HAVE_CTERMID_R': 0, + 'HAVE_CURSES_FILTER': 1, + 'HAVE_CURSES_H': 1, + 'HAVE_CURSES_HAS_KEY': 1, + 'HAVE_CURSES_IMMEDOK': 1, + 'HAVE_CURSES_IS_PAD': 1, + 'HAVE_CURSES_IS_TERM_RESIZED': 1, + 'HAVE_CURSES_RESIZETERM': 1, + 'HAVE_CURSES_RESIZE_TERM': 1, + 'HAVE_CURSES_SYNCOK': 1, + 'HAVE_CURSES_TYPEAHEAD': 1, + 'HAVE_CURSES_USE_ENV': 1, + 'HAVE_CURSES_WCHGAT': 1, + 'HAVE_DECL_ISFINITE': 1, + 'HAVE_DECL_ISINF': 1, + 'HAVE_DECL_ISNAN': 1, + 'HAVE_DECL_RTLD_DEEPBIND': 1, + 'HAVE_DECL_RTLD_GLOBAL': 1, + 'HAVE_DECL_RTLD_LAZY': 1, + 'HAVE_DECL_RTLD_LOCAL': 1, + 'HAVE_DECL_RTLD_MEMBER': 0, + 'HAVE_DECL_RTLD_NODELETE': 1, + 'HAVE_DECL_RTLD_NOLOAD': 1, + 'HAVE_DECL_RTLD_NOW': 1, + 'HAVE_DECL_TZNAME': 0, + 'HAVE_DEVICE_MACROS': 1, + 'HAVE_DEV_PTC': 0, + 'HAVE_DEV_PTMX': 1, + 'HAVE_DIRECT_H': 0, + 'HAVE_DIRENT_D_TYPE': 1, + 'HAVE_DIRENT_H': 1, + 'HAVE_DIRFD': 1, + 'HAVE_DLFCN_H': 1, + 
'HAVE_DLOPEN': 1, + 'HAVE_DUP2': 1, + 'HAVE_DUP3': 1, + 'HAVE_DYLD_SHARED_CACHE_CONTAINS_PATH': 0, + 'HAVE_DYNAMIC_LOADING': 1, + 'HAVE_ENDIAN_H': 1, + 'HAVE_EPOLL': 1, + 'HAVE_EPOLL_CREATE1': 1, + 'HAVE_ERF': 1, + 'HAVE_ERFC': 1, + 'HAVE_ERRNO_H': 1, + 'HAVE_EVENTFD': 1, + 'HAVE_EXECV': 1, + 'HAVE_EXPLICIT_BZERO': 0, + 'HAVE_EXPLICIT_MEMSET': 0, + 'HAVE_EXPM1': 1, + 'HAVE_FACCESSAT': 1, + 'HAVE_FCHDIR': 1, + 'HAVE_FCHMOD': 1, + 'HAVE_FCHMODAT': 1, + 'HAVE_FCHOWN': 1, + 'HAVE_FCHOWNAT': 1, + 'HAVE_FCNTL_H': 1, + 'HAVE_FDATASYNC': 1, + 'HAVE_FDOPENDIR': 1, + 'HAVE_FDWALK': 0, + 'HAVE_FEXECVE': 1, + 'HAVE_FINITE': 1, + 'HAVE_FLOCK': 1, + 'HAVE_FORK': 1, + 'HAVE_FORKPTY': 1, + 'HAVE_FPATHCONF': 1, + 'HAVE_FSEEK64': 0, + 'HAVE_FSEEKO': 1, + 'HAVE_FSTATAT': 1, + 'HAVE_FSTATVFS': 1, + 'HAVE_FSYNC': 1, + 'HAVE_FTELL64': 0, + 'HAVE_FTELLO': 1, + 'HAVE_FTIME': 1, + 'HAVE_FTRUNCATE': 1, + 'HAVE_FUTIMENS': 1, + 'HAVE_FUTIMES': 1, + 'HAVE_FUTIMESAT': 1, + 'HAVE_GAI_STRERROR': 1, + 'HAVE_GAMMA': 1, + 'HAVE_GCC_ASM_FOR_MC68881': 0, + 'HAVE_GCC_ASM_FOR_X64': 1, + 'HAVE_GCC_ASM_FOR_X87': 1, + 'HAVE_GCC_UINT128_T': 1, + 'HAVE_GETADDRINFO': 1, + 'HAVE_GETC_UNLOCKED': 1, + 'HAVE_GETENTROPY': 0, + 'HAVE_GETGRGID_R': 1, + 'HAVE_GETGRNAM_R': 1, + 'HAVE_GETGROUPLIST': 1, + 'HAVE_GETGROUPS': 1, + 'HAVE_GETHOSTBYNAME': 0, + 'HAVE_GETHOSTBYNAME_R': 1, + 'HAVE_GETHOSTBYNAME_R_3_ARG': 0, + 'HAVE_GETHOSTBYNAME_R_5_ARG': 0, + 'HAVE_GETHOSTBYNAME_R_6_ARG': 1, + 'HAVE_GETITIMER': 1, + 'HAVE_GETLOADAVG': 1, + 'HAVE_GETLOGIN': 1, + 'HAVE_GETNAMEINFO': 1, + 'HAVE_GETPAGESIZE': 1, + 'HAVE_GETPEERNAME': 1, + 'HAVE_GETPGID': 1, + 'HAVE_GETPGRP': 1, + 'HAVE_GETPID': 1, + 'HAVE_GETPRIORITY': 1, + 'HAVE_GETPWENT': 1, + 'HAVE_GETPWNAM_R': 1, + 'HAVE_GETPWUID_R': 1, + 'HAVE_GETRANDOM': 0, + 'HAVE_GETRANDOM_SYSCALL': 1, + 'HAVE_GETRESGID': 1, + 'HAVE_GETRESUID': 1, + 'HAVE_GETSID': 1, + 'HAVE_GETSPENT': 1, + 'HAVE_GETSPNAM': 1, + 'HAVE_GETWD': 1, + 'HAVE_GLIBC_MEMMOVE_BUG': 0, + 'HAVE_GRP_H': 1, + 'HAVE_HSTRERROR': 1, + 'HAVE_HTOLE64': 1, + 'HAVE_HYPOT': 1, + 'HAVE_IEEEFP_H': 0, + 'HAVE_IF_NAMEINDEX': 1, + 'HAVE_INET_ATON': 1, + 'HAVE_INET_PTON': 1, + 'HAVE_INITGROUPS': 1, + 'HAVE_INTTYPES_H': 1, + 'HAVE_IO_H': 0, + 'HAVE_IPA_PURE_CONST_BUG': 0, + 'HAVE_KILL': 1, + 'HAVE_KILLPG': 1, + 'HAVE_KQUEUE': 0, + 'HAVE_LANGINFO_H': 1, + 'HAVE_LARGEFILE_SUPPORT': 0, + 'HAVE_LCHFLAGS': 0, + 'HAVE_LCHMOD': 0, + 'HAVE_LCHOWN': 1, + 'HAVE_LGAMMA': 1, + 'HAVE_LIBDL': 1, + 'HAVE_LIBDLD': 0, + 'HAVE_LIBIEEE': 0, + 'HAVE_LIBINTL_H': 1, + 'HAVE_LIBREADLINE': 1, + 'HAVE_LIBRESOLV': 0, + 'HAVE_LIBSENDFILE': 0, + 'HAVE_LIBUTIL_H': 0, + 'HAVE_LIBUUID': 1, + 'HAVE_LINK': 1, + 'HAVE_LINKAT': 1, + 'HAVE_LINUX_AUXVEC_H': 1, + 'HAVE_LINUX_CAN_BCM_H': 1, + 'HAVE_LINUX_CAN_H': 1, + 'HAVE_LINUX_CAN_J1939_H': 0, + 'HAVE_LINUX_CAN_RAW_FD_FRAMES': 1, + 'HAVE_LINUX_CAN_RAW_H': 1, + 'HAVE_LINUX_CAN_RAW_JOIN_FILTERS': 1, + 'HAVE_LINUX_MEMFD_H': 1, + 'HAVE_LINUX_NETLINK_H': 1, + 'HAVE_LINUX_QRTR_H': 0, + 'HAVE_LINUX_RANDOM_H': 1, + 'HAVE_LINUX_TIPC_H': 1, + 'HAVE_LINUX_VM_SOCKETS_H': 1, + 'HAVE_LINUX_WAIT_H': 1, + 'HAVE_LOCKF': 1, + 'HAVE_LOG1P': 1, + 'HAVE_LOG2': 1, + 'HAVE_LONG_DOUBLE': 1, + 'HAVE_LSTAT': 1, + 'HAVE_LUTIMES': 1, + 'HAVE_MADVISE': 1, + 'HAVE_MAKEDEV': 1, + 'HAVE_MBRTOWC': 1, + 'HAVE_MEMFD_CREATE': 0, + 'HAVE_MEMORY_H': 1, + 'HAVE_MEMRCHR': 1, + 'HAVE_MKDIRAT': 1, + 'HAVE_MKFIFO': 1, + 'HAVE_MKFIFOAT': 1, + 'HAVE_MKNOD': 1, + 'HAVE_MKNODAT': 1, + 'HAVE_MKTIME': 1, + 'HAVE_MMAP': 1, + 'HAVE_MREMAP': 1, + 'HAVE_NCURSES_H': 1, + 'HAVE_NDIR_H': 0, + 
'HAVE_NETPACKET_PACKET_H': 1, + 'HAVE_NET_IF_H': 1, + 'HAVE_NICE': 1, + 'HAVE_NON_UNICODE_WCHAR_T_REPRESENTATION': 0, + 'HAVE_OPENAT': 1, + 'HAVE_OPENPTY': 1, + 'HAVE_PATHCONF': 1, + 'HAVE_PAUSE': 1, + 'HAVE_PIPE2': 1, + 'HAVE_PLOCK': 0, + 'HAVE_POLL': 1, + 'HAVE_POLL_H': 1, + 'HAVE_POSIX_FADVISE': 1, + 'HAVE_POSIX_FALLOCATE': 1, + 'HAVE_POSIX_SPAWN': 1, + 'HAVE_POSIX_SPAWNP': 1, + 'HAVE_PREAD': 1, + 'HAVE_PREADV': 1, + 'HAVE_PREADV2': 0, + 'HAVE_PRLIMIT': 1, + 'HAVE_PROCESS_H': 0, + 'HAVE_PROTOTYPES': 1, + 'HAVE_PTHREAD_CONDATTR_SETCLOCK': 1, + 'HAVE_PTHREAD_DESTRUCTOR': 0, + 'HAVE_PTHREAD_GETCPUCLOCKID': 1, + 'HAVE_PTHREAD_H': 1, + 'HAVE_PTHREAD_INIT': 0, + 'HAVE_PTHREAD_KILL': 1, + 'HAVE_PTHREAD_SIGMASK': 1, + 'HAVE_PTY_H': 1, + 'HAVE_PWRITE': 1, + 'HAVE_PWRITEV': 1, + 'HAVE_PWRITEV2': 0, + 'HAVE_READLINK': 1, + 'HAVE_READLINKAT': 1, + 'HAVE_READV': 1, + 'HAVE_REALPATH': 1, + 'HAVE_RENAMEAT': 1, + 'HAVE_RL_APPEND_HISTORY': 1, + 'HAVE_RL_CATCH_SIGNAL': 1, + 'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1, + 'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1, + 'HAVE_RL_COMPLETION_MATCHES': 1, + 'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1, + 'HAVE_RL_PRE_INPUT_HOOK': 1, + 'HAVE_RL_RESIZE_TERMINAL': 1, + 'HAVE_ROUND': 1, + 'HAVE_RTPSPAWN': 0, + 'HAVE_SCHED_GET_PRIORITY_MAX': 1, + 'HAVE_SCHED_H': 1, + 'HAVE_SCHED_RR_GET_INTERVAL': 1, + 'HAVE_SCHED_SETAFFINITY': 1, + 'HAVE_SCHED_SETPARAM': 1, + 'HAVE_SCHED_SETSCHEDULER': 1, + 'HAVE_SEM_CLOCKWAIT': 0, + 'HAVE_SEM_GETVALUE': 1, + 'HAVE_SEM_OPEN': 1, + 'HAVE_SEM_TIMEDWAIT': 1, + 'HAVE_SEM_UNLINK': 1, + 'HAVE_SENDFILE': 1, + 'HAVE_SETEGID': 1, + 'HAVE_SETEUID': 1, + 'HAVE_SETGID': 1, + 'HAVE_SETGROUPS': 1, + 'HAVE_SETHOSTNAME': 1, + 'HAVE_SETITIMER': 1, + 'HAVE_SETLOCALE': 1, + 'HAVE_SETPGID': 1, + 'HAVE_SETPGRP': 1, + 'HAVE_SETPRIORITY': 1, + 'HAVE_SETREGID': 1, + 'HAVE_SETRESGID': 1, + 'HAVE_SETRESUID': 1, + 'HAVE_SETREUID': 1, + 'HAVE_SETSID': 1, + 'HAVE_SETUID': 1, + 'HAVE_SETVBUF': 1, + 'HAVE_SHADOW_H': 1, + 'HAVE_SHM_OPEN': 1, + 'HAVE_SHM_UNLINK': 1, + 'HAVE_SIGACTION': 1, + 'HAVE_SIGALTSTACK': 1, + 'HAVE_SIGFILLSET': 1, + 'HAVE_SIGINFO_T_SI_BAND': 1, + 'HAVE_SIGINTERRUPT': 1, + 'HAVE_SIGNAL_H': 1, + 'HAVE_SIGPENDING': 1, + 'HAVE_SIGRELSE': 1, + 'HAVE_SIGTIMEDWAIT': 1, + 'HAVE_SIGWAIT': 1, + 'HAVE_SIGWAITINFO': 1, + 'HAVE_SNPRINTF': 1, + 'HAVE_SOCKADDR_ALG': 1, + 'HAVE_SOCKADDR_SA_LEN': 0, + 'HAVE_SOCKADDR_STORAGE': 1, + 'HAVE_SOCKETPAIR': 1, + 'HAVE_SPAWN_H': 1, + 'HAVE_SPLICE': 1, + 'HAVE_SSIZE_T': 1, + 'HAVE_STATVFS': 1, + 'HAVE_STAT_TV_NSEC': 1, + 'HAVE_STAT_TV_NSEC2': 0, + 'HAVE_STDARG_PROTOTYPES': 1, + 'HAVE_STDINT_H': 1, + 'HAVE_STDLIB_H': 1, + 'HAVE_STD_ATOMIC': 1, + 'HAVE_STRFTIME': 1, + 'HAVE_STRINGS_H': 1, + 'HAVE_STRING_H': 1, + 'HAVE_STRLCPY': 0, + 'HAVE_STROPTS_H': 0, + 'HAVE_STRSIGNAL': 1, + 'HAVE_STRUCT_PASSWD_PW_GECOS': 1, + 'HAVE_STRUCT_PASSWD_PW_PASSWD': 1, + 'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0, + 'HAVE_STRUCT_STAT_ST_BLKSIZE': 1, + 'HAVE_STRUCT_STAT_ST_BLOCKS': 1, + 'HAVE_STRUCT_STAT_ST_FLAGS': 0, + 'HAVE_STRUCT_STAT_ST_GEN': 0, + 'HAVE_STRUCT_STAT_ST_RDEV': 1, + 'HAVE_STRUCT_TM_TM_ZONE': 1, + 'HAVE_SYMLINK': 1, + 'HAVE_SYMLINKAT': 1, + 'HAVE_SYNC': 1, + 'HAVE_SYSCONF': 1, + 'HAVE_SYSEXITS_H': 1, + 'HAVE_SYS_AUDIOIO_H': 0, + 'HAVE_SYS_AUXV_H': 1, + 'HAVE_SYS_BSDTTY_H': 0, + 'HAVE_SYS_DEVPOLL_H': 0, + 'HAVE_SYS_DIR_H': 0, + 'HAVE_SYS_ENDIAN_H': 0, + 'HAVE_SYS_EPOLL_H': 1, + 'HAVE_SYS_EVENTFD_H': 1, + 'HAVE_SYS_EVENT_H': 0, + 'HAVE_SYS_FILE_H': 1, + 'HAVE_SYS_IOCTL_H': 1, + 'HAVE_SYS_KERN_CONTROL_H': 0, + 'HAVE_SYS_LOADAVG_H': 0, + 
'HAVE_SYS_LOCK_H': 0, + 'HAVE_SYS_MEMFD_H': 0, + 'HAVE_SYS_MKDEV_H': 0, + 'HAVE_SYS_MMAN_H': 1, + 'HAVE_SYS_MODEM_H': 0, + 'HAVE_SYS_NDIR_H': 0, + 'HAVE_SYS_PARAM_H': 1, + 'HAVE_SYS_POLL_H': 1, + 'HAVE_SYS_RANDOM_H': 0, + 'HAVE_SYS_RESOURCE_H': 1, + 'HAVE_SYS_SELECT_H': 1, + 'HAVE_SYS_SENDFILE_H': 1, + 'HAVE_SYS_SOCKET_H': 1, + 'HAVE_SYS_STATVFS_H': 1, + 'HAVE_SYS_STAT_H': 1, + 'HAVE_SYS_SYSCALL_H': 1, + 'HAVE_SYS_SYSMACROS_H': 1, + 'HAVE_SYS_SYS_DOMAIN_H': 0, + 'HAVE_SYS_TERMIO_H': 0, + 'HAVE_SYS_TIMES_H': 1, + 'HAVE_SYS_TIME_H': 1, + 'HAVE_SYS_TYPES_H': 1, + 'HAVE_SYS_UIO_H': 1, + 'HAVE_SYS_UN_H': 1, + 'HAVE_SYS_UTSNAME_H': 1, + 'HAVE_SYS_WAIT_H': 1, + 'HAVE_SYS_XATTR_H': 1, + 'HAVE_TCGETPGRP': 1, + 'HAVE_TCSETPGRP': 1, + 'HAVE_TEMPNAM': 1, + 'HAVE_TERMIOS_H': 1, + 'HAVE_TERM_H': 1, + 'HAVE_TGAMMA': 1, + 'HAVE_TIMEGM': 1, + 'HAVE_TIMES': 1, + 'HAVE_TMPFILE': 1, + 'HAVE_TMPNAM': 1, + 'HAVE_TMPNAM_R': 1, + 'HAVE_TM_ZONE': 1, + 'HAVE_TRUNCATE': 1, + 'HAVE_TZNAME': 0, + 'HAVE_UCS4_TCL': 0, + 'HAVE_UNAME': 1, + 'HAVE_UNISTD_H': 1, + 'HAVE_UNLINKAT': 1, + 'HAVE_USABLE_WCHAR_T': 0, + 'HAVE_UTIL_H': 0, + 'HAVE_UTIMENSAT': 1, + 'HAVE_UTIMES': 1, + 'HAVE_UTIME_H': 1, + 'HAVE_UUID_CREATE': 0, + 'HAVE_UUID_ENC_BE': 0, + 'HAVE_UUID_GENERATE_TIME_SAFE': 1, + 'HAVE_UUID_H': 1, + 'HAVE_UUID_UUID_H': 1, + 'HAVE_VFORK': 1, + 'HAVE_WAIT3': 1, + 'HAVE_WAIT4': 1, + 'HAVE_WAITID': 1, + 'HAVE_WAITPID': 1, + 'HAVE_WCHAR_H': 1, + 'HAVE_WCSCOLL': 1, + 'HAVE_WCSFTIME': 1, + 'HAVE_WCSXFRM': 1, + 'HAVE_WMEMCMP': 1, + 'HAVE_WORKING_TZSET': 1, + 'HAVE_WRITEV': 1, + 'HAVE_ZLIB_COPY': 1, + 'HAVE__GETPTY': 0, + 'HOST_GNU_TYPE': 'x86_64-conda-linux-gnu', + 'INCLDIRSTOMAKE': '/root/envs/evalkit_cambrian/include ' + '/root/envs/evalkit_cambrian/include ' + '/root/envs/evalkit_cambrian/include/python3.10 ' + '/root/envs/evalkit_cambrian/include/python3.10', + 'INCLUDEDIR': '/root/envs/evalkit_cambrian/include', + 'INCLUDEPY': '/root/envs/evalkit_cambrian/include/python3.10', + 'INSTALL': '/usr/bin/install -c', + 'INSTALL_DATA': '/usr/bin/install -c -m 644', + 'INSTALL_PROGRAM': '/usr/bin/install -c', + 'INSTALL_SCRIPT': '/usr/bin/install -c', + 'INSTALL_SHARED': '/usr/bin/install -c -m 755', + 'INSTSONAME': 'libpython3.10.a', + 'IO_H': 'Modules/_io/_iomodule.h', + 'IO_OBJS': '\\', + 'LDCXXSHARED': 'x86_64-conda-linux-gnu-c++ -pthread -shared', + 'LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now ' + '-Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now ' + '-Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib', + 'LDLIBRARY': 'libpython3.10.a', + 'LDLIBRARYDIR': '', + 'LDSHARED': 'x86_64-conda-linux-gnu-gcc -pthread -shared -Wl,-O2 ' + '-Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now ' + '-Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib', + 'LDVERSION': '3.10', + 'LIBC': '', + 
'LIBDEST': '/root/envs/evalkit_cambrian/lib/python3.10', + 'LIBDIR': '/root/envs/evalkit_cambrian/lib', + 'LIBFFI_INCLUDEDIR': '/root/envs/evalkit_cambrian/include', + 'LIBM': '-lm', + 'LIBOBJDIR': 'Python/', + 'LIBOBJS': '', + 'LIBPC': '/root/envs/evalkit_cambrian/lib/pkgconfig', + 'LIBPL': '/root/envs/evalkit_cambrian/lib/python3.10/config-3.10-x86_64-linux-gnu', + 'LIBPYTHON': '', + 'LIBRARY': 'libpython3.10.a', + 'LIBRARY_DEPS': 'libpython3.10.a', + 'LIBRARY_OBJS': '\\', + 'LIBRARY_OBJS_OMIT_FROZEN': '\\', + 'LIBS': '-lcrypt -lpthread -ldl -lutil -lm', + 'LIBSUBDIRS': 'asyncio \\', + 'LINKCC': 'x86_64-conda-linux-gnu-gcc -pthread', + 'LINKFORSHARED': '-Xlinker -export-dynamic', + 'LIPO_32BIT_FLAGS': '', + 'LIPO_INTEL64_FLAGS': '', + 'LLVM_PROF_ERR': 'no', + 'LLVM_PROF_FILE': '', + 'LLVM_PROF_MERGER': 'true', + 'LN': 'ln', + 'LOCALMODLIBS': '', + 'MACHDEP': 'linux', + 'MACHDEP_OBJS': '', + 'MACHDESTLIB': '/root/envs/evalkit_cambrian/lib/python3.10', + 'MACOSX_DEPLOYMENT_TARGET': '', + 'MAINCC': 'x86_64-conda-linux-gnu-gcc -pthread', + 'MAJOR_IN_MKDEV': 0, + 'MAJOR_IN_SYSMACROS': 0, + 'MAKESETUP': '/croot/python-split_1733933809325/work/Modules/makesetup', + 'MANDIR': '/root/envs/evalkit_cambrian/share/man', + 'MKDIR_P': '/usr/bin/mkdir -p', + 'MODBUILT_NAMES': 'posix errno pwd _sre _codecs _weakref _functools ' + '_operator _collections _abc itertools atexit _signal ' + '_stat time _thread _locale _io faulthandler ' + '_tracemalloc _symtable xxsubtype', + 'MODDISABLED_NAMES': '', + 'MODLIBS': '', + 'MODOBJS': 'Modules/posixmodule.o Modules/errnomodule.o ' + 'Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o ' + 'Modules/_weakref.o Modules/_functoolsmodule.o ' + 'Modules/_operator.o Modules/_collectionsmodule.o ' + 'Modules/_abc.o Modules/itertoolsmodule.o ' + 'Modules/atexitmodule.o Modules/signalmodule.o Modules/_stat.o ' + 'Modules/timemodule.o Modules/_threadmodule.o ' + 'Modules/_localemodule.o Modules/_iomodule.o Modules/iobase.o ' + 'Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o ' + 'Modules/textio.o Modules/stringio.o Modules/faulthandler.o ' + 'Modules/_tracemalloc.o Modules/symtablemodule.o ' + 'Modules/xxsubtype.o', + 'MODULE_OBJS': '\\', + 'MULTIARCH': 'x86_64-linux-gnu', + 'MULTIARCH_CPPFLAGS': '-DMULTIARCH=\\"x86_64-linux-gnu\\"', + 'MVWDELCH_IS_EXPRESSION': 1, + 'NO_AS_NEEDED': '-Wl,--no-as-needed', + 'OBJECT_OBJS': '\\', + 'OPENSSL_INCLUDES': '-I/root/envs/evalkit_cambrian/include', + 'OPENSSL_LDFLAGS': '-L/root/envs/evalkit_cambrian/lib', + 'OPENSSL_LIBS': '-lssl -lcrypto', + 'OPENSSL_RPATH': '', + 'OPT': '-DNDEBUG -fwrapv -O2 -Wall', + 'OTHER_LIBTOOL_OPT': '', + 'PACKAGE_BUGREPORT': 0, + 'PACKAGE_NAME': 0, + 'PACKAGE_STRING': 0, + 'PACKAGE_TARNAME': 0, + 'PACKAGE_URL': 0, + 'PACKAGE_VERSION': 0, + 'PARSER_HEADERS': '\\', + 'PARSER_OBJS': '\\ \\ Parser/myreadline.o Parser/tokenizer.o', + 'PEGEN_HEADERS': '\\', + 'PEGEN_OBJS': '\\', + 'PGO_PROF_GEN_FLAG': '-fprofile-generate', + 'PGO_PROF_USE_FLAG': ' ', + 'PLATLIBDIR': 'lib', + 'POBJS': '\\', + 'POSIX_SEMAPHORES_NOT_ENABLED': 0, + 'PROFILE_TASK': '-m test --pgo', + 'PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT': 1, + 'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1, + 'PURIFY': '', + 'PY3LIBRARY': '', + 'PYLONG_BITS_IN_DIGIT': 0, + 'PYTHON': 'python', + 'PYTHONFRAMEWORK': '', + 'PYTHONFRAMEWORKDIR': 'no-framework', + 'PYTHONFRAMEWORKINSTALLDIR': '', + 'PYTHONFRAMEWORKPREFIX': '', + 'PYTHONPATH': '', + 'PYTHON_FOR_BUILD': './python -E', + 'PYTHON_FOR_REGEN': '', + 'PYTHON_HEADERS': '\\', + 'PYTHON_OBJS': '\\', + 
'PY_BUILD_ENVIRON': '', + 'PY_BUILTIN_HASHLIB_HASHES': '"md5,sha1,sha256,sha512,sha3,blake2"', + 'PY_BUILTIN_MODULE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG ' + '-fwrapv -O2 -Wall -march=nocona -mtune=haswell ' + '-ftree-vectorize -fPIC -fstack-protector-strong ' + '-fno-plt -O2 -ffunction-sections -pipe -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + ' -march=nocona ' + '-mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 ' + '-ffunction-sections -pipe -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + ' ' + '-fno-semantic-interposition ' + ' ' + ' -g -std=c99 -Wextra ' + '-Wno-unused-result -Wno-unused-parameter ' + '-Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration ' + '-fvisibility=hidden ' + ' ' + '-I/croot/python-split_1733933809325/work/Include/internal ' + '-IObjects -IInclude -IPython -I. ' + '-I/croot/python-split_1733933809325/work/Include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include ' + '-DPy_BUILD_CORE_BUILTIN', + 'PY_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 -Wall ' + '-march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe ' + '-isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + ' -march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe ' + '-isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + '', + 'PY_CFLAGS_NODIST': '-fno-semantic-interposition ' + ' -g -std=c99 ' + '-Wextra -Wno-unused-result -Wno-unused-parameter ' + '-Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration ' + '-fvisibility=hidden ' + '-I/croot/python-split_1733933809325/work/Include/internal', + 'PY_COERCE_C_LOCALE': 1, + 'PY_CORE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 ' + '-Wall -march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections ' + '-pipe -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + ' -march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections ' + '-pipe -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' 
' + ' -fno-semantic-interposition ' + ' ' + '-g -std=c99 -Wextra -Wno-unused-result ' + '-Wno-unused-parameter -Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration -fvisibility=hidden ' + ' ' + '-I/croot/python-split_1733933809325/work/Include/internal ' + '-IObjects -IInclude -IPython -I. ' + '-I/croot/python-split_1733933809325/work/Include -DNDEBUG ' + '-D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include ' + '-DPy_BUILD_CORE', + 'PY_CORE_LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib ' + '-fno-semantic-interposition ' + ' -g', + 'PY_CPPFLAGS': '-IObjects -IInclude -IPython -I. ' + '-I/croot/python-split_1733933809325/work/Include -DNDEBUG ' + '-D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include', + 'PY_ENABLE_SHARED': 0, + 'PY_FORMAT_SIZE_T': '"z"', + 'PY_LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/evalkit_cambrian/lib ' + '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib ' + '-L/root/envs/evalkit_cambrian/lib', + 'PY_LDFLAGS_NODIST': '-fno-semantic-interposition ' + ' -g', + 'PY_SSL_DEFAULT_CIPHERS': 1, + 'PY_SSL_DEFAULT_CIPHER_STRING': 0, + 'PY_STDMODULE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv ' + '-O2 -Wall -march=nocona -mtune=haswell ' + '-ftree-vectorize -fPIC -fstack-protector-strong ' + '-fno-plt -O2 -ffunction-sections -pipe -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + ' -march=nocona ' + '-mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 ' + '-ffunction-sections -pipe -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix ' + ' ' + ' ' + '-fno-semantic-interposition ' + ' -g -std=c99 ' + '-Wextra -Wno-unused-result -Wno-unused-parameter ' + '-Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration ' + '-fvisibility=hidden ' + ' ' + '-I/croot/python-split_1733933809325/work/Include/internal ' + '-IObjects -IInclude -IPython -I. 
' + '-I/croot/python-split_1733933809325/work/Include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/evalkit_cambrian/include ' + '-I/root/envs/evalkit_cambrian/include', + 'Py_DEBUG': 0, + 'Py_ENABLE_SHARED': 0, + 'Py_HASH_ALGORITHM': 0, + 'Py_TRACE_REFS': 0, + 'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\', + 'READELF': 'x86_64-conda-linux-gnu-readelf', + 'RESSRCDIR': 'Mac/Resources/framework', + 'RETSIGTYPE': 'void', + 'RUNSHARED': '', + 'SCRIPTDIR': '/root/envs/evalkit_cambrian/lib', + 'SETPGRP_HAVE_ARG': 0, + 'SHELL': '/bin/sh', + 'SHLIBS': '-lcrypt -lpthread -ldl -lutil -lm', + 'SHLIB_SUFFIX': '.so', + 'SHM_NEEDS_LIBRT': 1, + 'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0, + 'SITEPATH': '', + 'SIZEOF_DOUBLE': 8, + 'SIZEOF_FLOAT': 4, + 'SIZEOF_FPOS_T': 16, + 'SIZEOF_INT': 4, + 'SIZEOF_LONG': 8, + 'SIZEOF_LONG_DOUBLE': 16, + 'SIZEOF_LONG_LONG': 8, + 'SIZEOF_OFF_T': 8, + 'SIZEOF_PID_T': 4, + 'SIZEOF_PTHREAD_KEY_T': 4, + 'SIZEOF_PTHREAD_T': 8, + 'SIZEOF_SHORT': 2, + 'SIZEOF_SIZE_T': 8, + 'SIZEOF_TIME_T': 8, + 'SIZEOF_UINTPTR_T': 8, + 'SIZEOF_VOID_P': 8, + 'SIZEOF_WCHAR_T': 4, + 'SIZEOF__BOOL': 1, + 'SOABI': 'cpython-310-x86_64-linux-gnu', + 'SRCDIRS': 'Parser Objects Python Modules Modules/_io Programs', + 'SRC_GDB_HOOKS': '/croot/python-split_1733933809325/work/Tools/gdb/libpython.py', + 'STATIC_LIBPYTHON': 1, + 'STDC_HEADERS': 1, + 'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */", + 'STRIPFLAG': '-s', + 'SUBDIRS': '', + 'SUBDIRSTOO': 'Include Lib Misc', + 'SYSLIBS': '-lm', + 'SYS_SELECT_WITH_SYS_TIME': 1, + 'TCLTK_INCLUDES': '-I/root/envs/evalkit_cambrian/include', + 'TCLTK_LIBS': '-L/root/envs/evalkit_cambrian/lib ' + '-ltcl8.6 -ltk8.6', + 'TESTOPTS': '', + 'TESTPATH': '', + 'TESTPYTHON': './python', + 'TESTPYTHONOPTS': '', + 'TESTRUNNER': './python ' + '/croot/python-split_1733933809325/work/Tools/scripts/run_tests.py', + 'TESTSUBDIRS': 'ctypes/test \\', + 'TESTTIMEOUT': 1200, + 'TEST_MODULES': 'yes', + 'THREAD_STACK_SIZE': 0, + 'TIMEMODULE_LIB': 0, + 'TIME_WITH_SYS_TIME': 1, + 'TM_IN_SYS_TIME': 0, + 'TZPATH': '/root/envs/evalkit_cambrian/share/zoneinfo', + 'UNICODE_DEPS': '\\', + 'UNIVERSALSDK': '', + 'UPDATE_FILE': '/croot/python-split_1733933809325/work/Tools/scripts/update_file.py', + 'USE_COMPUTED_GOTOS': 1, + 'VERSION': '3.10', + 'VPATH': '/croot/python-split_1733933809325/work', + 'WHEEL_PKG_DIR': '', + 'WINDOW_HAS_FLAGS': 1, + 'WITH_DECIMAL_CONTEXTVAR': 1, + 'WITH_DOC_STRINGS': 1, + 'WITH_DTRACE': 0, + 'WITH_DYLD': 0, + 'WITH_EDITLINE': 0, + 'WITH_LIBINTL': 0, + 'WITH_NEXT_FRAMEWORK': 0, + 'WITH_PYMALLOC': 1, + 'WITH_VALGRIND': 0, + 'X87_DOUBLE_ROUNDING': 0, + 'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax', + 'abs_builddir': '/croot/python-split_1733933809325/work/build-static', + 'abs_srcdir': '/croot/python-split_1733933809325/work', + 'datarootdir': '/root/envs/evalkit_cambrian/share', + 'exec_prefix': '/root/envs/evalkit_cambrian', + 'prefix': '/root/envs/evalkit_cambrian', + 'srcdir': '/croot/python-split_1733933809325/work'} diff --git a/evalkit_cambrian/lib/python3.10/_threading_local.py b/evalkit_cambrian/lib/python3.10/_threading_local.py new file mode 100644 index 0000000000000000000000000000000000000000..b006d76c4e23df7dbf09bc7e668b9eb87e4044af --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/_threading_local.py @@ -0,0 +1,242 @@ +"""Thread-local objects. 
+ +(Note that this module provides a Python version of the threading.local + class. Depending on the version of Python you're using, there may be a + faster one available. You should always import the `local` class from + `threading`.) + +Thread-local objects support the management of thread-local data. +If you have data that you want to be local to a thread, simply create +a thread-local object and use its attributes: + + >>> mydata = local() + >>> mydata.number = 42 + >>> mydata.number + 42 + +You can also access the local-object's dictionary: + + >>> mydata.__dict__ + {'number': 42} + >>> mydata.__dict__.setdefault('widgets', []) + [] + >>> mydata.widgets + [] + +What's important about thread-local objects is that their data are +local to a thread. If we access the data in a different thread: + + >>> log = [] + >>> def f(): + ... items = sorted(mydata.__dict__.items()) + ... log.append(items) + ... mydata.number = 11 + ... log.append(mydata.number) + + >>> import threading + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + >>> log + [[], 11] + +we get different data. Furthermore, changes made in the other thread +don't affect data seen in this thread: + + >>> mydata.number + 42 + +Of course, values you get from a local object, including a __dict__ +attribute, are for whatever thread was current at the time the +attribute was read. For that reason, you generally don't want to save +these values across threads, as they apply only to the thread they +came from. + +You can create custom local objects by subclassing the local class: + + >>> class MyLocal(local): + ... number = 2 + ... def __init__(self, /, **kw): + ... self.__dict__.update(kw) + ... def squared(self): + ... return self.number ** 2 + +This can be useful to support default values, methods and +initialization. Note that if you define an __init__ method, it will be +called each time the local object is used in a separate thread. This +is necessary to initialize each thread's dictionary. + +Now if we create a local object: + + >>> mydata = MyLocal(color='red') + +Now we have a default number: + + >>> mydata.number + 2 + +an initial color: + + >>> mydata.color + 'red' + >>> del mydata.color + +And a method that operates on the data: + + >>> mydata.squared() + 4 + +As before, we can access the data in a separate thread: + + >>> log = [] + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + >>> log + [[('color', 'red')], 11] + +without affecting this thread's data: + + >>> mydata.number + 2 + >>> mydata.color + Traceback (most recent call last): + ... + AttributeError: 'MyLocal' object has no attribute 'color' + +Note that subclasses can define slots, but they are not thread +local. They are shared across threads: + + >>> class MyLocal(local): + ... __slots__ = 'number' + + >>> mydata = MyLocal() + >>> mydata.number = 42 + >>> mydata.color = 'red' + +So, the separate thread: + + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + +affects what we see: + + >>> mydata.number + 11 + +>>> del mydata +""" + +from weakref import ref +from contextlib import contextmanager + +__all__ = ["local"] + +# We need to use objects from the threading module, but the threading +# module may also want to use our `local` class, if support for locals +# isn't compiled in to the `thread` module. This creates potential problems +# with circular imports. 
For that reason, we don't import `threading` +# until the bottom of this file (a hack sufficient to worm around the +# potential problems). Note that all platforms on CPython do have support +# for locals in the `thread` module, and there is no circular import problem +# then, so problems introduced by fiddling the order of imports here won't +# manifest. + +class _localimpl: + """A class managing thread-local dicts""" + __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__' + + def __init__(self): + # The key used in the Thread objects' attribute dicts. + # We keep it a string for speed but make it unlikely to clash with + # a "real" attribute. + self.key = '_threading_local._localimpl.' + str(id(self)) + # { id(Thread) -> (ref(Thread), thread-local dict) } + self.dicts = {} + + def get_dict(self): + """Return the dict for the current thread. Raises KeyError if none + defined.""" + thread = current_thread() + return self.dicts[id(thread)][1] + + def create_dict(self): + """Create a new dict for the current thread, and return it.""" + localdict = {} + key = self.key + thread = current_thread() + idt = id(thread) + def local_deleted(_, key=key): + # When the localimpl is deleted, remove the thread attribute. + thread = wrthread() + if thread is not None: + del thread.__dict__[key] + def thread_deleted(_, idt=idt): + # When the thread is deleted, remove the local dict. + # Note that this is suboptimal if the thread object gets + # caught in a reference loop. We would like to be called + # as soon as the OS-level thread ends instead. + local = wrlocal() + if local is not None: + dct = local.dicts.pop(idt) + wrlocal = ref(self, local_deleted) + wrthread = ref(thread, thread_deleted) + thread.__dict__[key] = wrlocal + self.dicts[idt] = wrthread, localdict + return localdict + + +@contextmanager +def _patch(self): + impl = object.__getattribute__(self, '_local__impl') + try: + dct = impl.get_dict() + except KeyError: + dct = impl.create_dict() + args, kw = impl.localargs + self.__init__(*args, **kw) + with impl.locallock: + object.__setattr__(self, '__dict__', dct) + yield + + +class local: + __slots__ = '_local__impl', '__dict__' + + def __new__(cls, /, *args, **kw): + if (args or kw) and (cls.__init__ is object.__init__): + raise TypeError("Initialization arguments are not supported") + self = object.__new__(cls) + impl = _localimpl() + impl.localargs = (args, kw) + impl.locallock = RLock() + object.__setattr__(self, '_local__impl', impl) + # We need to create the thread dict in anticipation of + # __init__ being called, to make sure we don't call it + # again ourselves. 
+ impl.create_dict() + return self + + def __getattribute__(self, name): + with _patch(self): + return object.__getattribute__(self, name) + + def __setattr__(self, name, value): + if name == '__dict__': + raise AttributeError( + "%r object attribute '__dict__' is read-only" + % self.__class__.__name__) + with _patch(self): + return object.__setattr__(self, name, value) + + def __delattr__(self, name): + if name == '__dict__': + raise AttributeError( + "%r object attribute '__dict__' is read-only" + % self.__class__.__name__) + with _patch(self): + return object.__delattr__(self, name) + + +from threading import current_thread, RLock diff --git a/evalkit_cambrian/lib/python3.10/abc.py b/evalkit_cambrian/lib/python3.10/abc.py new file mode 100644 index 0000000000000000000000000000000000000000..3c552cebb4226c02bd6f741c5446623a0cd6a7ed --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/abc.py @@ -0,0 +1,188 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) according to PEP 3119.""" + + +def abstractmethod(funcobj): + """A decorator indicating abstract methods. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract methods are overridden. + The abstract methods can be called using any of the normal + 'super' call mechanisms. abstractmethod() may be used to declare + abstract methods for properties and descriptors. + + Usage: + + class C(metaclass=ABCMeta): + @abstractmethod + def my_abstract_method(self, ...): + ... + """ + funcobj.__isabstractmethod__ = True + return funcobj + + +class abstractclassmethod(classmethod): + """A decorator indicating abstract classmethods. + + Deprecated, use 'classmethod' with 'abstractmethod' instead: + + class C(ABC): + @classmethod + @abstractmethod + def my_abstract_classmethod(cls, ...): + ... + + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractstaticmethod(staticmethod): + """A decorator indicating abstract staticmethods. + + Deprecated, use 'staticmethod' with 'abstractmethod' instead: + + class C(ABC): + @staticmethod + @abstractmethod + def my_abstract_staticmethod(...): + ... + + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractproperty(property): + """A decorator indicating abstract properties. + + Deprecated, use 'property' with 'abstractmethod' instead: + + class C(ABC): + @property + @abstractmethod + def my_abstract_property(self): + ... + + """ + + __isabstractmethod__ = True + + +try: + from _abc import (get_cache_token, _abc_init, _abc_register, + _abc_instancecheck, _abc_subclasscheck, _get_dump, + _reset_registry, _reset_caches) +except ImportError: + from _py_abc import ABCMeta, get_cache_token + ABCMeta.__module__ = 'abc' +else: + class ABCMeta(type): + """Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. 
You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + """ + def __new__(mcls, name, bases, namespace, **kwargs): + cls = super().__new__(mcls, name, bases, namespace, **kwargs) + _abc_init(cls) + return cls + + def register(cls, subclass): + """Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + """ + return _abc_register(cls, subclass) + + def __instancecheck__(cls, instance): + """Override for isinstance(instance, cls).""" + return _abc_instancecheck(cls, instance) + + def __subclasscheck__(cls, subclass): + """Override for issubclass(subclass, cls).""" + return _abc_subclasscheck(cls, subclass) + + def _dump_registry(cls, file=None): + """Debug helper to print the ABC registry.""" + print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file) + print(f"Inv. counter: {get_cache_token()}", file=file) + (_abc_registry, _abc_cache, _abc_negative_cache, + _abc_negative_cache_version) = _get_dump(cls) + print(f"_abc_registry: {_abc_registry!r}", file=file) + print(f"_abc_cache: {_abc_cache!r}", file=file) + print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file) + print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}", + file=file) + + def _abc_registry_clear(cls): + """Clear the registry (for debugging or testing).""" + _reset_registry(cls) + + def _abc_caches_clear(cls): + """Clear the caches (for debugging or testing).""" + _reset_caches(cls) + + +def update_abstractmethods(cls): + """Recalculate the set of abstract methods of an abstract class. + + If a class has had one of its abstract methods implemented after the + class was created, the method will not be considered implemented until + this function is called. Alternatively, if a new abstract method has been + added to the class, it will only be considered an abstract method of the + class after this function is called. + + This function should be called before any use is made of the class, + usually in class decorators that add methods to the subject class. + + Returns cls, to allow usage as a class decorator. + + If cls is not an instance of ABCMeta, does nothing. + """ + if not hasattr(cls, '__abstractmethods__'): + # We check for __abstractmethods__ here because cls might by a C + # implementation or a python implementation (especially during + # testing), and we want to handle both cases. + return cls + + abstracts = set() + # Check the existing abstract methods of the parents, keep only the ones + # that are not implemented. + for scls in cls.__bases__: + for name in getattr(scls, '__abstractmethods__', ()): + value = getattr(cls, name, None) + if getattr(value, "__isabstractmethod__", False): + abstracts.add(name) + # Also add any other newly added abstract methods. + for name, value in cls.__dict__.items(): + if getattr(value, "__isabstractmethod__", False): + abstracts.add(name) + cls.__abstractmethods__ = frozenset(abstracts) + return cls + + +class ABC(metaclass=ABCMeta): + """Helper class that provides a standard way to create an ABC using + inheritance. 
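A minimal usage sketch (the Shape and Square names here are illustrative only,
    not part of the module):

        >>> from abc import ABC, abstractmethod
        >>> class Shape(ABC):
        ...     @abstractmethod
        ...     def area(self): ...
        >>> class Square(Shape):
        ...     def __init__(self, side): self.side = side
        ...     def area(self): return self.side * self.side
        >>> Square(3).area()
        9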
+ """ + __slots__ = () diff --git a/evalkit_cambrian/lib/python3.10/ast.py b/evalkit_cambrian/lib/python3.10/ast.py new file mode 100644 index 0000000000000000000000000000000000000000..4f5f9827146dfd8731196121970b21724102c372 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/ast.py @@ -0,0 +1,1709 @@ +""" + ast + ~~~ + + The `ast` module helps Python applications to process trees of the Python + abstract syntax grammar. The abstract syntax itself might change with + each Python release; this module helps to find out programmatically what + the current grammar looks like and allows modifications of it. + + An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as + a flag to the `compile()` builtin function or by using the `parse()` + function from this module. The result will be a tree of objects whose + classes all inherit from `ast.AST`. + + A modified abstract syntax tree can be compiled into a Python code object + using the built-in `compile()` function. + + Additionally various helper functions are provided that make working with + the trees simpler. The main intention of the helper functions and this + module in general is to provide an easy to use interface for libraries + that work tightly with the python syntax (template engines for example). + + + :copyright: Copyright 2008 by Armin Ronacher. + :license: Python License. +""" +import sys +from _ast import * +from contextlib import contextmanager, nullcontext +from enum import IntEnum, auto + + +def parse(source, filename='', mode='exec', *, + type_comments=False, feature_version=None): + """ + Parse the source into an AST node. + Equivalent to compile(source, filename, mode, PyCF_ONLY_AST). + Pass type_comments=True to get back type comments where the syntax allows. + """ + flags = PyCF_ONLY_AST + if type_comments: + flags |= PyCF_TYPE_COMMENTS + if isinstance(feature_version, tuple): + major, minor = feature_version # Should be a 2-tuple. + assert major == 3 + feature_version = minor + elif feature_version is None: + feature_version = -1 + # Else it should be an int giving the minor version for 3.x. + return compile(source, filename, mode, flags, + _feature_version=feature_version) + + +def literal_eval(node_or_string): + """ + Evaluate an expression node or a string containing only a Python + expression. The string or node provided may only consist of the following + Python literal structures: strings, bytes, numbers, tuples, lists, dicts, + sets, booleans, and None. + + Caution: A complex expression can overflow the C stack and cause a crash. 
+ """ + if isinstance(node_or_string, str): + node_or_string = parse(node_or_string.lstrip(" \t"), mode='eval') + if isinstance(node_or_string, Expression): + node_or_string = node_or_string.body + def _raise_malformed_node(node): + msg = "malformed node or string" + if lno := getattr(node, 'lineno', None): + msg += f' on line {lno}' + raise ValueError(msg + f': {node!r}') + def _convert_num(node): + if not isinstance(node, Constant) or type(node.value) not in (int, float, complex): + _raise_malformed_node(node) + return node.value + def _convert_signed_num(node): + if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)): + operand = _convert_num(node.operand) + if isinstance(node.op, UAdd): + return + operand + else: + return - operand + return _convert_num(node) + def _convert(node): + if isinstance(node, Constant): + return node.value + elif isinstance(node, Tuple): + return tuple(map(_convert, node.elts)) + elif isinstance(node, List): + return list(map(_convert, node.elts)) + elif isinstance(node, Set): + return set(map(_convert, node.elts)) + elif (isinstance(node, Call) and isinstance(node.func, Name) and + node.func.id == 'set' and node.args == node.keywords == []): + return set() + elif isinstance(node, Dict): + if len(node.keys) != len(node.values): + _raise_malformed_node(node) + return dict(zip(map(_convert, node.keys), + map(_convert, node.values))) + elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)): + left = _convert_signed_num(node.left) + right = _convert_num(node.right) + if isinstance(left, (int, float)) and isinstance(right, complex): + if isinstance(node.op, Add): + return left + right + else: + return left - right + return _convert_signed_num(node) + return _convert(node_or_string) + + +def dump(node, annotate_fields=True, include_attributes=False, *, indent=None): + """ + Return a formatted dump of the tree in node. This is mainly useful for + debugging purposes. If annotate_fields is true (by default), + the returned string will show the names and the values for fields. + If annotate_fields is false, the result string will be more compact by + omitting unambiguous field names. Attributes such as line + numbers and column offsets are not dumped by default. If this is wanted, + include_attributes can be set to true. If indent is a non-negative + integer or string, then the tree will be pretty-printed with that indent + level. None (the default) selects the single line representation. + """ + def _format(node, level=0): + if indent is not None: + level += 1 + prefix = '\n' + indent * level + sep = ',\n' + indent * level + else: + prefix = '' + sep = ', ' + if isinstance(node, AST): + cls = type(node) + args = [] + allsimple = True + keywords = annotate_fields + for name in node._fields: + try: + value = getattr(node, name) + except AttributeError: + keywords = True + continue + if value is None and getattr(cls, name, ...) is None: + keywords = True + continue + value, simple = _format(value, level) + allsimple = allsimple and simple + if keywords: + args.append('%s=%s' % (name, value)) + else: + args.append(value) + if include_attributes and node._attributes: + for name in node._attributes: + try: + value = getattr(node, name) + except AttributeError: + continue + if value is None and getattr(cls, name, ...) 
is None: + continue + value, simple = _format(value, level) + allsimple = allsimple and simple + args.append('%s=%s' % (name, value)) + if allsimple and len(args) <= 3: + return '%s(%s)' % (node.__class__.__name__, ', '.join(args)), not args + return '%s(%s%s)' % (node.__class__.__name__, prefix, sep.join(args)), False + elif isinstance(node, list): + if not node: + return '[]', True + return '[%s%s]' % (prefix, sep.join(_format(x, level)[0] for x in node)), False + return repr(node), True + + if not isinstance(node, AST): + raise TypeError('expected AST, got %r' % node.__class__.__name__) + if indent is not None and not isinstance(indent, str): + indent = ' ' * indent + return _format(node)[0] + + +def copy_location(new_node, old_node): + """ + Copy source location (`lineno`, `col_offset`, `end_lineno`, and `end_col_offset` + attributes) from *old_node* to *new_node* if possible, and return *new_node*. + """ + for attr in 'lineno', 'col_offset', 'end_lineno', 'end_col_offset': + if attr in old_node._attributes and attr in new_node._attributes: + value = getattr(old_node, attr, None) + # end_lineno and end_col_offset are optional attributes, and they + # should be copied whether the value is None or not. + if value is not None or ( + hasattr(old_node, attr) and attr.startswith("end_") + ): + setattr(new_node, attr, value) + return new_node + + +def fix_missing_locations(node): + """ + When you compile a node tree with compile(), the compiler expects lineno and + col_offset attributes for every node that supports them. This is rather + tedious to fill in for generated nodes, so this helper adds these attributes + recursively where not already set, by setting them to the values of the + parent node. It works recursively starting at *node*. + """ + def _fix(node, lineno, col_offset, end_lineno, end_col_offset): + if 'lineno' in node._attributes: + if not hasattr(node, 'lineno'): + node.lineno = lineno + else: + lineno = node.lineno + if 'end_lineno' in node._attributes: + if getattr(node, 'end_lineno', None) is None: + node.end_lineno = end_lineno + else: + end_lineno = node.end_lineno + if 'col_offset' in node._attributes: + if not hasattr(node, 'col_offset'): + node.col_offset = col_offset + else: + col_offset = node.col_offset + if 'end_col_offset' in node._attributes: + if getattr(node, 'end_col_offset', None) is None: + node.end_col_offset = end_col_offset + else: + end_col_offset = node.end_col_offset + for child in iter_child_nodes(node): + _fix(child, lineno, col_offset, end_lineno, end_col_offset) + _fix(node, 1, 0, 1, 0) + return node + + +def increment_lineno(node, n=1): + """ + Increment the line number and end line number of each node in the tree + starting at *node* by *n*. This is useful to "move code" to a different + location in a file. + """ + for child in walk(node): + # TypeIgnore is a special case where lineno is not an attribute + # but rather a field of the node itself. + if isinstance(child, TypeIgnore): + child.lineno = getattr(child, 'lineno', 0) + n + continue + + if 'lineno' in child._attributes: + child.lineno = getattr(child, 'lineno', 0) + n + if ( + "end_lineno" in child._attributes + and (end_lineno := getattr(child, "end_lineno", 0)) is not None + ): + child.end_lineno = end_lineno + n + return node + + +def iter_fields(node): + """ + Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields`` + that is present on *node*. 
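For example (a small sketch using a hand-built node):

        >>> node = BinOp(left=Constant(1), op=Add(), right=Constant(2))
        >>> [name for name, value in iter_fields(node)]
        ['left', 'op', 'right']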
+ """ + for field in node._fields: + try: + yield field, getattr(node, field) + except AttributeError: + pass + + +def iter_child_nodes(node): + """ + Yield all direct child nodes of *node*, that is, all fields that are nodes + and all items of fields that are lists of nodes. + """ + for name, field in iter_fields(node): + if isinstance(field, AST): + yield field + elif isinstance(field, list): + for item in field: + if isinstance(item, AST): + yield item + + +def get_docstring(node, clean=True): + """ + Return the docstring for the given node or None if no docstring can + be found. If the node provided does not have docstrings a TypeError + will be raised. + + If *clean* is `True`, all tabs are expanded to spaces and any whitespace + that can be uniformly removed from the second line onwards is removed. + """ + if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)): + raise TypeError("%r can't have docstrings" % node.__class__.__name__) + if not(node.body and isinstance(node.body[0], Expr)): + return None + node = node.body[0].value + if isinstance(node, Str): + text = node.s + elif isinstance(node, Constant) and isinstance(node.value, str): + text = node.value + else: + return None + if clean: + import inspect + text = inspect.cleandoc(text) + return text + + +def _splitlines_no_ff(source): + """Split a string into lines ignoring form feed and other chars. + + This mimics how the Python parser splits source code. + """ + idx = 0 + lines = [] + next_line = '' + while idx < len(source): + c = source[idx] + next_line += c + idx += 1 + # Keep \r\n together + if c == '\r' and idx < len(source) and source[idx] == '\n': + next_line += '\n' + idx += 1 + if c in '\r\n': + lines.append(next_line) + next_line = '' + + if next_line: + lines.append(next_line) + return lines + + +def _pad_whitespace(source): + r"""Replace all chars except '\f\t' in a line with spaces.""" + result = '' + for c in source: + if c in '\f\t': + result += c + else: + result += ' ' + return result + + +def get_source_segment(source, node, *, padded=False): + """Get source code segment of the *source* that generated *node*. + + If some location information (`lineno`, `end_lineno`, `col_offset`, + or `end_col_offset`) is missing, return None. + + If *padded* is `True`, the first line of a multi-line statement will + be padded with spaces to match its original position. + """ + try: + if node.end_lineno is None or node.end_col_offset is None: + return None + lineno = node.lineno - 1 + end_lineno = node.end_lineno - 1 + col_offset = node.col_offset + end_col_offset = node.end_col_offset + except AttributeError: + return None + + lines = _splitlines_no_ff(source) + if end_lineno == lineno: + return lines[lineno].encode()[col_offset:end_col_offset].decode() + + if padded: + padding = _pad_whitespace(lines[lineno].encode()[:col_offset].decode()) + else: + padding = '' + + first = padding + lines[lineno].encode()[col_offset:].decode() + last = lines[end_lineno].encode()[:end_col_offset].decode() + lines = lines[lineno+1:end_lineno] + + lines.insert(0, first) + lines.append(last) + return ''.join(lines) + + +def walk(node): + """ + Recursively yield all descendant nodes in the tree starting at *node* + (including *node* itself), in no specified order. This is useful if you + only want to modify nodes in place and don't care about the context. 
+ """ + from collections import deque + todo = deque([node]) + while todo: + node = todo.popleft() + todo.extend(iter_child_nodes(node)) + yield node + + +class NodeVisitor(object): + """ + A node visitor base class that walks the abstract syntax tree and calls a + visitor function for every node found. This function may return a value + which is forwarded by the `visit` method. + + This class is meant to be subclassed, with the subclass adding visitor + methods. + + Per default the visitor functions for the nodes are ``'visit_'`` + + class name of the node. So a `TryFinally` node visit function would + be `visit_TryFinally`. This behavior can be changed by overriding + the `visit` method. If no visitor function exists for a node + (return value `None`) the `generic_visit` visitor is used instead. + + Don't use the `NodeVisitor` if you want to apply changes to nodes during + traversing. For this a special visitor exists (`NodeTransformer`) that + allows modifications. + """ + + def visit(self, node): + """Visit a node.""" + method = 'visit_' + node.__class__.__name__ + visitor = getattr(self, method, self.generic_visit) + return visitor(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + for field, value in iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, AST): + self.visit(item) + elif isinstance(value, AST): + self.visit(value) + + def visit_Constant(self, node): + value = node.value + type_name = _const_node_type_names.get(type(value)) + if type_name is None: + for cls, name in _const_node_type_names.items(): + if isinstance(value, cls): + type_name = name + break + if type_name is not None: + method = 'visit_' + type_name + try: + visitor = getattr(self, method) + except AttributeError: + pass + else: + import warnings + warnings.warn(f"{method} is deprecated; add visit_Constant", + DeprecationWarning, 2) + return visitor(node) + return self.generic_visit(node) + + +class NodeTransformer(NodeVisitor): + """ + A :class:`NodeVisitor` subclass that walks the abstract syntax tree and + allows modification of nodes. + + The `NodeTransformer` will walk the AST and use the return value of the + visitor methods to replace or remove the old node. If the return value of + the visitor method is ``None``, the node will be removed from its location, + otherwise it is replaced with the return value. The return value may be the + original node in which case no replacement takes place. + + Here is an example transformer that rewrites all occurrences of name lookups + (``foo``) to ``data['foo']``:: + + class RewriteName(NodeTransformer): + + def visit_Name(self, node): + return Subscript( + value=Name(id='data', ctx=Load()), + slice=Constant(value=node.id), + ctx=node.ctx + ) + + Keep in mind that if the node you're operating on has child nodes you must + either transform the child nodes yourself or call the :meth:`generic_visit` + method for the node first. + + For nodes that were part of a collection of statements (that applies to all + statement nodes), the visitor may also return a list of nodes rather than + just a single node. 
+ + Usually you use the transformer like this:: + + node = YourTransformer().visit(node) + """ + + def generic_visit(self, node): + for field, old_value in iter_fields(node): + if isinstance(old_value, list): + new_values = [] + for value in old_value: + if isinstance(value, AST): + value = self.visit(value) + if value is None: + continue + elif not isinstance(value, AST): + new_values.extend(value) + continue + new_values.append(value) + old_value[:] = new_values + elif isinstance(old_value, AST): + new_node = self.visit(old_value) + if new_node is None: + delattr(node, field) + else: + setattr(node, field, new_node) + return node + + +# If the ast module is loaded more than once, only add deprecated methods once +if not hasattr(Constant, 'n'): + # The following code is for backward compatibility. + # It will be removed in future. + + def _getter(self): + """Deprecated. Use value instead.""" + return self.value + + def _setter(self, value): + self.value = value + + Constant.n = property(_getter, _setter) + Constant.s = property(_getter, _setter) + +class _ABC(type): + + def __init__(cls, *args): + cls.__doc__ = """Deprecated AST node class. Use ast.Constant instead""" + + def __instancecheck__(cls, inst): + if not isinstance(inst, Constant): + return False + if cls in _const_types: + try: + value = inst.value + except AttributeError: + return False + else: + return ( + isinstance(value, _const_types[cls]) and + not isinstance(value, _const_types_not.get(cls, ())) + ) + return type.__instancecheck__(cls, inst) + +def _new(cls, *args, **kwargs): + for key in kwargs: + if key not in cls._fields: + # arbitrary keyword arguments are accepted + continue + pos = cls._fields.index(key) + if pos < len(args): + raise TypeError(f"{cls.__name__} got multiple values for argument {key!r}") + if cls in _const_types: + return Constant(*args, **kwargs) + return Constant.__new__(cls, *args, **kwargs) + +class Num(Constant, metaclass=_ABC): + _fields = ('n',) + __new__ = _new + +class Str(Constant, metaclass=_ABC): + _fields = ('s',) + __new__ = _new + +class Bytes(Constant, metaclass=_ABC): + _fields = ('s',) + __new__ = _new + +class NameConstant(Constant, metaclass=_ABC): + __new__ = _new + +class Ellipsis(Constant, metaclass=_ABC): + _fields = () + + def __new__(cls, *args, **kwargs): + if cls is Ellipsis: + return Constant(..., *args, **kwargs) + return Constant.__new__(cls, *args, **kwargs) + +_const_types = { + Num: (int, float, complex), + Str: (str,), + Bytes: (bytes,), + NameConstant: (type(None), bool), + Ellipsis: (type(...),), +} +_const_types_not = { + Num: (bool,), +} + +_const_node_type_names = { + bool: 'NameConstant', # should be before int + type(None): 'NameConstant', + int: 'Num', + float: 'Num', + complex: 'Num', + str: 'Str', + bytes: 'Bytes', + type(...): 'Ellipsis', +} + +class slice(AST): + """Deprecated AST node class.""" + +class Index(slice): + """Deprecated AST node class. Use the index value directly instead.""" + def __new__(cls, value, **kwargs): + return value + +class ExtSlice(slice): + """Deprecated AST node class. Use ast.Tuple instead.""" + def __new__(cls, dims=(), **kwargs): + return Tuple(list(dims), Load(), **kwargs) + +# If the ast module is loaded more than once, only add deprecated methods once +if not hasattr(Tuple, 'dims'): + # The following code is for backward compatibility. + # It will be removed in future. + + def _dims_getter(self): + """Deprecated. 
Use elts instead.""" + return self.elts + + def _dims_setter(self, value): + self.elts = value + + Tuple.dims = property(_dims_getter, _dims_setter) + +class Suite(mod): + """Deprecated AST node class. Unused in Python 3.""" + +class AugLoad(expr_context): + """Deprecated AST node class. Unused in Python 3.""" + +class AugStore(expr_context): + """Deprecated AST node class. Unused in Python 3.""" + +class Param(expr_context): + """Deprecated AST node class. Unused in Python 3.""" + + +# Large float and imaginary literals get turned into infinities in the AST. +# We unparse those infinities to INFSTR. +_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1) + +class _Precedence(IntEnum): + """Precedence table that originated from python grammar.""" + + TUPLE = auto() + YIELD = auto() # 'yield', 'yield from' + TEST = auto() # 'if'-'else', 'lambda' + OR = auto() # 'or' + AND = auto() # 'and' + NOT = auto() # 'not' + CMP = auto() # '<', '>', '==', '>=', '<=', '!=', + # 'in', 'not in', 'is', 'is not' + EXPR = auto() + BOR = EXPR # '|' + BXOR = auto() # '^' + BAND = auto() # '&' + SHIFT = auto() # '<<', '>>' + ARITH = auto() # '+', '-' + TERM = auto() # '*', '@', '/', '%', '//' + FACTOR = auto() # unary '+', '-', '~' + POWER = auto() # '**' + AWAIT = auto() # 'await' + ATOM = auto() + + def next(self): + try: + return self.__class__(self + 1) + except ValueError: + return self + + +_SINGLE_QUOTES = ("'", '"') +_MULTI_QUOTES = ('"""', "'''") +_ALL_QUOTES = (*_SINGLE_QUOTES, *_MULTI_QUOTES) + +class _Unparser(NodeVisitor): + """Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarded.""" + + def __init__(self, *, _avoid_backslashes=False): + self._source = [] + self._buffer = [] + self._precedences = {} + self._type_ignores = {} + self._indent = 0 + self._avoid_backslashes = _avoid_backslashes + + def interleave(self, inter, f, seq): + """Call f on each item in seq, calling inter() in between.""" + seq = iter(seq) + try: + f(next(seq)) + except StopIteration: + pass + else: + for x in seq: + inter() + f(x) + + def items_view(self, traverser, items): + """Traverse and separate the given *items* with a comma and append it to + the buffer. If *items* is a single item sequence, a trailing comma + will be added.""" + if len(items) == 1: + traverser(items[0]) + self.write(",") + else: + self.interleave(lambda: self.write(", "), traverser, items) + + def maybe_newline(self): + """Adds a newline if it isn't the start of generated source""" + if self._source: + self.write("\n") + + def fill(self, text=""): + """Indent a piece of text and append it, according to the current + indentation level""" + self.maybe_newline() + self.write(" " * self._indent + text) + + def write(self, text): + """Append a piece of text""" + self._source.append(text) + + def buffer_writer(self, text): + self._buffer.append(text) + + @property + def buffer(self): + value = "".join(self._buffer) + self._buffer.clear() + return value + + @contextmanager + def block(self, *, extra = None): + """A context manager for preparing the source for blocks. It adds + the character':', increases the indentation on enter and decreases + the indentation on exit. If *extra* is given, it will be directly + appended after the colon character. + """ + self.write(":") + if extra: + self.write(extra) + self._indent += 1 + yield + self._indent -= 1 + + @contextmanager + def delimit(self, start, end): + """A context manager for preparing the source for expressions. 
It adds + *start* to the buffer and enters, after exit it adds *end*.""" + + self.write(start) + yield + self.write(end) + + def delimit_if(self, start, end, condition): + if condition: + return self.delimit(start, end) + else: + return nullcontext() + + def require_parens(self, precedence, node): + """Shortcut to adding precedence related parens""" + return self.delimit_if("(", ")", self.get_precedence(node) > precedence) + + def get_precedence(self, node): + return self._precedences.get(node, _Precedence.TEST) + + def set_precedence(self, precedence, *nodes): + for node in nodes: + self._precedences[node] = precedence + + def get_raw_docstring(self, node): + """If a docstring node is found in the body of the *node* parameter, + return that docstring node, None otherwise. + + Logic mirrored from ``_PyAST_GetDocString``.""" + if not isinstance( + node, (AsyncFunctionDef, FunctionDef, ClassDef, Module) + ) or len(node.body) < 1: + return None + node = node.body[0] + if not isinstance(node, Expr): + return None + node = node.value + if isinstance(node, Constant) and isinstance(node.value, str): + return node + + def get_type_comment(self, node): + comment = self._type_ignores.get(node.lineno) or node.type_comment + if comment is not None: + return f" # type: {comment}" + + def traverse(self, node): + if isinstance(node, list): + for item in node: + self.traverse(item) + else: + super().visit(node) + + # Note: as visit() resets the output text, do NOT rely on + # NodeVisitor.generic_visit to handle any nodes (as it calls back in to + # the subclass visit() method, which resets self._source to an empty list) + def visit(self, node): + """Outputs a source code string that, if converted back to an ast + (using ast.parse) will generate an AST equivalent to *node*""" + self._source = [] + self.traverse(node) + return "".join(self._source) + + def _write_docstring_and_traverse_body(self, node): + if (docstring := self.get_raw_docstring(node)): + self._write_docstring(docstring) + self.traverse(node.body[1:]) + else: + self.traverse(node.body) + + def visit_Module(self, node): + self._type_ignores = { + ignore.lineno: f"ignore{ignore.tag}" + for ignore in node.type_ignores + } + self._write_docstring_and_traverse_body(node) + self._type_ignores.clear() + + def visit_FunctionType(self, node): + with self.delimit("(", ")"): + self.interleave( + lambda: self.write(", "), self.traverse, node.argtypes + ) + + self.write(" -> ") + self.traverse(node.returns) + + def visit_Expr(self, node): + self.fill() + self.set_precedence(_Precedence.YIELD, node.value) + self.traverse(node.value) + + def visit_NamedExpr(self, node): + with self.require_parens(_Precedence.TUPLE, node): + self.set_precedence(_Precedence.ATOM, node.target, node.value) + self.traverse(node.target) + self.write(" := ") + self.traverse(node.value) + + def visit_Import(self, node): + self.fill("import ") + self.interleave(lambda: self.write(", "), self.traverse, node.names) + + def visit_ImportFrom(self, node): + self.fill("from ") + self.write("." 
* (node.level or 0)) + if node.module: + self.write(node.module) + self.write(" import ") + self.interleave(lambda: self.write(", "), self.traverse, node.names) + + def visit_Assign(self, node): + self.fill() + for target in node.targets: + self.traverse(target) + self.write(" = ") + self.traverse(node.value) + if type_comment := self.get_type_comment(node): + self.write(type_comment) + + def visit_AugAssign(self, node): + self.fill() + self.traverse(node.target) + self.write(" " + self.binop[node.op.__class__.__name__] + "= ") + self.traverse(node.value) + + def visit_AnnAssign(self, node): + self.fill() + with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)): + self.traverse(node.target) + self.write(": ") + self.traverse(node.annotation) + if node.value: + self.write(" = ") + self.traverse(node.value) + + def visit_Return(self, node): + self.fill("return") + if node.value: + self.write(" ") + self.traverse(node.value) + + def visit_Pass(self, node): + self.fill("pass") + + def visit_Break(self, node): + self.fill("break") + + def visit_Continue(self, node): + self.fill("continue") + + def visit_Delete(self, node): + self.fill("del ") + self.interleave(lambda: self.write(", "), self.traverse, node.targets) + + def visit_Assert(self, node): + self.fill("assert ") + self.traverse(node.test) + if node.msg: + self.write(", ") + self.traverse(node.msg) + + def visit_Global(self, node): + self.fill("global ") + self.interleave(lambda: self.write(", "), self.write, node.names) + + def visit_Nonlocal(self, node): + self.fill("nonlocal ") + self.interleave(lambda: self.write(", "), self.write, node.names) + + def visit_Await(self, node): + with self.require_parens(_Precedence.AWAIT, node): + self.write("await") + if node.value: + self.write(" ") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + + def visit_Yield(self, node): + with self.require_parens(_Precedence.YIELD, node): + self.write("yield") + if node.value: + self.write(" ") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + + def visit_YieldFrom(self, node): + with self.require_parens(_Precedence.YIELD, node): + self.write("yield from ") + if not node.value: + raise ValueError("Node can't be used without a value attribute.") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + + def visit_Raise(self, node): + self.fill("raise") + if not node.exc: + if node.cause: + raise ValueError(f"Node can't use cause without an exception.") + return + self.write(" ") + self.traverse(node.exc) + if node.cause: + self.write(" from ") + self.traverse(node.cause) + + def visit_Try(self, node): + self.fill("try") + with self.block(): + self.traverse(node.body) + for ex in node.handlers: + self.traverse(ex) + if node.orelse: + self.fill("else") + with self.block(): + self.traverse(node.orelse) + if node.finalbody: + self.fill("finally") + with self.block(): + self.traverse(node.finalbody) + + def visit_ExceptHandler(self, node): + self.fill("except") + if node.type: + self.write(" ") + self.traverse(node.type) + if node.name: + self.write(" as ") + self.write(node.name) + with self.block(): + self.traverse(node.body) + + def visit_ClassDef(self, node): + self.maybe_newline() + for deco in node.decorator_list: + self.fill("@") + self.traverse(deco) + self.fill("class " + node.name) + with self.delimit_if("(", ")", condition = node.bases or node.keywords): + comma = False + for e in node.bases: + if comma: + self.write(", ") + else: + comma 
= True + self.traverse(e) + for e in node.keywords: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + + with self.block(): + self._write_docstring_and_traverse_body(node) + + def visit_FunctionDef(self, node): + self._function_helper(node, "def") + + def visit_AsyncFunctionDef(self, node): + self._function_helper(node, "async def") + + def _function_helper(self, node, fill_suffix): + self.maybe_newline() + for deco in node.decorator_list: + self.fill("@") + self.traverse(deco) + def_str = fill_suffix + " " + node.name + self.fill(def_str) + with self.delimit("(", ")"): + self.traverse(node.args) + if node.returns: + self.write(" -> ") + self.traverse(node.returns) + with self.block(extra=self.get_type_comment(node)): + self._write_docstring_and_traverse_body(node) + + def visit_For(self, node): + self._for_helper("for ", node) + + def visit_AsyncFor(self, node): + self._for_helper("async for ", node) + + def _for_helper(self, fill, node): + self.fill(fill) + self.traverse(node.target) + self.write(" in ") + self.traverse(node.iter) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) + if node.orelse: + self.fill("else") + with self.block(): + self.traverse(node.orelse) + + def visit_If(self, node): + self.fill("if ") + self.traverse(node.test) + with self.block(): + self.traverse(node.body) + # collapse nested ifs into equivalent elifs. + while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If): + node = node.orelse[0] + self.fill("elif ") + self.traverse(node.test) + with self.block(): + self.traverse(node.body) + # final else + if node.orelse: + self.fill("else") + with self.block(): + self.traverse(node.orelse) + + def visit_While(self, node): + self.fill("while ") + self.traverse(node.test) + with self.block(): + self.traverse(node.body) + if node.orelse: + self.fill("else") + with self.block(): + self.traverse(node.orelse) + + def visit_With(self, node): + self.fill("with ") + self.interleave(lambda: self.write(", "), self.traverse, node.items) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) + + def visit_AsyncWith(self, node): + self.fill("async with ") + self.interleave(lambda: self.write(", "), self.traverse, node.items) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) + + def _str_literal_helper( + self, string, *, quote_types=_ALL_QUOTES, escape_special_whitespace=False + ): + """Helper for writing string literals, minimizing escapes. + Returns the tuple (string literal to write, possible quote types). + """ + def escape_char(c): + # \n and \t are non-printable, but we only escape them if + # escape_special_whitespace is True + if not escape_special_whitespace and c in "\n\t": + return c + # Always escape backslashes and other non-printable characters + if c == "\\" or not c.isprintable(): + return c.encode("unicode_escape").decode("ascii") + return c + + escaped_string = "".join(map(escape_char, string)) + possible_quotes = quote_types + if "\n" in escaped_string: + possible_quotes = [q for q in possible_quotes if q in _MULTI_QUOTES] + possible_quotes = [q for q in possible_quotes if q not in escaped_string] + if not possible_quotes: + # If there aren't any possible_quotes, fallback to using repr + # on the original string. Try to use a quote from quote_types, + # e.g., so that we use triple quotes for docstrings. 
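+            # (Illustrative case: with quote_types=_MULTI_QUOTES, a value that
+            # contains both ''' and """ rules out every candidate quote, so
+            # repr() below supplies an escaped literal whose outer quote
+            # character is reused.)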
+ string = repr(string) + quote = next((q for q in quote_types if string[0] in q), string[0]) + return string[1:-1], [quote] + if escaped_string: + # Sort so that we prefer '''"''' over """\"""" + possible_quotes.sort(key=lambda q: q[0] == escaped_string[-1]) + # If we're using triple quotes and we'd need to escape a final + # quote, escape it + if possible_quotes[0][0] == escaped_string[-1]: + assert len(possible_quotes[0]) == 3 + escaped_string = escaped_string[:-1] + "\\" + escaped_string[-1] + return escaped_string, possible_quotes + + def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES): + """Write string literal value with a best effort attempt to avoid backslashes.""" + string, quote_types = self._str_literal_helper(string, quote_types=quote_types) + quote_type = quote_types[0] + self.write(f"{quote_type}{string}{quote_type}") + + def visit_JoinedStr(self, node): + self.write("f") + if self._avoid_backslashes: + self._fstring_JoinedStr(node, self.buffer_writer) + self._write_str_avoiding_backslashes(self.buffer) + return + + # If we don't need to avoid backslashes globally (i.e., we only need + # to avoid them inside FormattedValues), it's cosmetically preferred + # to use escaped whitespace. That is, it's preferred to use backslashes + # for cases like: f"{x}\n". To accomplish this, we keep track of what + # in our buffer corresponds to FormattedValues and what corresponds to + # Constant parts of the f-string, and allow escapes accordingly. + buffer = [] + for value in node.values: + meth = getattr(self, "_fstring_" + type(value).__name__) + meth(value, self.buffer_writer) + buffer.append((self.buffer, isinstance(value, Constant))) + new_buffer = [] + quote_types = _ALL_QUOTES + for value, is_constant in buffer: + # Repeatedly narrow down the list of possible quote_types + value, quote_types = self._str_literal_helper( + value, quote_types=quote_types, + escape_special_whitespace=is_constant + ) + new_buffer.append(value) + value = "".join(new_buffer) + quote_type = quote_types[0] + self.write(f"{quote_type}{value}{quote_type}") + + def visit_FormattedValue(self, node): + self.write("f") + self._fstring_FormattedValue(node, self.buffer_writer) + self._write_str_avoiding_backslashes(self.buffer) + + def _fstring_JoinedStr(self, node, write): + for value in node.values: + meth = getattr(self, "_fstring_" + type(value).__name__) + meth(value, write) + + def _fstring_Constant(self, node, write): + if not isinstance(node.value, str): + raise ValueError("Constants inside JoinedStr should be a string.") + value = node.value.replace("{", "{{").replace("}", "}}") + write(value) + + def _fstring_FormattedValue(self, node, write): + write("{") + unparser = type(self)(_avoid_backslashes=True) + unparser.set_precedence(_Precedence.TEST.next(), node.value) + expr = unparser.visit(node.value) + if expr.startswith("{"): + write(" ") # Separate pair of opening brackets as "{ {" + if "\\" in expr: + raise ValueError("Unable to avoid backslash in f-string expression part") + write(expr) + if node.conversion != -1: + conversion = chr(node.conversion) + if conversion not in "sra": + raise ValueError("Unknown f-string conversion.") + write(f"!{conversion}") + if node.format_spec: + write(":") + meth = getattr(self, "_fstring_" + type(node.format_spec).__name__) + meth(node.format_spec, write) + write("}") + + def visit_Name(self, node): + self.write(node.id) + + def _write_docstring(self, node): + self.fill() + if node.kind == "u": + self.write("u") + 
self._write_str_avoiding_backslashes(node.value, quote_types=_MULTI_QUOTES) + + def _write_constant(self, value): + if isinstance(value, (float, complex)): + # Substitute overflowing decimal literal for AST infinities, + # and inf - inf for NaNs. + self.write( + repr(value) + .replace("inf", _INFSTR) + .replace("nan", f"({_INFSTR}-{_INFSTR})") + ) + elif self._avoid_backslashes and isinstance(value, str): + self._write_str_avoiding_backslashes(value) + else: + self.write(repr(value)) + + def visit_Constant(self, node): + value = node.value + if isinstance(value, tuple): + with self.delimit("(", ")"): + self.items_view(self._write_constant, value) + elif value is ...: + self.write("...") + else: + if node.kind == "u": + self.write("u") + self._write_constant(node.value) + + def visit_List(self, node): + with self.delimit("[", "]"): + self.interleave(lambda: self.write(", "), self.traverse, node.elts) + + def visit_ListComp(self, node): + with self.delimit("[", "]"): + self.traverse(node.elt) + for gen in node.generators: + self.traverse(gen) + + def visit_GeneratorExp(self, node): + with self.delimit("(", ")"): + self.traverse(node.elt) + for gen in node.generators: + self.traverse(gen) + + def visit_SetComp(self, node): + with self.delimit("{", "}"): + self.traverse(node.elt) + for gen in node.generators: + self.traverse(gen) + + def visit_DictComp(self, node): + with self.delimit("{", "}"): + self.traverse(node.key) + self.write(": ") + self.traverse(node.value) + for gen in node.generators: + self.traverse(gen) + + def visit_comprehension(self, node): + if node.is_async: + self.write(" async for ") + else: + self.write(" for ") + self.set_precedence(_Precedence.TUPLE, node.target) + self.traverse(node.target) + self.write(" in ") + self.set_precedence(_Precedence.TEST.next(), node.iter, *node.ifs) + self.traverse(node.iter) + for if_clause in node.ifs: + self.write(" if ") + self.traverse(if_clause) + + def visit_IfExp(self, node): + with self.require_parens(_Precedence.TEST, node): + self.set_precedence(_Precedence.TEST.next(), node.body, node.test) + self.traverse(node.body) + self.write(" if ") + self.traverse(node.test) + self.write(" else ") + self.set_precedence(_Precedence.TEST, node.orelse) + self.traverse(node.orelse) + + def visit_Set(self, node): + if node.elts: + with self.delimit("{", "}"): + self.interleave(lambda: self.write(", "), self.traverse, node.elts) + else: + # `{}` would be interpreted as a dictionary literal, and + # `set` might be shadowed. 
Thus: + self.write('{*()}') + + def visit_Dict(self, node): + def write_key_value_pair(k, v): + self.traverse(k) + self.write(": ") + self.traverse(v) + + def write_item(item): + k, v = item + if k is None: + # for dictionary unpacking operator in dicts {**{'y': 2}} + # see PEP 448 for details + self.write("**") + self.set_precedence(_Precedence.EXPR, v) + self.traverse(v) + else: + write_key_value_pair(k, v) + + with self.delimit("{", "}"): + self.interleave( + lambda: self.write(", "), write_item, zip(node.keys, node.values) + ) + + def visit_Tuple(self, node): + with self.delimit("(", ")"): + self.items_view(self.traverse, node.elts) + + unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"} + unop_precedence = { + "not": _Precedence.NOT, + "~": _Precedence.FACTOR, + "+": _Precedence.FACTOR, + "-": _Precedence.FACTOR, + } + + def visit_UnaryOp(self, node): + operator = self.unop[node.op.__class__.__name__] + operator_precedence = self.unop_precedence[operator] + with self.require_parens(operator_precedence, node): + self.write(operator) + # factor prefixes (+, -, ~) shouldn't be seperated + # from the value they belong, (e.g: +1 instead of + 1) + if operator_precedence is not _Precedence.FACTOR: + self.write(" ") + self.set_precedence(operator_precedence, node.operand) + self.traverse(node.operand) + + binop = { + "Add": "+", + "Sub": "-", + "Mult": "*", + "MatMult": "@", + "Div": "/", + "Mod": "%", + "LShift": "<<", + "RShift": ">>", + "BitOr": "|", + "BitXor": "^", + "BitAnd": "&", + "FloorDiv": "//", + "Pow": "**", + } + + binop_precedence = { + "+": _Precedence.ARITH, + "-": _Precedence.ARITH, + "*": _Precedence.TERM, + "@": _Precedence.TERM, + "/": _Precedence.TERM, + "%": _Precedence.TERM, + "<<": _Precedence.SHIFT, + ">>": _Precedence.SHIFT, + "|": _Precedence.BOR, + "^": _Precedence.BXOR, + "&": _Precedence.BAND, + "//": _Precedence.TERM, + "**": _Precedence.POWER, + } + + binop_rassoc = frozenset(("**",)) + def visit_BinOp(self, node): + operator = self.binop[node.op.__class__.__name__] + operator_precedence = self.binop_precedence[operator] + with self.require_parens(operator_precedence, node): + if operator in self.binop_rassoc: + left_precedence = operator_precedence.next() + right_precedence = operator_precedence + else: + left_precedence = operator_precedence + right_precedence = operator_precedence.next() + + self.set_precedence(left_precedence, node.left) + self.traverse(node.left) + self.write(f" {operator} ") + self.set_precedence(right_precedence, node.right) + self.traverse(node.right) + + cmpops = { + "Eq": "==", + "NotEq": "!=", + "Lt": "<", + "LtE": "<=", + "Gt": ">", + "GtE": ">=", + "Is": "is", + "IsNot": "is not", + "In": "in", + "NotIn": "not in", + } + + def visit_Compare(self, node): + with self.require_parens(_Precedence.CMP, node): + self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators) + self.traverse(node.left) + for o, e in zip(node.ops, node.comparators): + self.write(" " + self.cmpops[o.__class__.__name__] + " ") + self.traverse(e) + + boolops = {"And": "and", "Or": "or"} + boolop_precedence = {"and": _Precedence.AND, "or": _Precedence.OR} + + def visit_BoolOp(self, node): + operator = self.boolops[node.op.__class__.__name__] + operator_precedence = self.boolop_precedence[operator] + + def increasing_level_traverse(node): + nonlocal operator_precedence + operator_precedence = operator_precedence.next() + self.set_precedence(operator_precedence, node) + self.traverse(node) + + with self.require_parens(operator_precedence, 
node): + s = f" {operator} " + self.interleave(lambda: self.write(s), increasing_level_traverse, node.values) + + def visit_Attribute(self, node): + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + # Special case: 3.__abs__() is a syntax error, so if node.value + # is an integer literal then we need to either parenthesize + # it or add an extra space to get 3 .__abs__(). + if isinstance(node.value, Constant) and isinstance(node.value.value, int): + self.write(" ") + self.write(".") + self.write(node.attr) + + def visit_Call(self, node): + self.set_precedence(_Precedence.ATOM, node.func) + self.traverse(node.func) + with self.delimit("(", ")"): + comma = False + for e in node.args: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + for e in node.keywords: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + + def visit_Subscript(self, node): + def is_simple_tuple(slice_value): + # when unparsing a non-empty tuple, the parentheses can be safely + # omitted if there aren't any elements that explicitly requires + # parentheses (such as starred expressions). + return ( + isinstance(slice_value, Tuple) + and slice_value.elts + and not any(isinstance(elt, Starred) for elt in slice_value.elts) + ) + + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + with self.delimit("[", "]"): + if is_simple_tuple(node.slice): + self.items_view(self.traverse, node.slice.elts) + else: + self.traverse(node.slice) + + def visit_Starred(self, node): + self.write("*") + self.set_precedence(_Precedence.EXPR, node.value) + self.traverse(node.value) + + def visit_Ellipsis(self, node): + self.write("...") + + def visit_Slice(self, node): + if node.lower: + self.traverse(node.lower) + self.write(":") + if node.upper: + self.traverse(node.upper) + if node.step: + self.write(":") + self.traverse(node.step) + + def visit_Match(self, node): + self.fill("match ") + self.traverse(node.subject) + with self.block(): + for case in node.cases: + self.traverse(case) + + def visit_arg(self, node): + self.write(node.arg) + if node.annotation: + self.write(": ") + self.traverse(node.annotation) + + def visit_arguments(self, node): + first = True + # normal arguments + all_args = node.posonlyargs + node.args + defaults = [None] * (len(all_args) - len(node.defaults)) + node.defaults + for index, elements in enumerate(zip(all_args, defaults), 1): + a, d = elements + if first: + first = False + else: + self.write(", ") + self.traverse(a) + if d: + self.write("=") + self.traverse(d) + if index == len(node.posonlyargs): + self.write(", /") + + # varargs, or bare '*' if no varargs but keyword-only arguments present + if node.vararg or node.kwonlyargs: + if first: + first = False + else: + self.write(", ") + self.write("*") + if node.vararg: + self.write(node.vararg.arg) + if node.vararg.annotation: + self.write(": ") + self.traverse(node.vararg.annotation) + + # keyword-only arguments + if node.kwonlyargs: + for a, d in zip(node.kwonlyargs, node.kw_defaults): + self.write(", ") + self.traverse(a) + if d: + self.write("=") + self.traverse(d) + + # kwargs + if node.kwarg: + if first: + first = False + else: + self.write(", ") + self.write("**" + node.kwarg.arg) + if node.kwarg.annotation: + self.write(": ") + self.traverse(node.kwarg.annotation) + + def visit_keyword(self, node): + if node.arg is None: + self.write("**") + else: + self.write(node.arg) + self.write("=") + self.traverse(node.value) + + def visit_Lambda(self, node): + with 
self.require_parens(_Precedence.TEST, node): + self.write("lambda ") + self.traverse(node.args) + self.write(": ") + self.set_precedence(_Precedence.TEST, node.body) + self.traverse(node.body) + + def visit_alias(self, node): + self.write(node.name) + if node.asname: + self.write(" as " + node.asname) + + def visit_withitem(self, node): + self.traverse(node.context_expr) + if node.optional_vars: + self.write(" as ") + self.traverse(node.optional_vars) + + def visit_match_case(self, node): + self.fill("case ") + self.traverse(node.pattern) + if node.guard: + self.write(" if ") + self.traverse(node.guard) + with self.block(): + self.traverse(node.body) + + def visit_MatchValue(self, node): + self.traverse(node.value) + + def visit_MatchSingleton(self, node): + self._write_constant(node.value) + + def visit_MatchSequence(self, node): + with self.delimit("[", "]"): + self.interleave( + lambda: self.write(", "), self.traverse, node.patterns + ) + + def visit_MatchStar(self, node): + name = node.name + if name is None: + name = "_" + self.write(f"*{name}") + + def visit_MatchMapping(self, node): + def write_key_pattern_pair(pair): + k, p = pair + self.traverse(k) + self.write(": ") + self.traverse(p) + + with self.delimit("{", "}"): + keys = node.keys + self.interleave( + lambda: self.write(", "), + write_key_pattern_pair, + zip(keys, node.patterns, strict=True), + ) + rest = node.rest + if rest is not None: + if keys: + self.write(", ") + self.write(f"**{rest}") + + def visit_MatchClass(self, node): + self.set_precedence(_Precedence.ATOM, node.cls) + self.traverse(node.cls) + with self.delimit("(", ")"): + patterns = node.patterns + self.interleave( + lambda: self.write(", "), self.traverse, patterns + ) + attrs = node.kwd_attrs + if attrs: + def write_attr_pattern(pair): + attr, pattern = pair + self.write(f"{attr}=") + self.traverse(pattern) + + if patterns: + self.write(", ") + self.interleave( + lambda: self.write(", "), + write_attr_pattern, + zip(attrs, node.kwd_patterns, strict=True), + ) + + def visit_MatchAs(self, node): + name = node.name + pattern = node.pattern + if name is None: + self.write("_") + elif pattern is None: + self.write(node.name) + else: + with self.require_parens(_Precedence.TEST, node): + self.set_precedence(_Precedence.BOR, node.pattern) + self.traverse(node.pattern) + self.write(f" as {node.name}") + + def visit_MatchOr(self, node): + with self.require_parens(_Precedence.BOR, node): + self.set_precedence(_Precedence.BOR.next(), *node.patterns) + self.interleave(lambda: self.write(" | "), self.traverse, node.patterns) + +def unparse(ast_obj): + unparser = _Unparser() + return unparser.visit(ast_obj) + + +def main(): + import argparse + + parser = argparse.ArgumentParser(prog='python -m ast') + parser.add_argument('infile', type=argparse.FileType(mode='rb'), nargs='?', + default='-', + help='the file to parse; defaults to stdin') + parser.add_argument('-m', '--mode', default='exec', + choices=('exec', 'single', 'eval', 'func_type'), + help='specify what kind of code must be parsed') + parser.add_argument('--no-type-comments', default=True, action='store_false', + help="don't add information about type comments") + parser.add_argument('-a', '--include-attributes', action='store_true', + help='include attributes such as line numbers and ' + 'column offsets') + parser.add_argument('-i', '--indent', type=int, default=3, + help='indentation of nodes (number of spaces)') + args = parser.parse_args() + + with args.infile as infile: + source = infile.read() + tree = 
parse(source, args.infile.name, args.mode, type_comments=args.no_type_comments) + print(dump(tree, include_attributes=args.include_attributes, indent=args.indent)) + +if __name__ == '__main__': + main() diff --git a/evalkit_cambrian/lib/python3.10/binhex.py b/evalkit_cambrian/lib/python3.10/binhex.py new file mode 100644 index 0000000000000000000000000000000000000000..ace5217d2713921d2b03c1a956a0f23ed0bdbccb --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/binhex.py @@ -0,0 +1,502 @@ +"""Macintosh binhex compression/decompression. + +easy interface: +binhex(inputfilename, outputfilename) +hexbin(inputfilename, outputfilename) +""" + +# +# Jack Jansen, CWI, August 1995. +# +# The module is supposed to be as compatible as possible. Especially the +# easy interface should work "as expected" on any platform. +# XXXX Note: currently, textfiles appear in mac-form on all platforms. +# We seem to lack a simple character-translate in python. +# (we should probably use ISO-Latin-1 on all but the mac platform). +# XXXX The simple routines are too simple: they expect to hold the complete +# files in-core. Should be fixed. +# XXXX It would be nice to handle AppleDouble format on unix +# (for servers serving macs). +# XXXX I don't understand what happens when you get 0x90 times the same byte on +# input. The resulting code (xx 90 90) would appear to be interpreted as an +# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety... +# +import binascii +import contextlib +import io +import os +import struct +import warnings + +warnings.warn('the binhex module is deprecated', DeprecationWarning, + stacklevel=2) + + +__all__ = ["binhex","hexbin","Error"] + +class Error(Exception): + pass + +# States (what have we written) +_DID_HEADER = 0 +_DID_DATA = 1 + +# Various constants +REASONABLY_LARGE = 32768 # Minimal amount we pass the rle-coder +LINELEN = 64 +RUNCHAR = b"\x90" + +# +# This code is no longer byte-order dependent + + +class FInfo: + def __init__(self): + self.Type = '????' + self.Creator = '????' + self.Flags = 0 + +def getfileinfo(name): + finfo = FInfo() + with io.open(name, 'rb') as fp: + # Quick check for textfile + data = fp.read(512) + if 0 not in data: + finfo.Type = 'TEXT' + fp.seek(0, 2) + dsize = fp.tell() + dir, file = os.path.split(name) + file = file.replace(':', '-', 1) + return file, finfo, dsize, 0 + +class openrsrc: + def __init__(self, *args): + pass + + def read(self, *args): + return b'' + + def write(self, *args): + pass + + def close(self): + pass + + +# DeprecationWarning is already emitted on "import binhex". There is no need +# to repeat the warning at each call to deprecated binascii functions. 
+@contextlib.contextmanager +def _ignore_deprecation_warning(): + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', '', DeprecationWarning) + yield + + +class _Hqxcoderengine: + """Write data to the coder in 3-byte chunks""" + + def __init__(self, ofp): + self.ofp = ofp + self.data = b'' + self.hqxdata = b'' + self.linelen = LINELEN - 1 + + def write(self, data): + self.data = self.data + data + datalen = len(self.data) + todo = (datalen // 3) * 3 + data = self.data[:todo] + self.data = self.data[todo:] + if not data: + return + with _ignore_deprecation_warning(): + self.hqxdata = self.hqxdata + binascii.b2a_hqx(data) + self._flush(0) + + def _flush(self, force): + first = 0 + while first <= len(self.hqxdata) - self.linelen: + last = first + self.linelen + self.ofp.write(self.hqxdata[first:last] + b'\r') + self.linelen = LINELEN + first = last + self.hqxdata = self.hqxdata[first:] + if force: + self.ofp.write(self.hqxdata + b':\r') + + def close(self): + if self.data: + with _ignore_deprecation_warning(): + self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data) + self._flush(1) + self.ofp.close() + del self.ofp + +class _Rlecoderengine: + """Write data to the RLE-coder in suitably large chunks""" + + def __init__(self, ofp): + self.ofp = ofp + self.data = b'' + + def write(self, data): + self.data = self.data + data + if len(self.data) < REASONABLY_LARGE: + return + with _ignore_deprecation_warning(): + rledata = binascii.rlecode_hqx(self.data) + self.ofp.write(rledata) + self.data = b'' + + def close(self): + if self.data: + with _ignore_deprecation_warning(): + rledata = binascii.rlecode_hqx(self.data) + self.ofp.write(rledata) + self.ofp.close() + del self.ofp + +class BinHex: + def __init__(self, name_finfo_dlen_rlen, ofp): + name, finfo, dlen, rlen = name_finfo_dlen_rlen + close_on_error = False + if isinstance(ofp, str): + ofname = ofp + ofp = io.open(ofname, 'wb') + close_on_error = True + try: + ofp.write(b'(This file must be converted with BinHex 4.0)\r\r:') + hqxer = _Hqxcoderengine(ofp) + self.ofp = _Rlecoderengine(hqxer) + self.crc = 0 + if finfo is None: + finfo = FInfo() + self.dlen = dlen + self.rlen = rlen + self._writeinfo(name, finfo) + self.state = _DID_HEADER + except: + if close_on_error: + ofp.close() + raise + + def _writeinfo(self, name, finfo): + nl = len(name) + if nl > 63: + raise Error('Filename too long') + d = bytes([nl]) + name.encode("latin-1") + b'\0' + tp, cr = finfo.Type, finfo.Creator + if isinstance(tp, str): + tp = tp.encode("latin-1") + if isinstance(cr, str): + cr = cr.encode("latin-1") + d2 = tp + cr + + # Force all structs to be packed with big-endian + d3 = struct.pack('>h', finfo.Flags) + d4 = struct.pack('>ii', self.dlen, self.rlen) + info = d + d2 + d3 + d4 + self._write(info) + self._writecrc() + + def _write(self, data): + self.crc = binascii.crc_hqx(data, self.crc) + self.ofp.write(data) + + def _writecrc(self): + # XXXX Should this be here?? 
+ # self.crc = binascii.crc_hqx('\0\0', self.crc) + if self.crc < 0: + fmt = '>h' + else: + fmt = '>H' + self.ofp.write(struct.pack(fmt, self.crc)) + self.crc = 0 + + def write(self, data): + if self.state != _DID_HEADER: + raise Error('Writing data at the wrong time') + self.dlen = self.dlen - len(data) + self._write(data) + + def close_data(self): + if self.dlen != 0: + raise Error('Incorrect data size, diff=%r' % (self.rlen,)) + self._writecrc() + self.state = _DID_DATA + + def write_rsrc(self, data): + if self.state < _DID_DATA: + self.close_data() + if self.state != _DID_DATA: + raise Error('Writing resource data at the wrong time') + self.rlen = self.rlen - len(data) + self._write(data) + + def close(self): + if self.state is None: + return + try: + if self.state < _DID_DATA: + self.close_data() + if self.state != _DID_DATA: + raise Error('Close at the wrong time') + if self.rlen != 0: + raise Error("Incorrect resource-datasize, diff=%r" % (self.rlen,)) + self._writecrc() + finally: + self.state = None + ofp = self.ofp + del self.ofp + ofp.close() + +def binhex(inp, out): + """binhex(infilename, outfilename): create binhex-encoded copy of a file""" + finfo = getfileinfo(inp) + ofp = BinHex(finfo, out) + + with io.open(inp, 'rb') as ifp: + # XXXX Do textfile translation on non-mac systems + while True: + d = ifp.read(128000) + if not d: break + ofp.write(d) + ofp.close_data() + + ifp = openrsrc(inp, 'rb') + while True: + d = ifp.read(128000) + if not d: break + ofp.write_rsrc(d) + ofp.close() + ifp.close() + +class _Hqxdecoderengine: + """Read data via the decoder in 4-byte chunks""" + + def __init__(self, ifp): + self.ifp = ifp + self.eof = 0 + + def read(self, totalwtd): + """Read at least wtd bytes (or until EOF)""" + decdata = b'' + wtd = totalwtd + # + # The loop here is convoluted, since we don't really now how + # much to decode: there may be newlines in the incoming data. + while wtd > 0: + if self.eof: return decdata + wtd = ((wtd + 2) // 3) * 4 + data = self.ifp.read(wtd) + # + # Next problem: there may not be a complete number of + # bytes in what we pass to a2b. Solve by yet another + # loop. + # + while True: + try: + with _ignore_deprecation_warning(): + decdatacur, self.eof = binascii.a2b_hqx(data) + break + except binascii.Incomplete: + pass + newdata = self.ifp.read(1) + if not newdata: + raise Error('Premature EOF on binhex file') + data = data + newdata + decdata = decdata + decdatacur + wtd = totalwtd - len(decdata) + if not decdata and not self.eof: + raise Error('Premature EOF on binhex file') + return decdata + + def close(self): + self.ifp.close() + +class _Rledecoderengine: + """Read data via the RLE-coder""" + + def __init__(self, ifp): + self.ifp = ifp + self.pre_buffer = b'' + self.post_buffer = b'' + self.eof = 0 + + def read(self, wtd): + if wtd > len(self.post_buffer): + self._fill(wtd - len(self.post_buffer)) + rv = self.post_buffer[:wtd] + self.post_buffer = self.post_buffer[wtd:] + return rv + + def _fill(self, wtd): + self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4) + if self.ifp.eof: + with _ignore_deprecation_warning(): + self.post_buffer = self.post_buffer + \ + binascii.rledecode_hqx(self.pre_buffer) + self.pre_buffer = b'' + return + + # + # Obfuscated code ahead. We have to take care that we don't + # end up with an orphaned RUNCHAR later on. 
So, we keep a couple + # of bytes in the buffer, depending on what the end of + # the buffer looks like: + # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0) + # '?\220' - Keep 2 bytes: repeated something-else + # '\220\0' - Escaped \220: Keep 2 bytes. + # '?\220?' - Complete repeat sequence: decode all + # otherwise: keep 1 byte. + # + mark = len(self.pre_buffer) + if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR: + mark = mark - 3 + elif self.pre_buffer[-1:] == RUNCHAR: + mark = mark - 2 + elif self.pre_buffer[-2:] == RUNCHAR + b'\0': + mark = mark - 2 + elif self.pre_buffer[-2:-1] == RUNCHAR: + pass # Decode all + else: + mark = mark - 1 + + with _ignore_deprecation_warning(): + self.post_buffer = self.post_buffer + \ + binascii.rledecode_hqx(self.pre_buffer[:mark]) + self.pre_buffer = self.pre_buffer[mark:] + + def close(self): + self.ifp.close() + +class HexBin: + def __init__(self, ifp): + if isinstance(ifp, str): + ifp = io.open(ifp, 'rb') + # + # Find initial colon. + # + while True: + ch = ifp.read(1) + if not ch: + raise Error("No binhex data found") + # Cater for \r\n terminated lines (which show up as \n\r, hence + # all lines start with \r) + if ch == b'\r': + continue + if ch == b':': + break + + hqxifp = _Hqxdecoderengine(ifp) + self.ifp = _Rledecoderengine(hqxifp) + self.crc = 0 + self._readheader() + + def _read(self, len): + data = self.ifp.read(len) + self.crc = binascii.crc_hqx(data, self.crc) + return data + + def _checkcrc(self): + filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff + #self.crc = binascii.crc_hqx('\0\0', self.crc) + # XXXX Is this needed?? + self.crc = self.crc & 0xffff + if filecrc != self.crc: + raise Error('CRC error, computed %x, read %x' + % (self.crc, filecrc)) + self.crc = 0 + + def _readheader(self): + len = self._read(1) + fname = self._read(ord(len)) + rest = self._read(1 + 4 + 4 + 2 + 4 + 4) + self._checkcrc() + + type = rest[1:5] + creator = rest[5:9] + flags = struct.unpack('>h', rest[9:11])[0] + self.dlen = struct.unpack('>l', rest[11:15])[0] + self.rlen = struct.unpack('>l', rest[15:19])[0] + + self.FName = fname + self.FInfo = FInfo() + self.FInfo.Creator = creator + self.FInfo.Type = type + self.FInfo.Flags = flags + + self.state = _DID_HEADER + + def read(self, *n): + if self.state != _DID_HEADER: + raise Error('Read data at wrong time') + if n: + n = n[0] + n = min(n, self.dlen) + else: + n = self.dlen + rv = b'' + while len(rv) < n: + rv = rv + self._read(n-len(rv)) + self.dlen = self.dlen - n + return rv + + def close_data(self): + if self.state != _DID_HEADER: + raise Error('close_data at wrong time') + if self.dlen: + dummy = self._read(self.dlen) + self._checkcrc() + self.state = _DID_DATA + + def read_rsrc(self, *n): + if self.state == _DID_HEADER: + self.close_data() + if self.state != _DID_DATA: + raise Error('Read resource data at wrong time') + if n: + n = n[0] + n = min(n, self.rlen) + else: + n = self.rlen + self.rlen = self.rlen - n + return self._read(n) + + def close(self): + if self.state is None: + return + try: + if self.rlen: + dummy = self.read_rsrc(self.rlen) + self._checkcrc() + finally: + self.state = None + self.ifp.close() + +def hexbin(inp, out): + """hexbin(infilename, outfilename) - Decode binhexed file""" + ifp = HexBin(inp) + finfo = ifp.FInfo + if not out: + out = ifp.FName + + with io.open(out, 'wb') as ofp: + # XXXX Do translation on non-mac systems + while True: + d = ifp.read(128000) + if not d: break + ofp.write(d) + ifp.close_data() + + d = ifp.read_rsrc(128000) + if d: 
+ ofp = openrsrc(out, 'wb') + ofp.write(d) + while True: + d = ifp.read_rsrc(128000) + if not d: break + ofp.write(d) + ofp.close() + + ifp.close() diff --git a/evalkit_cambrian/lib/python3.10/bz2.py b/evalkit_cambrian/lib/python3.10/bz2.py new file mode 100644 index 0000000000000000000000000000000000000000..fabe4f73c8d8085cffc2792dee4c1629a87b8b35 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/bz2.py @@ -0,0 +1,344 @@ +"""Interface to the libbzip2 compression library. + +This module provides a file interface, classes for incremental +(de)compression, and functions for one-shot (de)compression. +""" + +__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor", + "open", "compress", "decompress"] + +__author__ = "Nadeem Vawda " + +from builtins import open as _builtin_open +import io +import os +import _compression + +from _bz2 import BZ2Compressor, BZ2Decompressor + + +_MODE_CLOSED = 0 +_MODE_READ = 1 +# Value 2 no longer used +_MODE_WRITE = 3 + + +class BZ2File(_compression.BaseStream): + + """A file object providing transparent bzip2 (de)compression. + + A BZ2File can act as a wrapper for an existing file object, or refer + directly to a named file on disk. + + Note that BZ2File provides a *binary* file interface - data read is + returned as bytes, and data to be written should be given as bytes. + """ + + def __init__(self, filename, mode="r", *, compresslevel=9): + """Open a bzip2-compressed file. + + If filename is a str, bytes, or PathLike object, it gives the + name of the file to be opened. Otherwise, it should be a file + object, which will be used to read or write the compressed data. + + mode can be 'r' for reading (default), 'w' for (over)writing, + 'x' for creating exclusively, or 'a' for appending. These can + equivalently be given as 'rb', 'wb', 'xb', and 'ab'. + + If mode is 'w', 'x' or 'a', compresslevel can be a number between 1 + and 9 specifying the level of compression: 1 produces the least + compression, and 9 (default) produces the most compression. + + If mode is 'r', the input file may be the concatenation of + multiple compressed streams. + """ + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + + if not (1 <= compresslevel <= 9): + raise ValueError("compresslevel must be between 1 and 9") + + if mode in ("", "r", "rb"): + mode = "rb" + mode_code = _MODE_READ + elif mode in ("w", "wb"): + mode = "wb" + mode_code = _MODE_WRITE + self._compressor = BZ2Compressor(compresslevel) + elif mode in ("x", "xb"): + mode = "xb" + mode_code = _MODE_WRITE + self._compressor = BZ2Compressor(compresslevel) + elif mode in ("a", "ab"): + mode = "ab" + mode_code = _MODE_WRITE + self._compressor = BZ2Compressor(compresslevel) + else: + raise ValueError("Invalid mode: %r" % (mode,)) + + if isinstance(filename, (str, bytes, os.PathLike)): + self._fp = _builtin_open(filename, mode) + self._closefp = True + self._mode = mode_code + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + self._mode = mode_code + else: + raise TypeError("filename must be a str, bytes, file or PathLike object") + + if self._mode == _MODE_READ: + raw = _compression.DecompressReader(self._fp, + BZ2Decompressor, trailing_error=OSError) + self._buffer = io.BufferedReader(raw) + else: + self._pos = 0 + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. 
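+
+        A minimal sketch (the filename is illustrative): using the file as a
+        context manager closes it automatically.
+
+            with BZ2File("example.bz2", "wb") as f:
+                f.write(b"payload")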
+ """ + if self._mode == _MODE_CLOSED: + return + try: + if self._mode == _MODE_READ: + self._buffer.close() + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._buffer = None + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._buffer.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode == _MODE_READ + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + def peek(self, n=0): + """Return buffered data without advancing the file position. + + Always returns at least one byte of data, unless at EOF. + The exact number of bytes returned is unspecified. + """ + self._check_can_read() + # Relies on the undocumented fact that BufferedReader.peek() + # always returns at least one byte (except at EOF), independent + # of the value of n + return self._buffer.peek(n) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b'' if the file is already at EOF. + """ + self._check_can_read() + return self._buffer.read(size) + + def read1(self, size=-1): + """Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b'' if the file is at EOF. + """ + self._check_can_read() + if size < 0: + size = io.DEFAULT_BUFFER_SIZE + return self._buffer.read1(size) + + def readinto(self, b): + """Read bytes into b. + + Returns the number of bytes read (0 for EOF). + """ + self._check_can_read() + return self._buffer.readinto(b) + + def readline(self, size=-1): + """Read a line of uncompressed bytes from the file. + + The terminating newline (if present) is retained. If size is + non-negative, no more than size bytes will be read (in which + case the line may be incomplete). Returns b'' if already at EOF. + """ + if not isinstance(size, int): + if not hasattr(size, "__index__"): + raise TypeError("Integer argument expected") + size = size.__index__() + self._check_can_read() + return self._buffer.readline(size) + + def readlines(self, size=-1): + """Read a list of lines of uncompressed bytes from the file. + + size can be specified to control the number of lines read: no + further lines will be read once the total size of the lines read + so far equals or exceeds size. + """ + if not isinstance(size, int): + if not hasattr(size, "__index__"): + raise TypeError("Integer argument expected") + size = size.__index__() + self._check_can_read() + return self._buffer.readlines(size) + + def write(self, data): + """Write a byte string to the file. + + Returns the number of uncompressed bytes written, which is + always the length of data in bytes. Note that due to buffering, + the file on disk may not reflect the data written until close() + is called. 
+ """ + self._check_can_write() + if isinstance(data, (bytes, bytearray)): + length = len(data) + else: + # accept any data that supports the buffer protocol + data = memoryview(data) + length = data.nbytes + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += length + return length + + def writelines(self, seq): + """Write a sequence of byte strings to the file. + + Returns the number of uncompressed bytes written. + seq can be any iterable yielding byte strings. + + Line separators are not added between the written byte strings. + """ + return _compression.BaseStream.writelines(self, seq) + + def seek(self, offset, whence=io.SEEK_SET): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Values for whence are: + + 0: start of stream (default); offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + self._check_can_seek() + return self._buffer.seek(offset, whence) + + def tell(self): + """Return the current file position.""" + self._check_not_closed() + if self._mode == _MODE_READ: + return self._buffer.tell() + return self._pos + + +def open(filename, mode="rb", compresslevel=9, + encoding=None, errors=None, newline=None): + """Open a bzip2-compressed file in binary or text mode. + + The filename argument can be an actual filename (a str, bytes, or + PathLike object), or an existing file object to read from or write + to. + + The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or + "ab" for binary mode, or "rt", "wt", "xt" or "at" for text mode. + The default mode is "rb", and the default compresslevel is 9. + + For binary mode, this function is equivalent to the BZ2File + constructor: BZ2File(filename, mode, compresslevel). In this case, + the encoding, errors and newline arguments must not be provided. + + For text mode, a BZ2File object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error + handling behavior, and line ending(s). + + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if errors is not None: + raise ValueError("Argument 'errors' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + bz_mode = mode.replace("t", "") + binary_file = BZ2File(filename, bz_mode, compresslevel=compresslevel) + + if "t" in mode: + encoding = io.text_encoding(encoding) + return io.TextIOWrapper(binary_file, encoding, errors, newline) + else: + return binary_file + + +def compress(data, compresslevel=9): + """Compress a block of data. + + compresslevel, if given, must be a number between 1 and 9. + + For incremental compression, use a BZ2Compressor object instead. + """ + comp = BZ2Compressor(compresslevel) + return comp.compress(data) + comp.flush() + + +def decompress(data): + """Decompress a block of data. + + For incremental decompression, use a BZ2Decompressor object instead. + """ + results = [] + while data: + decomp = BZ2Decompressor() + try: + res = decomp.decompress(data) + except OSError: + if results: + break # Leftover data is not a valid bzip2 stream; ignore it. 
+ else: + raise # Error on the first iteration; bail out. + results.append(res) + if not decomp.eof: + raise ValueError("Compressed data ended before the " + "end-of-stream marker was reached") + data = decomp.unused_data + return b"".join(results) diff --git a/evalkit_cambrian/lib/python3.10/cProfile.py b/evalkit_cambrian/lib/python3.10/cProfile.py new file mode 100644 index 0000000000000000000000000000000000000000..9ae1fb8859e51fefb5193127a8d8a24aa77fc92b --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/cProfile.py @@ -0,0 +1,191 @@ +#! /usr/bin/env python3 + +"""Python interface for the 'lsprof' profiler. + Compatible with the 'profile' module. +""" + +__all__ = ["run", "runctx", "Profile"] + +import _lsprof +import io +import profile as _pyprofile + +# ____________________________________________________________ +# Simple interface + +def run(statement, filename=None, sort=-1): + return _pyprofile._Utils(Profile).run(statement, filename, sort) + +def runctx(statement, globals, locals, filename=None, sort=-1): + return _pyprofile._Utils(Profile).runctx(statement, globals, locals, + filename, sort) + +run.__doc__ = _pyprofile.run.__doc__ +runctx.__doc__ = _pyprofile.runctx.__doc__ + +# ____________________________________________________________ + +class Profile(_lsprof.Profiler): + """Profile(timer=None, timeunit=None, subcalls=True, builtins=True) + + Builds a profiler object using the specified timer function. + The default timer is a fast built-in one based on real time. + For custom timer functions returning integers, timeunit can + be a float specifying a scale (i.e. how long each integer unit + is, in seconds). + """ + + # Most of the functionality is in the base class. + # This subclass only adds convenient and backward-compatible methods. + + def print_stats(self, sort=-1): + import pstats + pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats() + + def dump_stats(self, file): + import marshal + with open(file, 'wb') as f: + self.create_stats() + marshal.dump(self.stats, f) + + def create_stats(self): + self.disable() + self.snapshot_stats() + + def snapshot_stats(self): + entries = self.getstats() + self.stats = {} + callersdicts = {} + # call information + for entry in entries: + func = label(entry.code) + nc = entry.callcount # ncalls column of pstats (before '/') + cc = nc - entry.reccallcount # ncalls column of pstats (after '/') + tt = entry.inlinetime # tottime column of pstats + ct = entry.totaltime # cumtime column of pstats + callers = {} + callersdicts[id(entry.code)] = callers + self.stats[func] = cc, nc, tt, ct, callers + # subcall information + for entry in entries: + if entry.calls: + func = label(entry.code) + for subentry in entry.calls: + try: + callers = callersdicts[id(subentry.code)] + except KeyError: + continue + nc = subentry.callcount + cc = nc - subentry.reccallcount + tt = subentry.inlinetime + ct = subentry.totaltime + if func in callers: + prev = callers[func] + nc += prev[0] + cc += prev[1] + tt += prev[2] + ct += prev[3] + callers[func] = nc, cc, tt, ct + + # The following two methods can be called by clients to use + # a profiler to profile a statement, given as a string. + + def run(self, cmd): + import __main__ + dict = __main__.__dict__ + return self.runctx(cmd, dict, dict) + + def runctx(self, cmd, globals, locals): + self.enable() + try: + exec(cmd, globals, locals) + finally: + self.disable() + return self + + # This method is more useful to profile a single function call. 
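+    # A minimal usage sketch (``expensive_function`` is a stand-in name):
+    #
+    #     pr = Profile()
+    #     result = pr.runcall(expensive_function, 10, verbose=True)
+    #     pr.print_stats()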
+ def runcall(self, func, /, *args, **kw): + self.enable() + try: + return func(*args, **kw) + finally: + self.disable() + + def __enter__(self): + self.enable() + return self + + def __exit__(self, *exc_info): + self.disable() + +# ____________________________________________________________ + +def label(code): + if isinstance(code, str): + return ('~', 0, code) # built-in functions ('~' sorts at the end) + else: + return (code.co_filename, code.co_firstlineno, code.co_name) + +# ____________________________________________________________ + +def main(): + import os + import sys + import runpy + import pstats + from optparse import OptionParser + usage = "cProfile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..." + parser = OptionParser(usage=usage) + parser.allow_interspersed_args = False + parser.add_option('-o', '--outfile', dest="outfile", + help="Save stats to ", default=None) + parser.add_option('-s', '--sort', dest="sort", + help="Sort order when printing to stdout, based on pstats.Stats class", + default=-1, + choices=sorted(pstats.Stats.sort_arg_dict_default)) + parser.add_option('-m', dest="module", action="store_true", + help="Profile a library module", default=False) + + if not sys.argv[1:]: + parser.print_usage() + sys.exit(2) + + (options, args) = parser.parse_args() + sys.argv[:] = args + + # The script that we're profiling may chdir, so capture the absolute path + # to the output file at startup. + if options.outfile is not None: + options.outfile = os.path.abspath(options.outfile) + + if len(args) > 0: + if options.module: + code = "run_module(modname, run_name='__main__')" + globs = { + 'run_module': runpy.run_module, + 'modname': args[0] + } + else: + progname = args[0] + sys.path.insert(0, os.path.dirname(progname)) + with io.open_code(progname) as fp: + code = compile(fp.read(), progname, 'exec') + globs = { + '__file__': progname, + '__name__': '__main__', + '__package__': None, + '__cached__': None, + } + try: + runctx(code, globs, None, options.outfile, options.sort) + except BrokenPipeError as exc: + # Prevent "Exception ignored" during interpreter shutdown. + sys.stdout = None + sys.exit(exc.errno) + else: + parser.print_usage() + return parser + +# When invoked as main program, invoke the profiler on a script +if __name__ == '__main__': + main() diff --git a/evalkit_cambrian/lib/python3.10/cgi.py b/evalkit_cambrian/lib/python3.10/cgi.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb8cf28bd66457ef05a8cc19bb53b8f2afbb780 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/cgi.py @@ -0,0 +1,1004 @@ +#! /usr/local/bin/python + +# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is +# intentionally NOT "/usr/bin/env python". On many systems +# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI +# scripts, and /usr/local/bin is the default directory where Python is +# installed, so /usr/bin/env would be unable to find python. Granted, +# binary installations by Linux vendors often install Python in +# /usr/bin. So let those vendors patch cgi.py to match their choice +# of installation. + +"""Support module for CGI (Common Gateway Interface) scripts. + +This module defines a number of utilities for use by CGI scripts +written in Python. +""" + +# History +# ------- +# +# Michael McLay started this module. Steve Majewski changed the +# interface to SvFormContentDict and FormContentDict. The multipart +# parsing was inspired by code submitted by Andreas Paepcke. 
Guido van +# Rossum rewrote, reformatted and documented the module and is currently +# responsible for its maintenance. +# + +__version__ = "2.6" + + +# Imports +# ======= + +from io import StringIO, BytesIO, TextIOWrapper +from collections.abc import Mapping +import sys +import os +import urllib.parse +from email.parser import FeedParser +from email.message import Message +import html +import locale +import tempfile +import warnings + +__all__ = ["MiniFieldStorage", "FieldStorage", "parse", "parse_multipart", + "parse_header", "test", "print_exception", "print_environ", + "print_form", "print_directory", "print_arguments", + "print_environ_usage"] + +# Logging support +# =============== + +logfile = "" # Filename to log to, if not empty +logfp = None # File object to log to, if not None + +def initlog(*allargs): + """Write a log message, if there is a log file. + + Even though this function is called initlog(), you should always + use log(); log is a variable that is set either to initlog + (initially), to dolog (once the log file has been opened), or to + nolog (when logging is disabled). + + The first argument is a format string; the remaining arguments (if + any) are arguments to the % operator, so e.g. + log("%s: %s", "a", "b") + will write "a: b" to the log file, followed by a newline. + + If the global logfp is not None, it should be a file object to + which log data is written. + + If the global logfp is None, the global logfile may be a string + giving a filename to open, in append mode. This file should be + world writable!!! If the file can't be opened, logging is + silently disabled (since there is no safe place where we could + send an error message). + + """ + global log, logfile, logfp + warnings.warn("cgi.log() is deprecated as of 3.10. Use logging instead", + DeprecationWarning, stacklevel=2) + if logfile and not logfp: + try: + logfp = open(logfile, "a", encoding="locale") + except OSError: + pass + if not logfp: + log = nolog + else: + log = dolog + log(*allargs) + +def dolog(fmt, *args): + """Write a log message to the log file. See initlog() for docs.""" + logfp.write(fmt%args + "\n") + +def nolog(*allargs): + """Dummy function, assigned to log when logging is disabled.""" + pass + +def closelog(): + """Close the log file.""" + global log, logfile, logfp + logfile = '' + if logfp: + logfp.close() + logfp = None + log = initlog + +log = initlog # The current logging function + + +# Parsing functions +# ================= + +# Maximum input we will accept when REQUEST_METHOD is POST +# 0 ==> unlimited input +maxlen = 0 + +def parse(fp=None, environ=os.environ, keep_blank_values=0, + strict_parsing=0, separator='&'): + """Parse a query in the environment or from a file (default stdin) + + Arguments, all optional: + + fp : file pointer; default: sys.stdin.buffer + + environ : environment dictionary; default: os.environ + + keep_blank_values: flag indicating whether blank values in + percent-encoded forms should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. 
+ """ + if fp is None: + fp = sys.stdin + + # field keys and values (except for files) are returned as strings + # an encoding is required to decode the bytes read from self.fp + if hasattr(fp,'encoding'): + encoding = fp.encoding + else: + encoding = 'latin-1' + + # fp.read() must return bytes + if isinstance(fp, TextIOWrapper): + fp = fp.buffer + + if not 'REQUEST_METHOD' in environ: + environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone + if environ['REQUEST_METHOD'] == 'POST': + ctype, pdict = parse_header(environ['CONTENT_TYPE']) + if ctype == 'multipart/form-data': + return parse_multipart(fp, pdict, separator=separator) + elif ctype == 'application/x-www-form-urlencoded': + clength = int(environ['CONTENT_LENGTH']) + if maxlen and clength > maxlen: + raise ValueError('Maximum content length exceeded') + qs = fp.read(clength).decode(encoding) + else: + qs = '' # Unknown content-type + if 'QUERY_STRING' in environ: + if qs: qs = qs + '&' + qs = qs + environ['QUERY_STRING'] + elif sys.argv[1:]: + if qs: qs = qs + '&' + qs = qs + sys.argv[1] + environ['QUERY_STRING'] = qs # XXX Shouldn't, really + elif 'QUERY_STRING' in environ: + qs = environ['QUERY_STRING'] + else: + if sys.argv[1:]: + qs = sys.argv[1] + else: + qs = "" + environ['QUERY_STRING'] = qs # XXX Shouldn't, really + return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing, + encoding=encoding, separator=separator) + + +def parse_multipart(fp, pdict, encoding="utf-8", errors="replace", separator='&'): + """Parse multipart input. + + Arguments: + fp : input file + pdict: dictionary containing other parameters of content-type header + encoding, errors: request encoding and error handler, passed to + FieldStorage + + Returns a dictionary just like parse_qs(): keys are the field names, each + value is a list of values for that field. For non-file fields, the value + is a list of strings. + """ + # RFC 2046, Section 5.1 : The "multipart" boundary delimiters are always + # represented as 7bit US-ASCII. + boundary = pdict['boundary'].decode('ascii') + ctype = "multipart/form-data; boundary={}".format(boundary) + headers = Message() + headers.set_type(ctype) + try: + headers['Content-Length'] = pdict['CONTENT-LENGTH'] + except KeyError: + pass + fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors, + environ={'REQUEST_METHOD': 'POST'}, separator=separator) + return {k: fs.getlist(k) for k in fs} + +def _parseparam(s): + while s[:1] == ';': + s = s[1:] + end = s.find(';') + while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: + end = s.find(';', end + 1) + if end < 0: + end = len(s) + f = s[:end] + yield f.strip() + s = s[end:] + +def parse_header(line): + """Parse a Content-type like header. + + Return the main content-type and a dictionary of options. 
+ + """ + parts = _parseparam(';' + line) + key = parts.__next__() + pdict = {} + for p in parts: + i = p.find('=') + if i >= 0: + name = p[:i].strip().lower() + value = p[i+1:].strip() + if len(value) >= 2 and value[0] == value[-1] == '"': + value = value[1:-1] + value = value.replace('\\\\', '\\').replace('\\"', '"') + pdict[name] = value + return key, pdict + + +# Classes for field storage +# ========================= + +class MiniFieldStorage: + + """Like FieldStorage, for use when no file uploads are possible.""" + + # Dummy attributes + filename = None + list = None + type = None + file = None + type_options = {} + disposition = None + disposition_options = {} + headers = {} + + def __init__(self, name, value): + """Constructor from field name and value.""" + self.name = name + self.value = value + # self.file = StringIO(value) + + def __repr__(self): + """Return printable representation.""" + return "MiniFieldStorage(%r, %r)" % (self.name, self.value) + + +class FieldStorage: + + """Store a sequence of fields, reading multipart/form-data. + + This class provides naming, typing, files stored on disk, and + more. At the top level, it is accessible like a dictionary, whose + keys are the field names. (Note: None can occur as a field name.) + The items are either a Python list (if there's multiple values) or + another FieldStorage or MiniFieldStorage object. If it's a single + object, it has the following attributes: + + name: the field name, if specified; otherwise None + + filename: the filename, if specified; otherwise None; this is the + client side filename, *not* the file name on which it is + stored (that's a temporary file you don't deal with) + + value: the value as a *string*; for file uploads, this + transparently reads the file every time you request the value + and returns *bytes* + + file: the file(-like) object from which you can read the data *as + bytes* ; None if the data is stored a simple string + + type: the content-type, or None if not specified + + type_options: dictionary of options specified on the content-type + line + + disposition: content-disposition, or None if not specified + + disposition_options: dictionary of corresponding options + + headers: a dictionary(-like) object (sometimes email.message.Message or a + subclass thereof) containing *all* headers + + The class is subclassable, mostly for the purpose of overriding + the make_file() method, which is called internally to come up with + a file open for reading and writing. This makes it possible to + override the default choice of storing all files in a temporary + directory and unlinking them as soon as they have been opened. + + """ + def __init__(self, fp=None, headers=None, outerboundary=b'', + environ=os.environ, keep_blank_values=0, strict_parsing=0, + limit=None, encoding='utf-8', errors='replace', + max_num_fields=None, separator='&'): + """Constructor. Read multipart/* until last part. + + Arguments, all optional: + + fp : file pointer; default: sys.stdin.buffer + (not used when the request method is GET) + Can be : + 1. a TextIOWrapper object + 2. an object whose read() and readline() methods return bytes + + headers : header dictionary-like object; default: + taken from environ as per CGI spec + + outerboundary : terminating multipart boundary + (for internal use only) + + environ : environment dictionary; default: os.environ + + keep_blank_values: flag indicating whether blank values in + percent-encoded forms should be treated as blank strings. 
+ A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + limit : used internally to read parts of multipart/form-data forms, + to exit from the reading loop when reached. It is the difference + between the form content-length and the number of bytes already + read + + encoding, errors : the encoding and error handler used to decode the + binary stream to strings. Must be the same as the charset defined + for the page sending the form (content-type : meta http-equiv or + header) + + max_num_fields: int. If set, then __init__ throws a ValueError + if there are more than n fields read by parse_qsl(). + + """ + method = 'GET' + self.keep_blank_values = keep_blank_values + self.strict_parsing = strict_parsing + self.max_num_fields = max_num_fields + self.separator = separator + if 'REQUEST_METHOD' in environ: + method = environ['REQUEST_METHOD'].upper() + self.qs_on_post = None + if method == 'GET' or method == 'HEAD': + if 'QUERY_STRING' in environ: + qs = environ['QUERY_STRING'] + elif sys.argv[1:]: + qs = sys.argv[1] + else: + qs = "" + qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape') + fp = BytesIO(qs) + if headers is None: + headers = {'content-type': + "application/x-www-form-urlencoded"} + if headers is None: + headers = {} + if method == 'POST': + # Set default content-type for POST to what's traditional + headers['content-type'] = "application/x-www-form-urlencoded" + if 'CONTENT_TYPE' in environ: + headers['content-type'] = environ['CONTENT_TYPE'] + if 'QUERY_STRING' in environ: + self.qs_on_post = environ['QUERY_STRING'] + if 'CONTENT_LENGTH' in environ: + headers['content-length'] = environ['CONTENT_LENGTH'] + else: + if not (isinstance(headers, (Mapping, Message))): + raise TypeError("headers must be mapping or an instance of " + "email.message.Message") + self.headers = headers + if fp is None: + self.fp = sys.stdin.buffer + # self.fp.read() must return bytes + elif isinstance(fp, TextIOWrapper): + self.fp = fp.buffer + else: + if not (hasattr(fp, 'read') and hasattr(fp, 'readline')): + raise TypeError("fp must be file pointer") + self.fp = fp + + self.encoding = encoding + self.errors = errors + + if not isinstance(outerboundary, bytes): + raise TypeError('outerboundary must be bytes, not %s' + % type(outerboundary).__name__) + self.outerboundary = outerboundary + + self.bytes_read = 0 + self.limit = limit + + # Process content-disposition header + cdisp, pdict = "", {} + if 'content-disposition' in self.headers: + cdisp, pdict = parse_header(self.headers['content-disposition']) + self.disposition = cdisp + self.disposition_options = pdict + self.name = None + if 'name' in pdict: + self.name = pdict['name'] + self.filename = None + if 'filename' in pdict: + self.filename = pdict['filename'] + self._binary_file = self.filename is not None + + # Process content-type header + # + # Honor any existing content-type header. But if there is no + # content-type header, use some sensible defaults. Assume + # outerboundary is "" at the outer level, but something non-false + # inside a multi-part. The default for an inner part is text/plain, + # but for an outer part it should be urlencoded. 
This should catch + # bogus clients which erroneously forget to include a content-type + # header. + # + # See below for what we do if there does exist a content-type header, + # but it happens to be something we don't understand. + if 'content-type' in self.headers: + ctype, pdict = parse_header(self.headers['content-type']) + elif self.outerboundary or method != 'POST': + ctype, pdict = "text/plain", {} + else: + ctype, pdict = 'application/x-www-form-urlencoded', {} + self.type = ctype + self.type_options = pdict + if 'boundary' in pdict: + self.innerboundary = pdict['boundary'].encode(self.encoding, + self.errors) + else: + self.innerboundary = b"" + + clen = -1 + if 'content-length' in self.headers: + try: + clen = int(self.headers['content-length']) + except ValueError: + pass + if maxlen and clen > maxlen: + raise ValueError('Maximum content length exceeded') + self.length = clen + if self.limit is None and clen >= 0: + self.limit = clen + + self.list = self.file = None + self.done = 0 + if ctype == 'application/x-www-form-urlencoded': + self.read_urlencoded() + elif ctype[:10] == 'multipart/': + self.read_multi(environ, keep_blank_values, strict_parsing) + else: + self.read_single() + + def __del__(self): + try: + self.file.close() + except AttributeError: + pass + + def __enter__(self): + return self + + def __exit__(self, *args): + self.file.close() + + def __repr__(self): + """Return a printable representation.""" + return "FieldStorage(%r, %r, %r)" % ( + self.name, self.filename, self.value) + + def __iter__(self): + return iter(self.keys()) + + def __getattr__(self, name): + if name != 'value': + raise AttributeError(name) + if self.file: + self.file.seek(0) + value = self.file.read() + self.file.seek(0) + elif self.list is not None: + value = self.list + else: + value = None + return value + + def __getitem__(self, key): + """Dictionary style indexing.""" + if self.list is None: + raise TypeError("not indexable") + found = [] + for item in self.list: + if item.name == key: found.append(item) + if not found: + raise KeyError(key) + if len(found) == 1: + return found[0] + else: + return found + + def getvalue(self, key, default=None): + """Dictionary style get() method, including 'value' lookup.""" + if key in self: + value = self[key] + if isinstance(value, list): + return [x.value for x in value] + else: + return value.value + else: + return default + + def getfirst(self, key, default=None): + """ Return the first value received.""" + if key in self: + value = self[key] + if isinstance(value, list): + return value[0].value + else: + return value.value + else: + return default + + def getlist(self, key): + """ Return list of received values.""" + if key in self: + value = self[key] + if isinstance(value, list): + return [x.value for x in value] + else: + return [value.value] + else: + return [] + + def keys(self): + """Dictionary style keys() method.""" + if self.list is None: + raise TypeError("not indexable") + return list(set(item.name for item in self.list)) + + def __contains__(self, key): + """Dictionary style __contains__ method.""" + if self.list is None: + raise TypeError("not indexable") + return any(item.name == key for item in self.list) + + def __len__(self): + """Dictionary style len(x) support.""" + return len(self.keys()) + + def __bool__(self): + if self.list is None: + raise TypeError("Cannot be converted to bool.") + return bool(self.list) + + def read_urlencoded(self): + """Internal: read data in query string format.""" + qs = self.fp.read(self.length) + if 
not isinstance(qs, bytes): + raise ValueError("%s should return bytes, got %s" \ + % (self.fp, type(qs).__name__)) + qs = qs.decode(self.encoding, self.errors) + if self.qs_on_post: + qs += '&' + self.qs_on_post + query = urllib.parse.parse_qsl( + qs, self.keep_blank_values, self.strict_parsing, + encoding=self.encoding, errors=self.errors, + max_num_fields=self.max_num_fields, separator=self.separator) + self.list = [MiniFieldStorage(key, value) for key, value in query] + self.skip_lines() + + FieldStorageClass = None + + def read_multi(self, environ, keep_blank_values, strict_parsing): + """Internal: read a part that is itself multipart.""" + ib = self.innerboundary + if not valid_boundary(ib): + raise ValueError('Invalid boundary in multipart form: %r' % (ib,)) + self.list = [] + if self.qs_on_post: + query = urllib.parse.parse_qsl( + self.qs_on_post, self.keep_blank_values, self.strict_parsing, + encoding=self.encoding, errors=self.errors, + max_num_fields=self.max_num_fields, separator=self.separator) + self.list.extend(MiniFieldStorage(key, value) for key, value in query) + + klass = self.FieldStorageClass or self.__class__ + first_line = self.fp.readline() # bytes + if not isinstance(first_line, bytes): + raise ValueError("%s should return bytes, got %s" \ + % (self.fp, type(first_line).__name__)) + self.bytes_read += len(first_line) + + # Ensure that we consume the file until we've hit our inner boundary + while (first_line.strip() != (b"--" + self.innerboundary) and + first_line): + first_line = self.fp.readline() + self.bytes_read += len(first_line) + + # Propagate max_num_fields into the sub class appropriately + max_num_fields = self.max_num_fields + if max_num_fields is not None: + max_num_fields -= len(self.list) + + while True: + parser = FeedParser() + hdr_text = b"" + while True: + data = self.fp.readline() + hdr_text += data + if not data.strip(): + break + if not hdr_text: + break + # parser takes strings, not bytes + self.bytes_read += len(hdr_text) + parser.feed(hdr_text.decode(self.encoding, self.errors)) + headers = parser.close() + + # Some clients add Content-Length for part headers, ignore them + if 'content-length' in headers: + del headers['content-length'] + + limit = None if self.limit is None \ + else self.limit - self.bytes_read + part = klass(self.fp, headers, ib, environ, keep_blank_values, + strict_parsing, limit, + self.encoding, self.errors, max_num_fields, self.separator) + + if max_num_fields is not None: + max_num_fields -= 1 + if part.list: + max_num_fields -= len(part.list) + if max_num_fields < 0: + raise ValueError('Max number of fields exceeded') + + self.bytes_read += part.bytes_read + self.list.append(part) + if part.done or self.bytes_read >= self.length > 0: + break + self.skip_lines() + + def read_single(self): + """Internal: read an atomic part.""" + if self.length >= 0: + self.read_binary() + self.skip_lines() + else: + self.read_lines() + self.file.seek(0) + + bufsize = 8*1024 # I/O buffering size for copy to file + + def read_binary(self): + """Internal: read binary data.""" + self.file = self.make_file() + todo = self.length + if todo >= 0: + while todo > 0: + data = self.fp.read(min(todo, self.bufsize)) # bytes + if not isinstance(data, bytes): + raise ValueError("%s should return bytes, got %s" + % (self.fp, type(data).__name__)) + self.bytes_read += len(data) + if not data: + self.done = -1 + break + self.file.write(data) + todo = todo - len(data) + + def read_lines(self): + """Internal: read lines until EOF or outerboundary.""" + 
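# --- Editor's note: illustrative usage sketch, not part of cgi.py ----------
# A minimal CGI script drives the FieldStorage class defined above like this:
def _fieldstorage_usage_example():
    import cgi
    form = cgi.FieldStorage()                        # parses QUERY_STRING or the POST body
    name = form.getfirst("name", "world")            # first value for the field, or a default
    print("Content-Type: text/plain")
    print()
    print("Hello, %s!" % name)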
if self._binary_file: + self.file = self.__file = BytesIO() # store data as bytes for files + else: + self.file = self.__file = StringIO() # as strings for other fields + if self.outerboundary: + self.read_lines_to_outerboundary() + else: + self.read_lines_to_eof() + + def __write(self, line): + """line is always bytes, not string""" + if self.__file is not None: + if self.__file.tell() + len(line) > 1000: + self.file = self.make_file() + data = self.__file.getvalue() + self.file.write(data) + self.__file = None + if self._binary_file: + # keep bytes + self.file.write(line) + else: + # decode to string + self.file.write(line.decode(self.encoding, self.errors)) + + def read_lines_to_eof(self): + """Internal: read lines until EOF.""" + while 1: + line = self.fp.readline(1<<16) # bytes + self.bytes_read += len(line) + if not line: + self.done = -1 + break + self.__write(line) + + def read_lines_to_outerboundary(self): + """Internal: read lines until outerboundary. + Data is read as bytes: boundaries and line ends must be converted + to bytes for comparisons. + """ + next_boundary = b"--" + self.outerboundary + last_boundary = next_boundary + b"--" + delim = b"" + last_line_lfend = True + _read = 0 + while 1: + + if self.limit is not None and 0 <= self.limit <= _read: + break + line = self.fp.readline(1<<16) # bytes + self.bytes_read += len(line) + _read += len(line) + if not line: + self.done = -1 + break + if delim == b"\r": + line = delim + line + delim = b"" + if line.startswith(b"--") and last_line_lfend: + strippedline = line.rstrip() + if strippedline == next_boundary: + break + if strippedline == last_boundary: + self.done = 1 + break + odelim = delim + if line.endswith(b"\r\n"): + delim = b"\r\n" + line = line[:-2] + last_line_lfend = True + elif line.endswith(b"\n"): + delim = b"\n" + line = line[:-1] + last_line_lfend = True + elif line.endswith(b"\r"): + # We may interrupt \r\n sequences if they span the 2**16 + # byte boundary + delim = b"\r" + line = line[:-1] + last_line_lfend = False + else: + delim = b"" + last_line_lfend = False + self.__write(odelim + line) + + def skip_lines(self): + """Internal: skip lines until outer boundary if defined.""" + if not self.outerboundary or self.done: + return + next_boundary = b"--" + self.outerboundary + last_boundary = next_boundary + b"--" + last_line_lfend = True + while True: + line = self.fp.readline(1<<16) + self.bytes_read += len(line) + if not line: + self.done = -1 + break + if line.endswith(b"--") and last_line_lfend: + strippedline = line.strip() + if strippedline == next_boundary: + break + if strippedline == last_boundary: + self.done = 1 + break + last_line_lfend = line.endswith(b'\n') + + def make_file(self): + """Overridable: return a readable & writable file. + + The file will be used as follows: + - data is written to it + - seek(0) + - data is read from it + + The file is opened in binary mode for files, in text mode + for other fields + + This version opens a temporary file for reading and writing, + and immediately deletes (unlinks) it. The trick (on Unix!) is + that the file can still be used, but it can't be opened by + another process, and it will automatically be deleted when it + is closed or when the current process terminates. + + If you want a more permanent file, you derive a class which + overrides this method. 
If you want a visible temporary file + that is nevertheless automatically deleted when the script + terminates, try defining a __del__ method in a derived class + which unlinks the temporary files you have created. + + """ + if self._binary_file: + return tempfile.TemporaryFile("wb+") + else: + return tempfile.TemporaryFile("w+", + encoding=self.encoding, newline = '\n') + + +# Test/debug code +# =============== + +def test(environ=os.environ): + """Robust test CGI script, usable as main program. + + Write minimal HTTP headers and dump all information provided to + the script in HTML form. + + """ + print("Content-type: text/html") + print() + sys.stderr = sys.stdout + try: + form = FieldStorage() # Replace with other classes to test those + print_directory() + print_arguments() + print_form(form) + print_environ(environ) + print_environ_usage() + def f(): + exec("testing print_exception() -- italics?") + def g(f=f): + f() + print("

<H3>What follows is a test, not an actual exception:</H3>") + g() + except: + print_exception() + + print("<H1>Second try with a small maxlen...</H1>") + + global maxlen + maxlen = 50 + try: + form = FieldStorage() # Replace with other classes to test those + print_directory() + print_arguments() + print_form(form) + print_environ(environ) + except: + print_exception() +
+def print_exception(type=None, value=None, tb=None, limit=None): + if type is None: + type, value, tb = sys.exc_info() + import traceback + print() + print("<H3>Traceback (most recent call last):</H3>") + list = traceback.format_tb(tb, limit) + \ + traceback.format_exception_only(type, value) + print("<PRE>%s<B>%s</B></PRE>" % ( + html.escape("".join(list[:-1])), + html.escape(list[-1]), + )) + del tb +
+def print_environ(environ=os.environ): + """Dump the shell environment as HTML.""" + keys = sorted(environ.keys()) + print() + print("<H3>Shell Environment:</H3>") + print("<DL>") + for key in keys: + print("<DT>", html.escape(key), "<DD>", html.escape(environ[key])) + print("</DL>") + print() +
+def print_form(form): + """Dump the contents of a form as HTML.""" + keys = sorted(form.keys()) + print() + print("<H3>Form Contents:</H3>") + if not keys: + print("<P>No form fields.") + print("<DL>") + for key in keys: + print("<DT>" + html.escape(key) + ":", end=' ') + value = form[key] + print("<i>" + html.escape(repr(type(value))) + "</i>") + print("<DD>" + html.escape(repr(value))) + print("</DL>") + print() +
+def print_directory(): + """Dump the current directory as HTML.""" + print() + print("<H3>Current Working Directory:</H3>") + try: + pwd = os.getcwd() + except OSError as msg: + print("OSError:", html.escape(str(msg))) + else: + print(html.escape(pwd)) + print() +
+def print_arguments(): + print() + print("<H3>Command Line Arguments:</H3>") + print() + print(sys.argv) + print() +
+def print_environ_usage(): + """Dump a list of environment variables used by CGI as HTML.""" + print("""
+<H3>These environment variables could have been set:</H3>
+<UL>
+<LI>AUTH_TYPE
+<LI>CONTENT_LENGTH
+<LI>CONTENT_TYPE
+<LI>DATE_GMT
+<LI>DATE_LOCAL
+<LI>DOCUMENT_NAME
+<LI>DOCUMENT_ROOT
+<LI>DOCUMENT_URI
+<LI>GATEWAY_INTERFACE
+<LI>LAST_MODIFIED
+<LI>PATH
+<LI>PATH_INFO
+<LI>PATH_TRANSLATED
+<LI>QUERY_STRING
+<LI>REMOTE_ADDR
+<LI>REMOTE_HOST
+<LI>REMOTE_IDENT
+<LI>REMOTE_USER
+<LI>REQUEST_METHOD
+<LI>SCRIPT_NAME
+<LI>SERVER_NAME
+<LI>SERVER_PORT
+<LI>SERVER_PROTOCOL
+<LI>SERVER_ROOT
+<LI>SERVER_SOFTWARE
+</UL>
+In addition, HTTP headers sent by the server may be passed in the
+environment as well.  Here are some common variable names:
+<UL>
+<LI>HTTP_ACCEPT
+<LI>HTTP_CONNECTION
+<LI>HTTP_HOST
+<LI>HTTP_PRAGMA
+<LI>HTTP_REFERER
+<LI>HTTP_USER_AGENT
+</UL>
+""") + + +# Utilities +# ========= + +def valid_boundary(s): + import re + if isinstance(s, bytes): + _vb_pattern = b"^[ -~]{0,200}[!-~]$" + else: + _vb_pattern = "^[ -~]{0,200}[!-~]$" + return re.match(_vb_pattern, s) + +# Invoke mainline +# =============== + +# Call test() when this file is run as a script (not imported as a module) +if __name__ == '__main__': + test() diff --git a/evalkit_cambrian/lib/python3.10/cgitb.py b/evalkit_cambrian/lib/python3.10/cgitb.py new file mode 100644 index 0000000000000000000000000000000000000000..17ddda376884dfb6ba2a1e7a315e333463644c25 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/cgitb.py @@ -0,0 +1,321 @@ +"""More comprehensive traceback formatting for Python scripts. + +To enable this module, do: + + import cgitb; cgitb.enable() + +at the top of your script. The optional arguments to enable() are: + + display - if true, tracebacks are displayed in the web browser + logdir - if set, tracebacks are written to files in this directory + context - number of lines of source code to show for each stack frame + format - 'text' or 'html' controls the output format + +By default, tracebacks are displayed but not saved, the context is 5 lines +and the output format is 'html' (for backwards compatibility with the +original use of this module) + +Alternatively, if you have caught an exception and want cgitb to display it +for you, call cgitb.handler(). The optional argument to handler() is a +3-item tuple (etype, evalue, etb) just like the value of sys.exc_info(). +The default handler displays output as HTML. + +""" +import inspect +import keyword +import linecache +import os +import pydoc +import sys +import tempfile +import time +import tokenize +import traceback + +def reset(): + """Return a string that resets the CGI and browser to a known state.""" + return ''' + --> --> + + ''' + +__UNDEF__ = [] # a special sentinel object +def small(text): + if text: + return '' + text + '' + else: + return '' + +def strong(text): + if text: + return '' + text + '' + else: + return '' + +def grey(text): + if text: + return '' + text + '' + else: + return '' + +def lookup(name, frame, locals): + """Find the value for a given name in the given environment.""" + if name in locals: + return 'local', locals[name] + if name in frame.f_globals: + return 'global', frame.f_globals[name] + if '__builtins__' in frame.f_globals: + builtins = frame.f_globals['__builtins__'] + if type(builtins) is type({}): + if name in builtins: + return 'builtin', builtins[name] + else: + if hasattr(builtins, name): + return 'builtin', getattr(builtins, name) + return None, __UNDEF__ + +def scanvars(reader, frame, locals): + """Scan one logical line of Python and look up values of variables used.""" + vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__ + for ttype, token, start, end, line in tokenize.generate_tokens(reader): + if ttype == tokenize.NEWLINE: break + if ttype == tokenize.NAME and token not in keyword.kwlist: + if lasttoken == '.': + if parent is not __UNDEF__: + value = getattr(parent, token, __UNDEF__) + vars.append((prefix + token, prefix, value)) + else: + where, value = lookup(token, frame, locals) + vars.append((token, where, value)) + elif token == '.': + prefix += lasttoken + '.' 
+ parent = value + else: + parent, prefix = None, '' + lasttoken = token + return vars + +def html(einfo, context=5): + """Return a nice HTML document describing a given traceback.""" + etype, evalue, etb = einfo + if isinstance(etype, type): + etype = etype.__name__ + pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + date = time.ctime(time.time()) + head = '' + pydoc.html.heading( + '%s' % + strong(pydoc.html.escape(str(etype))), + '#ffffff', '#6622aa', pyver + '
<br>' + date) + ''' +<p>A problem occurred in a Python script.  Here is the sequence of +function calls leading up to the error, in the order they occurred.</p>

''' + + indent = '' + small(' ' * 5) + ' ' + frames = [] + records = inspect.getinnerframes(etb, context) + for frame, file, lnum, func, lines, index in records: + if file: + file = os.path.abspath(file) + link = '%s' % (file, pydoc.html.escape(file)) + else: + file = link = '?' + args, varargs, varkw, locals = inspect.getargvalues(frame) + call = '' + if func != '?': + call = 'in ' + strong(pydoc.html.escape(func)) + if func != "": + call += inspect.formatargvalues(args, varargs, varkw, locals, + formatvalue=lambda value: '=' + pydoc.html.repr(value)) + + highlight = {} + def reader(lnum=[lnum]): + highlight[lnum[0]] = 1 + try: return linecache.getline(file, lnum[0]) + finally: lnum[0] += 1 + vars = scanvars(reader, frame, locals) + + rows = ['%s%s %s' % + (' ', link, call)] + if index is not None: + i = lnum - index + for line in lines: + num = small(' ' * (5-len(str(i))) + str(i)) + ' ' + if i in highlight: + line = '=>%s%s' % (num, pydoc.html.preformat(line)) + rows.append('%s' % line) + else: + line = '  %s%s' % (num, pydoc.html.preformat(line)) + rows.append('%s' % grey(line)) + i += 1 + + done, dump = {}, [] + for name, where, value in vars: + if name in done: continue + done[name] = 1 + if value is not __UNDEF__: + if where in ('global', 'builtin'): + name = ('%s ' % where) + strong(name) + elif where == 'local': + name = strong(name) + else: + name = where + strong(name.split('.')[-1]) + dump.append('%s = %s' % (name, pydoc.html.repr(value))) + else: + dump.append(name + ' undefined') + + rows.append('%s' % small(grey(', '.join(dump)))) + frames.append(''' + +%s
</table>''' % '\n'.join(rows)) + + exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))), + pydoc.html.escape(str(evalue)))] + for name in dir(evalue): + if name[:1] == '_': continue + value = pydoc.html.repr(getattr(evalue, name)) + exception.append('\n<br>
%s%s =\n%s' % (indent, name, value)) + + return head + ''.join(frames) + ''.join(exception) + ''' + + + +''' % pydoc.html.escape( + ''.join(traceback.format_exception(etype, evalue, etb))) + +def text(einfo, context=5): + """Return a plain text document describing a given traceback.""" + etype, evalue, etb = einfo + if isinstance(etype, type): + etype = etype.__name__ + pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + date = time.ctime(time.time()) + head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + ''' +A problem occurred in a Python script. Here is the sequence of +function calls leading up to the error, in the order they occurred. +''' + + frames = [] + records = inspect.getinnerframes(etb, context) + for frame, file, lnum, func, lines, index in records: + file = file and os.path.abspath(file) or '?' + args, varargs, varkw, locals = inspect.getargvalues(frame) + call = '' + if func != '?': + call = 'in ' + func + if func != "": + call += inspect.formatargvalues(args, varargs, varkw, locals, + formatvalue=lambda value: '=' + pydoc.text.repr(value)) + + highlight = {} + def reader(lnum=[lnum]): + highlight[lnum[0]] = 1 + try: return linecache.getline(file, lnum[0]) + finally: lnum[0] += 1 + vars = scanvars(reader, frame, locals) + + rows = [' %s %s' % (file, call)] + if index is not None: + i = lnum - index + for line in lines: + num = '%5d ' % i + rows.append(num+line.rstrip()) + i += 1 + + done, dump = {}, [] + for name, where, value in vars: + if name in done: continue + done[name] = 1 + if value is not __UNDEF__: + if where == 'global': name = 'global ' + name + elif where != 'local': name = where + name.split('.')[-1] + dump.append('%s = %s' % (name, pydoc.text.repr(value))) + else: + dump.append(name + ' undefined') + + rows.append('\n'.join(dump)) + frames.append('\n%s\n' % '\n'.join(rows)) + + exception = ['%s: %s' % (str(etype), str(evalue))] + for name in dir(evalue): + value = pydoc.text.repr(getattr(evalue, name)) + exception.append('\n%s%s = %s' % (" "*4, name, value)) + + return head + ''.join(frames) + ''.join(exception) + ''' + +The above is a description of an error in a Python program. Here is +the original traceback: + +%s +''' % ''.join(traceback.format_exception(etype, evalue, etb)) + +class Hook: + """A hook to replace sys.excepthook that shows tracebacks in HTML.""" + + def __init__(self, display=1, logdir=None, context=5, file=None, + format="html"): + self.display = display # send tracebacks to browser if true + self.logdir = logdir # log tracebacks to files if not None + self.context = context # number of source code lines per frame + self.file = file or sys.stdout # place to send the output + self.format = format + + def __call__(self, etype, evalue, etb): + self.handle((etype, evalue, etb)) + + def handle(self, info=None): + info = info or sys.exc_info() + if self.format == "html": + self.file.write(reset()) + + formatter = (self.format=="html") and html or text + plain = False + try: + doc = formatter(info, self.context) + except: # just in case something goes wrong + doc = ''.join(traceback.format_exception(*info)) + plain = True + + if self.display: + if plain: + doc = pydoc.html.escape(doc) + self.file.write('

<pre>' + doc + '</pre>\n') + else: + self.file.write(doc + '\n') + else: + self.file.write('<p>A problem occurred in a Python script.\n') + + if self.logdir is not None: + suffix = ['.txt', '.html'][self.format=="html"] + (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) + + try: + with os.fdopen(fd, 'w') as file: + file.write(doc) + msg = '%s contains the description of this error.' % path + except: + msg = 'Tried to save traceback to %s, but failed.' % path + + if self.format == 'html': + self.file.write('<p>%s</p>
\n' % msg) + else: + self.file.write(msg + '\n') + try: + self.file.flush() + except: pass + +handler = Hook().handle +def enable(display=1, logdir=None, context=5, format="html"): + """Install an exception handler that formats tracebacks as HTML. + + The optional argument 'display' can be set to 0 to suppress sending the + traceback to the browser, and 'logdir' can be set to a directory to cause + tracebacks to be written to files there.""" + sys.excepthook = Hook(display=display, logdir=logdir, + context=context, format=format) diff --git a/evalkit_cambrian/lib/python3.10/chunk.py b/evalkit_cambrian/lib/python3.10/chunk.py new file mode 100644 index 0000000000000000000000000000000000000000..870c39fe7f5037152dd5b430e6b6f3f7e5d7a8de --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/chunk.py @@ -0,0 +1,169 @@ +"""Simple class to read IFF chunks. + +An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File +Format)) has the following structure: + ++----------------+ +| ID (4 bytes) | ++----------------+ +| size (4 bytes) | ++----------------+ +| data | +| ... | ++----------------+ + +The ID is a 4-byte string which identifies the type of chunk. + +The size field (a 32-bit value, encoded using big-endian byte order) +gives the size of the whole chunk, including the 8-byte header. + +Usually an IFF-type file consists of one or more chunks. The proposed +usage of the Chunk class defined here is to instantiate an instance at +the start of each chunk and read from the instance until it reaches +the end, after which a new instance can be instantiated. At the end +of the file, creating a new instance will fail with an EOFError +exception. + +Usage: +while True: + try: + chunk = Chunk(file) + except EOFError: + break + chunktype = chunk.getname() + while True: + data = chunk.read(nbytes) + if not data: + pass + # do something with data + +The interface is file-like. The implemented methods are: +read, close, seek, tell, isatty. +Extra methods are: skip() (called by close, skips to the end of the chunk), +getname() (returns the name (ID) of the chunk) + +The __init__ method has one required argument, a file-like object +(including a chunk instance), and one optional argument, a flag which +specifies whether or not chunks are aligned on 2-byte boundaries. The +default is 1, i.e. aligned. +""" + +class Chunk: + def __init__(self, file, align=True, bigendian=True, inclheader=False): + import struct + self.closed = False + self.align = align # whether to align to word (2-byte) boundaries + if bigendian: + strflag = '>' + else: + strflag = '<' + self.file = file + self.chunkname = file.read(4) + if len(self.chunkname) < 4: + raise EOFError + try: + self.chunksize = struct.unpack_from(strflag+'L', file.read(4))[0] + except struct.error: + raise EOFError from None + if inclheader: + self.chunksize = self.chunksize - 8 # subtract header + self.size_read = 0 + try: + self.offset = self.file.tell() + except (AttributeError, OSError): + self.seekable = False + else: + self.seekable = True + + def getname(self): + """Return the name (ID) of the current chunk.""" + return self.chunkname + + def getsize(self): + """Return the size of the current chunk.""" + return self.chunksize + + def close(self): + if not self.closed: + try: + self.skip() + finally: + self.closed = True + + def isatty(self): + if self.closed: + raise ValueError("I/O operation on closed file") + return False + + def seek(self, pos, whence=0): + """Seek to specified position into the chunk. + Default position is 0 (start of chunk). 
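# --- Editor's note: illustrative usage sketch, not part of chunk.py --------
# Walking the chunks of an AIFF file with the Chunk class above; the file
# name is hypothetical.
def _chunk_usage_example(path="example.aiff"):
    from chunk import Chunk
    with open(path, "rb") as f:
        f.read(12)                                   # skip the 12-byte FORM/AIFF container header
        while True:
            try:
                ck = Chunk(f, bigendian=True)        # AIFF chunk headers are big-endian
            except EOFError:
                break
            print(ck.getname(), ck.getsize())
            ck.skip()                                # jump to the start of the next chunk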
+ If the file is not seekable, this will result in an error. + """ + + if self.closed: + raise ValueError("I/O operation on closed file") + if not self.seekable: + raise OSError("cannot seek") + if whence == 1: + pos = pos + self.size_read + elif whence == 2: + pos = pos + self.chunksize + if pos < 0 or pos > self.chunksize: + raise RuntimeError + self.file.seek(self.offset + pos, 0) + self.size_read = pos + + def tell(self): + if self.closed: + raise ValueError("I/O operation on closed file") + return self.size_read + + def read(self, size=-1): + """Read at most size bytes from the chunk. + If size is omitted or negative, read until the end + of the chunk. + """ + + if self.closed: + raise ValueError("I/O operation on closed file") + if self.size_read >= self.chunksize: + return b'' + if size < 0: + size = self.chunksize - self.size_read + if size > self.chunksize - self.size_read: + size = self.chunksize - self.size_read + data = self.file.read(size) + self.size_read = self.size_read + len(data) + if self.size_read == self.chunksize and \ + self.align and \ + (self.chunksize & 1): + dummy = self.file.read(1) + self.size_read = self.size_read + len(dummy) + return data + + def skip(self): + """Skip the rest of the chunk. + If you are not interested in the contents of the chunk, + this method should be called so that the file points to + the start of the next chunk. + """ + + if self.closed: + raise ValueError("I/O operation on closed file") + if self.seekable: + try: + n = self.chunksize - self.size_read + # maybe fix alignment + if self.align and (self.chunksize & 1): + n = n + 1 + self.file.seek(n, 1) + self.size_read = self.size_read + n + return + except OSError: + pass + while self.size_read < self.chunksize: + n = min(8192, self.chunksize - self.size_read) + dummy = self.read(n) + if not dummy: + raise EOFError diff --git a/evalkit_cambrian/lib/python3.10/code.py b/evalkit_cambrian/lib/python3.10/code.py new file mode 100644 index 0000000000000000000000000000000000000000..76000f8c8b2c1e1c98f8fb4c831c2ea3e2de268d --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/code.py @@ -0,0 +1,315 @@ +"""Utilities needed to emulate Python's interactive interpreter. + +""" + +# Inspired by similar code by Jeff Epler and Fredrik Lundh. + + +import sys +import traceback +from codeop import CommandCompiler, compile_command + +__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact", + "compile_command"] + +class InteractiveInterpreter: + """Base class for InteractiveConsole. + + This class deals with parsing and interpreter state (the user's + namespace); it doesn't deal with input buffering or prompting or + input file naming (the filename is always passed in explicitly). + + """ + + def __init__(self, locals=None): + """Constructor. + + The optional 'locals' argument specifies the dictionary in + which code will be executed; it defaults to a newly created + dictionary with key "__name__" set to "__console__" and key + "__doc__" set to None. + + """ + if locals is None: + locals = {"__name__": "__console__", "__doc__": None} + self.locals = locals + self.compile = CommandCompiler() + + def runsource(self, source, filename="", symbol="single"): + """Compile and run some source in the interpreter. + + Arguments are as for compile_command(). + + One of several things can happen: + + 1) The input is incorrect; compile_command() raised an + exception (SyntaxError or OverflowError). A syntax traceback + will be printed by calling the showsyntaxerror() method. 
+ + 2) The input is incomplete, and more input is required; + compile_command() returned None. Nothing happens. + + 3) The input is complete; compile_command() returned a code + object. The code is executed by calling self.runcode() (which + also handles run-time exceptions, except for SystemExit). + + The return value is True in case 2, False in the other cases (unless + an exception is raised). The return value can be used to + decide whether to use sys.ps1 or sys.ps2 to prompt the next + line. + + """ + try: + code = self.compile(source, filename, symbol) + except (OverflowError, SyntaxError, ValueError): + # Case 1 + self.showsyntaxerror(filename) + return False + + if code is None: + # Case 2 + return True + + # Case 3 + self.runcode(code) + return False + + def runcode(self, code): + """Execute a code object. + + When an exception occurs, self.showtraceback() is called to + display a traceback. All exceptions are caught except + SystemExit, which is reraised. + + A note about KeyboardInterrupt: this exception may occur + elsewhere in this code, and may not always be caught. The + caller should be prepared to deal with it. + + """ + try: + exec(code, self.locals) + except SystemExit: + raise + except: + self.showtraceback() + + def showsyntaxerror(self, filename=None): + """Display the syntax error that just occurred. + + This doesn't display a stack trace because there isn't one. + + If a filename is given, it is stuffed in the exception instead + of what was there before (because Python's parser always uses + "" when reading from a string). + + The output is written by self.write(), below. + + """ + type, value, tb = sys.exc_info() + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + if filename and type is SyntaxError: + # Work hard to stuff the correct filename in the exception + try: + msg, (dummy_filename, lineno, offset, line) = value.args + except ValueError: + # Not the format we expect; leave it alone + pass + else: + # Stuff in the right filename + value = SyntaxError(msg, (filename, lineno, offset, line)) + sys.last_value = value + if sys.excepthook is sys.__excepthook__: + lines = traceback.format_exception_only(type, value) + self.write(''.join(lines)) + else: + # If someone has set sys.excepthook, we let that take precedence + # over self.write + sys.excepthook(type, value, tb) + + def showtraceback(self): + """Display the exception that just occurred. + + We remove the first stack item because it is our own code. + + The output is written by self.write(), below. + + """ + sys.last_type, sys.last_value, last_tb = ei = sys.exc_info() + sys.last_traceback = last_tb + try: + lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next) + if sys.excepthook is sys.__excepthook__: + self.write(''.join(lines)) + else: + # If someone has set sys.excepthook, we let that take precedence + # over self.write + sys.excepthook(ei[0], ei[1], last_tb) + finally: + last_tb = ei = None + + def write(self, data): + """Write a string. + + The base implementation writes to sys.stderr; a subclass may + replace this with a different implementation. + + """ + sys.stderr.write(data) + + +class InteractiveConsole(InteractiveInterpreter): + """Closely emulate the behavior of the interactive Python interpreter. + + This class builds on InteractiveInterpreter and adds prompting + using the familiar sys.ps1 and sys.ps2, and input buffering. + + """ + + def __init__(self, locals=None, filename=""): + """Constructor. 
+ + The optional locals argument will be passed to the + InteractiveInterpreter base class. + + The optional filename argument should specify the (file)name + of the input stream; it will show up in tracebacks. + + """ + InteractiveInterpreter.__init__(self, locals) + self.filename = filename + self.resetbuffer() + + def resetbuffer(self): + """Reset the input buffer.""" + self.buffer = [] + + def interact(self, banner=None, exitmsg=None): + """Closely emulate the interactive Python console. + + The optional banner argument specifies the banner to print + before the first interaction; by default it prints a banner + similar to the one printed by the real Python interpreter, + followed by the current class name in parentheses (so as not + to confuse this with the real interpreter -- since it's so + close!). + + The optional exitmsg argument specifies the exit message + printed when exiting. Pass the empty string to suppress + printing an exit message. If exitmsg is not given or None, + a default message is printed. + + """ + try: + sys.ps1 + except AttributeError: + sys.ps1 = ">>> " + try: + sys.ps2 + except AttributeError: + sys.ps2 = "... " + cprt = 'Type "help", "copyright", "credits" or "license" for more information.' + if banner is None: + self.write("Python %s on %s\n%s\n(%s)\n" % + (sys.version, sys.platform, cprt, + self.__class__.__name__)) + elif banner: + self.write("%s\n" % str(banner)) + more = 0 + while 1: + try: + if more: + prompt = sys.ps2 + else: + prompt = sys.ps1 + try: + line = self.raw_input(prompt) + except EOFError: + self.write("\n") + break + else: + more = self.push(line) + except KeyboardInterrupt: + self.write("\nKeyboardInterrupt\n") + self.resetbuffer() + more = 0 + if exitmsg is None: + self.write('now exiting %s...\n' % self.__class__.__name__) + elif exitmsg != '': + self.write('%s\n' % exitmsg) + + def push(self, line): + """Push a line to the interpreter. + + The line should not have a trailing newline; it may have + internal newlines. The line is appended to a buffer and the + interpreter's runsource() method is called with the + concatenated contents of the buffer as source. If this + indicates that the command was executed or invalid, the buffer + is reset; otherwise, the command is incomplete, and the buffer + is left as it was after the line was appended. The return + value is 1 if more input is required, 0 if the line was dealt + with in some way (this is the same as runsource()). + + """ + self.buffer.append(line) + source = "\n".join(self.buffer) + more = self.runsource(source, self.filename) + if not more: + self.resetbuffer() + return more + + def raw_input(self, prompt=""): + """Write a prompt and read a line. + + The returned line does not include the trailing newline. + When the user enters the EOF key sequence, EOFError is raised. + + The base implementation uses the built-in function + input(); a subclass may replace this with a different + implementation. + + """ + return input(prompt) + + + +def interact(banner=None, readfunc=None, local=None, exitmsg=None): + """Closely emulate the interactive Python interpreter. + + This is a backwards compatible interface to the InteractiveConsole + class. When readfunc is not specified, it attempts to import the + readline module to enable GNU readline if it is available. 
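# --- Editor's note: illustrative usage sketch, not part of code.py ---------
# Embedding an interactive console in a running program with interact():
def _interact_usage_example():
    import code
    namespace = {"answer": 42}                       # names exposed to the console user
    code.interact(banner="debug console (Ctrl-D to exit)",
                  local=namespace,
                  exitmsg="leaving debug console")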
+ + Arguments (all optional, all default to None): + + banner -- passed to InteractiveConsole.interact() + readfunc -- if not None, replaces InteractiveConsole.raw_input() + local -- passed to InteractiveInterpreter.__init__() + exitmsg -- passed to InteractiveConsole.interact() + + """ + console = InteractiveConsole(local) + if readfunc is not None: + console.raw_input = readfunc + else: + try: + import readline + except ImportError: + pass + console.interact(banner, exitmsg) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('-q', action='store_true', + help="don't print version and copyright messages") + args = parser.parse_args() + if args.q or sys.flags.quiet: + banner = '' + else: + banner = None + interact(banner) diff --git a/evalkit_cambrian/lib/python3.10/codecs.py b/evalkit_cambrian/lib/python3.10/codecs.py new file mode 100644 index 0000000000000000000000000000000000000000..3b173b612101e7c635303a2129e3fe363791e2eb --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/codecs.py @@ -0,0 +1,1127 @@ +""" codecs -- Python Codec Registry, API and helpers. + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" + +import builtins +import sys + +### Registry and builtin stateless codec functions + +try: + from _codecs import * +except ImportError as why: + raise SystemError('Failed to load the builtin codecs: %s' % why) + +__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", + "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", + "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", + "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", + "CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder", + "StreamReader", "StreamWriter", + "StreamReaderWriter", "StreamRecoder", + "getencoder", "getdecoder", "getincrementalencoder", + "getincrementaldecoder", "getreader", "getwriter", + "encode", "decode", "iterencode", "iterdecode", + "strict_errors", "ignore_errors", "replace_errors", + "xmlcharrefreplace_errors", + "backslashreplace_errors", "namereplace_errors", + "register_error", "lookup_error"] + +### Constants + +# +# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) +# and its possible byte string values +# for UTF8/UTF16/UTF32 output and little/big endian machines +# + +# UTF-8 +BOM_UTF8 = b'\xef\xbb\xbf' + +# UTF-16, little endian +BOM_LE = BOM_UTF16_LE = b'\xff\xfe' + +# UTF-16, big endian +BOM_BE = BOM_UTF16_BE = b'\xfe\xff' + +# UTF-32, little endian +BOM_UTF32_LE = b'\xff\xfe\x00\x00' + +# UTF-32, big endian +BOM_UTF32_BE = b'\x00\x00\xfe\xff' + +if sys.byteorder == 'little': + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_LE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_LE + +else: + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_BE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_BE + +# Old broken names (don't use in new code) +BOM32_LE = BOM_UTF16_LE +BOM32_BE = BOM_UTF16_BE +BOM64_LE = BOM_UTF32_LE +BOM64_BE = BOM_UTF32_BE + + +### Codec base classes (defining the API) + +class CodecInfo(tuple): + """Codec details when looking up the codec registry""" + + # Private API to allow Python 3.4 to denylist the known non-Unicode + # codecs in the standard library. 
A more general mechanism to + # reliably distinguish test encodings from other codecs will hopefully + # be defined for Python 3.5 + # + # See http://bugs.python.org/issue19619 + _is_text_encoding = True # Assume codecs are text encodings by default + + def __new__(cls, encode, decode, streamreader=None, streamwriter=None, + incrementalencoder=None, incrementaldecoder=None, name=None, + *, _is_text_encoding=None): + self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) + self.name = name + self.encode = encode + self.decode = decode + self.incrementalencoder = incrementalencoder + self.incrementaldecoder = incrementaldecoder + self.streamwriter = streamwriter + self.streamreader = streamreader + if _is_text_encoding is not None: + self._is_text_encoding = _is_text_encoding + return self + + def __repr__(self): + return "<%s.%s object for encoding %s at %#x>" % \ + (self.__class__.__module__, self.__class__.__qualname__, + self.name, id(self)) + +class Codec: + + """ Defines the interface for stateless encoders/decoders. + + The .encode()/.decode() methods may use different error + handling schemes by providing the errors argument. These + string values are predefined: + + 'strict' - raise a ValueError error (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace' - replace with a suitable replacement character; + Python will use the official U+FFFD REPLACEMENT + CHARACTER for the builtin Unicode codecs on + decoding and '?' on encoding. + 'surrogateescape' - replace with private code points U+DCnn. + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference (only for encoding). + 'backslashreplace' - Replace with backslashed escape sequences. + 'namereplace' - Replace with \\N{...} escape sequences + (only for encoding). + + The set of allowed values can be extended via register_error. + + """ + def encode(self, input, errors='strict'): + + """ Encodes the object input and returns a tuple (output + object, length consumed). + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamWriter for codecs which have to keep state in order to + make encoding efficient. + + The encoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + + def decode(self, input, errors='strict'): + + """ Decodes the object input and returns a tuple (output + object, length consumed). + + input must be an object which provides the bf_getreadbuf + buffer slot. Python strings, buffer objects and memory + mapped files are examples of objects providing this slot. + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamReader for codecs which have to keep state in order to + make decoding efficient. + + The decoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + +class IncrementalEncoder(object): + """ + An IncrementalEncoder encodes an input in multiple steps. The input can + be passed piece by piece to the encode() method. The IncrementalEncoder + remembers the state of the encoding process between calls to encode(). + """ + def __init__(self, errors='strict'): + """ + Creates an IncrementalEncoder instance. 
+ + The IncrementalEncoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + self.buffer = "" + + def encode(self, input, final=False): + """ + Encodes input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Resets the encoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the encoder. + """ + return 0 + + def setstate(self, state): + """ + Set the current state of the encoder. state must have been + returned by getstate(). + """ + +class BufferedIncrementalEncoder(IncrementalEncoder): + """ + This subclass of IncrementalEncoder can be used as the baseclass for an + incremental encoder if the encoder must keep some of the output in a + buffer between calls to encode(). + """ + def __init__(self, errors='strict'): + IncrementalEncoder.__init__(self, errors) + # unencoded input that is kept between calls to encode() + self.buffer = "" + + def _buffer_encode(self, input, errors, final): + # Overwrite this method in subclasses: It must encode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def encode(self, input, final=False): + # encode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_encode(data, self.errors, final) + # keep unencoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalEncoder.reset(self) + self.buffer = "" + + def getstate(self): + return self.buffer or 0 + + def setstate(self, state): + self.buffer = state or "" + +class IncrementalDecoder(object): + """ + An IncrementalDecoder decodes an input in multiple steps. The input can + be passed piece by piece to the decode() method. The IncrementalDecoder + remembers the state of the decoding process between calls to decode(). + """ + def __init__(self, errors='strict'): + """ + Create an IncrementalDecoder instance. + + The IncrementalDecoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + + def decode(self, input, final=False): + """ + Decode input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Reset the decoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the decoder. + + This must be a (buffered_input, additional_state_info) tuple. + buffered_input must be a bytes object containing bytes that + were passed to decode() that have not yet been converted. + additional_state_info must be a non-negative integer + representing the state of the decoder WITHOUT yet having + processed the contents of buffered_input. In the initial state + and after reset(), getstate() must return (b"", 0). + """ + return (b"", 0) + + def setstate(self, state): + """ + Set the current state of the decoder. + + state must have been returned by getstate(). The effect of + setstate((b"", 0)) must be equivalent to reset(). + """ + +class BufferedIncrementalDecoder(IncrementalDecoder): + """ + This subclass of IncrementalDecoder can be used as the baseclass for an + incremental decoder if the decoder must be able to handle incomplete + byte sequences. 
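    For example, an incremental UTF-8 decoder can absorb a multi-byte
    character that arrives split across chunks (an illustrative sketch,
    not part of the class API itself):

        import codecs

        dec = codecs.getincrementaldecoder("utf-8")()
        parts = [b"caf", b"\xc3", b"\xa9"]      # "café" split mid-character
        text = "".join(dec.decode(p) for p in parts)
        text += dec.decode(b"", final=True)
        # text == "café"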
+ """ + def __init__(self, errors='strict'): + IncrementalDecoder.__init__(self, errors) + # undecoded input that is kept between calls to decode() + self.buffer = b"" + + def _buffer_decode(self, input, errors, final): + # Overwrite this method in subclasses: It must decode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def decode(self, input, final=False): + # decode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_decode(data, self.errors, final) + # keep undecoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalDecoder.reset(self) + self.buffer = b"" + + def getstate(self): + # additional state info is always 0 + return (self.buffer, 0) + + def setstate(self, state): + # ignore additional state info + self.buffer = state[0] + +# +# The StreamWriter and StreamReader class provide generic working +# interfaces which can be used to implement new encoding submodules +# very easily. See encodings/utf_8.py for an example on how this is +# done. +# + +class StreamWriter(Codec): + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamWriter instance. + + stream must be a file-like object open for writing. + + The StreamWriter may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference. + 'backslashreplace' - Replace with backslashed escape + sequences. + 'namereplace' - Replace with \\N{...} escape sequences. + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + + def write(self, object): + + """ Writes the object's contents encoded to self.stream. + """ + data, consumed = self.encode(object, self.errors) + self.stream.write(data) + + def writelines(self, list): + + """ Writes the concatenated list of strings to the stream + using .write(). + """ + self.write(''.join(list)) + + def reset(self): + + """ Resets the codec buffers used for keeping internal state. + + Calling this method should ensure that the data on the + output is put into a clean state, that allows appending + of new fresh data without having to rescan the whole + stream to recover state. + + """ + pass + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + if whence == 0 and offset == 0: + self.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReader(Codec): + + charbuffertype = str + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamReader instance. + + stream must be a file-like object open for reading. + + The StreamReader may use different error handling + schemes by providing the errors keyword argument. 
These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'backslashreplace' - Replace with backslashed escape sequences; + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + self.bytebuffer = b"" + self._empty_charbuffer = self.charbuffertype() + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def decode(self, input, errors='strict'): + raise NotImplementedError + + def read(self, size=-1, chars=-1, firstline=False): + + """ Decodes data from the stream self.stream and returns the + resulting object. + + chars indicates the number of decoded code points or bytes to + return. read() will never return more data than requested, + but it might return less, if there is not enough available. + + size indicates the approximate maximum number of decoded + bytes or code points to read for decoding. The decoder + can modify this setting as appropriate. The default value + -1 indicates to read and decode as much as possible. size + is intended to prevent having to decode huge files in one + step. + + If firstline is true, and a UnicodeDecodeError happens + after the first line terminator in the input only the first line + will be returned, the rest of the input will be kept until the + next call to read(). + + The method should use a greedy read strategy, meaning that + it should read as much data as is allowed within the + definition of the encoding and the given size, e.g. if + optional encoding endings or state markers are available + on the stream, these should be read too. + """ + # If we have lines cached, first merge them back into characters + if self.linebuffer: + self.charbuffer = self._empty_charbuffer.join(self.linebuffer) + self.linebuffer = None + + if chars < 0: + # For compatibility with other read() methods that take a + # single argument + chars = size + + # read until we get the required number of characters (if available) + while True: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: + break + # we need more data + if size < 0: + newdata = self.stream.read() + else: + newdata = self.stream.read(size) + # decode bytes (those remaining from the last call included) + data = self.bytebuffer + newdata + if not data: + break + try: + newchars, decodedbytes = self.decode(data, self.errors) + except UnicodeDecodeError as exc: + if firstline: + newchars, decodedbytes = \ + self.decode(data[:exc.start], self.errors) + lines = newchars.splitlines(keepends=True) + if len(lines)<=1: + raise + else: + raise + # keep undecoded bytes until the next call + self.bytebuffer = data[decodedbytes:] + # put new characters in the character buffer + self.charbuffer += newchars + # there was no data available + if not newdata: + break + if chars < 0: + # Return everything we've got + result = self.charbuffer + self.charbuffer = self._empty_charbuffer + else: + # Return the first chars characters + result = self.charbuffer[:chars] + self.charbuffer = self.charbuffer[chars:] + return result + + def readline(self, size=None, keepends=True): + + """ Read one line from the input stream and return the + decoded data. + + size, if given, is passed as size argument to the + read() method. 
+ + """ + # If we have lines cached from an earlier read, return + # them unconditionally + if self.linebuffer: + line = self.linebuffer[0] + del self.linebuffer[0] + if len(self.linebuffer) == 1: + # revert to charbuffer mode; we might need more data + # next time + self.charbuffer = self.linebuffer[0] + self.linebuffer = None + if not keepends: + line = line.splitlines(keepends=False)[0] + return line + + readsize = size or 72 + line = self._empty_charbuffer + # If size is given, we call read() only once + while True: + data = self.read(readsize, firstline=True) + if data: + # If we're at a "\r" read one extra character (which might + # be a "\n") to get a proper line ending. If the stream is + # temporarily exhausted we return the wrong line ending. + if (isinstance(data, str) and data.endswith("\r")) or \ + (isinstance(data, bytes) and data.endswith(b"\r")): + data += self.read(size=1, chars=1) + + line += data + lines = line.splitlines(keepends=True) + if lines: + if len(lines) > 1: + # More than one line result; the first line is a full line + # to return + line = lines[0] + del lines[0] + if len(lines) > 1: + # cache the remaining lines + lines[-1] += self.charbuffer + self.linebuffer = lines + self.charbuffer = None + else: + # only one remaining line, put it back into charbuffer + self.charbuffer = lines[0] + self.charbuffer + if not keepends: + line = line.splitlines(keepends=False)[0] + break + line0withend = lines[0] + line0withoutend = lines[0].splitlines(keepends=False)[0] + if line0withend != line0withoutend: # We really have a line end + # Put the rest back together and keep it until the next call + self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \ + self.charbuffer + if keepends: + line = line0withend + else: + line = line0withoutend + break + # we didn't get anything or this was our only try + if not data or size is not None: + if line and not keepends: + line = line.splitlines(keepends=False)[0] + break + if readsize < 8000: + readsize *= 2 + return line + + def readlines(self, sizehint=None, keepends=True): + + """ Read all lines available on the input stream + and return them as a list. + + Line breaks are implemented using the codec's decoder + method and are included in the list entries. + + sizehint, if given, is ignored since there is no efficient + way to finding the true end-of-line. + + """ + data = self.read() + return data.splitlines(keepends) + + def reset(self): + + """ Resets the codec buffers used for keeping internal state. + + Note that no stream repositioning should take place. + This method is primarily intended to be able to recover + from decoding errors. + + """ + self.bytebuffer = b"" + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def seek(self, offset, whence=0): + """ Set the input stream's current position. + + Resets the codec buffers used for keeping state. + """ + self.stream.seek(offset, whence) + self.reset() + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + line = self.readline() + if line: + return line + raise StopIteration + + def __iter__(self): + return self + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReaderWriter: + + """ StreamReaderWriter instances allow wrapping streams which + work in both read and write modes. 
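    The open() helper defined later in this module returns such an object
    when an encoding is given; a rough usage sketch (the file name is
    arbitrary):

        import codecs

        with codecs.open("notes.txt", "w", encoding="utf-8") as f:
            f.write("héllo")
        with codecs.open("notes.txt", "r", encoding="utf-8") as f:
            assert f.read() == "héllo"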
+ + The design is such that one can use the factory functions + returned by the codec.lookup() function to construct the + instance. + + """ + # Optional attributes set by the file wrappers below + encoding = 'unknown' + + def __init__(self, stream, Reader, Writer, errors='strict'): + + """ Creates a StreamReaderWriter instance. + + stream must be a Stream-like object. + + Reader, Writer must be factory functions or classes + providing the StreamReader, StreamWriter interface resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. + + """ + self.stream = stream + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + return self.reader.read(size) + + def readline(self, size=None): + + return self.reader.readline(size) + + def readlines(self, sizehint=None): + + return self.reader.readlines(sizehint) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + return next(self.reader) + + def __iter__(self): + return self + + def write(self, data): + + return self.writer.write(data) + + def writelines(self, list): + + return self.writer.writelines(list) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + self.reader.reset() + if whence == 0 and offset == 0: + self.writer.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + # these are needed to make "with StreamReaderWriter(...)" work properly + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamRecoder: + + """ StreamRecoder instances translate data from one encoding to another. + + They use the complete set of APIs returned by the + codecs.lookup() function to implement their task. + + Data written to the StreamRecoder is first decoded into an + intermediate format (depending on the "decode" codec) and then + written to the underlying stream using an instance of the provided + Writer class. + + In the other direction, data is read from the underlying stream using + a Reader instance and then encoded and returned to the caller. + + """ + # Optional attributes set by the file wrappers below + data_encoding = 'unknown' + file_encoding = 'unknown' + + def __init__(self, stream, encode, decode, Reader, Writer, + errors='strict'): + + """ Creates a StreamRecoder instance which implements a two-way + conversion: encode and decode work on the frontend (the + data visible to .read() and .write()) while Reader and Writer + work on the backend (the data in stream). + + You can use these objects to do transparent + transcodings from e.g. latin-1 to utf-8 and back. + + stream must be a file-like object. + + encode and decode must adhere to the Codec interface; Reader and + Writer must be factory functions or classes providing the + StreamReader and StreamWriter interfaces resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. 
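        A rough sketch using the EncodedFile() helper defined later in this
        module, which builds a StreamRecoder (the buffer and text are
        arbitrary):

            import codecs, io

            raw = io.BytesIO()
            f = codecs.EncodedFile(raw, data_encoding="utf-8",
                                   file_encoding="latin-1")
            f.write("héllo".encode("utf-8"))   # caller-side data is UTF-8
            # raw.getvalue() == "héllo".encode("latin-1")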
+ + """ + self.stream = stream + self.encode = encode + self.decode = decode + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + data = self.reader.read(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readline(self, size=None): + + if size is None: + data = self.reader.readline() + else: + data = self.reader.readline(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readlines(self, sizehint=None): + + data = self.reader.read() + data, bytesencoded = self.encode(data, self.errors) + return data.splitlines(keepends=True) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + data = next(self.reader) + data, bytesencoded = self.encode(data, self.errors) + return data + + def __iter__(self): + return self + + def write(self, data): + + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def writelines(self, list): + + data = b''.join(list) + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def seek(self, offset, whence=0): + # Seeks must be propagated to both the readers and writers + # as they might need to reset their internal buffers. + self.reader.seek(offset, whence) + self.writer.seek(offset, whence) + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### Shortcuts + +def open(filename, mode='r', encoding=None, errors='strict', buffering=-1): + + """ Open an encoded file using the given mode and return + a wrapped version providing transparent encoding/decoding. + + Note: The wrapped version will only accept the object format + defined by the codecs, i.e. Unicode objects for most builtin + codecs. Output is also codec dependent and will usually be + Unicode as well. + + If encoding is not None, then the + underlying encoded files are always opened in binary mode. + The default file mode is 'r', meaning to open the file in read mode. + + encoding specifies the encoding which is to be used for the + file. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + buffering has the same meaning as for the builtin open() API. + It defaults to -1 which means that the default buffer size will + be used. + + The returned wrapped file object provides an extra attribute + .encoding which allows querying the used encoding. This + attribute is only available if an encoding was specified as + parameter. + + """ + if encoding is not None and \ + 'b' not in mode: + # Force opening of the file in binary mode + mode = mode + 'b' + file = builtins.open(filename, mode, buffering) + if encoding is None: + return file + + try: + info = lookup(encoding) + srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) + # Add attributes to simplify introspection + srw.encoding = encoding + return srw + except: + file.close() + raise + +def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): + + """ Return a wrapped version of file which provides transparent + encoding translation. 
+ + Data written to the wrapped file is decoded according + to the given data_encoding and then encoded to the underlying + file using file_encoding. The intermediate data type + will usually be Unicode but depends on the specified codecs. + + Bytes read from the file are decoded using file_encoding and then + passed back to the caller encoded using data_encoding. + + If file_encoding is not given, it defaults to data_encoding. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + The returned wrapped file object provides two extra attributes + .data_encoding and .file_encoding which reflect the given + parameters of the same name. The attributes can be used for + introspection by Python programs. + + """ + if file_encoding is None: + file_encoding = data_encoding + data_info = lookup(data_encoding) + file_info = lookup(file_encoding) + sr = StreamRecoder(file, data_info.encode, data_info.decode, + file_info.streamreader, file_info.streamwriter, errors) + # Add attributes to simplify introspection + sr.data_encoding = data_encoding + sr.file_encoding = file_encoding + return sr + +### Helpers for codec lookup + +def getencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its encoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).encode + +def getdecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its decoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).decode + +def getincrementalencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalEncoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental encoder. + + """ + encoder = lookup(encoding).incrementalencoder + if encoder is None: + raise LookupError(encoding) + return encoder + +def getincrementaldecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalDecoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental decoder. + + """ + decoder = lookup(encoding).incrementaldecoder + if decoder is None: + raise LookupError(encoding) + return decoder + +def getreader(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamReader class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamreader + +def getwriter(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamWriter class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamwriter + +def iterencode(iterator, encoding, errors='strict', **kwargs): + """ + Encoding iterator. + + Encodes the input strings from the iterator using an IncrementalEncoder. + + errors and kwargs are passed through to the IncrementalEncoder + constructor. + """ + encoder = getincrementalencoder(encoding)(errors, **kwargs) + for input in iterator: + output = encoder.encode(input) + if output: + yield output + output = encoder.encode("", True) + if output: + yield output + +def iterdecode(iterator, encoding, errors='strict', **kwargs): + """ + Decoding iterator. 
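    For example (an illustrative sketch):

        import codecs

        chunks = [b"sp", b"\xc3\xa4m"]          # bytes arriving in pieces
        assert "".join(codecs.iterdecode(chunks, "utf-8")) == "späm"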
+ + Decodes the input strings from the iterator using an IncrementalDecoder. + + errors and kwargs are passed through to the IncrementalDecoder + constructor. + """ + decoder = getincrementaldecoder(encoding)(errors, **kwargs) + for input in iterator: + output = decoder.decode(input) + if output: + yield output + output = decoder.decode(b"", True) + if output: + yield output + +### Helpers for charmap-based codecs + +def make_identity_dict(rng): + + """ make_identity_dict(rng) -> dict + + Return a dictionary where elements of the rng sequence are + mapped to themselves. + + """ + return {i:i for i in rng} + +def make_encoding_map(decoding_map): + + """ Creates an encoding map from a decoding map. + + If a target mapping in the decoding map occurs multiple + times, then that target is mapped to None (undefined mapping), + causing an exception when encountered by the charmap codec + during translation. + + One example where this happens is cp875.py which decodes + multiple character to \\u001a. + + """ + m = {} + for k,v in decoding_map.items(): + if not v in m: + m[v] = k + else: + m[v] = None + return m + +### error handlers + +try: + strict_errors = lookup_error("strict") + ignore_errors = lookup_error("ignore") + replace_errors = lookup_error("replace") + xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") + backslashreplace_errors = lookup_error("backslashreplace") + namereplace_errors = lookup_error("namereplace") +except LookupError: + # In --disable-unicode builds, these error handler are missing + strict_errors = None + ignore_errors = None + replace_errors = None + xmlcharrefreplace_errors = None + backslashreplace_errors = None + namereplace_errors = None + +# Tell modulefinder that using codecs probably needs the encodings +# package +_false = 0 +if _false: + import encodings + +### Tests + +if __name__ == '__main__': + + # Make stdout translate Latin-1 output into UTF-8 output + sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') + + # Have stdin translate Latin-1 input into UTF-8 input + sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1') diff --git a/evalkit_cambrian/lib/python3.10/codeop.py b/evalkit_cambrian/lib/python3.10/codeop.py new file mode 100644 index 0000000000000000000000000000000000000000..fe8713ecf46bfa4a60bed8e110e4c0ab3b8c8d73 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/codeop.py @@ -0,0 +1,153 @@ +r"""Utilities to compile possibly incomplete Python source code. + +This module provides two interfaces, broadly similar to the builtin +function compile(), which take program text, a filename and a 'mode' +and: + +- Return code object if the command is complete and valid +- Return None if the command is incomplete +- Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + +The two interfaces are: + +compile_command(source, filename, symbol): + + Compiles a single command in the manner described above. + +CommandCompiler(): + + Instances of this class have __call__ methods identical in + signature to compile_command; the difference is that if the + instance compiles program text containing a __future__ statement, + the instance 'remembers' and compiles all subsequent program texts + with the statement in force. + +The module also provides another class: + +Compile(): + + Instances of this class act like the built-in function compile, + but with 'memory' in the sense described above. 
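For example, an incomplete statement yields None, while a complete one
yields a code object (an illustrative sketch):

    from codeop import compile_command

    compile_command("if 1:")        # -> None, more input is needed
    compile_command("x = 1")        # -> a code object, ready to exec()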
+""" + +import __future__ +import warnings + +_features = [getattr(__future__, fname) + for fname in __future__.all_feature_names] + +__all__ = ["compile_command", "Compile", "CommandCompiler"] + +# The following flags match the values from Include/cpython/compile.h +# Caveat emptor: These flags are undocumented on purpose and depending +# on their effect outside the standard library is **unsupported**. +PyCF_DONT_IMPLY_DEDENT = 0x200 +PyCF_ALLOW_INCOMPLETE_INPUT = 0x4000 + +def _maybe_compile(compiler, source, filename, symbol): + # Check for source consisting of only blank lines and comments. + for line in source.split("\n"): + line = line.strip() + if line and line[0] != '#': + break # Leave it alone. + else: + if symbol != "eval": + source = "pass" # Replace it with a 'pass' statement + + # Disable compiler warnings when checking for incomplete input. + with warnings.catch_warnings(): + warnings.simplefilter("ignore", (SyntaxWarning, DeprecationWarning)) + try: + compiler(source, filename, symbol) + except SyntaxError: # Let other compile() errors propagate. + try: + compiler(source + "\n", filename, symbol) + return None + except SyntaxError as e: + if "incomplete input" in str(e): + return None + # fallthrough + + return compiler(source, filename, symbol) + + +def _is_syntax_error(err1, err2): + rep1 = repr(err1) + rep2 = repr(err2) + if "was never closed" in rep1 and "was never closed" in rep2: + return False + if rep1 == rep2: + return True + return False + +def _compile(source, filename, symbol): + return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT) + +def compile_command(source, filename="", symbol="single"): + r"""Compile a command and determine whether it is incomplete. + + Arguments: + + source -- the source string; may contain \n characters + filename -- optional filename from which source was read; default + "" + symbol -- optional grammar start symbol; "single" (default), "exec" + or "eval" + + Return value / exceptions raised: + + - Return a code object if the command is complete and valid + - Return None if the command is incomplete + - Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + """ + return _maybe_compile(_compile, source, filename, symbol) + +class Compile: + """Instances of this class behave much like the built-in compile + function, but if one is used to compile text containing a future + statement, it "remembers" and compiles all subsequent program texts + with the statement in force.""" + def __init__(self): + self.flags = PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT + + def __call__(self, source, filename, symbol): + codeob = compile(source, filename, symbol, self.flags, True) + for feature in _features: + if codeob.co_flags & feature.compiler_flag: + self.flags |= feature.compiler_flag + return codeob + +class CommandCompiler: + """Instances of this class have __call__ methods identical in + signature to compile_command; the difference is that if the + instance compiles program text containing a __future__ statement, + the instance 'remembers' and compiles all subsequent program texts + with the statement in force.""" + + def __init__(self,): + self.compiler = Compile() + + def __call__(self, source, filename="", symbol="single"): + r"""Compile a command and determine whether it is incomplete. 
+ + Arguments: + + source -- the source string; may contain \n characters + filename -- optional filename from which source was read; + default "" + symbol -- optional grammar start symbol; "single" (default) or + "eval" + + Return value / exceptions raised: + + - Return a code object if the command is complete and valid + - Return None if the command is incomplete + - Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + """ + return _maybe_compile(self.compiler, source, filename, symbol) diff --git a/evalkit_cambrian/lib/python3.10/contextvars.py b/evalkit_cambrian/lib/python3.10/contextvars.py new file mode 100644 index 0000000000000000000000000000000000000000..d78c80dfe6f99cefe1a875b16a9e7296546636fe --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/contextvars.py @@ -0,0 +1,4 @@ +from _contextvars import Context, ContextVar, Token, copy_context + + +__all__ = ('Context', 'ContextVar', 'Token', 'copy_context') diff --git a/evalkit_cambrian/lib/python3.10/copyreg.py b/evalkit_cambrian/lib/python3.10/copyreg.py new file mode 100644 index 0000000000000000000000000000000000000000..356db6f083e39cc671278aaae97cd5d2c2274483 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/copyreg.py @@ -0,0 +1,219 @@ +"""Helper to provide extensibility for pickle. + +This is only useful to add pickle support for extension types defined in +C, not for instances of user-defined classes. +""" + +__all__ = ["pickle", "constructor", + "add_extension", "remove_extension", "clear_extension_cache"] + +dispatch_table = {} + +def pickle(ob_type, pickle_function, constructor_ob=None): + if not callable(pickle_function): + raise TypeError("reduction functions must be callable") + dispatch_table[ob_type] = pickle_function + + # The constructor_ob function is a vestige of safe for unpickling. + # There is no reason for the caller to pass it anymore. + if constructor_ob is not None: + constructor(constructor_ob) + +def constructor(object): + if not callable(object): + raise TypeError("constructors must be callable") + +# Example: provide pickling support for complex numbers. 
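# An illustrative sketch of the same API as seen from user code (the
# Point2D class and helper below are hypothetical, not part of this module):
#
#     import copyreg, pickle
#
#     class Point2D:
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#     def _pickle_point(p):
#         return Point2D, (p.x, p.y)          # (callable, args) reduction
#
#     copyreg.pickle(Point2D, _pickle_point)
#     data = pickle.dumps(Point2D(1, 2))
#
# The built-in complex example registered below works the same way.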
+ +try: + complex +except NameError: + pass +else: + + def pickle_complex(c): + return complex, (c.real, c.imag) + + pickle(complex, pickle_complex, complex) + +def pickle_union(obj): + import functools, operator + return functools.reduce, (operator.or_, obj.__args__) + +pickle(type(int | str), pickle_union) + +# Support for pickling new-style objects + +def _reconstructor(cls, base, state): + if base is object: + obj = object.__new__(cls) + else: + obj = base.__new__(cls, state) + if base.__init__ != object.__init__: + base.__init__(obj, state) + return obj + +_HEAPTYPE = 1<<9 +_new_type = type(int.__new__) + +# Python code for object.__reduce_ex__ for protocols 0 and 1 + +def _reduce_ex(self, proto): + assert proto < 2 + cls = self.__class__ + for base in cls.__mro__: + if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE: + break + new = base.__new__ + if isinstance(new, _new_type) and new.__self__ is base: + break + else: + base = object # not really reachable + if base is object: + state = None + else: + if base is cls: + raise TypeError(f"cannot pickle {cls.__name__!r} object") + state = base(self) + args = (cls, base, state) + try: + getstate = self.__getstate__ + except AttributeError: + if getattr(self, "__slots__", None): + raise TypeError(f"cannot pickle {cls.__name__!r} object: " + f"a class that defines __slots__ without " + f"defining __getstate__ cannot be pickled " + f"with protocol {proto}") from None + try: + dict = self.__dict__ + except AttributeError: + dict = None + else: + dict = getstate() + if dict: + return _reconstructor, args, dict + else: + return _reconstructor, args + +# Helper for __reduce_ex__ protocol 2 + +def __newobj__(cls, *args): + return cls.__new__(cls, *args) + +def __newobj_ex__(cls, args, kwargs): + """Used by pickle protocol 4, instead of __newobj__ to allow classes with + keyword-only arguments to be pickled correctly. + """ + return cls.__new__(cls, *args, **kwargs) + +def _slotnames(cls): + """Return a list of slot names for a given class. + + This needs to find slots defined by the class and its bases, so we + can't simply return the __slots__ attribute. We must walk down + the Method Resolution Order and concatenate the __slots__ of each + class found there. (This assumes classes don't modify their + __slots__ attribute to misrepresent their slots after the class is + defined.) + """ + + # Get the value from a cache in the class if possible + names = cls.__dict__.get("__slotnames__") + if names is not None: + return names + + # Not cached -- calculate the value + names = [] + if not hasattr(cls, "__slots__"): + # This class has no slots + pass + else: + # Slots found -- gather slot names from all base classes + for c in cls.__mro__: + if "__slots__" in c.__dict__: + slots = c.__dict__['__slots__'] + # if class has a single slot, it can be given as a string + if isinstance(slots, str): + slots = (slots,) + for name in slots: + # special descriptors + if name in ("__dict__", "__weakref__"): + continue + # mangled names + elif name.startswith('__') and not name.endswith('__'): + stripped = c.__name__.lstrip('_') + if stripped: + names.append('_%s%s' % (stripped, name)) + else: + names.append(name) + else: + names.append(name) + + # Cache the outcome in the class if at all possible + try: + cls.__slotnames__ = names + except: + pass # But don't die if we can't + + return names + +# A registry of extension codes. This is an ad-hoc compression +# mechanism. 
Whenever a global reference to , is about +# to be pickled, the (, ) tuple is looked up here to see +# if it is a registered extension code for it. Extension codes are +# universal, so that the meaning of a pickle does not depend on +# context. (There are also some codes reserved for local use that +# don't have this restriction.) Codes are positive ints; 0 is +# reserved. + +_extension_registry = {} # key -> code +_inverted_registry = {} # code -> key +_extension_cache = {} # code -> object +# Don't ever rebind those names: pickling grabs a reference to them when +# it's initialized, and won't see a rebinding. + +def add_extension(module, name, code): + """Register an extension code.""" + code = int(code) + if not 1 <= code <= 0x7fffffff: + raise ValueError("code out of range") + key = (module, name) + if (_extension_registry.get(key) == code and + _inverted_registry.get(code) == key): + return # Redundant registrations are benign + if key in _extension_registry: + raise ValueError("key %s is already registered with code %s" % + (key, _extension_registry[key])) + if code in _inverted_registry: + raise ValueError("code %s is already in use for key %s" % + (code, _inverted_registry[code])) + _extension_registry[key] = code + _inverted_registry[code] = key + +def remove_extension(module, name, code): + """Unregister an extension code. For testing only.""" + key = (module, name) + if (_extension_registry.get(key) != code or + _inverted_registry.get(code) != key): + raise ValueError("key %s is not registered with code %s" % + (key, code)) + del _extension_registry[key] + del _inverted_registry[code] + if code in _extension_cache: + del _extension_cache[code] + +def clear_extension_cache(): + _extension_cache.clear() + +# Standard extension code assignments + +# Reserved ranges + +# First Last Count Purpose +# 1 127 127 Reserved for Python standard library +# 128 191 64 Reserved for Zope +# 192 239 48 Reserved for 3rd parties +# 240 255 16 Reserved for private use (will never be assigned) +# 256 Inf Inf Reserved for future assignment + +# Extension codes are assigned by the Python Software Foundation. diff --git a/evalkit_cambrian/lib/python3.10/decimal.py b/evalkit_cambrian/lib/python3.10/decimal.py new file mode 100644 index 0000000000000000000000000000000000000000..7746ea2601024ceb7e750472753fcfc80fa2bd01 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/decimal.py @@ -0,0 +1,11 @@ + +try: + from _decimal import * + from _decimal import __doc__ + from _decimal import __version__ + from _decimal import __libmpdec_version__ +except ImportError: + from _pydecimal import * + from _pydecimal import __doc__ + from _pydecimal import __version__ + from _pydecimal import __libmpdec_version__ diff --git a/evalkit_cambrian/lib/python3.10/doctest.py b/evalkit_cambrian/lib/python3.10/doctest.py new file mode 100644 index 0000000000000000000000000000000000000000..37b31cfbb2c97339b490450c0d1867bea5a706f8 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/doctest.py @@ -0,0 +1,2810 @@ +# Module doctest. +# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). +# Major enhancements and refactoring by: +# Jim Fulton +# Edward Loper + +# Provided as-is; use at your own risk; no warranty; no promises; enjoy! + +r"""Module doctest -- a framework for running examples in docstrings. 
+ +In simplest use, end each module M to be tested with: + +def _test(): + import doctest + doctest.testmod() + +if __name__ == "__main__": + _test() + +Then running the module as a script will cause the examples in the +docstrings to get executed and verified: + +python M.py + +This won't display anything unless an example fails, in which case the +failing example(s) and the cause(s) of the failure(s) are printed to stdout +(why not stderr? because stderr is a lame hack <0.2 wink>), and the final +line of output is "Test failed.". + +Run it with the -v switch instead: + +python M.py -v + +and a detailed report of all examples tried is printed to stdout, along +with assorted summaries at the end. + +You can force verbose mode by passing "verbose=True" to testmod, or prohibit +it by passing "verbose=False". In either of those cases, sys.argv is not +examined by testmod. + +There are a variety of other ways to run doctests, including integration +with the unittest framework, and support for running non-Python text +files containing doctests. There are also many ways to override parts +of doctest's default behaviors. See the Library Reference Manual for +details. +""" + +__docformat__ = 'reStructuredText en' + +__all__ = [ + # 0, Option Flags + 'register_optionflag', + 'DONT_ACCEPT_TRUE_FOR_1', + 'DONT_ACCEPT_BLANKLINE', + 'NORMALIZE_WHITESPACE', + 'ELLIPSIS', + 'SKIP', + 'IGNORE_EXCEPTION_DETAIL', + 'COMPARISON_FLAGS', + 'REPORT_UDIFF', + 'REPORT_CDIFF', + 'REPORT_NDIFF', + 'REPORT_ONLY_FIRST_FAILURE', + 'REPORTING_FLAGS', + 'FAIL_FAST', + # 1. Utility Functions + # 2. Example & DocTest + 'Example', + 'DocTest', + # 3. Doctest Parser + 'DocTestParser', + # 4. Doctest Finder + 'DocTestFinder', + # 5. Doctest Runner + 'DocTestRunner', + 'OutputChecker', + 'DocTestFailure', + 'UnexpectedException', + 'DebugRunner', + # 6. Test Functions + 'testmod', + 'testfile', + 'run_docstring_examples', + # 7. Unittest Support + 'DocTestSuite', + 'DocFileSuite', + 'set_unittest_reportflags', + # 8. Debugging Support + 'script_from_examples', + 'testsource', + 'debug_src', + 'debug', +] + +import __future__ +import difflib +import inspect +import linecache +import os +import pdb +import re +import sys +import traceback +import unittest +from io import StringIO, IncrementalNewlineDecoder +from collections import namedtuple + +TestResults = namedtuple('TestResults', 'failed attempted') + +# There are 4 basic classes: +# - Example: a pair, plus an intra-docstring line number. +# - DocTest: a collection of examples, parsed from a docstring, plus +# info about where the docstring came from (name, filename, lineno). +# - DocTestFinder: extracts DocTests from a given object's docstring and +# its contained objects' docstrings. +# - DocTestRunner: runs DocTest cases, and accumulates statistics. +# +# So the basic picture is: +# +# list of: +# +------+ +---------+ +-------+ +# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| +# +------+ +---------+ +-------+ +# | Example | +# | ... | +# | Example | +# +---------+ + +# Option constants. + +OPTIONFLAGS_BY_NAME = {} +def register_optionflag(name): + # Create a new flag unless `name` is already known. 
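    # An illustrative aside: the flags created below are typically OR-ed
    # together and passed to the test runners, e.g.
    #
    #     import doctest
    #     doctest.testmod(optionflags=doctest.ELLIPSIS |
    #                                 doctest.NORMALIZE_WHITESPACE)
    #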
+ return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME)) + +DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') +DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') +NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') +ELLIPSIS = register_optionflag('ELLIPSIS') +SKIP = register_optionflag('SKIP') +IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') + +COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | + DONT_ACCEPT_BLANKLINE | + NORMALIZE_WHITESPACE | + ELLIPSIS | + SKIP | + IGNORE_EXCEPTION_DETAIL) + +REPORT_UDIFF = register_optionflag('REPORT_UDIFF') +REPORT_CDIFF = register_optionflag('REPORT_CDIFF') +REPORT_NDIFF = register_optionflag('REPORT_NDIFF') +REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') +FAIL_FAST = register_optionflag('FAIL_FAST') + +REPORTING_FLAGS = (REPORT_UDIFF | + REPORT_CDIFF | + REPORT_NDIFF | + REPORT_ONLY_FIRST_FAILURE | + FAIL_FAST) + +# Special string markers for use in `want` strings: +BLANKLINE_MARKER = '' +ELLIPSIS_MARKER = '...' + +###################################################################### +## Table of Contents +###################################################################### +# 1. Utility Functions +# 2. Example & DocTest -- store test cases +# 3. DocTest Parser -- extracts examples from strings +# 4. DocTest Finder -- extracts test cases from objects +# 5. DocTest Runner -- runs test cases +# 6. Test Functions -- convenient wrappers for testing +# 7. Unittest Support +# 8. Debugging Support +# 9. Example Usage + +###################################################################### +## 1. Utility Functions +###################################################################### + +def _extract_future_flags(globs): + """ + Return the compiler-flags associated with the future features that + have been imported into the given namespace (globs). + """ + flags = 0 + for fname in __future__.all_feature_names: + feature = globs.get(fname, None) + if feature is getattr(__future__, fname): + flags |= feature.compiler_flag + return flags + +def _normalize_module(module, depth=2): + """ + Return the module specified by `module`. In particular: + - If `module` is a module, then return module. + - If `module` is a string, then import and return the + module with that name. + - If `module` is None, then return the calling module. + The calling module is assumed to be the module of + the stack frame at the given depth in the call stack. 
+ """ + if inspect.ismodule(module): + return module + elif isinstance(module, str): + return __import__(module, globals(), locals(), ["*"]) + elif module is None: + return sys.modules[sys._getframe(depth).f_globals['__name__']] + else: + raise TypeError("Expected a module, string, or None") + +def _newline_convert(data): + # The IO module provides a handy decoder for universal newline conversion + return IncrementalNewlineDecoder(None, True).decode(data, True) + +def _load_testfile(filename, package, module_relative, encoding): + if module_relative: + package = _normalize_module(package, 3) + filename = _module_relative_path(package, filename) + if (loader := getattr(package, '__loader__', None)) is None: + try: + loader = package.__spec__.loader + except AttributeError: + pass + if hasattr(loader, 'get_data'): + file_contents = loader.get_data(filename) + file_contents = file_contents.decode(encoding) + # get_data() opens files as 'rb', so one must do the equivalent + # conversion as universal newlines would do. + return _newline_convert(file_contents), filename + with open(filename, encoding=encoding) as f: + return f.read(), filename + +def _indent(s, indent=4): + """ + Add the given number of space characters to the beginning of + every non-blank line in `s`, and return the result. + """ + # This regexp matches the start of non-blank lines: + return re.sub('(?m)^(?!$)', indent*' ', s) + +def _exception_traceback(exc_info): + """ + Return a string containing a traceback message for the given + exc_info tuple (as returned by sys.exc_info()). + """ + # Get a traceback message. + excout = StringIO() + exc_type, exc_val, exc_tb = exc_info + traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) + return excout.getvalue() + +# Override some StringIO methods. +class _SpoofOut(StringIO): + def getvalue(self): + result = StringIO.getvalue(self) + # If anything at all was written, make sure there's a trailing + # newline. There's no way for the expected output to indicate + # that a trailing newline is missing. + if result and not result.endswith("\n"): + result += "\n" + return result + + def truncate(self, size=None): + self.seek(size) + StringIO.truncate(self) + +# Worst-case linear-time ellipsis matching. +def _ellipsis_match(want, got): + """ + Essentially the only subtle case: + >>> _ellipsis_match('aa...aa', 'aaa') + False + """ + if ELLIPSIS_MARKER not in want: + return want == got + + # Find "the real" strings. + ws = want.split(ELLIPSIS_MARKER) + assert len(ws) >= 2 + + # Deal with exact matches possibly needed at one or both ends. + startpos, endpos = 0, len(got) + w = ws[0] + if w: # starts with exact match + if got.startswith(w): + startpos = len(w) + del ws[0] + else: + return False + w = ws[-1] + if w: # ends with exact match + if got.endswith(w): + endpos -= len(w) + del ws[-1] + else: + return False + + if startpos > endpos: + # Exact end matches required more characters than we have, as in + # _ellipsis_match('aa...aa', 'aaa') + return False + + # For the rest, we only need to find the leftmost non-overlapping + # match for each piece. If there's no overall match that way alone, + # there's no overall match period. + for w in ws: + # w may be '' at times, if there are consecutive ellipses, or + # due to an ellipsis at the start or end of `want`. That's OK. + # Search for an empty string succeeds, and doesn't change startpos. 
+ startpos = got.find(w, startpos, endpos) + if startpos < 0: + return False + startpos += len(w) + + return True + +def _comment_line(line): + "Return a commented form of the given line" + line = line.rstrip() + if line: + return '# '+line + else: + return '#' + +def _strip_exception_details(msg): + # Support for IGNORE_EXCEPTION_DETAIL. + # Get rid of everything except the exception name; in particular, drop + # the possibly dotted module path (if any) and the exception message (if + # any). We assume that a colon is never part of a dotted name, or of an + # exception name. + # E.g., given + # "foo.bar.MyError: la di da" + # return "MyError" + # Or for "abc.def" or "abc.def:\n" return "def". + + start, end = 0, len(msg) + # The exception name must appear on the first line. + i = msg.find("\n") + if i >= 0: + end = i + # retain up to the first colon (if any) + i = msg.find(':', 0, end) + if i >= 0: + end = i + # retain just the exception name + i = msg.rfind('.', 0, end) + if i >= 0: + start = i+1 + return msg[start: end] + +class _OutputRedirectingPdb(pdb.Pdb): + """ + A specialized version of the python debugger that redirects stdout + to a given stream when interacting with the user. Stdout is *not* + redirected when traced code is executed. + """ + def __init__(self, out): + self.__out = out + self.__debugger_used = False + # do not play signal games in the pdb + pdb.Pdb.__init__(self, stdout=out, nosigint=True) + # still use input() to get user input + self.use_rawinput = 1 + + def set_trace(self, frame=None): + self.__debugger_used = True + if frame is None: + frame = sys._getframe().f_back + pdb.Pdb.set_trace(self, frame) + + def set_continue(self): + # Calling set_continue unconditionally would break unit test + # coverage reporting, as Bdb.set_continue calls sys.settrace(None). + if self.__debugger_used: + pdb.Pdb.set_continue(self) + + def trace_dispatch(self, *args): + # Redirect stdout to the given stream. + save_stdout = sys.stdout + sys.stdout = self.__out + # Call Pdb's trace dispatch method. + try: + return pdb.Pdb.trace_dispatch(self, *args) + finally: + sys.stdout = save_stdout + +# [XX] Normalize with respect to os.path.pardir? +def _module_relative_path(module, test_path): + if not inspect.ismodule(module): + raise TypeError('Expected a module: %r' % module) + if test_path.startswith('/'): + raise ValueError('Module-relative files may not have absolute paths') + + # Normalize the path. On Windows, replace "/" with "\". + test_path = os.path.join(*(test_path.split('/'))) + + # Find the base directory for the path. + if hasattr(module, '__file__'): + # A normal module/package + basedir = os.path.split(module.__file__)[0] + elif module.__name__ == '__main__': + # An interactive session. + if len(sys.argv)>0 and sys.argv[0] != '': + basedir = os.path.split(sys.argv[0])[0] + else: + basedir = os.curdir + else: + if hasattr(module, '__path__'): + for directory in module.__path__: + fullpath = os.path.join(directory, test_path) + if os.path.exists(fullpath): + return fullpath + + # A module w/o __file__ (this includes builtins) + raise ValueError("Can't resolve paths relative to the module " + "%r (it has no __file__)" + % module.__name__) + + # Combine the base directory and the test path. + return os.path.join(basedir, test_path) + +###################################################################### +## 2. 
Example & DocTest +###################################################################### +## - An "example" is a pair, where "source" is a +## fragment of source code, and "want" is the expected output for +## "source." The Example class also includes information about +## where the example was extracted from. +## +## - A "doctest" is a collection of examples, typically extracted from +## a string (such as an object's docstring). The DocTest class also +## includes information about where the string was extracted from. + +class Example: + """ + A single doctest example, consisting of source code and expected + output. `Example` defines the following attributes: + + - source: A single Python statement, always ending with a newline. + The constructor adds a newline if needed. + + - want: The expected output from running the source code (either + from stdout, or a traceback in case of exception). `want` ends + with a newline unless it's empty, in which case it's an empty + string. The constructor adds a newline if needed. + + - exc_msg: The exception message generated by the example, if + the example is expected to generate an exception; or `None` if + it is not expected to generate an exception. This exception + message is compared against the return value of + `traceback.format_exception_only()`. `exc_msg` ends with a + newline unless it's `None`. The constructor adds a newline + if needed. + + - lineno: The line number within the DocTest string containing + this Example where the Example begins. This line number is + zero-based, with respect to the beginning of the DocTest. + + - indent: The example's indentation in the DocTest string. + I.e., the number of space characters that precede the + example's first prompt. + + - options: A dictionary mapping from option flags to True or + False, which is used to override default options for this + example. Any option flags not contained in this dictionary + are left at their default value (as specified by the + DocTestRunner's optionflags). By default, no options are set. + """ + def __init__(self, source, want, exc_msg=None, lineno=0, indent=0, + options=None): + # Normalize inputs. + if not source.endswith('\n'): + source += '\n' + if want and not want.endswith('\n'): + want += '\n' + if exc_msg is not None and not exc_msg.endswith('\n'): + exc_msg += '\n' + # Store properties. + self.source = source + self.want = want + self.lineno = lineno + self.indent = indent + if options is None: options = {} + self.options = options + self.exc_msg = exc_msg + + def __eq__(self, other): + if type(self) is not type(other): + return NotImplemented + + return self.source == other.source and \ + self.want == other.want and \ + self.lineno == other.lineno and \ + self.indent == other.indent and \ + self.options == other.options and \ + self.exc_msg == other.exc_msg + + def __hash__(self): + return hash((self.source, self.want, self.lineno, self.indent, + self.exc_msg)) + +class DocTest: + """ + A collection of doctest examples that should be run in a single + namespace. Each `DocTest` defines the following attributes: + + - examples: the list of examples. + + - globs: The namespace (aka globals) that the examples should + be run in. + + - name: A name identifying the DocTest (typically, the name of + the object whose docstring this DocTest was extracted from). + + - filename: The name of the file that this DocTest was extracted + from, or `None` if the filename is unknown. 
+
+    - lineno: The line number within filename where this DocTest
+      begins, or `None` if the line number is unavailable.  This
+      line number is zero-based, with respect to the beginning of
+      the file.
+
+    - docstring: The string that the examples were extracted from,
+      or `None` if the string is unavailable.
+    """
+    def __init__(self, examples, globs, name, filename, lineno, docstring):
+        """
+        Create a new DocTest containing the given examples.  The
+        DocTest's globals are initialized with a copy of `globs`.
+        """
+        assert not isinstance(examples, str), \
+               "DocTest no longer accepts str; use DocTestParser instead"
+        self.examples = examples
+        self.docstring = docstring
+        self.globs = globs.copy()
+        self.name = name
+        self.filename = filename
+        self.lineno = lineno
+
+    def __repr__(self):
+        if len(self.examples) == 0:
+            examples = 'no examples'
+        elif len(self.examples) == 1:
+            examples = '1 example'
+        else:
+            examples = '%d examples' % len(self.examples)
+        return ('<%s %s from %s:%s (%s)>' %
+                (self.__class__.__name__,
+                 self.name, self.filename, self.lineno, examples))
+
+    def __eq__(self, other):
+        if type(self) is not type(other):
+            return NotImplemented
+
+        return self.examples == other.examples and \
+               self.docstring == other.docstring and \
+               self.globs == other.globs and \
+               self.name == other.name and \
+               self.filename == other.filename and \
+               self.lineno == other.lineno
+
+    def __hash__(self):
+        return hash((self.docstring, self.name, self.filename, self.lineno))
+
+    # This lets us sort tests by name:
+    def __lt__(self, other):
+        if not isinstance(other, DocTest):
+            return NotImplemented
+        return ((self.name, self.filename, self.lineno, id(self))
+                <
+                (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+    """
+    A class used to parse strings containing doctest examples.
+    """
+    # This regular expression is used to find doctest examples in a
+    # string.  It defines three groups: `source` is the source code
+    # (including leading indentation and prompts); `indent` is the
+    # indentation of the first (PS1) line of the source code; and
+    # `want` is the expected output (including leading indentation).
+    _EXAMPLE_RE = re.compile(r'''
+        # Source consists of a PS1 line followed by zero or more PS2 lines.
+        (?P<source>
+            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
+            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
+        \n?
+        # Want consists of any non-blank lines that do not start with PS1.
+        (?P<want> (?:(?![ ]*$)    # Not a blank line
+                     (?![ ]*>>>)  # Not a line starting with PS1
+                     .+$\n?       # But any other line
+                  )*)
+        ''', re.MULTILINE | re.VERBOSE)
+
+    # A regular expression for handling `want` strings that contain
+    # expected exceptions.  It divides `want` into three pieces:
+    #    - the traceback header line (`hdr`)
+    #    - the traceback stack (`stack`)
+    #    - the exception message (`msg`), as generated by
+    #      traceback.format_exception_only()
+    # `msg` may have multiple lines.  We assume/require that the
+    # exception message is the first non-indented line starting with a word
+    # character following the traceback header line.
+    _EXCEPTION_RE = re.compile(r"""
+        # Grab the traceback header.  Different versions of Python have
+        # said different things on the first traceback line.
+        ^(?P<hdr> Traceback\ \(
+            (?: most\ recent\ call\ last
+            |   innermost\ last
+            ) \) :
+        )
+        \s* $                # toss trailing whitespace on the header.
+        (?P<stack> .*?)      # don't blink: absorb stuff until...
+        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
+        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+    # A callable returning a true value iff its argument is a blank line
+    # or contains a single comment.
+    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+    def parse(self, string, name='<string>'):
+        """
+        Divide the given string into examples and intervening text,
+        and return them as a list of alternating Examples and strings.
+        Line numbers for the Examples are 0-based.  The optional
+        argument `name` is a name identifying this string, and is only
+        used for error messages.
+        """
+        string = string.expandtabs()
+        # If all lines begin with the same indentation, then strip it.
+        min_indent = self._min_indent(string)
+        if min_indent > 0:
+            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+        output = []
+        charno, lineno = 0, 0
+        # Find all doctest examples in the string:
+        for m in self._EXAMPLE_RE.finditer(string):
+            # Add the pre-example text to `output`.
+            output.append(string[charno:m.start()])
+            # Update lineno (lines before this example)
+            lineno += string.count('\n', charno, m.start())
+            # Extract info from the regexp match.
+            (source, options, want, exc_msg) = \
+                     self._parse_example(m, name, lineno)
+            # Create an Example, and add it to the list.
+            if not self._IS_BLANK_OR_COMMENT(source):
+                output.append( Example(source, want, exc_msg,
+                                       lineno=lineno,
+                                       indent=min_indent+len(m.group('indent')),
+                                       options=options) )
+            # Update lineno (lines inside this example)
+            lineno += string.count('\n', m.start(), m.end())
+            # Update charno.
+            charno = m.end()
+        # Add any remaining post-example text to `output`.
+        output.append(string[charno:])
+        return output
+
+    def get_doctest(self, string, globs, name, filename, lineno):
+        """
+        Extract all doctest examples from the given string, and
+        collect them into a `DocTest` object.
+
+        `globs`, `name`, `filename`, and `lineno` are attributes for
+        the new `DocTest` object.  See the documentation for `DocTest`
+        for more information.
+        """
+        return DocTest(self.get_examples(string, name), globs,
+                       name, filename, lineno, string)
+
+    def get_examples(self, string, name='<string>'):
+        """
+        Extract all doctest examples from the given string, and return
+        them as a list of `Example` objects.  Line numbers are
+        0-based, because it's most common in doctests that nothing
+        interesting appears on the same line as opening triple-quote,
+        and so the first interesting line is called \"line 1\" then.
+
+        The optional argument `name` is a name identifying this
+        string, and is only used for error messages.
+        """
+        return [x for x in self.parse(string, name)
+                if isinstance(x, Example)]
+
+    def _parse_example(self, m, name, lineno):
+        """
+        Given a regular expression match from `_EXAMPLE_RE` (`m`),
+        return a pair `(source, want)`, where `source` is the matched
+        example's source code (with prompts and indentation stripped);
+        and `want` is the example's expected output (with indentation
+        stripped).
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        # Get the example's indentation level.
+        indent = len(m.group('indent'))
+
+        # Divide source into lines; check that they're properly
+        # indented; and then strip their indentation & prompts.
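+        # For example, at indent 0 the matched source lines
+        #     [">>> if x:", "...     print(x)"]
+        # become ["if x:", "    print(x)"] once the four-character
+        # prompts (">>> " and "... ") are sliced off below.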
+ source_lines = m.group('source').split('\n') + self._check_prompt_blank(source_lines, indent, name, lineno) + self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) + source = '\n'.join([sl[indent+4:] for sl in source_lines]) + + # Divide want into lines; check that it's properly indented; and + # then strip the indentation. Spaces before the last newline should + # be preserved, so plain rstrip() isn't good enough. + want = m.group('want') + want_lines = want.split('\n') + if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): + del want_lines[-1] # forget final newline & spaces after it + self._check_prefix(want_lines, ' '*indent, name, + lineno + len(source_lines)) + want = '\n'.join([wl[indent:] for wl in want_lines]) + + # If `want` contains a traceback message, then extract it. + m = self._EXCEPTION_RE.match(want) + if m: + exc_msg = m.group('msg') + else: + exc_msg = None + + # Extract options from the source. + options = self._find_options(source, name, lineno) + + return source, options, want, exc_msg + + # This regular expression looks for option directives in the + # source code of an example. Option directives are comments + # starting with "doctest:". Warning: this may give false + # positives for string-literals that contain the string + # "#doctest:". Eliminating these false positives would require + # actually parsing the string; but we limit them by ignoring any + # line containing "#doctest:" that is *followed* by a quote mark. + _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$', + re.MULTILINE) + + def _find_options(self, source, name, lineno): + """ + Return a dictionary containing option overrides extracted from + option directives in the given source string. + + `name` is the string's name, and `lineno` is the line number + where the example starts; both are used for error messages. + """ + options = {} + # (note: with the current regexp, this will match at most once:) + for m in self._OPTION_DIRECTIVE_RE.finditer(source): + option_strings = m.group(1).replace(',', ' ').split() + for option in option_strings: + if (option[0] not in '+-' or + option[1:] not in OPTIONFLAGS_BY_NAME): + raise ValueError('line %r of the doctest for %s ' + 'has an invalid option: %r' % + (lineno+1, name, option)) + flag = OPTIONFLAGS_BY_NAME[option[1:]] + options[flag] = (option[0] == '+') + if options and self._IS_BLANK_OR_COMMENT(source): + raise ValueError('line %r of the doctest for %s has an option ' + 'directive on a line with no example: %r' % + (lineno, name, source)) + return options + + # This regular expression finds the indentation of every non-blank + # line in a string. + _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE) + + def _min_indent(self, s): + "Return the minimum indentation of any non-blank line in `s`" + indents = [len(indent) for indent in self._INDENT_RE.findall(s)] + if len(indents) > 0: + return min(indents) + else: + return 0 + + def _check_prompt_blank(self, lines, indent, name, lineno): + """ + Given the lines of a source string (including prompts and + leading indentation), check to make sure that every prompt is + followed by a space character. If any line is not followed by + a space character, then raise ValueError. 
+ """ + for i, line in enumerate(lines): + if len(line) >= indent+4 and line[indent+3] != ' ': + raise ValueError('line %r of the docstring for %s ' + 'lacks blank after %s: %r' % + (lineno+i+1, name, + line[indent:indent+3], line)) + + def _check_prefix(self, lines, prefix, name, lineno): + """ + Check that every line in the given list starts with the given + prefix; if any line does not, then raise a ValueError. + """ + for i, line in enumerate(lines): + if line and not line.startswith(prefix): + raise ValueError('line %r of the docstring for %s has ' + 'inconsistent leading whitespace: %r' % + (lineno+i+1, name, line)) + + +###################################################################### +## 4. DocTest Finder +###################################################################### + +class DocTestFinder: + """ + A class used to extract the DocTests that are relevant to a given + object, from its docstring and the docstrings of its contained + objects. Doctests can currently be extracted from the following + object types: modules, functions, classes, methods, staticmethods, + classmethods, and properties. + """ + + def __init__(self, verbose=False, parser=DocTestParser(), + recurse=True, exclude_empty=True): + """ + Create a new doctest finder. + + The optional argument `parser` specifies a class or + function that should be used to create new DocTest objects (or + objects that implement the same interface as DocTest). The + signature for this factory function should match the signature + of the DocTest constructor. + + If the optional argument `recurse` is false, then `find` will + only examine the given object, and not any contained objects. + + If the optional argument `exclude_empty` is false, then `find` + will include tests for objects with empty docstrings. + """ + self._parser = parser + self._verbose = verbose + self._recurse = recurse + self._exclude_empty = exclude_empty + + def find(self, obj, name=None, module=None, globs=None, extraglobs=None): + """ + Return a list of the DocTests that are defined by the given + object's docstring, or by any of its contained objects' + docstrings. + + The optional parameter `module` is the module that contains + the given object. If the module is not specified or is None, then + the test finder will attempt to automatically determine the + correct module. The object's module is used: + + - As a default namespace, if `globs` is not specified. + - To prevent the DocTestFinder from extracting DocTests + from objects that are imported from other modules. + - To find the name of the file containing the object. + - To help find the line number of the object within its + file. + + Contained objects whose module does not match `module` are ignored. + + If `module` is False, no attempt to find the module will be made. + This is obscure, of use mostly in tests: if `module` is False, or + is None but cannot be found automatically, then all objects are + considered to belong to the (non-existent) module, so all contained + objects will (recursively) be searched for doctests. + + The globals for each DocTest is formed by combining `globs` + and `extraglobs` (bindings in `extraglobs` override bindings + in `globs`). A new copy of the globals dictionary is created + for each DocTest. If `globs` is not specified, then it + defaults to the module's `__dict__`, if specified, or {} + otherwise. If `extraglobs` is not specified, then it defaults + to {}. + + """ + # If name was not specified, then extract it from the object. 
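+        # e.g. for a hypothetical module ``mymodule``,
+        #     DocTestFinder().find(mymodule)
+        # falls through to mymodule.__name__ below, so its tests end up
+        # named "mymodule", "mymodule.func", "mymodule.Class.method", etc.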
+ if name is None: + name = getattr(obj, '__name__', None) + if name is None: + raise ValueError("DocTestFinder.find: name must be given " + "when obj.__name__ doesn't exist: %r" % + (type(obj),)) + + # Find the module that contains the given object (if obj is + # a module, then module=obj.). Note: this may fail, in which + # case module will be None. + if module is False: + module = None + elif module is None: + module = inspect.getmodule(obj) + + # Read the module's source code. This is used by + # DocTestFinder._find_lineno to find the line number for a + # given object's docstring. + try: + file = inspect.getsourcefile(obj) + except TypeError: + source_lines = None + else: + if not file: + # Check to see if it's one of our special internal "files" + # (see __patched_linecache_getlines). + file = inspect.getfile(obj) + if not file[0]+file[-2:] == '<]>': file = None + if file is None: + source_lines = None + else: + if module is not None: + # Supply the module globals in case the module was + # originally loaded via a PEP 302 loader and + # file is not a valid filesystem path + source_lines = linecache.getlines(file, module.__dict__) + else: + # No access to a loader, so assume it's a normal + # filesystem path + source_lines = linecache.getlines(file) + if not source_lines: + source_lines = None + + # Initialize globals, and merge in extraglobs. + if globs is None: + if module is None: + globs = {} + else: + globs = module.__dict__.copy() + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + if '__name__' not in globs: + globs['__name__'] = '__main__' # provide a default module name + + # Recursively explore `obj`, extracting DocTests. + tests = [] + self._find(tests, obj, name, module, source_lines, globs, {}) + # Sort the tests by alpha order of names, for consistency in + # verbose-mode output. This was a feature of doctest in Pythons + # <= 2.3 that got lost by accident in 2.4. It was repaired in + # 2.4.4 and 2.5. + tests.sort() + return tests + + def _from_module(self, module, object): + """ + Return true if the given object is defined in the given + module. + """ + if module is None: + return True + elif inspect.getmodule(object) is not None: + return module is inspect.getmodule(object) + elif inspect.isfunction(object): + return module.__dict__ is object.__globals__ + elif inspect.ismethoddescriptor(object): + if hasattr(object, '__objclass__'): + obj_mod = object.__objclass__.__module__ + elif hasattr(object, '__module__'): + obj_mod = object.__module__ + else: + return True # [XX] no easy way to tell otherwise + return module.__name__ == obj_mod + elif inspect.isclass(object): + return module.__name__ == object.__module__ + elif hasattr(object, '__module__'): + return module.__name__ == object.__module__ + elif isinstance(object, property): + return True # [XX] no way not be sure. + else: + raise ValueError("object must be a class or function") + + def _is_routine(self, obj): + """ + Safely unwrap objects and determine if they are functions. + """ + maybe_routine = obj + try: + maybe_routine = inspect.unwrap(maybe_routine) + except ValueError: + pass + return inspect.isroutine(maybe_routine) + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to `tests`. + """ + if self._verbose: + print('Finding tests in %s' % name) + + # If we've already processed this object, then ignore it. 
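+        # `seen` maps id(obj) -> 1 for every object already visited, so
+        # cyclic references (e.g. a class attribute that points back at
+        # its own module) are only searched once.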
+ if id(obj) in seen: + return + seen[id(obj)] = 1 + + # Find a test for this object, and add it to the list of tests. + test = self._get_test(obj, name, module, globs, source_lines) + if test is not None: + tests.append(test) + + # Look for tests in a module's contained objects. + if inspect.ismodule(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + valname = '%s.%s' % (name, valname) + + # Recurse to functions & classes. + if ((self._is_routine(val) or inspect.isclass(val)) and + self._from_module(module, val)): + self._find(tests, val, valname, module, source_lines, + globs, seen) + + # Look for tests in a module's __test__ dictionary. + if inspect.ismodule(obj) and self._recurse: + for valname, val in getattr(obj, '__test__', {}).items(): + if not isinstance(valname, str): + raise ValueError("DocTestFinder.find: __test__ keys " + "must be strings: %r" % + (type(valname),)) + if not (inspect.isroutine(val) or inspect.isclass(val) or + inspect.ismodule(val) or isinstance(val, str)): + raise ValueError("DocTestFinder.find: __test__ values " + "must be strings, functions, methods, " + "classes, or modules: %r" % + (type(val),)) + valname = '%s.__test__.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + # Look for tests in a class's contained objects. + if inspect.isclass(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Special handling for staticmethod/classmethod. + if isinstance(val, (staticmethod, classmethod)): + val = val.__func__ + + # Recurse to methods, properties, and nested classes. + if ((inspect.isroutine(val) or inspect.isclass(val) or + isinstance(val, property)) and + self._from_module(module, val)): + valname = '%s.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + def _get_test(self, obj, name, module, globs, source_lines): + """ + Return a DocTest for the given object, if it defines a docstring; + otherwise, return None. + """ + # Extract the object's docstring. If it doesn't have one, + # then return None (no test for this object). + if isinstance(obj, str): + docstring = obj + else: + try: + if obj.__doc__ is None: + docstring = '' + else: + docstring = obj.__doc__ + if not isinstance(docstring, str): + docstring = str(docstring) + except (TypeError, AttributeError): + docstring = '' + + # Find the docstring's location in the file. + lineno = self._find_lineno(obj, source_lines) + + # Don't bother if the docstring is empty. + if self._exclude_empty and not docstring: + return None + + # Return a DocTest for this object. + if module is None: + filename = None + else: + # __file__ can be None for namespace packages. + filename = getattr(module, '__file__', None) or module.__name__ + if filename[-4:] == ".pyc": + filename = filename[:-1] + return self._parser.get_doctest(docstring, globs, name, + filename, lineno) + + def _find_lineno(self, obj, source_lines): + """ + Return a line number of the given object's docstring. + + Returns `None` if the given object does not have a docstring. + """ + lineno = None + docstring = getattr(obj, '__doc__', None) + + # Find the line number for modules. + if inspect.ismodule(obj) and docstring is not None: + lineno = 0 + + # Find the line number for classes. + # Note: this could be fooled if a class is defined multiple + # times in a single file. 
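+        # For a class named Foo, the scan below looks for the first source
+        # line matching r'^\s*class\s*Foo\b' and uses its zero-based index
+        # as the starting point for locating the docstring.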
+ if inspect.isclass(obj) and docstring is not None: + if source_lines is None: + return None + pat = re.compile(r'^\s*class\s*%s\b' % + getattr(obj, '__name__', '-')) + for i, line in enumerate(source_lines): + if pat.match(line): + lineno = i + break + + # Find the line number for functions & methods. + if inspect.ismethod(obj): obj = obj.__func__ + if inspect.isfunction(obj) and getattr(obj, '__doc__', None): + # We don't use `docstring` var here, because `obj` can be changed. + obj = obj.__code__ + if inspect.istraceback(obj): obj = obj.tb_frame + if inspect.isframe(obj): obj = obj.f_code + if inspect.iscode(obj): + lineno = getattr(obj, 'co_firstlineno', None)-1 + + # Find the line number where the docstring starts. Assume + # that it's the first line that begins with a quote mark. + # Note: this could be fooled by a multiline function + # signature, where a continuation line begins with a quote + # mark. + if lineno is not None: + if source_lines is None: + return lineno+1 + pat = re.compile(r'(^|.*:)\s*\w*("|\')') + for lineno in range(lineno, len(source_lines)): + if pat.match(source_lines[lineno]): + return lineno + + # We couldn't find the line number. + return None + +###################################################################### +## 5. DocTest Runner +###################################################################### + +class DocTestRunner: + """ + A class used to run DocTest test cases, and accumulate statistics. + The `run` method is used to process a single DocTest case. It + returns a tuple `(f, t)`, where `t` is the number of test cases + tried, and `f` is the number of test cases that failed. + + >>> tests = DocTestFinder().find(_TestClass) + >>> runner = DocTestRunner(verbose=False) + >>> tests.sort(key = lambda test: test.name) + >>> for test in tests: + ... print(test.name, '->', runner.run(test)) + _TestClass -> TestResults(failed=0, attempted=2) + _TestClass.__init__ -> TestResults(failed=0, attempted=2) + _TestClass.get -> TestResults(failed=0, attempted=2) + _TestClass.square -> TestResults(failed=0, attempted=1) + + The `summarize` method prints a summary of all the test cases that + have been run by the runner, and returns an aggregated `(f, t)` + tuple: + + >>> runner.summarize(verbose=1) + 4 items passed all tests: + 2 tests in _TestClass + 2 tests in _TestClass.__init__ + 2 tests in _TestClass.get + 1 tests in _TestClass.square + 7 tests in 4 items. + 7 passed and 0 failed. + Test passed. + TestResults(failed=0, attempted=7) + + The aggregated number of tried examples and failed examples is + also available via the `tries` and `failures` attributes: + + >>> runner.tries + 7 + >>> runner.failures + 0 + + The comparison between expected outputs and actual outputs is done + by an `OutputChecker`. This comparison may be customized with a + number of option flags; see the documentation for `testmod` for + more information. If the option flags are insufficient, then the + comparison may also be customized by passing a subclass of + `OutputChecker` to the constructor. + + The test runner's display output can be controlled in two ways. + First, an output function (`out) can be passed to + `TestRunner.run`; this function will be called with strings that + should be displayed. It defaults to `sys.stdout.write`. If + capturing the output is not sufficient, then the display output + can be also customized by subclassing DocTestRunner, and + overriding the methods `report_start`, `report_success`, + `report_unexpected_exception`, and `report_failure`. 
+ """ + # This divider string is used to separate failure messages, and to + # separate sections of the summary. + DIVIDER = "*" * 70 + + def __init__(self, checker=None, verbose=None, optionflags=0): + """ + Create a new test runner. + + Optional keyword arg `checker` is the `OutputChecker` that + should be used to compare the expected outputs and actual + outputs of doctest examples. + + Optional keyword arg 'verbose' prints lots of stuff if true, + only failures if false; by default, it's true iff '-v' is in + sys.argv. + + Optional argument `optionflags` can be used to control how the + test runner compares expected output to actual output, and how + it displays failures. See the documentation for `testmod` for + more information. + """ + self._checker = checker or OutputChecker() + if verbose is None: + verbose = '-v' in sys.argv + self._verbose = verbose + self.optionflags = optionflags + self.original_optionflags = optionflags + + # Keep track of the examples we've run. + self.tries = 0 + self.failures = 0 + self._name2ft = {} + + # Create a fake output target for capturing doctest output. + self._fakeout = _SpoofOut() + + #///////////////////////////////////////////////////////////////// + # Reporting methods + #///////////////////////////////////////////////////////////////// + + def report_start(self, out, test, example): + """ + Report that the test runner is about to process the given + example. (Only displays a message if verbose=True) + """ + if self._verbose: + if example.want: + out('Trying:\n' + _indent(example.source) + + 'Expecting:\n' + _indent(example.want)) + else: + out('Trying:\n' + _indent(example.source) + + 'Expecting nothing\n') + + def report_success(self, out, test, example, got): + """ + Report that the given example ran successfully. (Only + displays a message if verbose=True) + """ + if self._verbose: + out("ok\n") + + def report_failure(self, out, test, example, got): + """ + Report that the given example failed. + """ + out(self._failure_header(test, example) + + self._checker.output_difference(example, got, self.optionflags)) + + def report_unexpected_exception(self, out, test, example, exc_info): + """ + Report that the given example raised an unexpected exception. + """ + out(self._failure_header(test, example) + + 'Exception raised:\n' + _indent(_exception_traceback(exc_info))) + + def _failure_header(self, test, example): + out = [self.DIVIDER] + if test.filename: + if test.lineno is not None and example.lineno is not None: + lineno = test.lineno + example.lineno + 1 + else: + lineno = '?' + out.append('File "%s", line %s, in %s' % + (test.filename, lineno, test.name)) + else: + out.append('Line %s, in %s' % (example.lineno+1, test.name)) + out.append('Failed example:') + source = example.source + out.append(_indent(source)) + return '\n'.join(out) + + #///////////////////////////////////////////////////////////////// + # DocTest Running + #///////////////////////////////////////////////////////////////// + + def __run(self, test, compileflags, out): + """ + Run the examples in `test`. Write the outcome of each example + with one of the `DocTestRunner.report_*` methods, using the + writer function `out`. `compileflags` is the set of compiler + flags that should be used to execute examples. Return a tuple + `(f, t)`, where `t` is the number of examples tried, and `f` + is the number of examples that failed. The examples are run + in the namespace `test.globs`. + """ + # Keep track of the number of failures and tries. 
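+        # Every example processed below bumps `tries`; mismatched output
+        # and unexpected exceptions bump `failures`, and the totals are
+        # returned as TestResults(failures, tries).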
+        failures = tries = 0
+
+        # Save the option flags (since option directives can be used
+        # to modify them).
+        original_optionflags = self.optionflags
+
+        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+        check = self._checker.check_output
+
+        # Process each example.
+        for examplenum, example in enumerate(test.examples):
+
+            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+            # reporting after the first failure.
+            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+                     failures > 0)
+
+            # Merge in the example's options.
+            self.optionflags = original_optionflags
+            if example.options:
+                for (optionflag, val) in example.options.items():
+                    if val:
+                        self.optionflags |= optionflag
+                    else:
+                        self.optionflags &= ~optionflag
+
+            # If 'SKIP' is set, then skip this example.
+            if self.optionflags & SKIP:
+                continue
+
+            # Record that we started this example.
+            tries += 1
+            if not quiet:
+                self.report_start(out, test, example)
+
+            # Use a special filename for compile(), so we can retrieve
+            # the source code during interactive debugging (see
+            # __patched_linecache_getlines).
+            filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+            # Run the example in the given context (globs), and record
+            # any exception that gets raised.  (But don't intercept
+            # keyboard interrupts.)
+            try:
+                # Don't blink!  This is where the user's code gets run.
+                exec(compile(example.source, filename, "single",
+                             compileflags, True), test.globs)
+                self.debugger.set_continue() # ==== Example Finished ====
+                exception = None
+            except KeyboardInterrupt:
+                raise
+            except:
+                exception = sys.exc_info()
+                self.debugger.set_continue() # ==== Example Finished ====
+
+            got = self._fakeout.getvalue()  # the actual output
+            self._fakeout.truncate(0)
+            outcome = FAILURE   # guilty until proved innocent or insane
+
+            # If the example executed without raising any exceptions,
+            # verify its output.
+            if exception is None:
+                if check(example.want, got, self.optionflags):
+                    outcome = SUCCESS
+
+            # The example raised an exception:  check if it was expected.
+            else:
+                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
+                if not quiet:
+                    got += _exception_traceback(exception)
+
+                # If `example.exc_msg` is None, then we weren't expecting
+                # an exception.
+                if example.exc_msg is None:
+                    outcome = BOOM
+
+                # We expected an exception:  see whether it matches.
+                elif check(example.exc_msg, exc_msg, self.optionflags):
+                    outcome = SUCCESS
+
+                # Another chance if they didn't care about the detail.
+                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+                    if check(_strip_exception_details(example.exc_msg),
+                             _strip_exception_details(exc_msg),
+                             self.optionflags):
+                        outcome = SUCCESS
+
+            # Report the outcome.
+            if outcome is SUCCESS:
+                if not quiet:
+                    self.report_success(out, test, example, got)
+            elif outcome is FAILURE:
+                if not quiet:
+                    self.report_failure(out, test, example, got)
+                failures += 1
+            elif outcome is BOOM:
+                if not quiet:
+                    self.report_unexpected_exception(out, test, example,
+                                                     exception)
+                failures += 1
+            else:
+                assert False, ("unknown outcome", outcome)
+
+            if failures and self.optionflags & FAIL_FAST:
+                break
+
+        # Restore the option flags (in case they were modified)
+        self.optionflags = original_optionflags
+
+        # Record and return the number of failures and tries.
+        self.__record_outcome(test, failures, tries)
+        return TestResults(failures, tries)
+
+    def __record_outcome(self, test, f, t):
+        """
+        Record the fact that the given DocTest (`test`) generated `f`
+        failures out of `t` tried examples.
+        """
+        f2, t2 = self._name2ft.get(test.name, (0,0))
+        self._name2ft[test.name] = (f+f2, t+t2)
+        self.failures += f
+        self.tries += t
+
+    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+                                         r'(?P<name>.+)'
+                                         r'\[(?P<examplenum>\d+)\]>$')
+    def __patched_linecache_getlines(self, filename, module_globals=None):
+        m = self.__LINECACHE_FILENAME_RE.match(filename)
+        if m and m.group('name') == self.test.name:
+            example = self.test.examples[int(m.group('examplenum'))]
+            return example.source.splitlines(keepends=True)
+        else:
+            return self.save_linecache_getlines(filename, module_globals)
+
+    def run(self, test, compileflags=None, out=None, clear_globs=True):
+        """
+        Run the examples in `test`, and display the results using the
+        writer function `out`.
+
+        The examples are run in the namespace `test.globs`.  If
+        `clear_globs` is true (the default), then this namespace will
+        be cleared after the test runs, to help with garbage
+        collection.  If you would like to examine the namespace after
+        the test completes, then use `clear_globs=False`.
+
+        `compileflags` gives the set of flags that should be used by
+        the Python compiler when running the examples.  If not
+        specified, then it will default to the set of future-import
+        flags that apply to `globs`.
+
+        The output of each example is checked using
+        `DocTestRunner.check_output`, and the results are formatted by
+        the `DocTestRunner.report_*` methods.
+        """
+        self.test = test
+
+        if compileflags is None:
+            compileflags = _extract_future_flags(test.globs)
+
+        save_stdout = sys.stdout
+        if out is None:
+            encoding = save_stdout.encoding
+            if encoding is None or encoding.lower() == 'utf-8':
+                out = save_stdout.write
+            else:
+                # Use backslashreplace error handling on write
+                def out(s):
+                    s = str(s.encode(encoding, 'backslashreplace'), encoding)
+                    save_stdout.write(s)
+        sys.stdout = self._fakeout
+
+        # Patch pdb.set_trace to restore sys.stdout during interactive
+        # debugging (so it's not still redirected to self._fakeout).
+        # Note that the interactive output will go to *our*
+        # save_stdout, even if that's not the real sys.stdout; this
+        # allows us to write test cases for the set_trace behavior.
+        save_trace = sys.gettrace()
+        save_set_trace = pdb.set_trace
+        self.debugger = _OutputRedirectingPdb(save_stdout)
+        self.debugger.reset()
+        pdb.set_trace = self.debugger.set_trace
+
+        # Patch linecache.getlines, so we can see the example's source
+        # when we're inside the debugger.
+        self.save_linecache_getlines = linecache.getlines
+        linecache.getlines = self.__patched_linecache_getlines
+
+        # Make sure sys.displayhook just prints the value to stdout
+        save_displayhook = sys.displayhook
+        sys.displayhook = sys.__displayhook__
+
+        try:
+            return self.__run(test, compileflags, out)
+        finally:
+            sys.stdout = save_stdout
+            pdb.set_trace = save_set_trace
+            sys.settrace(save_trace)
+            linecache.getlines = self.save_linecache_getlines
+            sys.displayhook = save_displayhook
+            if clear_globs:
+                test.globs.clear()
+                import builtins
+                builtins._ = None
+
+    #/////////////////////////////////////////////////////////////////
+    # Summarization
+    #/////////////////////////////////////////////////////////////////
+    def summarize(self, verbose=None):
+        """
+        Print a summary of all the test cases that have been run by
+        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+        the total number of failed examples, and `t` is the total
+        number of tried examples.
+
+        The optional `verbose` argument controls how detailed the
+        summary is.
If the verbosity is not specified, then the + DocTestRunner's verbosity is used. + """ + if verbose is None: + verbose = self._verbose + notests = [] + passed = [] + failed = [] + totalt = totalf = 0 + for x in self._name2ft.items(): + name, (f, t) = x + assert f <= t + totalt += t + totalf += f + if t == 0: + notests.append(name) + elif f == 0: + passed.append( (name, t) ) + else: + failed.append(x) + if verbose: + if notests: + print(len(notests), "items had no tests:") + notests.sort() + for thing in notests: + print(" ", thing) + if passed: + print(len(passed), "items passed all tests:") + passed.sort() + for thing, count in passed: + print(" %3d tests in %s" % (count, thing)) + if failed: + print(self.DIVIDER) + print(len(failed), "items had failures:") + failed.sort() + for thing, (f, t) in failed: + print(" %3d of %3d in %s" % (f, t, thing)) + if verbose: + print(totalt, "tests in", len(self._name2ft), "items.") + print(totalt - totalf, "passed and", totalf, "failed.") + if totalf: + print("***Test Failed***", totalf, "failures.") + elif verbose: + print("Test passed.") + return TestResults(totalf, totalt) + + #///////////////////////////////////////////////////////////////// + # Backward compatibility cruft to maintain doctest.master. + #///////////////////////////////////////////////////////////////// + def merge(self, other): + d = self._name2ft + for name, (f, t) in other._name2ft.items(): + if name in d: + # Don't print here by default, since doing + # so breaks some of the buildbots + #print("*** DocTestRunner.merge: '" + name + "' in both" \ + # " testers; summing outcomes.") + f2, t2 = d[name] + f = f + f2 + t = t + t2 + d[name] = f, t + +class OutputChecker: + """ + A class used to check the whether the actual output from a doctest + example matches the expected output. `OutputChecker` defines two + methods: `check_output`, which compares a given pair of outputs, + and returns true if they match; and `output_difference`, which + returns a string describing the differences between two outputs. + """ + def _toAscii(self, s): + """ + Convert string to hex-escaped ASCII string. + """ + return str(s.encode('ASCII', 'backslashreplace'), "ASCII") + + def check_output(self, want, got, optionflags): + """ + Return True iff the actual output from an example (`got`) + matches the expected output (`want`). These strings are + always considered to match if they are identical; but + depending on what option flags the test runner is using, + several non-exact match types are also possible. See the + documentation for `TestRunner` for more information about + option flags. + """ + + # If `want` contains hex-escaped character such as "\u1234", + # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]). + # On the other hand, `got` could be another sequence of + # characters such as [\u1234], so `want` and `got` should + # be folded to hex-escaped ASCII string to compare. + got = self._toAscii(got) + want = self._toAscii(want) + + # Handle the common case first, for efficiency: + # if they're string-identical, always return true. + if got == want: + return True + + # The values True and False replaced 1 and 0 as the return + # value for boolean comparisons in Python 2.3. + if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): + if (got,want) == ("True\n", "1\n"): + return True + if (got,want) == ("False\n", "0\n"): + return True + + # can be used as a special sequence to signify a + # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. 
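+        # e.g. an expected output of "a\n<BLANKLINE>\nb\n" is allowed to
+        # match an actual output of "a\n\nb\n".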
+ if not (optionflags & DONT_ACCEPT_BLANKLINE): + # Replace in want with a blank line. + want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), + '', want) + # If a line in got contains only spaces, then remove the + # spaces. + got = re.sub(r'(?m)^[^\S\n]+$', '', got) + if got == want: + return True + + # This flag causes doctest to ignore any differences in the + # contents of whitespace strings. Note that this can be used + # in conjunction with the ELLIPSIS flag. + if optionflags & NORMALIZE_WHITESPACE: + got = ' '.join(got.split()) + want = ' '.join(want.split()) + if got == want: + return True + + # The ELLIPSIS flag says to let the sequence "..." in `want` + # match any substring in `got`. + if optionflags & ELLIPSIS: + if _ellipsis_match(want, got): + return True + + # We didn't find any match; return false. + return False + + # Should we do a fancy diff? + def _do_a_fancy_diff(self, want, got, optionflags): + # Not unless they asked for a fancy diff. + if not optionflags & (REPORT_UDIFF | + REPORT_CDIFF | + REPORT_NDIFF): + return False + + # If expected output uses ellipsis, a meaningful fancy diff is + # too hard ... or maybe not. In two real-life failures Tim saw, + # a diff was a major help anyway, so this is commented out. + # [todo] _ellipsis_match() knows which pieces do and don't match, + # and could be the basis for a kick-ass diff in this case. + ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want: + ## return False + + # ndiff does intraline difference marking, so can be useful even + # for 1-line differences. + if optionflags & REPORT_NDIFF: + return True + + # The other diff types need at least a few lines to be helpful. + return want.count('\n') > 2 and got.count('\n') > 2 + + def output_difference(self, example, got, optionflags): + """ + Return a string describing the differences between the + expected output for a given example (`example`) and the actual + output (`got`). `optionflags` is the set of option flags used + to compare `want` and `got`. + """ + want = example.want + # If s are being used, then replace blank lines + # with in the actual output string. + if not (optionflags & DONT_ACCEPT_BLANKLINE): + got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) + + # Check if we should use diff. + if self._do_a_fancy_diff(want, got, optionflags): + # Split want & got into lines. + want_lines = want.splitlines(keepends=True) + got_lines = got.splitlines(keepends=True) + # Use difflib to find their differences. + if optionflags & REPORT_UDIFF: + diff = difflib.unified_diff(want_lines, got_lines, n=2) + diff = list(diff)[2:] # strip the diff header + kind = 'unified diff with -expected +actual' + elif optionflags & REPORT_CDIFF: + diff = difflib.context_diff(want_lines, got_lines, n=2) + diff = list(diff)[2:] # strip the diff header + kind = 'context diff with expected followed by actual' + elif optionflags & REPORT_NDIFF: + engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) + diff = list(engine.compare(want_lines, got_lines)) + kind = 'ndiff with -expected +actual' + else: + assert 0, 'Bad diff option' + return 'Differences (%s):\n' % kind + _indent(''.join(diff)) + + # If we're not using diff, then simply list the expected + # output followed by the actual output. 
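+        # e.g. with the default 4-space _indent() this looks like:
+        #     Expected:
+        #         42
+        #     Got:
+        #         43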
+ if want and got: + return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) + elif want: + return 'Expected:\n%sGot nothing\n' % _indent(want) + elif got: + return 'Expected nothing\nGot:\n%s' % _indent(got) + else: + return 'Expected nothing\nGot nothing\n' + +class DocTestFailure(Exception): + """A DocTest example has failed in debugging mode. + + The exception instance has variables: + + - test: the DocTest object being run + + - example: the Example object that failed + + - got: the actual output + """ + def __init__(self, test, example, got): + self.test = test + self.example = example + self.got = got + + def __str__(self): + return str(self.test) + +class UnexpectedException(Exception): + """A DocTest example has encountered an unexpected exception + + The exception instance has variables: + + - test: the DocTest object being run + + - example: the Example object that failed + + - exc_info: the exception info + """ + def __init__(self, test, example, exc_info): + self.test = test + self.example = example + self.exc_info = exc_info + + def __str__(self): + return str(self.test) + +class DebugRunner(DocTestRunner): + r"""Run doc tests but raise an exception as soon as there is a failure. + + If an unexpected exception occurs, an UnexpectedException is raised. + It contains the test, the example, and the original exception: + + >>> runner = DebugRunner(verbose=False) + >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', + ... {}, 'foo', 'foo.py', 0) + >>> try: + ... runner.run(test) + ... except UnexpectedException as f: + ... failure = f + + >>> failure.test is test + True + + >>> failure.example.want + '42\n' + + >>> exc_info = failure.exc_info + >>> raise exc_info[1] # Already has the traceback + Traceback (most recent call last): + ... + KeyError + + We wrap the original exception to give the calling application + access to the test and example information. + + If the output doesn't match, then a DocTestFailure is raised: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 1 + ... >>> x + ... 2 + ... ''', {}, 'foo', 'foo.py', 0) + + >>> try: + ... runner.run(test) + ... except DocTestFailure as f: + ... failure = f + + DocTestFailure objects provide access to the test: + + >>> failure.test is test + True + + As well as to the example: + + >>> failure.example.want + '2\n' + + and the actual output: + + >>> failure.got + '1\n' + + If a failure or error occurs, the globals are left intact: + + >>> del test.globs['__builtins__'] + >>> test.globs + {'x': 1} + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 2 + ... >>> raise KeyError + ... ''', {}, 'foo', 'foo.py', 0) + + >>> runner.run(test) + Traceback (most recent call last): + ... + doctest.UnexpectedException: + + >>> del test.globs['__builtins__'] + >>> test.globs + {'x': 2} + + But the globals are cleared if there is no error: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 2 + ... 
''', {}, 'foo', 'foo.py', 0) + + >>> runner.run(test) + TestResults(failed=0, attempted=1) + + >>> test.globs + {} + + """ + + def run(self, test, compileflags=None, out=None, clear_globs=True): + r = DocTestRunner.run(self, test, compileflags, out, False) + if clear_globs: + test.globs.clear() + return r + + def report_unexpected_exception(self, out, test, example, exc_info): + raise UnexpectedException(test, example, exc_info) + + def report_failure(self, out, test, example, got): + raise DocTestFailure(test, example, got) + +###################################################################### +## 6. Test Functions +###################################################################### +# These should be backwards compatible. + +# For backward compatibility, a global instance of a DocTestRunner +# class, updated by testmod. +master = None + +def testmod(m=None, name=None, globs=None, verbose=None, + report=True, optionflags=0, extraglobs=None, + raise_on_error=False, exclude_empty=False): + """m=None, name=None, globs=None, verbose=None, report=True, + optionflags=0, extraglobs=None, raise_on_error=False, + exclude_empty=False + + Test examples in docstrings in functions and classes reachable + from module m (or the current module if m is not supplied), starting + with m.__doc__. + + Also test examples reachable from dict m.__test__ if it exists and is + not None. m.__test__ maps names to functions, classes and strings; + function and class docstrings are tested even if the name is private; + strings are tested directly, as if they were docstrings. + + Return (#failures, #tests). + + See help(doctest) for an overview. + + Optional keyword arg "name" gives the name of the module; by default + use m.__name__. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use m.__dict__. A copy of this + dict is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. This is new in 2.4. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. This is new in 2.3. Possible values (see the + docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + SKIP + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + # If no module was given, then use __main__. 
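+    # The common idiom
+    #     if __name__ == "__main__":
+    #         import doctest
+    #         doctest.testmod()
+    # calls this function with no arguments, so this branch picks up the
+    # __main__ module.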
+ if m is None: + # DWA - m will still be None if this wasn't invoked from the command + # line, in which case the following TypeError is about as good an error + # as we should expect + m = sys.modules.get('__main__') + + # Check that we were actually given a module. + if not inspect.ismodule(m): + raise TypeError("testmod: module required; %r" % (m,)) + + # If no name was given, then use the module's name. + if name is None: + name = m.__name__ + + # Find, parse, and run all tests in the given module. + finder = DocTestFinder(exclude_empty=exclude_empty) + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + for test in finder.find(m, name, globs=globs, extraglobs=extraglobs): + runner.run(test) + + if report: + runner.summarize() + + if master is None: + master = runner + else: + master.merge(runner) + + return TestResults(runner.failures, runner.tries) + +def testfile(filename, module_relative=True, name=None, package=None, + globs=None, verbose=None, report=True, optionflags=0, + extraglobs=None, raise_on_error=False, parser=DocTestParser(), + encoding=None): + """ + Test examples in the given file. Return (#failures, #tests). + + Optional keyword arg "module_relative" specifies how filenames + should be interpreted: + + - If "module_relative" is True (the default), then "filename" + specifies a module-relative path. By default, this path is + relative to the calling module's directory; but if the + "package" argument is specified, then it is relative to that + package. To ensure os-independence, "filename" should use + "/" characters to separate path segments, and should not + be an absolute path (i.e., it may not begin with "/"). + + - If "module_relative" is False, then "filename" specifies an + os-specific path. The path may be absolute or relative (to + the current working directory). + + Optional keyword arg "name" gives the name of the test; by default + use the file's basename. + + Optional keyword argument "package" is a Python package or the + name of a Python package whose directory should be used as the + base directory for a module relative filename. If no package is + specified, then the calling module's directory is used as the base + directory for module relative filenames. It is an error to + specify "package" if "module_relative" is False. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use {}. A copy of this dict + is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. 
Possible values (see the docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + SKIP + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Optional keyword arg "parser" specifies a DocTestParser (or + subclass) that should be used to extract tests from the files. + + Optional keyword arg "encoding" specifies an encoding that should + be used to convert the file to unicode. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path + text, filename = _load_testfile(filename, package, module_relative, + encoding or "utf-8") + + # If no name was given, then use the file's name. + if name is None: + name = os.path.basename(filename) + + # Assemble the globals. + if globs is None: + globs = {} + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + if '__name__' not in globs: + globs['__name__'] = '__main__' + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + # Read the file, convert it to a test, and run it. + test = parser.get_doctest(text, globs, name, filename, 0) + runner.run(test) + + if report: + runner.summarize() + + if master is None: + master = runner + else: + master.merge(runner) + + return TestResults(runner.failures, runner.tries) + +def run_docstring_examples(f, globs, verbose=False, name="NoName", + compileflags=None, optionflags=0): + """ + Test examples in the given object's docstring (`f`), using `globs` + as globals. Optional argument `name` is used in failure messages. + If the optional argument `verbose` is true, then generate output + even if there are no failures. + + `compileflags` gives the set of flags that should be used by the + Python compiler when running the examples. If not specified, then + it will default to the set of future-import flags that apply to + `globs`. + + Optional keyword arg `optionflags` specifies options for the + testing and output. See the documentation for `testmod` for more + information. + """ + # Find, parse, and run all tests in the given module. + finder = DocTestFinder(verbose=verbose, recurse=False) + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + for test in finder.find(f, name, globs=globs): + runner.run(test, compileflags=compileflags) + +###################################################################### +## 7. Unittest Support +###################################################################### + +_unittest_reportflags = 0 + +def set_unittest_reportflags(flags): + """Sets the unittest option flags. 
+ + The old flag is returned so that a runner could restore the old + value if it wished to: + + >>> import doctest + >>> old = doctest._unittest_reportflags + >>> doctest.set_unittest_reportflags(REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) == old + True + + >>> doctest._unittest_reportflags == (REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) + True + + Only reporting flags can be set: + + >>> doctest.set_unittest_reportflags(ELLIPSIS) + Traceback (most recent call last): + ... + ValueError: ('Only reporting flags allowed', 8) + + >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) + True + """ + global _unittest_reportflags + + if (flags & REPORTING_FLAGS) != flags: + raise ValueError("Only reporting flags allowed", flags) + old = _unittest_reportflags + _unittest_reportflags = flags + return old + + +class DocTestCase(unittest.TestCase): + + def __init__(self, test, optionflags=0, setUp=None, tearDown=None, + checker=None): + + unittest.TestCase.__init__(self) + self._dt_optionflags = optionflags + self._dt_checker = checker + self._dt_globs = test.globs.copy() + self._dt_test = test + self._dt_setUp = setUp + self._dt_tearDown = tearDown + + def setUp(self): + test = self._dt_test + + if self._dt_setUp is not None: + self._dt_setUp(test) + + def tearDown(self): + test = self._dt_test + + if self._dt_tearDown is not None: + self._dt_tearDown(test) + + # restore the original globs + test.globs.clear() + test.globs.update(self._dt_globs) + + def runTest(self): + test = self._dt_test + old = sys.stdout + new = StringIO() + optionflags = self._dt_optionflags + + if not (optionflags & REPORTING_FLAGS): + # The option flags don't include any reporting flags, + # so add the default reporting flags + optionflags |= _unittest_reportflags + + runner = DocTestRunner(optionflags=optionflags, + checker=self._dt_checker, verbose=False) + + try: + runner.DIVIDER = "-"*70 + failures, tries = runner.run( + test, out=new.write, clear_globs=False) + finally: + sys.stdout = old + + if failures: + raise self.failureException(self.format_failure(new.getvalue())) + + def format_failure(self, err): + test = self._dt_test + if test.lineno is None: + lineno = 'unknown line number' + else: + lineno = '%s' % test.lineno + lname = '.'.join(test.name.split('.')[-1:]) + return ('Failed doctest test for %s\n' + ' File "%s", line %s, in %s\n\n%s' + % (test.name, test.filename, lineno, lname, err) + ) + + def debug(self): + r"""Run the test case without results and without catching exceptions + + The unit test framework includes a debug method on test cases + and test suites to support post-mortem debugging. The test code + is run in such a way that errors are not caught. This way a + caller can catch the errors and initiate post-mortem debugging. + + The DocTestCase provides a debug method that raises + UnexpectedException errors if there is an unexpected + exception: + + >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', + ... {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + >>> try: + ... case.debug() + ... except UnexpectedException as f: + ... failure = f + + The UnexpectedException contains the test, the example, and + the original exception: + + >>> failure.test is test + True + + >>> failure.example.want + '42\n' + + >>> exc_info = failure.exc_info + >>> raise exc_info[1] # Already has the traceback + Traceback (most recent call last): + ... 
+ KeyError + + If the output doesn't match, then a DocTestFailure is raised: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 1 + ... >>> x + ... 2 + ... ''', {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + + >>> try: + ... case.debug() + ... except DocTestFailure as f: + ... failure = f + + DocTestFailure objects provide access to the test: + + >>> failure.test is test + True + + As well as to the example: + + >>> failure.example.want + '2\n' + + and the actual output: + + >>> failure.got + '1\n' + + """ + + self.setUp() + runner = DebugRunner(optionflags=self._dt_optionflags, + checker=self._dt_checker, verbose=False) + runner.run(self._dt_test, clear_globs=False) + self.tearDown() + + def id(self): + return self._dt_test.name + + def __eq__(self, other): + if type(self) is not type(other): + return NotImplemented + + return self._dt_test == other._dt_test and \ + self._dt_optionflags == other._dt_optionflags and \ + self._dt_setUp == other._dt_setUp and \ + self._dt_tearDown == other._dt_tearDown and \ + self._dt_checker == other._dt_checker + + def __hash__(self): + return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown, + self._dt_checker)) + + def __repr__(self): + name = self._dt_test.name.split('.') + return "%s (%s)" % (name[-1], '.'.join(name[:-1])) + + __str__ = object.__str__ + + def shortDescription(self): + return "Doctest: " + self._dt_test.name + +class SkipDocTestCase(DocTestCase): + def __init__(self, module): + self.module = module + DocTestCase.__init__(self, None) + + def setUp(self): + self.skipTest("DocTestSuite will not work with -O2 and above") + + def test_skip(self): + pass + + def shortDescription(self): + return "Skipping tests from %s" % self.module.__name__ + + __str__ = shortDescription + + +class _DocTestSuite(unittest.TestSuite): + + def _removeTestAtIndex(self, index): + pass + + +def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, + **options): + """ + Convert doctest tests for a module to a unittest test suite. + + This converts each documentation string in a module that + contains doctest tests to a unittest test case. If any of the + tests in a doc string fail, then the test case fails. An exception + is raised showing the name of the file containing the test and a + (sometimes approximate) line number. + + The `module` argument provides the module to be tested. The argument + can be either a module or a module name. + + If no argument is given, the calling module is used. + + A number of options may be provided as keyword arguments: + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. 
+ """ + + if test_finder is None: + test_finder = DocTestFinder() + + module = _normalize_module(module) + tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) + + if not tests and sys.flags.optimize >=2: + # Skip doctests when running with -O2 + suite = _DocTestSuite() + suite.addTest(SkipDocTestCase(module)) + return suite + + tests.sort() + suite = _DocTestSuite() + + for test in tests: + if len(test.examples) == 0: + continue + if not test.filename: + filename = module.__file__ + if filename[-4:] == ".pyc": + filename = filename[:-1] + test.filename = filename + suite.addTest(DocTestCase(test, **options)) + + return suite + +class DocFileCase(DocTestCase): + + def id(self): + return '_'.join(self._dt_test.name.split('.')) + + def __repr__(self): + return self._dt_test.filename + + def format_failure(self, err): + return ('Failed doctest test for %s\n File "%s", line 0\n\n%s' + % (self._dt_test.name, self._dt_test.filename, err) + ) + +def DocFileTest(path, module_relative=True, package=None, + globs=None, parser=DocTestParser(), + encoding=None, **options): + if globs is None: + globs = {} + else: + globs = globs.copy() + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path. + doc, path = _load_testfile(path, package, module_relative, + encoding or "utf-8") + + if "__file__" not in globs: + globs["__file__"] = path + + # Find the file and read it. + name = os.path.basename(path) + + # Convert it to a test, and wrap it in a DocFileCase. + test = parser.get_doctest(doc, globs, name, path, 0) + return DocFileCase(test, **options) + +def DocFileSuite(*paths, **kw): + """A unittest suite for one or more doctest files. + + The path to each doctest file is given as a string; the + interpretation of that string depends on the keyword argument + "module_relative". + + A number of options may be provided as keyword arguments: + + module_relative + If "module_relative" is True, then the given file paths are + interpreted as os-independent module-relative paths. By + default, these paths are relative to the calling module's + directory; but if the "package" argument is specified, then + they are relative to that package. To ensure os-independence, + "filename" should use "/" characters to separate path + segments, and may not be an absolute path (i.e., it may not + begin with "/"). + + If "module_relative" is False, then the given file paths are + interpreted as os-specific paths. These paths may be absolute + or relative (to the current working directory). + + package + A Python package or the name of a Python package whose directory + should be used as the base directory for module relative paths. + If "package" is not specified, then the calling module's + directory is used as the base directory for module relative + filenames. It is an error to specify "package" if + "module_relative" is False. + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. 
+ + optionflags + A set of doctest option flags expressed as an integer. + + parser + A DocTestParser (or subclass) that should be used to extract + tests from the files. + + encoding + An encoding that will be used to convert the files to unicode. + """ + suite = _DocTestSuite() + + # We do this here so that _normalize_module is called at the right + # level. If it were called in DocFileTest, then this function + # would be the caller and we might guess the package incorrectly. + if kw.get('module_relative', True): + kw['package'] = _normalize_module(kw.get('package')) + + for path in paths: + suite.addTest(DocFileTest(path, **kw)) + + return suite + +###################################################################### +## 8. Debugging Support +###################################################################### + +def script_from_examples(s): + r"""Extract script from text with examples. + + Converts text with examples to a Python script. Example input is + converted to regular code. Example output and all other words + are converted to comments: + + >>> text = ''' + ... Here are examples of simple math. + ... + ... Python has super accurate integer addition + ... + ... >>> 2 + 2 + ... 5 + ... + ... And very friendly error messages: + ... + ... >>> 1/0 + ... To Infinity + ... And + ... Beyond + ... + ... You can use logic if you want: + ... + ... >>> if 0: + ... ... blah + ... ... blah + ... ... + ... + ... Ho hum + ... ''' + + >>> print(script_from_examples(text)) + # Here are examples of simple math. + # + # Python has super accurate integer addition + # + 2 + 2 + # Expected: + ## 5 + # + # And very friendly error messages: + # + 1/0 + # Expected: + ## To Infinity + ## And + ## Beyond + # + # You can use logic if you want: + # + if 0: + blah + blah + # + # Ho hum + + """ + output = [] + for piece in DocTestParser().parse(s): + if isinstance(piece, Example): + # Add the example's source code (strip trailing NL) + output.append(piece.source[:-1]) + # Add the expected output: + want = piece.want + if want: + output.append('# Expected:') + output += ['## '+l for l in want.split('\n')[:-1]] + else: + # Add non-example text. + output += [_comment_line(l) + for l in piece.split('\n')[:-1]] + + # Trim junk on both ends. + while output and output[-1] == '#': + output.pop() + while output and output[0] == '#': + output.pop(0) + # Combine the output, and return it. + # Add a courtesy newline to prevent exec from choking (see bug #1172785) + return '\n'.join(output) + '\n' + +def testsource(module, name): + """Extract the test sources from a doctest docstring as a script. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the doc string with tests to be debugged. + """ + module = _normalize_module(module) + tests = DocTestFinder().find(module) + test = [t for t in tests if t.name == name] + if not test: + raise ValueError(name, "not found in tests") + test = test[0] + testsrc = script_from_examples(test.docstring) + return testsrc + +def debug_src(src, pm=False, globs=None): + """Debug a single doctest docstring, in argument `src`'""" + testsrc = script_from_examples(src) + debug_script(testsrc, pm, globs) + +def debug_script(src, pm=False, globs=None): + "Debug a test script. `src` is the script, as a string." 
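+    # With pm=True the script is exec'd to completion and pdb is entered
+    # post-mortem on the resulting traceback; with pm=False, pdb.run()
+    # steps through the script from its first statement.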
+ import pdb + + if globs: + globs = globs.copy() + else: + globs = {} + + if pm: + try: + exec(src, globs, globs) + except: + print(sys.exc_info()[1]) + p = pdb.Pdb(nosigint=True) + p.reset() + p.interaction(None, sys.exc_info()[2]) + else: + pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs) + +def debug(module, name, pm=False): + """Debug a single doctest docstring. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the docstring with tests to be debugged. + """ + module = _normalize_module(module) + testsrc = testsource(module, name) + debug_script(testsrc, pm, module.__dict__) + +###################################################################### +## 9. Example Usage +###################################################################### +class _TestClass: + """ + A pointless class, for sanity-checking of docstring testing. + + Methods: + square() + get() + + >>> _TestClass(13).get() + _TestClass(-12).get() + 1 + >>> hex(_TestClass(13).square().get()) + '0xa9' + """ + + def __init__(self, val): + """val -> _TestClass object with associated value val. + + >>> t = _TestClass(123) + >>> print(t.get()) + 123 + """ + + self.val = val + + def square(self): + """square() -> square TestClass's associated value + + >>> _TestClass(13).square().get() + 169 + """ + + self.val = self.val ** 2 + return self + + def get(self): + """get() -> return TestClass's associated value. + + >>> x = _TestClass(-42) + >>> print(x.get()) + -42 + """ + + return self.val + +__test__ = {"_TestClass": _TestClass, + "string": r""" + Example of a string object, searched as-is. + >>> x = 1; y = 2 + >>> x + y, x * y + (3, 2) + """, + + "bool-int equivalence": r""" + In 2.2, boolean expressions displayed + 0 or 1. By default, we still accept + them. This can be disabled by passing + DONT_ACCEPT_TRUE_FOR_1 to the new + optionflags argument. + >>> 4 == 4 + 1 + >>> 4 == 4 + True + >>> 4 > 4 + 0 + >>> 4 > 4 + False + """, + + "blank lines": r""" + Blank lines can be marked with : + >>> print('foo\n\nbar\n') + foo + + bar + + """, + + "ellipsis": r""" + If the ellipsis flag is used, then '...' can be used to + elide substrings in the desired output: + >>> print(list(range(1000))) #doctest: +ELLIPSIS + [0, 1, 2, ..., 999] + """, + + "whitespace normalization": r""" + If the whitespace normalization flag is used, then + differences in whitespace are ignored. 
+ >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29] + """, + } + + +def _test(): + import argparse + + parser = argparse.ArgumentParser(description="doctest runner") + parser.add_argument('-v', '--verbose', action='store_true', default=False, + help='print very verbose output for all tests') + parser.add_argument('-o', '--option', action='append', + choices=OPTIONFLAGS_BY_NAME.keys(), default=[], + help=('specify a doctest option flag to apply' + ' to the test run; may be specified more' + ' than once to apply multiple options')) + parser.add_argument('-f', '--fail-fast', action='store_true', + help=('stop running tests after first failure (this' + ' is a shorthand for -o FAIL_FAST, and is' + ' in addition to any other -o options)')) + parser.add_argument('file', nargs='+', + help='file containing the tests to run') + args = parser.parse_args() + testfiles = args.file + # Verbose used to be handled by the "inspect argv" magic in DocTestRunner, + # but since we are using argparse we are passing it manually now. + verbose = args.verbose + options = 0 + for option in args.option: + options |= OPTIONFLAGS_BY_NAME[option] + if args.fail_fast: + options |= FAIL_FAST + for filename in testfiles: + if filename.endswith(".py"): + # It is a module -- insert its dir into sys.path and try to + # import it. If it is part of a package, that possibly + # won't work because of package imports. + dirname, filename = os.path.split(filename) + sys.path.insert(0, dirname) + m = __import__(filename[:-3]) + del sys.path[0] + failures, _ = testmod(m, verbose=verbose, optionflags=options) + else: + failures, _ = testfile(filename, module_relative=False, + verbose=verbose, optionflags=options) + if failures: + return 1 + return 0 + + +if __name__ == "__main__": + sys.exit(_test()) diff --git a/evalkit_cambrian/lib/python3.10/enum.py b/evalkit_cambrian/lib/python3.10/enum.py new file mode 100644 index 0000000000000000000000000000000000000000..f5657a6eba29c19e139bff5089e01d5f1eb362d5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/enum.py @@ -0,0 +1,1053 @@ +import sys +from types import MappingProxyType, DynamicClassAttribute + + +__all__ = [ + 'EnumMeta', + 'Enum', 'IntEnum', 'Flag', 'IntFlag', + 'auto', 'unique', + ] + + +def _is_descriptor(obj): + """ + Returns True if obj is a descriptor, False otherwise. + """ + return ( + hasattr(obj, '__get__') or + hasattr(obj, '__set__') or + hasattr(obj, '__delete__') + ) + +def _is_dunder(name): + """ + Returns True if a __dunder__ name, False otherwise. + """ + return ( + len(name) > 4 and + name[:2] == name[-2:] == '__' and + name[2] != '_' and + name[-3] != '_' + ) + +def _is_sunder(name): + """ + Returns True if a _sunder_ name, False otherwise. + """ + return ( + len(name) > 2 and + name[0] == name[-1] == '_' and + name[1:2] != '_' and + name[-2:-1] != '_' + ) + +def _is_private(cls_name, name): + # do not use `re` as `re` imports `enum` + pattern = '_%s__' % (cls_name, ) + pat_len = len(pattern) + if ( + len(name) > pat_len + and name.startswith(pattern) + and name[pat_len:pat_len+1] != ['_'] + and (name[-1] != '_' or name[-2] != '_') + ): + return True + else: + return False + +def _make_class_unpicklable(cls): + """ + Make the given class un-picklable. 
+ """ + def _break_on_call_reduce(self, proto): + raise TypeError('%r cannot be pickled' % self) + cls.__reduce_ex__ = _break_on_call_reduce + cls.__module__ = '' + +_auto_null = object() +class auto: + """ + Instances are replaced with an appropriate value in Enum class suites. + """ + value = _auto_null + + +class _EnumDict(dict): + """ + Track enum member order and ensure member names are not reused. + + EnumMeta will use the names found in self._member_names as the + enumeration member names. + """ + def __init__(self): + super().__init__() + self._member_names = [] + self._last_values = [] + self._ignore = [] + self._auto_called = False + + def __setitem__(self, key, value): + """ + Changes anything not dundered or not a descriptor. + + If an enum member name is used twice, an error is raised; duplicate + values are not checked for. + + Single underscore (sunder) names are reserved. + """ + if _is_private(self._cls_name, key): + import warnings + warnings.warn( + "private variables, such as %r, will be normal attributes in 3.11" + % (key, ), + DeprecationWarning, + stacklevel=2, + ) + if _is_sunder(key): + if key not in ( + '_order_', '_create_pseudo_member_', + '_generate_next_value_', '_missing_', '_ignore_', + ): + raise ValueError('_names_ are reserved for future Enum use') + if key == '_generate_next_value_': + # check if members already defined as auto() + if self._auto_called: + raise TypeError("_generate_next_value_ must be defined before members") + setattr(self, '_generate_next_value', value) + elif key == '_ignore_': + if isinstance(value, str): + value = value.replace(',',' ').split() + else: + value = list(value) + self._ignore = value + already = set(value) & set(self._member_names) + if already: + raise ValueError( + '_ignore_ cannot specify already set names: %r' + % (already, ) + ) + elif _is_dunder(key): + if key == '__order__': + key = '_order_' + elif key in self._member_names: + # descriptor overwriting an enum? + raise TypeError('Attempted to reuse key: %r' % key) + elif key in self._ignore: + pass + elif not _is_descriptor(value): + if key in self: + # enum overwriting a descriptor? + raise TypeError('%r already defined as: %r' % (key, self[key])) + if isinstance(value, auto): + if value.value == _auto_null: + value.value = self._generate_next_value( + key, + 1, + len(self._member_names), + self._last_values[:], + ) + self._auto_called = True + value = value.value + self._member_names.append(key) + self._last_values.append(value) + super().__setitem__(key, value) + + +# Dummy value for Enum as EnumMeta explicitly checks for it, but of course +# until EnumMeta finishes running the first time the Enum class doesn't exist. +# This is also why there are checks in EnumMeta like `if Enum is not None` +Enum = None + +class EnumMeta(type): + """ + Metaclass for Enum + """ + @classmethod + def __prepare__(metacls, cls, bases, **kwds): + # check that previous enum members do not exist + metacls._check_for_existing_members(cls, bases) + # create the namespace dict + enum_dict = _EnumDict() + enum_dict._cls_name = cls + # inherit previous flags and _generate_next_value_ function + member_type, first_enum = metacls._get_mixins_(cls, bases) + if first_enum is not None: + enum_dict['_generate_next_value_'] = getattr( + first_enum, '_generate_next_value_', None, + ) + return enum_dict + + def __new__(metacls, cls, bases, classdict, **kwds): + # an Enum class is final once enumeration items have been defined; it + # cannot be mixed with other types (int, float, etc.) 
if it has an + # inherited __new__ unless a new __new__ is defined (or the resulting + # class will fail). + # + # remove any keys listed in _ignore_ + classdict.setdefault('_ignore_', []).append('_ignore_') + ignore = classdict['_ignore_'] + for key in ignore: + classdict.pop(key, None) + member_type, first_enum = metacls._get_mixins_(cls, bases) + __new__, save_new, use_args = metacls._find_new_( + classdict, member_type, first_enum, + ) + + # save enum items into separate mapping so they don't get baked into + # the new class + enum_members = {k: classdict[k] for k in classdict._member_names} + for name in classdict._member_names: + del classdict[name] + + # adjust the sunders + _order_ = classdict.pop('_order_', None) + + # check for illegal enum names (any others?) + invalid_names = set(enum_members) & {'mro', ''} + if invalid_names: + raise ValueError('Invalid enum member name: {0}'.format( + ','.join(invalid_names))) + + # create a default docstring if one has not been provided + if '__doc__' not in classdict: + classdict['__doc__'] = 'An enumeration.' + + enum_class = super().__new__(metacls, cls, bases, classdict, **kwds) + enum_class._member_names_ = [] # names in definition order + enum_class._member_map_ = {} # name->value map + enum_class._member_type_ = member_type + + # save DynamicClassAttribute attributes from super classes so we know + # if we can take the shortcut of storing members in the class dict + dynamic_attributes = { + k for c in enum_class.mro() + for k, v in c.__dict__.items() + if isinstance(v, DynamicClassAttribute) + } + + # Reverse value->name map for hashable values. + enum_class._value2member_map_ = {} + + # If a custom type is mixed into the Enum, and it does not know how + # to pickle itself, pickle.dumps will succeed but pickle.loads will + # fail. Rather than have the error show up later and possibly far + # from the source, sabotage the pickle protocol for this class so + # that pickle.dumps also fails. + # + # However, if the new class implements its own __reduce_ex__, do not + # sabotage -- it's on them to make sure it works correctly. We use + # __reduce_ex__ instead of any of the others as it is preferred by + # pickle over __reduce__, and it handles all pickle protocols. 
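+        # Illustrative example of the case being guarded against: a
+        # hypothetical ``class Color(SomeUnpicklableMixin, Enum): RED = 1``
+        # would pickle.dumps() fine but fail in pickle.loads(), so the class
+        # is made unpicklable up front to surface the error early.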
+ if '__reduce_ex__' not in classdict: + if member_type is not object: + methods = ('__getnewargs_ex__', '__getnewargs__', + '__reduce_ex__', '__reduce__') + if not any(m in member_type.__dict__ for m in methods): + if '__new__' in classdict: + # too late, sabotage + _make_class_unpicklable(enum_class) + else: + # final attempt to verify that pickling would work: + # travel mro until __new__ is found, checking for + # __reduce__ and friends along the way -- if any of them + # are found before/when __new__ is found, pickling should + # work + sabotage = None + for chain in bases: + for base in chain.__mro__: + if base is object: + continue + elif any(m in base.__dict__ for m in methods): + # found one, we're good + sabotage = False + break + elif '__new__' in base.__dict__: + # not good + sabotage = True + break + if sabotage is not None: + break + if sabotage: + _make_class_unpicklable(enum_class) + # instantiate them, checking for duplicates as we go + # we instantiate first instead of checking for duplicates first in case + # a custom __new__ is doing something funky with the values -- such as + # auto-numbering ;) + for member_name in classdict._member_names: + value = enum_members[member_name] + if not isinstance(value, tuple): + args = (value, ) + else: + args = value + if member_type is tuple: # special case for tuple enums + args = (args, ) # wrap it one more time + if not use_args: + enum_member = __new__(enum_class) + if not hasattr(enum_member, '_value_'): + enum_member._value_ = value + else: + enum_member = __new__(enum_class, *args) + if not hasattr(enum_member, '_value_'): + if member_type is object: + enum_member._value_ = value + else: + enum_member._value_ = member_type(*args) + value = enum_member._value_ + enum_member._name_ = member_name + enum_member.__objclass__ = enum_class + enum_member.__init__(*args) + # If another member with the same value was already defined, the + # new member becomes an alias to the existing one. + for name, canonical_member in enum_class._member_map_.items(): + if canonical_member._value_ == enum_member._value_: + enum_member = canonical_member + break + else: + # Aliases don't appear in member names (only in __members__). + enum_class._member_names_.append(member_name) + # performance boost for any member that would not shadow + # a DynamicClassAttribute + if member_name not in dynamic_attributes: + setattr(enum_class, member_name, enum_member) + # now add to _member_map_ + enum_class._member_map_[member_name] = enum_member + try: + # This may fail if value is not hashable. We can't add the value + # to the map, and by-value lookups for this value will be + # linear. 
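+                # (Enum.__new__ copes with that case by falling back to a
+                # linear scan of _member_map_ when this lookup raises
+                # TypeError.)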
+ enum_class._value2member_map_[value] = enum_member + except TypeError: + pass + + # double check that repr and friends are not the mixin's or various + # things break (such as pickle) + # however, if the method is defined in the Enum itself, don't replace + # it + for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): + if name in classdict: + continue + class_method = getattr(enum_class, name) + obj_method = getattr(member_type, name, None) + enum_method = getattr(first_enum, name, None) + if obj_method is not None and obj_method is class_method: + setattr(enum_class, name, enum_method) + + # replace any other __new__ with our own (as long as Enum is not None, + # anyway) -- again, this is to support pickle + if Enum is not None: + # if the user defined their own __new__, save it before it gets + # clobbered in case they subclass later + if save_new: + enum_class.__new_member__ = __new__ + enum_class.__new__ = Enum.__new__ + + # py3 support for definition order (helps keep py2/py3 code in sync) + if _order_ is not None: + if isinstance(_order_, str): + _order_ = _order_.replace(',', ' ').split() + if _order_ != enum_class._member_names_: + raise TypeError('member order does not match _order_') + + return enum_class + + def __bool__(self): + """ + classes/types should always be True. + """ + return True + + def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): + """ + Either returns an existing member, or creates a new enum class. + + This method is used both when an enum class is given a value to match + to an enumeration member (i.e. Color(3)) and for the functional API + (i.e. Color = Enum('Color', names='RED GREEN BLUE')). + + When used for the functional API: + + `value` will be the name of the new class. + + `names` should be either a string of white-space/comma delimited names + (values will start at `start`), or an iterator/mapping of name, value pairs. + + `module` should be set to the module this class is being created in; + if it is not set, an attempt to find that module will be made, but if + it fails the class will not be picklable. + + `qualname` should be set to the actual location this class can be found + at in its module; by default it is set to the global scope. If this is + not correct, unpickling will fail in some circumstances. + + `type`, if set, will be mixed in as the first base class. + """ + if names is None: # simple value lookup + return cls.__new__(cls, value) + # otherwise, functional API: we're creating a new Enum type + return cls._create_( + value, + names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + def __contains__(cls, obj): + if not isinstance(obj, Enum): + import warnings + warnings.warn( + "in 3.12 __contains__ will no longer raise TypeError, but will return True if\n" + "obj is a member or a member's value", + DeprecationWarning, + stacklevel=2, + ) + raise TypeError( + "unsupported operand type(s) for 'in': '%s' and '%s'" % ( + type(obj).__qualname__, cls.__class__.__qualname__)) + return isinstance(obj, cls) and obj._name_ in cls._member_map_ + + def __delattr__(cls, attr): + # nicer error message when someone tries to delete an attribute + # (see issue19025). + if attr in cls._member_map_: + raise AttributeError("%s: cannot delete Enum member." 
% cls.__name__)
+        super().__delattr__(attr)
+
+    def __dir__(self):
+        return (
+                ['__class__', '__doc__', '__members__', '__module__']
+                + self._member_names_
+                )
+
+    def __getattr__(cls, name):
+        """
+        Return the enum member matching `name`
+
+        We use __getattr__ instead of descriptors or inserting into the enum
+        class' __dict__ in order to support `name` and `value` being both
+        properties for enum members (which live in the class' __dict__) and
+        enum members themselves.
+        """
+        if _is_dunder(name):
+            raise AttributeError(name)
+        try:
+            return cls._member_map_[name]
+        except KeyError:
+            raise AttributeError(name) from None
+
+    def __getitem__(cls, name):
+        return cls._member_map_[name]
+
+    def __iter__(cls):
+        """
+        Returns members in definition order.
+        """
+        return (cls._member_map_[name] for name in cls._member_names_)
+
+    def __len__(cls):
+        return len(cls._member_names_)
+
+    @property
+    def __members__(cls):
+        """
+        Returns a mapping of member name->value.
+
+        This mapping lists all enum members, including aliases. Note that this
+        is a read-only view of the internal mapping.
+        """
+        return MappingProxyType(cls._member_map_)
+
+    def __repr__(cls):
+        return "<enum %r>" % cls.__name__
+
+    def __reversed__(cls):
+        """
+        Returns members in reverse definition order.
+        """
+        return (cls._member_map_[name] for name in reversed(cls._member_names_))
+
+    def __setattr__(cls, name, value):
+        """
+        Block attempts to reassign Enum members.
+
+        A simple assignment to the class namespace only changes one of the
+        several possible ways to get an Enum member from the Enum class,
+        resulting in an inconsistent Enumeration.
+        """
+        member_map = cls.__dict__.get('_member_map_', {})
+        if name in member_map:
+            raise AttributeError('Cannot reassign members.')
+        super().__setattr__(name, value)
+
+    def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1):
+        """
+        Convenience method to create a new Enum class.
+
+        `names` can be:
+
+        * A string containing member names, separated either with spaces or
+          commas. Values are incremented by 1 from `start`.
+        * An iterable of member names. Values are incremented by 1 from `start`.
+        * An iterable of (member name, value) pairs.
+        * A mapping of member name -> value pairs.
+        """
+        metacls = cls.__class__
+        bases = (cls, ) if type is None else (type, cls)
+        _, first_enum = cls._get_mixins_(cls, bases)
+        classdict = metacls.__prepare__(class_name, bases)
+
+        # special processing needed for names?
+        if isinstance(names, str):
+            names = names.replace(',', ' ').split()
+        if isinstance(names, (tuple, list)) and names and isinstance(names[0], str):
+            original_names, names = names, []
+            last_values = []
+            for count, name in enumerate(original_names):
+                value = first_enum._generate_next_value_(name, start, count, last_values[:])
+                last_values.append(value)
+                names.append((name, value))
+
+        # Here, names is either an iterable of (name, value) or a mapping.
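+        # For example, a hypothetical Enum('Color', 'RED GREEN BLUE') call
+        # reaches this point with names == [('RED', 1), ('GREEN', 2), ('BLUE', 3)].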
+ for item in names: + if isinstance(item, str): + member_name, member_value = item, names[item] + else: + member_name, member_value = item + classdict[member_name] = member_value + enum_class = metacls.__new__(metacls, class_name, bases, classdict) + + # TODO: replace the frame hack if a blessed way to know the calling + # module is ever developed + if module is None: + try: + module = sys._getframe(2).f_globals['__name__'] + except (AttributeError, ValueError, KeyError): + pass + if module is None: + _make_class_unpicklable(enum_class) + else: + enum_class.__module__ = module + if qualname is not None: + enum_class.__qualname__ = qualname + + return enum_class + + def _convert_(cls, name, module, filter, source=None): + """ + Create a new Enum subclass that replaces a collection of global constants + """ + # convert all constants from source (or module) that pass filter() to + # a new Enum called name, and export the enum and its members back to + # module; + # also, replace the __reduce_ex__ method so unpickling works in + # previous Python versions + module_globals = vars(sys.modules[module]) + if source: + source = vars(source) + else: + source = module_globals + # _value2member_map_ is populated in the same order every time + # for a consistent reverse mapping of number to name when there + # are multiple names for the same number. + members = [ + (name, value) + for name, value in source.items() + if filter(name)] + try: + # sort by value + members.sort(key=lambda t: (t[1], t[0])) + except TypeError: + # unless some values aren't comparable, in which case sort by name + members.sort(key=lambda t: t[0]) + cls = cls(name, members, module=module) + cls.__reduce_ex__ = _reduce_ex_by_name + module_globals.update(cls.__members__) + module_globals[name] = cls + return cls + + @staticmethod + def _check_for_existing_members(class_name, bases): + for chain in bases: + for base in chain.__mro__: + if issubclass(base, Enum) and base._member_names_: + raise TypeError( + "%s: cannot extend enumeration %r" + % (class_name, base.__name__) + ) + + @staticmethod + def _get_mixins_(class_name, bases): + """ + Returns the type for creating enum members, and the first inherited + enum class. + + bases: the tuple of bases that was given to __new__ + """ + if not bases: + return object, Enum + + def _find_data_type(bases): + data_types = set() + for chain in bases: + candidate = None + for base in chain.__mro__: + if base is object: + continue + elif issubclass(base, Enum): + if base._member_type_ is not object: + data_types.add(base._member_type_) + break + elif '__new__' in base.__dict__: + if issubclass(base, Enum): + continue + data_types.add(candidate or base) + break + else: + candidate = candidate or base + if len(data_types) > 1: + raise TypeError('%r: too many data types: %r' % (class_name, data_types)) + elif data_types: + return data_types.pop() + else: + return None + + # ensure final parent class is an Enum derivative, find any concrete + # data type, and check that Enum has no members + first_enum = bases[-1] + if not issubclass(first_enum, Enum): + raise TypeError("new enumerations should be created as " + "`EnumName([mixin_type, ...] [data_type,] enum_type)`") + member_type = _find_data_type(bases) or object + if first_enum._member_names_: + raise TypeError("Cannot extend enumerations") + return member_type, first_enum + + @staticmethod + def _find_new_(classdict, member_type, first_enum): + """ + Returns the __new__ to be used for creating the enum members. 
+ + classdict: the class dictionary given to __new__ + member_type: the data type whose __new__ will be used by default + first_enum: enumeration to check for an overriding __new__ + """ + # now find the correct __new__, checking to see of one was defined + # by the user; also check earlier enum classes in case a __new__ was + # saved as __new_member__ + __new__ = classdict.get('__new__', None) + + # should __new__ be saved as __new_member__ later? + save_new = __new__ is not None + + if __new__ is None: + # check all possibles for __new_member__ before falling back to + # __new__ + for method in ('__new_member__', '__new__'): + for possible in (member_type, first_enum): + target = getattr(possible, method, None) + if target not in { + None, + None.__new__, + object.__new__, + Enum.__new__, + }: + __new__ = target + break + if __new__ is not None: + break + else: + __new__ = object.__new__ + + # if a non-object.__new__ is used then whatever value/tuple was + # assigned to the enum member name will be passed to __new__ and to the + # new enum member's __init__ + if __new__ is object.__new__: + use_args = False + else: + use_args = True + return __new__, save_new, use_args + + +class Enum(metaclass=EnumMeta): + """ + Generic enumeration. + + Derive from this class to define new enumerations. + """ + def __new__(cls, value): + # all enum instances are actually created during class construction + # without calling this method; this method is called by the metaclass' + # __call__ (i.e. Color(3) ), and by pickle + if type(value) is cls: + # For lookups like Color(Color.RED) + return value + # by-value search for a matching enum member + # see if it's in the reverse mapping (for hashable values) + try: + return cls._value2member_map_[value] + except KeyError: + # Not found, no need to do long O(n) search + pass + except TypeError: + # not there, now do long search -- O(n) behavior + for member in cls._member_map_.values(): + if member._value_ == value: + return member + # still not found -- try _missing_ hook + try: + exc = None + result = cls._missing_(value) + except Exception as e: + exc = e + result = None + try: + if isinstance(result, cls): + return result + else: + ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__)) + if result is None and exc is None: + raise ve_exc + elif exc is None: + exc = TypeError( + 'error in %s._missing_: returned %r instead of None or a valid member' + % (cls.__name__, result) + ) + if not isinstance(exc, ValueError): + exc.__context__ = ve_exc + raise exc + finally: + # ensure all variables that could hold an exception are destroyed + exc = None + ve_exc = None + + def _generate_next_value_(name, start, count, last_values): + """ + Generate the next value when not given. 
+ + name: the name of the member + start: the initial start value or None + count: the number of existing members + last_value: the last value assigned or None + """ + for last_value in reversed(last_values): + try: + return last_value + 1 + except TypeError: + pass + else: + return start + + @classmethod + def _missing_(cls, value): + return None + + def __repr__(self): + return "<%s.%s: %r>" % ( + self.__class__.__name__, self._name_, self._value_) + + def __str__(self): + return "%s.%s" % (self.__class__.__name__, self._name_) + + def __dir__(self): + """ + Returns all members and all public methods + """ + added_behavior = [ + m + for cls in self.__class__.mro() + for m in cls.__dict__ + if m[0] != '_' and m not in self._member_map_ + ] + [m for m in self.__dict__ if m[0] != '_'] + return (['__class__', '__doc__', '__module__'] + added_behavior) + + def __format__(self, format_spec): + """ + Returns format using actual value type unless __str__ has been overridden. + """ + # mixed-in Enums should use the mixed-in type's __format__, otherwise + # we can get strange results with the Enum name showing up instead of + # the value + + # pure Enum branch, or branch with __str__ explicitly overridden + str_overridden = type(self).__str__ not in (Enum.__str__, Flag.__str__) + if self._member_type_ is object or str_overridden: + cls = str + val = str(self) + # mix-in branch + else: + cls = self._member_type_ + val = self._value_ + return cls.__format__(val, format_spec) + + def __hash__(self): + return hash(self._name_) + + def __reduce_ex__(self, proto): + return self.__class__, (self._value_, ) + + # DynamicClassAttribute is used to provide access to the `name` and + # `value` properties of enum members while keeping some measure of + # protection from modification, while still allowing for an enumeration + # to have members named `name` and `value`. This works because enumeration + # members are not set directly on the enum class -- __getattr__ is + # used to look them up. + + @DynamicClassAttribute + def name(self): + """The name of the Enum member.""" + return self._name_ + + @DynamicClassAttribute + def value(self): + """The value of the Enum member.""" + return self._value_ + + +class IntEnum(int, Enum): + """Enum where members are also (and must be) ints""" + + +def _reduce_ex_by_name(self, proto): + return self.name + +class Flag(Enum): + """ + Support for flags + """ + + def _generate_next_value_(name, start, count, last_values): + """ + Generate the next value when not given. + + name: the name of the member + start: the initial start value or None + count: the number of existing members + last_value: the last value assigned or None + """ + if not count: + return start if start is not None else 1 + for last_value in reversed(last_values): + try: + high_bit = _high_bit(last_value) + break + except Exception: + raise TypeError('Invalid Flag value: %r' % last_value) from None + return 2 ** (high_bit+1) + + @classmethod + def _missing_(cls, value): + """ + Returns member (possibly creating it) if one can be found for value. + """ + original_value = value + if value < 0: + value = ~value + possible_member = cls._create_pseudo_member_(value) + if original_value < 0: + possible_member = ~possible_member + return possible_member + + @classmethod + def _create_pseudo_member_(cls, value): + """ + Create a composite member iff value contains only members. 
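+
+        For example, with hypothetical flags R = 4, W = 2, X = 1, looking up
+        the unnamed value 6 yields a composite pseudo-member equal to R|W.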
+ """ + pseudo_member = cls._value2member_map_.get(value, None) + if pseudo_member is None: + # verify all bits are accounted for + _, extra_flags = _decompose(cls, value) + if extra_flags: + raise ValueError("%r is not a valid %s" % (value, cls.__qualname__)) + # construct a singleton enum pseudo-member + pseudo_member = object.__new__(cls) + pseudo_member._name_ = None + pseudo_member._value_ = value + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) + return pseudo_member + + def __contains__(self, other): + """ + Returns True if self has at least the same flags set as other. + """ + if not isinstance(other, self.__class__): + raise TypeError( + "unsupported operand type(s) for 'in': '%s' and '%s'" % ( + type(other).__qualname__, self.__class__.__qualname__)) + return other._value_ & self._value_ == other._value_ + + def __repr__(self): + cls = self.__class__ + if self._name_ is not None: + return '<%s.%s: %r>' % (cls.__name__, self._name_, self._value_) + members, uncovered = _decompose(cls, self._value_) + return '<%s.%s: %r>' % ( + cls.__name__, + '|'.join([str(m._name_ or m._value_) for m in members]), + self._value_, + ) + + def __str__(self): + cls = self.__class__ + if self._name_ is not None: + return '%s.%s' % (cls.__name__, self._name_) + members, uncovered = _decompose(cls, self._value_) + if len(members) == 1 and members[0]._name_ is None: + return '%s.%r' % (cls.__name__, members[0]._value_) + else: + return '%s.%s' % ( + cls.__name__, + '|'.join([str(m._name_ or m._value_) for m in members]), + ) + + def __bool__(self): + return bool(self._value_) + + def __or__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.__class__(self._value_ | other._value_) + + def __and__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.__class__(self._value_ & other._value_) + + def __xor__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.__class__(self._value_ ^ other._value_) + + def __invert__(self): + members, uncovered = _decompose(self.__class__, self._value_) + inverted = self.__class__(0) + for m in self.__class__: + if m not in members and not (m._value_ & self._value_): + inverted = inverted | m + return self.__class__(inverted) + + +class IntFlag(int, Flag): + """ + Support for integer-based Flags + """ + + @classmethod + def _missing_(cls, value): + """ + Returns member (possibly creating it) if one can be found for value. + """ + if not isinstance(value, int): + raise ValueError("%r is not a valid %s" % (value, cls.__qualname__)) + new_member = cls._create_pseudo_member_(value) + return new_member + + @classmethod + def _create_pseudo_member_(cls, value): + """ + Create a composite member iff value contains only members. 
+ """ + pseudo_member = cls._value2member_map_.get(value, None) + if pseudo_member is None: + need_to_create = [value] + # get unaccounted for bits + _, extra_flags = _decompose(cls, value) + # timer = 10 + while extra_flags: + # timer -= 1 + bit = _high_bit(extra_flags) + flag_value = 2 ** bit + if (flag_value not in cls._value2member_map_ and + flag_value not in need_to_create + ): + need_to_create.append(flag_value) + if extra_flags == -flag_value: + extra_flags = 0 + else: + extra_flags ^= flag_value + for value in reversed(need_to_create): + # construct singleton pseudo-members + pseudo_member = int.__new__(cls, value) + pseudo_member._name_ = None + pseudo_member._value_ = value + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) + return pseudo_member + + def __or__(self, other): + if not isinstance(other, (self.__class__, int)): + return NotImplemented + result = self.__class__(self._value_ | self.__class__(other)._value_) + return result + + def __and__(self, other): + if not isinstance(other, (self.__class__, int)): + return NotImplemented + return self.__class__(self._value_ & self.__class__(other)._value_) + + def __xor__(self, other): + if not isinstance(other, (self.__class__, int)): + return NotImplemented + return self.__class__(self._value_ ^ self.__class__(other)._value_) + + __ror__ = __or__ + __rand__ = __and__ + __rxor__ = __xor__ + + def __invert__(self): + result = self.__class__(~self._value_) + return result + + +def _high_bit(value): + """ + returns index of highest bit, or -1 if value is zero or negative + """ + return value.bit_length() - 1 + +def unique(enumeration): + """ + Class decorator for enumerations ensuring unique member values. + """ + duplicates = [] + for name, member in enumeration.__members__.items(): + if name != member.name: + duplicates.append((name, member.name)) + if duplicates: + alias_details = ', '.join( + ["%s -> %s" % (alias, name) for (alias, name) in duplicates]) + raise ValueError('duplicate values found in %r: %s' % + (enumeration, alias_details)) + return enumeration + +def _decompose(flag, value): + """ + Extract all members from the value. + """ + # _decompose is only called if the value is not named + not_covered = value + negative = value < 0 + members = [] + for member in flag: + member_value = member.value + if member_value and member_value & value == member_value: + members.append(member) + not_covered &= ~member_value + if not negative: + tmp = not_covered + while tmp: + flag_value = 2 ** _high_bit(tmp) + if flag_value in flag._value2member_map_: + members.append(flag._value2member_map_[flag_value]) + not_covered &= ~flag_value + tmp &= ~flag_value + if not members and value in flag._value2member_map_: + members.append(flag._value2member_map_[value]) + members.sort(key=lambda m: m._value_, reverse=True) + if len(members) > 1 and members[0].value == value: + # we have the breakdown, don't need the value member itself + members.pop(0) + return members, not_covered diff --git a/evalkit_cambrian/lib/python3.10/fileinput.py b/evalkit_cambrian/lib/python3.10/fileinput.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd19906dcf5d276b9259d5e1147cde270f372de --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/fileinput.py @@ -0,0 +1,462 @@ +"""Helper class to quickly write a loop over all standard input files. 
+
+Typical use is:
+
+    import fileinput
+    for line in fileinput.input(encoding="utf-8"):
+        process(line)
+
+This iterates over the lines of all files listed in sys.argv[1:],
+defaulting to sys.stdin if the list is empty. If a filename is '-' it
+is also replaced by sys.stdin and the optional arguments mode and
+openhook are ignored. To specify an alternative list of filenames,
+pass it as the argument to input(). A single file name is also allowed.
+
+Functions filename(), lineno() return the filename and cumulative line
+number of the line that has just been read; filelineno() returns its
+line number in the current file; isfirstline() returns true iff the
+line just read is the first line of its file; isstdin() returns true
+iff the line was read from sys.stdin. Function nextfile() closes the
+current file so that the next iteration will read the first line from
+the next file (if any); lines not read from the file will not count
+towards the cumulative line count; the filename is not changed until
+after the first line of the next file has been read. Function close()
+closes the sequence.
+
+Before any lines have been read, filename() returns None and both line
+numbers are zero; nextfile() has no effect. After all lines have been
+read, filename() and the line number functions return the values
+pertaining to the last line read; nextfile() has no effect.
+
+All files are opened in text mode by default, you can override this by
+setting the mode parameter to input() or FileInput.__init__().
+If an I/O error occurs during opening or reading a file, the OSError
+exception is raised.
+
+If sys.stdin is used more than once, the second and further use will
+return no lines, except perhaps for interactive use, or if it has been
+explicitly reset (e.g. using sys.stdin.seek(0)).
+
+Empty files are opened and immediately closed; the only time their
+presence in the list of filenames is noticeable at all is when the
+last file opened is empty.
+
+It is possible that the last line of a file doesn't end in a newline
+character; otherwise lines are returned including the trailing
+newline.
+
+Class FileInput is the implementation; its methods filename(),
+lineno(), fileline(), isfirstline(), isstdin(), nextfile() and close()
+correspond to the functions in the module. In addition it has a
+readline() method which returns the next input line, and a
+__getitem__() method which implements the sequence behavior. The
+sequence must be accessed in strictly sequential order; sequence
+access and readline() cannot be mixed.
+
+Optional in-place filtering: if the keyword argument inplace=1 is
+passed to input() or to the FileInput constructor, the file is moved
+to a backup file and standard output is directed to the input file.
+This makes it possible to write a filter that rewrites its input file
+in place. If the keyword argument backup=".<some extension>" is also
+given, it specifies the extension for the backup file, and the backup
+file remains around; by default, the extension is ".bak" and it is
+deleted when the output file is closed. In-place filtering is
+disabled when standard input is read. XXX The current implementation
+does not work for MS-DOS 8+3 filesystems.
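+
+A minimal sketch of such an in-place filter (the file name and the
+substitution are illustrative):
+
+    import fileinput
+    for line in fileinput.input("data.txt", inplace=True, backup=".bak"):
+        print(line.rstrip("\n").replace("old", "new"))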
+""" + +import io +import sys, os +from types import GenericAlias + +__all__ = ["input", "close", "nextfile", "filename", "lineno", "filelineno", + "fileno", "isfirstline", "isstdin", "FileInput", "hook_compressed", + "hook_encoded"] + +_state = None + +def input(files=None, inplace=False, backup="", *, mode="r", openhook=None, + encoding=None, errors=None): + """Return an instance of the FileInput class, which can be iterated. + + The parameters are passed to the constructor of the FileInput class. + The returned instance, in addition to being an iterator, + keeps global state for the functions of this module,. + """ + global _state + if _state and _state._file: + raise RuntimeError("input() already active") + _state = FileInput(files, inplace, backup, mode=mode, openhook=openhook, + encoding=encoding, errors=errors) + return _state + +def close(): + """Close the sequence.""" + global _state + state = _state + _state = None + if state: + state.close() + +def nextfile(): + """ + Close the current file so that the next iteration will read the first + line from the next file (if any); lines not read from the file will + not count towards the cumulative line count. The filename is not + changed until after the first line of the next file has been read. + Before the first line has been read, this function has no effect; + it cannot be used to skip the first file. After the last line of the + last file has been read, this function has no effect. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.nextfile() + +def filename(): + """ + Return the name of the file currently being read. + Before the first line has been read, returns None. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.filename() + +def lineno(): + """ + Return the cumulative line number of the line that has just been read. + Before the first line has been read, returns 0. After the last line + of the last file has been read, returns the line number of that line. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.lineno() + +def filelineno(): + """ + Return the line number in the current file. Before the first line + has been read, returns 0. After the last line of the last file has + been read, returns the line number of that line within the file. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.filelineno() + +def fileno(): + """ + Return the file number of the current file. When no file is currently + opened, returns -1. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.fileno() + +def isfirstline(): + """ + Returns true the line just read is the first line of its file, + otherwise returns false. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.isfirstline() + +def isstdin(): + """ + Returns true if the last line was read from sys.stdin, + otherwise returns false. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.isstdin() + +class FileInput: + """FileInput([files[, inplace[, backup]]], *, mode=None, openhook=None) + + Class FileInput is the implementation of the module; its methods + filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(), + nextfile() and close() correspond to the functions of the same name + in the module. + In addition it has a readline() method which returns the next + input line, and a __getitem__() method which implements the + sequence behavior. 
The sequence must be accessed in strictly + sequential order; random access and readline() cannot be mixed. + """ + + def __init__(self, files=None, inplace=False, backup="", *, + mode="r", openhook=None, encoding=None, errors=None): + if isinstance(files, str): + files = (files,) + elif isinstance(files, os.PathLike): + files = (os.fspath(files), ) + else: + if files is None: + files = sys.argv[1:] + if not files: + files = ('-',) + else: + files = tuple(files) + self._files = files + self._inplace = inplace + self._backup = backup + self._savestdout = None + self._output = None + self._filename = None + self._startlineno = 0 + self._filelineno = 0 + self._file = None + self._isstdin = False + self._backupfilename = None + self._encoding = encoding + self._errors = errors + + # We can not use io.text_encoding() here because old openhook doesn't + # take encoding parameter. + if (sys.flags.warn_default_encoding and + "b" not in mode and encoding is None and openhook is None): + import warnings + warnings.warn("'encoding' argument not specified.", + EncodingWarning, 2) + + # restrict mode argument to reading modes + if mode not in ('r', 'rU', 'U', 'rb'): + raise ValueError("FileInput opening mode must be one of " + "'r', 'rU', 'U' and 'rb'") + if 'U' in mode: + import warnings + warnings.warn("'U' mode is deprecated", + DeprecationWarning, 2) + self._mode = mode + self._write_mode = mode.replace('r', 'w') if 'U' not in mode else 'w' + if openhook: + if inplace: + raise ValueError("FileInput cannot use an opening hook in inplace mode") + if not callable(openhook): + raise ValueError("FileInput openhook must be callable") + self._openhook = openhook + + def __del__(self): + self.close() + + def close(self): + try: + self.nextfile() + finally: + self._files = () + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __iter__(self): + return self + + def __next__(self): + while True: + line = self._readline() + if line: + self._filelineno += 1 + return line + if not self._file: + raise StopIteration + self.nextfile() + # repeat with next file + + def __getitem__(self, i): + import warnings + warnings.warn( + "Support for indexing FileInput objects is deprecated. 
" + "Use iterator protocol instead.", + DeprecationWarning, + stacklevel=2 + ) + if i != self.lineno(): + raise RuntimeError("accessing lines out of order") + try: + return self.__next__() + except StopIteration: + raise IndexError("end of input reached") + + def nextfile(self): + savestdout = self._savestdout + self._savestdout = None + if savestdout: + sys.stdout = savestdout + + output = self._output + self._output = None + try: + if output: + output.close() + finally: + file = self._file + self._file = None + try: + del self._readline # restore FileInput._readline + except AttributeError: + pass + try: + if file and not self._isstdin: + file.close() + finally: + backupfilename = self._backupfilename + self._backupfilename = None + if backupfilename and not self._backup: + try: os.unlink(backupfilename) + except OSError: pass + + self._isstdin = False + + def readline(self): + while True: + line = self._readline() + if line: + self._filelineno += 1 + return line + if not self._file: + return line + self.nextfile() + # repeat with next file + + def _readline(self): + if not self._files: + if 'b' in self._mode: + return b'' + else: + return '' + self._filename = self._files[0] + self._files = self._files[1:] + self._startlineno = self.lineno() + self._filelineno = 0 + self._file = None + self._isstdin = False + self._backupfilename = 0 + + # EncodingWarning is emitted in __init__() already + if "b" not in self._mode: + encoding = self._encoding or "locale" + else: + encoding = None + + if self._filename == '-': + self._filename = '' + if 'b' in self._mode: + self._file = getattr(sys.stdin, 'buffer', sys.stdin) + else: + self._file = sys.stdin + self._isstdin = True + else: + if self._inplace: + self._backupfilename = ( + os.fspath(self._filename) + (self._backup or ".bak")) + try: + os.unlink(self._backupfilename) + except OSError: + pass + # The next few lines may raise OSError + os.rename(self._filename, self._backupfilename) + self._file = open(self._backupfilename, self._mode, + encoding=encoding, errors=self._errors) + try: + perm = os.fstat(self._file.fileno()).st_mode + except OSError: + self._output = open(self._filename, self._write_mode, + encoding=encoding, errors=self._errors) + else: + mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC + if hasattr(os, 'O_BINARY'): + mode |= os.O_BINARY + + fd = os.open(self._filename, mode, perm) + self._output = os.fdopen(fd, self._write_mode, + encoding=encoding, errors=self._errors) + try: + os.chmod(self._filename, perm) + except OSError: + pass + self._savestdout = sys.stdout + sys.stdout = self._output + else: + # This may raise OSError + if self._openhook: + # Custom hooks made previous to Python 3.10 didn't have + # encoding argument + if self._encoding is None: + self._file = self._openhook(self._filename, self._mode) + else: + self._file = self._openhook( + self._filename, self._mode, encoding=self._encoding, errors=self._errors) + else: + self._file = open(self._filename, self._mode, encoding=encoding, errors=self._errors) + self._readline = self._file.readline # hide FileInput._readline + return self._readline() + + def filename(self): + return self._filename + + def lineno(self): + return self._startlineno + self._filelineno + + def filelineno(self): + return self._filelineno + + def fileno(self): + if self._file: + try: + return self._file.fileno() + except ValueError: + return -1 + else: + return -1 + + def isfirstline(self): + return self._filelineno == 1 + + def isstdin(self): + return self._isstdin + + __class_getitem__ = 
classmethod(GenericAlias) + + +def hook_compressed(filename, mode, *, encoding=None, errors=None): + if encoding is None and "b" not in mode: # EncodingWarning is emitted in FileInput() already. + encoding = "locale" + ext = os.path.splitext(filename)[1] + if ext == '.gz': + import gzip + stream = gzip.open(filename, mode) + elif ext == '.bz2': + import bz2 + stream = bz2.BZ2File(filename, mode) + else: + return open(filename, mode, encoding=encoding, errors=errors) + + # gzip and bz2 are binary mode by default. + if "b" not in mode: + stream = io.TextIOWrapper(stream, encoding=encoding, errors=errors) + return stream + + +def hook_encoded(encoding, errors=None): + def openhook(filename, mode): + return open(filename, mode, encoding=encoding, errors=errors) + return openhook + + +def _test(): + import getopt + inplace = False + backup = False + opts, args = getopt.getopt(sys.argv[1:], "ib:") + for o, a in opts: + if o == '-i': inplace = True + if o == '-b': backup = a + for line in input(args, inplace=inplace, backup=backup): + if line[-1:] == '\n': line = line[:-1] + if line[-1:] == '\r': line = line[:-1] + print("%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(), + isfirstline() and "*" or "", line)) + print("%d: %s[%d]" % (lineno(), filename(), filelineno())) + +if __name__ == '__main__': + _test() diff --git a/evalkit_cambrian/lib/python3.10/fnmatch.py b/evalkit_cambrian/lib/python3.10/fnmatch.py new file mode 100644 index 0000000000000000000000000000000000000000..fee59bf73ff264cbe26a9af05a0b247e3397776c --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/fnmatch.py @@ -0,0 +1,199 @@ +"""Filename matching with shell patterns. + +fnmatch(FILENAME, PATTERN) matches according to the local convention. +fnmatchcase(FILENAME, PATTERN) always takes case in account. + +The functions operate by translating the pattern into a regular +expression. They cache the compiled regular expressions for speed. + +The function translate(PATTERN) returns a regular expression +corresponding to PATTERN. (It does not compile it.) +""" +import os +import posixpath +import re +import functools + +__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"] + +# Build a thread-safe incrementing counter to help create unique regexp group +# names across calls. +from itertools import count +_nextgroupnum = count().__next__ +del count + +def fnmatch(name, pat): + """Test whether FILENAME matches PATTERN. + + Patterns are Unix shell style: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + An initial period in FILENAME is not special. + Both FILENAME and PATTERN are first case-normalized + if the operating system requires it. + If you don't want this, use fnmatchcase(FILENAME, PATTERN). + """ + name = os.path.normcase(name) + pat = os.path.normcase(pat) + return fnmatchcase(name, pat) + +@functools.lru_cache(maxsize=256, typed=True) +def _compile_pattern(pat): + if isinstance(pat, bytes): + pat_str = str(pat, 'ISO-8859-1') + res_str = translate(pat_str) + res = bytes(res_str, 'ISO-8859-1') + else: + res = translate(pat) + return re.compile(res).match + +def filter(names, pat): + """Construct a list from those elements of the iterable NAMES that match PAT.""" + result = [] + pat = os.path.normcase(pat) + match = _compile_pattern(pat) + if os.path is posixpath: + # normcase on posix is NOP. Optimize it away from the loop. 
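+        # (Illustrative: filter(['a.py', 'b.txt'], '*.py') takes this branch on
+        #  POSIX and applies the cached match() to each name unchanged, giving
+        #  ['a.py']; case-normalizing platforms use the else branch below and
+        #  normcase each name first.)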
+ for name in names: + if match(name): + result.append(name) + else: + for name in names: + if match(os.path.normcase(name)): + result.append(name) + return result + +def fnmatchcase(name, pat): + """Test whether FILENAME matches PATTERN, including case. + + This is a version of fnmatch() which doesn't case-normalize + its arguments. + """ + match = _compile_pattern(pat) + return match(name) is not None + + +def translate(pat): + """Translate a shell PATTERN to a regular expression. + + There is no way to quote meta-characters. + """ + + STAR = object() + res = [] + add = res.append + i, n = 0, len(pat) + while i < n: + c = pat[i] + i = i+1 + if c == '*': + # compress consecutive `*` into one + if (not res) or res[-1] is not STAR: + add(STAR) + elif c == '?': + add('.') + elif c == '[': + j = i + if j < n and pat[j] == '!': + j = j+1 + if j < n and pat[j] == ']': + j = j+1 + while j < n and pat[j] != ']': + j = j+1 + if j >= n: + add('\\[') + else: + stuff = pat[i:j] + if '-' not in stuff: + stuff = stuff.replace('\\', r'\\') + else: + chunks = [] + k = i+2 if pat[i] == '!' else i+1 + while True: + k = pat.find('-', k, j) + if k < 0: + break + chunks.append(pat[i:k]) + i = k+1 + k = k+3 + chunk = pat[i:j] + if chunk: + chunks.append(chunk) + else: + chunks[-1] += '-' + # Remove empty ranges -- invalid in RE. + for k in range(len(chunks)-1, 0, -1): + if chunks[k-1][-1] > chunks[k][0]: + chunks[k-1] = chunks[k-1][:-1] + chunks[k][1:] + del chunks[k] + # Escape backslashes and hyphens for set difference (--). + # Hyphens that create ranges shouldn't be escaped. + stuff = '-'.join(s.replace('\\', r'\\').replace('-', r'\-') + for s in chunks) + # Escape set operations (&&, ~~ and ||). + stuff = re.sub(r'([&~|])', r'\\\1', stuff) + i = j+1 + if not stuff: + # Empty range: never match. + add('(?!)') + elif stuff == '!': + # Negated empty range: match any character. + add('.') + else: + if stuff[0] == '!': + stuff = '^' + stuff[1:] + elif stuff[0] in ('^', '['): + stuff = '\\' + stuff + add(f'[{stuff}]') + else: + add(re.escape(c)) + assert i == n + + # Deal with STARs. + inp = res + res = [] + add = res.append + i, n = 0, len(inp) + # Fixed pieces at the start? + while i < n and inp[i] is not STAR: + add(inp[i]) + i += 1 + # Now deal with STAR fixed STAR fixed ... + # For an interior `STAR fixed` pairing, we want to do a minimal + # .*? match followed by `fixed`, with no possibility of backtracking. + # We can't spell that directly, but can trick it into working by matching + # .*?fixed + # in a lookahead assertion, save the matched part in a group, then + # consume that group via a backreference. If the overall match fails, + # the lookahead assertion won't try alternatives. So the translation is: + # (?=(?P.*?fixed))(?P=name) + # Group names are created as needed: g0, g1, g2, ... + # The numbers are obtained from _nextgroupnum() to ensure they're unique + # across calls and across threads. This is because people rely on the + # undocumented ability to join multiple translate() results together via + # "|" to build large regexps matching "one of many" shell patterns. 
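+    #
+    # For instance (illustrative only; the actual group number depends on how
+    # many times translate() has already been called in this interpreter):
+    #
+    #     translate('a*b*c')
+    #     -> r'(?s:a(?=(?P<g0>.*?b))(?P=g0).*c)\Z'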
+ while i < n: + assert inp[i] is STAR + i += 1 + if i == n: + add(".*") + break + assert inp[i] is not STAR + fixed = [] + while i < n and inp[i] is not STAR: + fixed.append(inp[i]) + i += 1 + fixed = "".join(fixed) + if i == n: + add(".*") + add(fixed) + else: + groupnum = _nextgroupnum() + add(f"(?=(?P.*?{fixed}))(?P=g{groupnum})") + assert i == n + res = "".join(res) + return fr'(?s:{res})\Z' diff --git a/evalkit_cambrian/lib/python3.10/functools.py b/evalkit_cambrian/lib/python3.10/functools.py new file mode 100644 index 0000000000000000000000000000000000000000..305ceb450a71c4a7bb8112a353cc1c94cd8f7c59 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/functools.py @@ -0,0 +1,992 @@ +"""functools.py - Tools for working with functions and callable objects +""" +# Python module wrapper for _functools C module +# to allow utilities written in Python to be added +# to the functools module. +# Written by Nick Coghlan , +# Raymond Hettinger , +# and Łukasz Langa . +# Copyright (C) 2006-2013 Python Software Foundation. +# See C source code for _functools credits/copyright + +__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES', + 'total_ordering', 'cache', 'cmp_to_key', 'lru_cache', 'reduce', + 'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod', + 'cached_property'] + +from abc import get_cache_token +from collections import namedtuple +# import types, weakref # Deferred to single_dispatch() +from reprlib import recursive_repr +from _thread import RLock +from types import GenericAlias + + +################################################################################ +### update_wrapper() and wraps() decorator +################################################################################ + +# update_wrapper() and wraps() are tools to help write +# wrapper functions that can handle naive introspection + +WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__', + '__annotations__') +WRAPPER_UPDATES = ('__dict__',) +def update_wrapper(wrapper, + wrapped, + assigned = WRAPPER_ASSIGNMENTS, + updated = WRAPPER_UPDATES): + """Update a wrapper function to look like the wrapped function + + wrapper is the function to be updated + wrapped is the original function + assigned is a tuple naming the attributes assigned directly + from the wrapped function to the wrapper function (defaults to + functools.WRAPPER_ASSIGNMENTS) + updated is a tuple naming the attributes of the wrapper that + are updated with the corresponding attribute from the wrapped + function (defaults to functools.WRAPPER_UPDATES) + """ + for attr in assigned: + try: + value = getattr(wrapped, attr) + except AttributeError: + pass + else: + setattr(wrapper, attr, value) + for attr in updated: + getattr(wrapper, attr).update(getattr(wrapped, attr, {})) + # Issue #17482: set __wrapped__ last so we don't inadvertently copy it + # from the wrapped function when updating __dict__ + wrapper.__wrapped__ = wrapped + # Return the wrapper so this can be used as a decorator via partial() + return wrapper + +def wraps(wrapped, + assigned = WRAPPER_ASSIGNMENTS, + updated = WRAPPER_UPDATES): + """Decorator factory to apply update_wrapper() to a wrapper function + + Returns a decorator that invokes update_wrapper() with the decorated + function as the wrapper argument and the arguments to wraps() as the + remaining arguments. Default arguments are as for update_wrapper(). + This is a convenience function to simplify applying partial() to + update_wrapper(). 
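+
+    A minimal illustrative use (the decorator and function names below are
+    invented purely for this example):
+
+        >>> def my_decorator(f):
+        ...     @wraps(f)
+        ...     def wrapper(*args, **kwds):
+        ...         return f(*args, **kwds)
+        ...     return wrapper
+        >>> @my_decorator
+        ... def example():
+        ...     "Docstring"
+        >>> example.__name__, example.__doc__
+        ('example', 'Docstring')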
+ """ + return partial(update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) + + +################################################################################ +### total_ordering class decorator +################################################################################ + +# The total ordering functions all invoke the root magic method directly +# rather than using the corresponding operator. This avoids possible +# infinite recursion that could occur when the operator dispatch logic +# detects a NotImplemented result and then calls a reflected method. + +def _gt_from_lt(self, other, NotImplemented=NotImplemented): + 'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).' + op_result = type(self).__lt__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result and self != other + +def _le_from_lt(self, other, NotImplemented=NotImplemented): + 'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).' + op_result = type(self).__lt__(self, other) + if op_result is NotImplemented: + return op_result + return op_result or self == other + +def _ge_from_lt(self, other, NotImplemented=NotImplemented): + 'Return a >= b. Computed by @total_ordering from (not a < b).' + op_result = type(self).__lt__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result + +def _ge_from_le(self, other, NotImplemented=NotImplemented): + 'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).' + op_result = type(self).__le__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result or self == other + +def _lt_from_le(self, other, NotImplemented=NotImplemented): + 'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).' + op_result = type(self).__le__(self, other) + if op_result is NotImplemented: + return op_result + return op_result and self != other + +def _gt_from_le(self, other, NotImplemented=NotImplemented): + 'Return a > b. Computed by @total_ordering from (not a <= b).' + op_result = type(self).__le__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result + +def _lt_from_gt(self, other, NotImplemented=NotImplemented): + 'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).' + op_result = type(self).__gt__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result and self != other + +def _ge_from_gt(self, other, NotImplemented=NotImplemented): + 'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).' + op_result = type(self).__gt__(self, other) + if op_result is NotImplemented: + return op_result + return op_result or self == other + +def _le_from_gt(self, other, NotImplemented=NotImplemented): + 'Return a <= b. Computed by @total_ordering from (not a > b).' + op_result = type(self).__gt__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result + +def _le_from_ge(self, other, NotImplemented=NotImplemented): + 'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).' + op_result = type(self).__ge__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result or self == other + +def _gt_from_ge(self, other, NotImplemented=NotImplemented): + 'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).' 
+ op_result = type(self).__ge__(self, other) + if op_result is NotImplemented: + return op_result + return op_result and self != other + +def _lt_from_ge(self, other, NotImplemented=NotImplemented): + 'Return a < b. Computed by @total_ordering from (not a >= b).' + op_result = type(self).__ge__(self, other) + if op_result is NotImplemented: + return op_result + return not op_result + +_convert = { + '__lt__': [('__gt__', _gt_from_lt), + ('__le__', _le_from_lt), + ('__ge__', _ge_from_lt)], + '__le__': [('__ge__', _ge_from_le), + ('__lt__', _lt_from_le), + ('__gt__', _gt_from_le)], + '__gt__': [('__lt__', _lt_from_gt), + ('__ge__', _ge_from_gt), + ('__le__', _le_from_gt)], + '__ge__': [('__le__', _le_from_ge), + ('__gt__', _gt_from_ge), + ('__lt__', _lt_from_ge)] +} + +def total_ordering(cls): + """Class decorator that fills in missing ordering methods""" + # Find user-defined comparisons (not those inherited from object). + roots = {op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)} + if not roots: + raise ValueError('must define at least one ordering operation: < > <= >=') + root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ + for opname, opfunc in _convert[root]: + if opname not in roots: + opfunc.__name__ = opname + setattr(cls, opname, opfunc) + return cls + + +################################################################################ +### cmp_to_key() function converter +################################################################################ + +def cmp_to_key(mycmp): + """Convert a cmp= function into a key= function""" + class K(object): + __slots__ = ['obj'] + def __init__(self, obj): + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) < 0 + def __gt__(self, other): + return mycmp(self.obj, other.obj) > 0 + def __eq__(self, other): + return mycmp(self.obj, other.obj) == 0 + def __le__(self, other): + return mycmp(self.obj, other.obj) <= 0 + def __ge__(self, other): + return mycmp(self.obj, other.obj) >= 0 + __hash__ = None + return K + +try: + from _functools import cmp_to_key +except ImportError: + pass + + +################################################################################ +### reduce() sequence to a single item +################################################################################ + +_initial_missing = object() + +def reduce(function, sequence, initial=_initial_missing): + """ + reduce(function, iterable[, initial]) -> value + + Apply a function of two arguments cumulatively to the items of a sequence + or iterable, from left to right, so as to reduce the iterable to a single + value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates + ((((1+2)+3)+4)+5). If initial is present, it is placed before the items + of the iterable in the calculation, and serves as a default when the + iterable is empty. 
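+
+    Two illustrative calls (shown here only for exposition):
+
+        >>> reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])
+        15
+        >>> reduce(lambda x, y: x + y, [], 0)
+        0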
+ """ + + it = iter(sequence) + + if initial is _initial_missing: + try: + value = next(it) + except StopIteration: + raise TypeError( + "reduce() of empty iterable with no initial value") from None + else: + value = initial + + for element in it: + value = function(value, element) + + return value + +try: + from _functools import reduce +except ImportError: + pass + + +################################################################################ +### partial() argument application +################################################################################ + +# Purely functional, no descriptor behaviour +class partial: + """New function with partial application of the given arguments + and keywords. + """ + + __slots__ = "func", "args", "keywords", "__dict__", "__weakref__" + + def __new__(cls, func, /, *args, **keywords): + if not callable(func): + raise TypeError("the first argument must be callable") + + if hasattr(func, "func"): + args = func.args + args + keywords = {**func.keywords, **keywords} + func = func.func + + self = super(partial, cls).__new__(cls) + + self.func = func + self.args = args + self.keywords = keywords + return self + + def __call__(self, /, *args, **keywords): + keywords = {**self.keywords, **keywords} + return self.func(*self.args, *args, **keywords) + + @recursive_repr() + def __repr__(self): + qualname = type(self).__qualname__ + args = [repr(self.func)] + args.extend(repr(x) for x in self.args) + args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items()) + if type(self).__module__ == "functools": + return f"functools.{qualname}({', '.join(args)})" + return f"{qualname}({', '.join(args)})" + + def __reduce__(self): + return type(self), (self.func,), (self.func, self.args, + self.keywords or None, self.__dict__ or None) + + def __setstate__(self, state): + if not isinstance(state, tuple): + raise TypeError("argument to __setstate__ must be a tuple") + if len(state) != 4: + raise TypeError(f"expected 4 items in state, got {len(state)}") + func, args, kwds, namespace = state + if (not callable(func) or not isinstance(args, tuple) or + (kwds is not None and not isinstance(kwds, dict)) or + (namespace is not None and not isinstance(namespace, dict))): + raise TypeError("invalid partial state") + + args = tuple(args) # just in case it's a subclass + if kwds is None: + kwds = {} + elif type(kwds) is not dict: # XXX does it need to be *exactly* dict? + kwds = dict(kwds) + if namespace is None: + namespace = {} + + self.__dict__ = namespace + self.func = func + self.args = args + self.keywords = kwds + +try: + from _functools import partial +except ImportError: + pass + +# Descriptor version +class partialmethod(object): + """Method descriptor with partial application of the given arguments + and keywords. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. 
+ """ + + def __init__(self, func, /, *args, **keywords): + if not callable(func) and not hasattr(func, "__get__"): + raise TypeError("{!r} is not callable or a descriptor" + .format(func)) + + # func could be a descriptor like classmethod which isn't callable, + # so we can't inherit from partial (it verifies func is callable) + if isinstance(func, partialmethod): + # flattening is mandatory in order to place cls/self before all + # other arguments + # it's also more efficient since only one function will be called + self.func = func.func + self.args = func.args + args + self.keywords = {**func.keywords, **keywords} + else: + self.func = func + self.args = args + self.keywords = keywords + + def __repr__(self): + args = ", ".join(map(repr, self.args)) + keywords = ", ".join("{}={!r}".format(k, v) + for k, v in self.keywords.items()) + format_string = "{module}.{cls}({func}, {args}, {keywords})" + return format_string.format(module=self.__class__.__module__, + cls=self.__class__.__qualname__, + func=self.func, + args=args, + keywords=keywords) + + def _make_unbound_method(self): + def _method(cls_or_self, /, *args, **keywords): + keywords = {**self.keywords, **keywords} + return self.func(cls_or_self, *self.args, *args, **keywords) + _method.__isabstractmethod__ = self.__isabstractmethod__ + _method._partialmethod = self + return _method + + def __get__(self, obj, cls=None): + get = getattr(self.func, "__get__", None) + result = None + if get is not None: + new_func = get(obj, cls) + if new_func is not self.func: + # Assume __get__ returning something new indicates the + # creation of an appropriate callable + result = partial(new_func, *self.args, **self.keywords) + try: + result.__self__ = new_func.__self__ + except AttributeError: + pass + if result is None: + # If the underlying descriptor didn't do anything, treat this + # like an instance method + result = self._make_unbound_method().__get__(obj, cls) + return result + + @property + def __isabstractmethod__(self): + return getattr(self.func, "__isabstractmethod__", False) + + __class_getitem__ = classmethod(GenericAlias) + + +# Helper functions + +def _unwrap_partial(func): + while isinstance(func, partial): + func = func.func + return func + +################################################################################ +### LRU Cache function decorator +################################################################################ + +_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) + +class _HashedSeq(list): + """ This class guarantees that hash() will be called no more than once + per element. This is important because the lru_cache() will hash + the key multiple times on a cache miss. + + """ + + __slots__ = 'hashvalue' + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + +def _make_key(args, kwds, typed, + kwd_mark = (object(),), + fasttypes = {int, str}, + tuple=tuple, type=type, len=len): + """Make a cache key from optionally typed positional and keyword arguments + + The key is constructed in a way that is flat as possible rather than + as a nested structure that would take more memory. + + If there is only a single argument and its data type is known to cache + its hash value, then that argument is returned without a wrapper. This + saves space and improves lookup speed. + + """ + # All of code below relies on kwds preserving the order input by the user. + # Formerly, we sorted() the kwds before looping. 
The new way is *much* + # faster; however, it means that f(x=1, y=2) will now be treated as a + # distinct call from f(y=2, x=1) which will be cached separately. + key = args + if kwds: + key += kwd_mark + for item in kwds.items(): + key += item + if typed: + key += tuple(type(v) for v in args) + if kwds: + key += tuple(type(v) for v in kwds.values()) + elif len(key) == 1 and type(key[0]) in fasttypes: + return key[0] + return _HashedSeq(key) + +def lru_cache(maxsize=128, typed=False): + """Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) + with f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) + + """ + + # Users should only access the lru_cache through its public API: + # cache_info, cache_clear, and f.__wrapped__ + # The internals of the lru_cache are encapsulated for thread safety and + # to allow the implementation to change (including a possible C version). + + if isinstance(maxsize, int): + # Negative maxsize is treated as 0 + if maxsize < 0: + maxsize = 0 + elif callable(maxsize) and isinstance(typed, bool): + # The user_function was passed in directly via the maxsize argument + user_function, maxsize = maxsize, 128 + wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) + wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed} + return update_wrapper(wrapper, user_function) + elif maxsize is not None: + raise TypeError( + 'Expected first argument to be an integer, a callable, or None') + + def decorating_function(user_function): + wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) + wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed} + return update_wrapper(wrapper, user_function) + + return decorating_function + +def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo): + # Constants shared by all lru cache instances: + sentinel = object() # unique object used to signal cache misses + make_key = _make_key # build a key from the function arguments + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields + + cache = {} + hits = misses = 0 + full = False + cache_get = cache.get # bound method to lookup a key or return None + cache_len = cache.__len__ # get cache size without calling len() + lock = RLock() # because linkedlist updates aren't threadsafe + root = [] # root of the circular doubly linked list + root[:] = [root, root, None, None] # initialize by pointing to self + + if maxsize == 0: + + def wrapper(*args, **kwds): + # No caching -- just a statistics update + nonlocal misses + misses += 1 + result = user_function(*args, **kwds) + return result + + elif maxsize is None: + + def wrapper(*args, **kwds): + # Simple caching without ordering or size limit + nonlocal hits, misses + key = make_key(args, kwds, typed) + result = cache_get(key, sentinel) + if result is not sentinel: + hits += 1 + return result + misses += 1 + result = user_function(*args, **kwds) + cache[key] = result + return result + + else: + + def wrapper(*args, **kwds): + 
# Size limited caching that tracks accesses by recency + nonlocal root, hits, misses, full + key = make_key(args, kwds, typed) + with lock: + link = cache_get(key) + if link is not None: + # Move the link to the front of the circular queue + link_prev, link_next, _key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + hits += 1 + return result + misses += 1 + result = user_function(*args, **kwds) + with lock: + if key in cache: + # Getting here means that this same key was added to the + # cache while the lock was released. Since the link + # update is already done, we need only return the + # computed result and update the count of misses. + pass + elif full: + # Use the old root to store the new key and result. + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + # Empty the oldest link and make it the new root. + # Keep a reference to the old key and old result to + # prevent their ref counts from going to zero during the + # update. That will prevent potentially arbitrary object + # clean-up code (i.e. __del__) from running while we're + # still adjusting the links. + root = oldroot[NEXT] + oldkey = root[KEY] + oldresult = root[RESULT] + root[KEY] = root[RESULT] = None + # Now update the cache dictionary. + del cache[oldkey] + # Save the potentially reentrant cache[key] assignment + # for last, after the root and links have been put in + # a consistent state. + cache[key] = oldroot + else: + # Put result in a new link at the front of the queue. + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + # Use the cache_len bound method instead of the len() function + # which could potentially be wrapped in an lru_cache itself. + full = (cache_len() >= maxsize) + return result + + def cache_info(): + """Report cache statistics""" + with lock: + return _CacheInfo(hits, misses, maxsize, cache_len()) + + def cache_clear(): + """Clear the cache and cache statistics""" + nonlocal hits, misses, full + with lock: + cache.clear() + root[:] = [root, root, None, None] + hits = misses = 0 + full = False + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return wrapper + +try: + from _functools import _lru_cache_wrapper +except ImportError: + pass + + +################################################################################ +### cache -- simplified access to the infinity cache +################################################################################ + +def cache(user_function, /): + 'Simple lightweight unbounded cache. Sometimes called "memoize".' + return lru_cache(maxsize=None)(user_function) + + +################################################################################ +### singledispatch() - single-dispatch generic function decorator +################################################################################ + +def _c3_merge(sequences): + """Merges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from https://www.python.org/download/releases/2.3/mro/. 
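+
+    A tiny illustration (plain strings stand in for classes here):
+
+        >>> _c3_merge([['C', 'A', 'O'], ['C', 'B', 'O'], ['A', 'B']])
+        ['C', 'A', 'B', 'O']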
+ + """ + result = [] + while True: + sequences = [s for s in sequences if s] # purge empty sequences + if not sequences: + return result + for s1 in sequences: # find merge candidates among seq heads + candidate = s1[0] + for s2 in sequences: + if candidate in s2[1:]: + candidate = None + break # reject the current head, it appears later + else: + break + if candidate is None: + raise RuntimeError("Inconsistent hierarchy") + result.append(candidate) + # remove the chosen candidate + for seq in sequences: + if seq[0] == candidate: + del seq[0] + +def _c3_mro(cls, abcs=None): + """Computes the method resolution order using extended C3 linearization. + + If no *abcs* are given, the algorithm works exactly like the built-in C3 + linearization used for method resolution. + + If given, *abcs* is a list of abstract base classes that should be inserted + into the resulting MRO. Unrelated ABCs are ignored and don't end up in the + result. The algorithm inserts ABCs where their functionality is introduced, + i.e. issubclass(cls, abc) returns True for the class itself but returns + False for all its direct base classes. Implicit ABCs for a given class + (either registered or inferred from the presence of a special method like + __len__) are inserted directly after the last ABC explicitly listed in the + MRO of said class. If two implicit ABCs end up next to each other in the + resulting MRO, their ordering depends on the order of types in *abcs*. + + """ + for i, base in enumerate(reversed(cls.__bases__)): + if hasattr(base, '__abstractmethods__'): + boundary = len(cls.__bases__) - i + break # Bases up to the last explicit ABC are considered first. + else: + boundary = 0 + abcs = list(abcs) if abcs else [] + explicit_bases = list(cls.__bases__[:boundary]) + abstract_bases = [] + other_bases = list(cls.__bases__[boundary:]) + for base in abcs: + if issubclass(cls, base) and not any( + issubclass(b, base) for b in cls.__bases__ + ): + # If *cls* is the class that introduces behaviour described by + # an ABC *base*, insert said ABC to its MRO. + abstract_bases.append(base) + for base in abstract_bases: + abcs.remove(base) + explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] + abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] + other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] + return _c3_merge( + [[cls]] + + explicit_c3_mros + abstract_c3_mros + other_c3_mros + + [explicit_bases] + [abstract_bases] + [other_bases] + ) + +def _compose_mro(cls, types): + """Calculates the method resolution order for a given class *cls*. + + Includes relevant abstract base classes (with their respective bases) from + the *types* iterable. Uses a modified C3 linearization algorithm. + + """ + bases = set(cls.__mro__) + # Remove entries which are already present in the __mro__ or unrelated. + def is_related(typ): + return (typ not in bases and hasattr(typ, '__mro__') + and not isinstance(typ, GenericAlias) + and issubclass(cls, typ)) + types = [n for n in types if is_related(n)] + # Remove entries which are strict bases of other entries (they will end up + # in the MRO anyway. + def is_strict_base(typ): + for other in types: + if typ != other and typ in other.__mro__: + return True + return False + types = [n for n in types if not is_strict_base(n)] + # Subclasses of the ABCs in *types* which are also implemented by + # *cls* can be used to stabilize ABC ordering. 
+ type_set = set(types) + mro = [] + for typ in types: + found = [] + for sub in typ.__subclasses__(): + if sub not in bases and issubclass(cls, sub): + found.append([s for s in sub.__mro__ if s in type_set]) + if not found: + mro.append(typ) + continue + # Favor subclasses with the biggest number of useful bases + found.sort(key=len, reverse=True) + for sub in found: + for subcls in sub: + if subcls not in mro: + mro.append(subcls) + return _c3_mro(cls, abcs=mro) + +def _find_impl(cls, registry): + """Returns the best matching implementation from *registry* for type *cls*. + + Where there is no registered implementation for a specific type, its method + resolution order is used to find a more generic implementation. + + Note: if *registry* does not contain an implementation for the base + *object* type, this function may return None. + + """ + mro = _compose_mro(cls, registry.keys()) + match = None + for t in mro: + if match is not None: + # If *match* is an implicit ABC but there is another unrelated, + # equally matching implicit ABC, refuse the temptation to guess. + if (t in registry and t not in cls.__mro__ + and match not in cls.__mro__ + and not issubclass(match, t)): + raise RuntimeError("Ambiguous dispatch: {} or {}".format( + match, t)) + break + if t in registry: + match = t + return registry.get(match) + +def singledispatch(func): + """Single-dispatch generic function decorator. + + Transforms a function into a generic function, which can have different + behaviours depending upon the type of its first argument. The decorated + function acts as the default implementation, and additional + implementations can be registered using the register() attribute of the + generic function. + """ + # There are many programs that use functools without singledispatch, so we + # trade-off making singledispatch marginally slower for the benefit of + # making start-up of such applications slightly faster. + import types, weakref + + registry = {} + dispatch_cache = weakref.WeakKeyDictionary() + cache_token = None + + def dispatch(cls): + """generic_func.dispatch(cls) -> + + Runs the dispatch algorithm to return the best available implementation + for the given *cls* registered on *generic_func*. + + """ + nonlocal cache_token + if cache_token is not None: + current_token = get_cache_token() + if cache_token != current_token: + dispatch_cache.clear() + cache_token = current_token + try: + impl = dispatch_cache[cls] + except KeyError: + try: + impl = registry[cls] + except KeyError: + impl = _find_impl(cls, registry) + dispatch_cache[cls] = impl + return impl + + def _is_valid_dispatch_type(cls): + return isinstance(cls, type) and not isinstance(cls, GenericAlias) + + def register(cls, func=None): + """generic_func.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_func*. + + """ + nonlocal cache_token + if _is_valid_dispatch_type(cls): + if func is None: + return lambda f: register(cls, f) + else: + if func is not None: + raise TypeError( + f"Invalid first argument to `register()`. " + f"{cls!r} is not a class." + ) + ann = getattr(cls, '__annotations__', {}) + if not ann: + raise TypeError( + f"Invalid first argument to `register()`: {cls!r}. " + f"Use either `@register(some_class)` or plain `@register` " + f"on an annotated function." 
+ ) + func = cls + + # only import typing if annotation parsing is necessary + from typing import get_type_hints + argname, cls = next(iter(get_type_hints(func).items())) + if not _is_valid_dispatch_type(cls): + raise TypeError( + f"Invalid annotation for {argname!r}. " + f"{cls!r} is not a class." + ) + + registry[cls] = func + if cache_token is None and hasattr(cls, '__abstractmethods__'): + cache_token = get_cache_token() + dispatch_cache.clear() + return func + + def wrapper(*args, **kw): + if not args: + raise TypeError(f'{funcname} requires at least ' + '1 positional argument') + + return dispatch(args[0].__class__)(*args, **kw) + + funcname = getattr(func, '__name__', 'singledispatch function') + registry[object] = func + wrapper.register = register + wrapper.dispatch = dispatch + wrapper.registry = types.MappingProxyType(registry) + wrapper._clear_cache = dispatch_cache.clear + update_wrapper(wrapper, func) + return wrapper + + +# Descriptor version +class singledispatchmethod: + """Single-dispatch generic method descriptor. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. + """ + + def __init__(self, func): + if not callable(func) and not hasattr(func, "__get__"): + raise TypeError(f"{func!r} is not callable or a descriptor") + + self.dispatcher = singledispatch(func) + self.func = func + + def register(self, cls, method=None): + """generic_method.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_method*. + """ + return self.dispatcher.register(cls, func=method) + + def __get__(self, obj, cls=None): + def _method(*args, **kwargs): + method = self.dispatcher.dispatch(args[0].__class__) + return method.__get__(obj, cls)(*args, **kwargs) + + _method.__isabstractmethod__ = self.__isabstractmethod__ + _method.register = self.register + update_wrapper(_method, self.func) + return _method + + @property + def __isabstractmethod__(self): + return getattr(self.func, '__isabstractmethod__', False) + + +################################################################################ +### cached_property() - computed once per instance, cached as attribute +################################################################################ + +_NOT_FOUND = object() + + +class cached_property: + def __init__(self, func): + self.func = func + self.attrname = None + self.__doc__ = func.__doc__ + self.lock = RLock() + + def __set_name__(self, owner, name): + if self.attrname is None: + self.attrname = name + elif name != self.attrname: + raise TypeError( + "Cannot assign the same cached_property to two different names " + f"({self.attrname!r} and {name!r})." + ) + + def __get__(self, instance, owner=None): + if instance is None: + return self + if self.attrname is None: + raise TypeError( + "Cannot use cached_property instance without calling __set_name__ on it.") + try: + cache = instance.__dict__ + except AttributeError: # not all objects have __dict__ (e.g. class defines slots) + msg = ( + f"No '__dict__' attribute on {type(instance).__name__!r} " + f"instance to cache {self.attrname!r} property." 
+ ) + raise TypeError(msg) from None + val = cache.get(self.attrname, _NOT_FOUND) + if val is _NOT_FOUND: + with self.lock: + # check if another thread filled cache while we awaited lock + val = cache.get(self.attrname, _NOT_FOUND) + if val is _NOT_FOUND: + val = self.func(instance) + try: + cache[self.attrname] = val + except TypeError: + msg = ( + f"The '__dict__' attribute on {type(instance).__name__!r} instance " + f"does not support item assignment for caching {self.attrname!r} property." + ) + raise TypeError(msg) from None + return val + + __class_getitem__ = classmethod(GenericAlias) diff --git a/evalkit_cambrian/lib/python3.10/genericpath.py b/evalkit_cambrian/lib/python3.10/genericpath.py new file mode 100644 index 0000000000000000000000000000000000000000..ce36451a3af01cba7628ad7893183dd19283761d --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/genericpath.py @@ -0,0 +1,155 @@ +""" +Path operations common to more than one OS +Do not use directly. The OS specific modules import the appropriate +functions from this module themselves. +""" +import os +import stat + +__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', + 'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile', + 'samestat'] + + +# Does a path exist? +# This is false for dangling symbolic links on systems that support them. +def exists(path): + """Test whether a path exists. Returns False for broken symbolic links""" + try: + os.stat(path) + except (OSError, ValueError): + return False + return True + + +# This follows symbolic links, so both islink() and isdir() can be true +# for the same path on systems that support symlinks +def isfile(path): + """Test whether a path is a regular file""" + try: + st = os.stat(path) + except (OSError, ValueError): + return False + return stat.S_ISREG(st.st_mode) + + +# Is a path a directory? +# This follows symbolic links, so both islink() and isdir() +# can be true for the same path on systems that support symlinks +def isdir(s): + """Return true if the pathname refers to an existing directory.""" + try: + st = os.stat(s) + except (OSError, ValueError): + return False + return stat.S_ISDIR(st.st_mode) + + +def getsize(filename): + """Return the size of a file, reported by os.stat().""" + return os.stat(filename).st_size + + +def getmtime(filename): + """Return the last modification time of a file, reported by os.stat().""" + return os.stat(filename).st_mtime + + +def getatime(filename): + """Return the last access time of a file, reported by os.stat().""" + return os.stat(filename).st_atime + + +def getctime(filename): + """Return the metadata change time of a file, reported by os.stat().""" + return os.stat(filename).st_ctime + + +# Return the longest prefix of all list elements. +def commonprefix(m): + "Given a list of pathnames, returns the longest common leading component" + if not m: return '' + # Some people pass in a list of pathname parts to operate in an OS-agnostic + # fashion; don't try to translate in that case as that's an abuse of the + # API and they are already doing what they need to be OS-agnostic and so + # they most likely won't be using an os.PathLike object in the sublists. + if not isinstance(m[0], (list, tuple)): + m = tuple(map(os.fspath, m)) + s1 = min(m) + s2 = max(m) + for i, c in enumerate(s1): + if c != s2[i]: + return s1[:i] + return s1 + +# Are two stat buffers (obtained from stat, fstat or lstat) +# describing the same file? 
+def samestat(s1, s2): + """Test whether two stat buffers reference the same file""" + return (s1.st_ino == s2.st_ino and + s1.st_dev == s2.st_dev) + + +# Are two filenames really pointing to the same file? +def samefile(f1, f2): + """Test whether two pathnames reference the same actual file or directory + + This is determined by the device number and i-node number and + raises an exception if an os.stat() call on either pathname fails. + """ + s1 = os.stat(f1) + s2 = os.stat(f2) + return samestat(s1, s2) + + +# Are two open files really referencing the same file? +# (Not necessarily the same file descriptor!) +def sameopenfile(fp1, fp2): + """Test whether two open file objects reference the same file""" + s1 = os.fstat(fp1) + s2 = os.fstat(fp2) + return samestat(s1, s2) + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +# Generic implementation of splitext, to be parametrized with +# the separators +def _splitext(p, sep, altsep, extsep): + """Split the extension from a pathname. + + Extension is everything from the last dot to the end, ignoring + leading dots. Returns "(root, ext)"; ext may be empty.""" + # NOTE: This code must work for text and bytes strings. + + sepIndex = p.rfind(sep) + if altsep: + altsepIndex = p.rfind(altsep) + sepIndex = max(sepIndex, altsepIndex) + + dotIndex = p.rfind(extsep) + if dotIndex > sepIndex: + # skip all leading dots + filenameIndex = sepIndex + 1 + while filenameIndex < dotIndex: + if p[filenameIndex:filenameIndex+1] != extsep: + return p[:dotIndex], p[dotIndex:] + filenameIndex += 1 + + return p, p[:0] + +def _check_arg_types(funcname, *args): + hasstr = hasbytes = False + for s in args: + if isinstance(s, str): + hasstr = True + elif isinstance(s, bytes): + hasbytes = True + else: + raise TypeError(f'{funcname}() argument must be str, bytes, or ' + f'os.PathLike object, not {s.__class__.__name__!r}') from None + if hasstr and hasbytes: + raise TypeError("Can't mix strings and bytes in path components") from None diff --git a/evalkit_cambrian/lib/python3.10/hashlib.py b/evalkit_cambrian/lib/python3.10/hashlib.py new file mode 100644 index 0000000000000000000000000000000000000000..21a73f3bf6cb629625715cfd69e82ea45c74f93a --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/hashlib.py @@ -0,0 +1,269 @@ +#. Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org) +# Licensed to PSF under a Contributor Agreement. +# + +__doc__ = """hashlib module - A common interface to many hash functions. + +new(name, data=b'', **kwargs) - returns a new hash object implementing the + given hash function; initializing the hash + using the given binary data. + +Named constructor functions are also available, these are faster +than using new(name): + +md5(), sha1(), sha224(), sha256(), sha384(), sha512(), blake2b(), blake2s(), +sha3_224, sha3_256, sha3_384, sha3_512, shake_128, and shake_256. + +More algorithms may be available on your platform but the above are guaranteed +to exist. See the algorithms_guaranteed and algorithms_available attributes +to find out what algorithm names can be passed to new(). + +NOTE: If you want the adler32 or crc32 hash functions they are available in +the zlib module. + +Choose your hash function wisely. Some have known collision weaknesses. +sha384 and sha512 will be slow on 32 bit platforms. 
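+
+For illustration, new(name) and the corresponding named constructor produce
+identical digests; the named form just skips the per-call lookup:
+
+    >>> import hashlib
+    >>> hashlib.new('sha256', b'abc').digest() == hashlib.sha256(b'abc').digest()
+    True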
+ +Hash objects have these methods: + - update(data): Update the hash object with the bytes in data. Repeated calls + are equivalent to a single call with the concatenation of all + the arguments. + - digest(): Return the digest of the bytes passed to the update() method + so far as a bytes object. + - hexdigest(): Like digest() except the digest is returned as a string + of double length, containing only hexadecimal digits. + - copy(): Return a copy (clone) of the hash object. This can be used to + efficiently compute the digests of datas that share a common + initial substring. + +For example, to obtain the digest of the byte string 'Nobody inspects the +spammish repetition': + + >>> import hashlib + >>> m = hashlib.md5() + >>> m.update(b"Nobody inspects") + >>> m.update(b" the spammish repetition") + >>> m.digest() + b'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9' + +More condensed: + + >>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest() + 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' + +""" + +# This tuple and __get_builtin_constructor() must be modified if a new +# always available algorithm is added. +__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', + 'blake2b', 'blake2s', + 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', + 'shake_128', 'shake_256') + + +algorithms_guaranteed = set(__always_supported) +algorithms_available = set(__always_supported) + +__all__ = __always_supported + ('new', 'algorithms_guaranteed', + 'algorithms_available', 'pbkdf2_hmac') + + +__builtin_constructor_cache = {} + +# Prefer our blake2 implementation +# OpenSSL 1.1.0 comes with a limited implementation of blake2b/s. The OpenSSL +# implementations neither support keyed blake2 (blake2 MAC) nor advanced +# features like salt, personalization, or tree hashing. OpenSSL hash-only +# variants are available as 'blake2b512' and 'blake2s256', though. +__block_openssl_constructor = { + 'blake2b', 'blake2s', +} + +def __get_builtin_constructor(name): + cache = __builtin_constructor_cache + constructor = cache.get(name) + if constructor is not None: + return constructor + try: + if name in {'SHA1', 'sha1'}: + import _sha1 + cache['SHA1'] = cache['sha1'] = _sha1.sha1 + elif name in {'MD5', 'md5'}: + import _md5 + cache['MD5'] = cache['md5'] = _md5.md5 + elif name in {'SHA256', 'sha256', 'SHA224', 'sha224'}: + import _sha256 + cache['SHA224'] = cache['sha224'] = _sha256.sha224 + cache['SHA256'] = cache['sha256'] = _sha256.sha256 + elif name in {'SHA512', 'sha512', 'SHA384', 'sha384'}: + import _sha512 + cache['SHA384'] = cache['sha384'] = _sha512.sha384 + cache['SHA512'] = cache['sha512'] = _sha512.sha512 + elif name in {'blake2b', 'blake2s'}: + import _blake2 + cache['blake2b'] = _blake2.blake2b + cache['blake2s'] = _blake2.blake2s + elif name in {'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512'}: + import _sha3 + cache['sha3_224'] = _sha3.sha3_224 + cache['sha3_256'] = _sha3.sha3_256 + cache['sha3_384'] = _sha3.sha3_384 + cache['sha3_512'] = _sha3.sha3_512 + elif name in {'shake_128', 'shake_256'}: + import _sha3 + cache['shake_128'] = _sha3.shake_128 + cache['shake_256'] = _sha3.shake_256 + except ImportError: + pass # no extension module, this hash is unsupported. 
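+    # (Illustrative: a successful import above populates the cache for every
+    #  spelling it handles at once, e.g. both 'SHA256' and 'sha256', so the
+    #  retry lookup below normally succeeds; only genuinely unknown or
+    #  unavailable names fall through to the ValueError.)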
+ + constructor = cache.get(name) + if constructor is not None: + return constructor + + raise ValueError('unsupported hash type ' + name) + + +def __get_openssl_constructor(name): + if name in __block_openssl_constructor: + # Prefer our builtin blake2 implementation. + return __get_builtin_constructor(name) + try: + # MD5, SHA1, and SHA2 are in all supported OpenSSL versions + # SHA3/shake are available in OpenSSL 1.1.1+ + f = getattr(_hashlib, 'openssl_' + name) + # Allow the C module to raise ValueError. The function will be + # defined but the hash not actually available. Don't fall back to + # builtin if the current security policy blocks a digest, bpo#40695. + f(usedforsecurity=False) + # Use the C function directly (very fast) + return f + except (AttributeError, ValueError): + return __get_builtin_constructor(name) + + +def __py_new(name, data=b'', **kwargs): + """new(name, data=b'', **kwargs) - Return a new hashing object using the + named algorithm; optionally initialized with data (which must be + a bytes-like object). + """ + return __get_builtin_constructor(name)(data, **kwargs) + + +def __hash_new(name, data=b'', **kwargs): + """new(name, data=b'') - Return a new hashing object using the named algorithm; + optionally initialized with data (which must be a bytes-like object). + """ + if name in __block_openssl_constructor: + # Prefer our builtin blake2 implementation. + return __get_builtin_constructor(name)(data, **kwargs) + try: + return _hashlib.new(name, data, **kwargs) + except ValueError: + # If the _hashlib module (OpenSSL) doesn't support the named + # hash, try using our builtin implementations. + # This allows for SHA224/256 and SHA384/512 support even though + # the OpenSSL library prior to 0.9.8 doesn't provide them. + return __get_builtin_constructor(name)(data) + + +try: + import _hashlib + new = __hash_new + __get_hash = __get_openssl_constructor + algorithms_available = algorithms_available.union( + _hashlib.openssl_md_meth_names) +except ImportError: + _hashlib = None + new = __py_new + __get_hash = __get_builtin_constructor + +try: + # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA + from _hashlib import pbkdf2_hmac +except ImportError: + from warnings import warn as _warn + _trans_5C = bytes((x ^ 0x5C) for x in range(256)) + _trans_36 = bytes((x ^ 0x36) for x in range(256)) + + def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None): + """Password based key derivation function 2 (PKCS #5 v2.0) + + This Python implementations based on the hmac module about as fast + as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster + for long passwords. + """ + _warn( + "Python implementation of pbkdf2_hmac() is deprecated.", + category=DeprecationWarning, + stacklevel=2 + ) + if not isinstance(hash_name, str): + raise TypeError(hash_name) + + if not isinstance(password, (bytes, bytearray)): + password = bytes(memoryview(password)) + if not isinstance(salt, (bytes, bytearray)): + salt = bytes(memoryview(salt)) + + # Fast inline HMAC implementation + inner = new(hash_name) + outer = new(hash_name) + blocksize = getattr(inner, 'block_size', 64) + if len(password) > blocksize: + password = new(hash_name, password).digest() + password = password + b'\x00' * (blocksize - len(password)) + inner.update(password.translate(_trans_36)) + outer.update(password.translate(_trans_5C)) + + def prf(msg, inner=inner, outer=outer): + # PBKDF2_HMAC uses the password as key. 
We can re-use the same + # digest objects and just update copies to skip initialization. + icpy = inner.copy() + ocpy = outer.copy() + icpy.update(msg) + ocpy.update(icpy.digest()) + return ocpy.digest() + + if iterations < 1: + raise ValueError(iterations) + if dklen is None: + dklen = outer.digest_size + if dklen < 1: + raise ValueError(dklen) + + dkey = b'' + loop = 1 + from_bytes = int.from_bytes + while len(dkey) < dklen: + prev = prf(salt + loop.to_bytes(4, 'big')) + # endianness doesn't matter here as long to / from use the same + rkey = int.from_bytes(prev, 'big') + for i in range(iterations - 1): + prev = prf(prev) + # rkey = rkey ^ prev + rkey ^= from_bytes(prev, 'big') + loop += 1 + dkey += rkey.to_bytes(inner.digest_size, 'big') + + return dkey[:dklen] + +try: + # OpenSSL's scrypt requires OpenSSL 1.1+ + from _hashlib import scrypt +except ImportError: + pass + + +for __func_name in __always_supported: + # try them all, some may not work due to the OpenSSL + # version not supporting that algorithm. + try: + globals()[__func_name] = __get_hash(__func_name) + except ValueError: + import logging + logging.exception('code for hash %s was not found.', __func_name) + + +# Cleanup locals() +del __always_supported, __func_name, __get_hash +del __py_new, __hash_new, __get_openssl_constructor diff --git a/evalkit_cambrian/lib/python3.10/heapq.py b/evalkit_cambrian/lib/python3.10/heapq.py new file mode 100644 index 0000000000000000000000000000000000000000..fabefd87f8bf8c804e8eb3155c1aacbf05dd02bd --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/heapq.py @@ -0,0 +1,601 @@ +"""Heap queue algorithm (a.k.a. priority queue). + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. + +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +""" + +# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger + +__about__ = """Heap queues + +[explanation by François Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. 
The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +a usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). + +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. 
:-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! :-) +""" + +__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', + 'nlargest', 'nsmallest', 'heappushpop'] + +def heappush(heap, item): + """Push item onto heap, maintaining the heap invariant.""" + heap.append(item) + _siftdown(heap, 0, len(heap)-1) + +def heappop(heap): + """Pop the smallest item off the heap, maintaining the heap invariant.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup(heap, 0) + return returnitem + return lastelt + +def heapreplace(heap, item): + """Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + """ + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup(heap, 0) + return returnitem + +def heappushpop(heap, item): + """Fast version of a heappush followed by a heappop.""" + if heap and heap[0] < item: + item, heap[0] = heap[0], item + _siftup(heap, 0) + return item + +def heapify(x): + """Transform list into a heap, in-place, in O(len(x)) time.""" + n = len(x) + # Transform bottom-up. The largest index there's any point to looking at + # is the largest with a child index in-range, so must have 2*i + 1 < n, + # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so + # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is + # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. + for i in reversed(range(n//2)): + _siftup(x, i) + +def _heappop_max(heap): + """Maxheap version of a heappop.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup_max(heap, 0) + return returnitem + return lastelt + +def _heapreplace_max(heap, item): + """Maxheap version of a heappop followed by a heappush.""" + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup_max(heap, 0) + return returnitem + +def _heapify_max(x): + """Transform list into a maxheap, in-place, in O(len(x)) time.""" + n = len(x) + for i in reversed(range(n//2)): + _siftup_max(x, i) + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. 
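+    # Illustrative trace (an added comment, not from the original source):
+    # heappush() of 0 onto the valid heap [1, 3, 2, 7, 5] appends it at pos 5
+    # and calls _siftdown(heap, 0, 5):
+    #   parent at pos 2 is 2 > 0, so 2 moves down -> [1, 3, 2, 7, 5, 2]
+    #   parent at pos 0 is 1 > 0, so 1 moves down -> [1, 3, 1, 7, 5, 2]
+    #   pos == startpos, the loop ends, and 0 is stored at the root -> [0, 3, 1, 7, 5, 2]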
+ while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if newitem < parent: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom comparison methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. +# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) more efficient +# for sorting. + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[childpos] < heap[rightpos]: + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + +def _siftdown_max(heap, startpos, pos): + 'Maxheap variant of _siftdown' + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if parent < newitem: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +def _siftup_max(heap, pos): + 'Maxheap variant of _siftup' + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the larger child until hitting a leaf. 
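+    # Added note: this mirrors _siftup() above -- move the displaced item all
+    # the way to a leaf, then let _siftdown_max() bubble it back up; only the
+    # direction of the comparisons changes for the max-heap.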
+ childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of larger child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[rightpos] < heap[childpos]: + childpos = rightpos + # Move the larger child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown_max(heap, startpos, pos) + +def merge(*iterables, key=None, reverse=False): + '''Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + If *key* is not None, applies a key function to each element to determine + its sort order. + + >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) + ['dog', 'cat', 'fish', 'horse', 'kangaroo'] + + ''' + + h = [] + h_append = h.append + + if reverse: + _heapify = _heapify_max + _heappop = _heappop_max + _heapreplace = _heapreplace_max + direction = -1 + else: + _heapify = heapify + _heappop = heappop + _heapreplace = heapreplace + direction = 1 + + if key is None: + for order, it in enumerate(map(iter, iterables)): + try: + next = it.__next__ + h_append([next(), order * direction, next]) + except StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + value, order, next = s = h[0] + yield value + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except StopIteration: + _heappop(h) # remove empty iterator + if h: + # fast case when only a single iterator remains + value, order, next = h[0] + yield value + yield from next.__self__ + return + + for order, it in enumerate(map(iter, iterables)): + try: + next = it.__next__ + value = next() + h_append([key(value), order * direction, value, next]) + except StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + key_value, order, value, next = s = h[0] + yield value + value = next() + s[0] = key(value) + s[2] = value + _heapreplace(h, s) + except StopIteration: + _heappop(h) + if h: + key_value, order, value, next = h[0] + yield value + yield from next.__self__ + + +# Algorithm notes for nlargest() and nsmallest() +# ============================================== +# +# Make a single pass over the data while keeping the k most extreme values +# in a heap. Memory consumption is limited to keeping k values in a list. 
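+#
+# A minimal sketch of that single-pass idea for nsmallest(), added here only
+# as illustration (the real implementation below also handles key functions,
+# short or sized inputs, and stable ordering):
+#
+#     def _k_smallest_sketch(k, iterable):
+#         it = iter(iterable)
+#         keep = [next(it) for _ in range(k)]   # assumes at least k items
+#         _heapify_max(keep)                    # max-heap of the k smallest so far
+#         for elem in it:
+#             if elem < keep[0]:                # beats the current k-th smallest
+#                 _heapreplace_max(keep, elem)
+#         return sorted(keep)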
+# +# Measured performance for random inputs: +# +# number of comparisons +# n inputs k-extreme values (average of 5 trials) % more than min() +# ------------- ---------------- --------------------- ----------------- +# 1,000 100 3,317 231.7% +# 10,000 100 14,046 40.5% +# 100,000 100 105,749 5.7% +# 1,000,000 100 1,007,751 0.8% +# 10,000,000 100 10,009,401 0.1% +# +# Theoretical number of comparisons for k smallest of n random inputs: +# +# Step Comparisons Action +# ---- -------------------------- --------------------------- +# 1 1.66 * k heapify the first k-inputs +# 2 n - k compare remaining elements to top of heap +# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap +# 4 k * lg2(k) - (k/2) final sort of the k most extreme values +# +# Combining and simplifying for a rough estimate gives: +# +# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k)) +# +# Computing the number of comparisons for step 3: +# ----------------------------------------------- +# * For the i-th new value from the iterable, the probability of being in the +# k most extreme values is k/i. For example, the probability of the 101st +# value seen being in the 100 most extreme values is 100/101. +# * If the value is a new extreme value, the cost of inserting it into the +# heap is 1 + log(k, 2). +# * The probability times the cost gives: +# (k/i) * (1 + log(k, 2)) +# * Summing across the remaining n-k elements gives: +# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1)) +# * This reduces to: +# (H(n) - H(k)) * k * (1 + log(k, 2)) +# * Where H(n) is the n-th harmonic number estimated by: +# gamma = 0.5772156649 +# H(n) = log(n, e) + gamma + 1 / (2 * n) +# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence +# * Substituting the H(n) formula: +# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2) +# +# Worst-case for step 3: +# ---------------------- +# In the worst case, the input data is reversed sorted so that every new element +# must be inserted in the heap: +# +# comparisons = 1.66 * k + log(k, 2) * (n - k) +# +# Alternative Algorithms +# ---------------------- +# Other algorithms were not used because they: +# 1) Took much more auxiliary memory, +# 2) Made multiple passes over the data. +# 3) Made more comparisons in common cases (small k, large n, semi-random input). +# See the more detailed comparison of approach at: +# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest + +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. 
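+
+    For example (an illustrative call): nsmallest(3, [5, 1, 4, 2, 3])
+    returns [1, 2, 3].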
+ + Equivalent to: sorted(iterable, key=key)[:n] + """ + + # Short-cut for n==1 is to use min() + if n == 1: + it = iter(iterable) + sentinel = object() + result = min(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + # put the range(n) first so that zip() doesn't + # consume one too many elements from the iterator + result = [(elem, i) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + if elem < top: + _heapreplace(result, (elem, order)) + top, _order = result[0] + order += 1 + result.sort() + return [elem for (elem, order) in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + k = key(elem) + if k < top: + _heapreplace(result, (k, order, elem)) + top, _order, _elem = result[0] + order += 1 + result.sort() + return [elem for (k, order, elem) in result] + +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() + if n == 1: + it = iter(iterable) + sentinel = object() + result = max(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + if top < elem: + _heapreplace(result, (elem, order)) + top, _order = result[0] + order -= 1 + result.sort(reverse=True) + return [elem for (elem, order) in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + k = key(elem) + if top < k: + _heapreplace(result, (k, order, elem)) + top, _order, _elem = result[0] + order -= 1 + result.sort(reverse=True) + return [elem for (k, order, elem) in result] + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass +try: + from _heapq import _heapreplace_max +except ImportError: + pass +try: + from _heapq import _heapify_max +except ImportError: + pass +try: + from _heapq import _heappop_max +except ImportError: + pass + + +if __name__ == "__main__": + + import doctest # pragma: no cover + print(doctest.testmod()) # pragma: no cover diff --git a/evalkit_cambrian/lib/python3.10/imghdr.py b/evalkit_cambrian/lib/python3.10/imghdr.py new file mode 100644 index 0000000000000000000000000000000000000000..6e01fd857469ad89e597009d8c1aa093f2f90313 --- /dev/null +++ 
b/evalkit_cambrian/lib/python3.10/imghdr.py @@ -0,0 +1,168 @@ +"""Recognize image file formats based on their first few bytes.""" + +from os import PathLike + +__all__ = ["what"] + +#-------------------------# +# Recognize image headers # +#-------------------------# + +def what(file, h=None): + f = None + try: + if h is None: + if isinstance(file, (str, PathLike)): + f = open(file, 'rb') + h = f.read(32) + else: + location = file.tell() + h = file.read(32) + file.seek(location) + for tf in tests: + res = tf(h, f) + if res: + return res + finally: + if f: f.close() + return None + + +#---------------------------------# +# Subroutines per image file type # +#---------------------------------# + +tests = [] + +def test_jpeg(h, f): + """JPEG data in JFIF or Exif format""" + if h[6:10] in (b'JFIF', b'Exif'): + return 'jpeg' + +tests.append(test_jpeg) + +def test_png(h, f): + if h.startswith(b'\211PNG\r\n\032\n'): + return 'png' + +tests.append(test_png) + +def test_gif(h, f): + """GIF ('87 and '89 variants)""" + if h[:6] in (b'GIF87a', b'GIF89a'): + return 'gif' + +tests.append(test_gif) + +def test_tiff(h, f): + """TIFF (can be in Motorola or Intel byte order)""" + if h[:2] in (b'MM', b'II'): + return 'tiff' + +tests.append(test_tiff) + +def test_rgb(h, f): + """SGI image library""" + if h.startswith(b'\001\332'): + return 'rgb' + +tests.append(test_rgb) + +def test_pbm(h, f): + """PBM (portable bitmap)""" + if len(h) >= 3 and \ + h[0] == ord(b'P') and h[1] in b'14' and h[2] in b' \t\n\r': + return 'pbm' + +tests.append(test_pbm) + +def test_pgm(h, f): + """PGM (portable graymap)""" + if len(h) >= 3 and \ + h[0] == ord(b'P') and h[1] in b'25' and h[2] in b' \t\n\r': + return 'pgm' + +tests.append(test_pgm) + +def test_ppm(h, f): + """PPM (portable pixmap)""" + if len(h) >= 3 and \ + h[0] == ord(b'P') and h[1] in b'36' and h[2] in b' \t\n\r': + return 'ppm' + +tests.append(test_ppm) + +def test_rast(h, f): + """Sun raster file""" + if h.startswith(b'\x59\xA6\x6A\x95'): + return 'rast' + +tests.append(test_rast) + +def test_xbm(h, f): + """X bitmap (X10 or X11)""" + if h.startswith(b'#define '): + return 'xbm' + +tests.append(test_xbm) + +def test_bmp(h, f): + if h.startswith(b'BM'): + return 'bmp' + +tests.append(test_bmp) + +def test_webp(h, f): + if h.startswith(b'RIFF') and h[8:12] == b'WEBP': + return 'webp' + +tests.append(test_webp) + +def test_exr(h, f): + if h.startswith(b'\x76\x2f\x31\x01'): + return 'exr' + +tests.append(test_exr) + +#--------------------# +# Small test program # +#--------------------# + +def test(): + import sys + recursive = 0 + if sys.argv[1:] and sys.argv[1] == '-r': + del sys.argv[1:2] + recursive = 1 + try: + if sys.argv[1:]: + testall(sys.argv[1:], recursive, 1) + else: + testall(['.'], recursive, 1) + except KeyboardInterrupt: + sys.stderr.write('\n[Interrupted]\n') + sys.exit(1) + +def testall(list, recursive, toplevel): + import sys + import os + for filename in list: + if os.path.isdir(filename): + print(filename + '/:', end=' ') + if recursive or toplevel: + print('recursing down:') + import glob + names = glob.glob(os.path.join(glob.escape(filename), '*')) + testall(names, recursive, 0) + else: + print('*** directory (use -r) ***') + else: + print(filename + ':', end=' ') + sys.stdout.flush() + try: + print(what(filename)) + except OSError: + print('*** not found ***') + +if __name__ == '__main__': + test() diff --git a/evalkit_cambrian/lib/python3.10/lzma.py b/evalkit_cambrian/lib/python3.10/lzma.py new file mode 100644 index 
0000000000000000000000000000000000000000..800f52198fbb794077fe43425df83db44e13960d --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lzma.py @@ -0,0 +1,356 @@ +"""Interface to the liblzma compression library. + +This module provides a class for reading and writing compressed files, +classes for incremental (de)compression, and convenience functions for +one-shot (de)compression. + +These classes and functions support both the XZ and legacy LZMA +container formats, as well as raw compressed data streams. +""" + +__all__ = [ + "CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256", + "CHECK_ID_MAX", "CHECK_UNKNOWN", + "FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64", + "FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC", + "FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW", + "MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4", + "MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME", + + "LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError", + "open", "compress", "decompress", "is_check_supported", +] + +import builtins +import io +import os +from _lzma import * +from _lzma import _encode_filter_properties, _decode_filter_properties +import _compression + + +_MODE_CLOSED = 0 +_MODE_READ = 1 +# Value 2 no longer used +_MODE_WRITE = 3 + + +class LZMAFile(_compression.BaseStream): + + """A file object providing transparent LZMA (de)compression. + + An LZMAFile can act as a wrapper for an existing file object, or + refer directly to a named file on disk. + + Note that LZMAFile provides a *binary* file interface - data read + is returned as bytes, and data to be written must be given as bytes. + """ + + def __init__(self, filename=None, mode="r", *, + format=None, check=-1, preset=None, filters=None): + """Open an LZMA-compressed file in binary mode. + + filename can be either an actual file name (given as a str, + bytes, or PathLike object), in which case the named file is + opened, or it can be an existing file object to read from or + write to. + + mode can be "r" for reading (default), "w" for (over)writing, + "x" for creating exclusively, or "a" for appending. These can + equivalently be given as "rb", "wb", "xb" and "ab" respectively. + + format specifies the container format to use for the file. + If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the + default is FORMAT_XZ. + + check specifies the integrity check to use. This argument can + only be used when opening a file for writing. For FORMAT_XZ, + the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not + support integrity checks - for these formats, check must be + omitted, or be CHECK_NONE. + + When opening a file for reading, the *preset* argument is not + meaningful, and should be omitted. The *filters* argument should + also be omitted, except when format is FORMAT_RAW (in which case + it is required). + + When opening a file for writing, the settings used by the + compressor can be specified either as a preset compression + level (with the *preset* argument), or in detail as a custom + filter chain (with the *filters* argument). For FORMAT_XZ and + FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset + level. For FORMAT_RAW, the caller must always specify a filter + chain; the raw compressor does not support preset compression + levels. + + preset (if provided) should be an integer in the range 0-9, + optionally OR-ed with the constant PRESET_EXTREME. + + filters (if provided) should be a sequence of dicts. 
Each dict + should have an entry for "id" indicating ID of the filter, plus + additional entries for options to the filter. + """ + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + + if mode in ("r", "rb"): + if check != -1: + raise ValueError("Cannot specify an integrity check " + "when opening a file for reading") + if preset is not None: + raise ValueError("Cannot specify a preset compression " + "level when opening a file for reading") + if format is None: + format = FORMAT_AUTO + mode_code = _MODE_READ + elif mode in ("w", "wb", "a", "ab", "x", "xb"): + if format is None: + format = FORMAT_XZ + mode_code = _MODE_WRITE + self._compressor = LZMACompressor(format=format, check=check, + preset=preset, filters=filters) + self._pos = 0 + else: + raise ValueError("Invalid mode: {!r}".format(mode)) + + if isinstance(filename, (str, bytes, os.PathLike)): + if "b" not in mode: + mode += "b" + self._fp = builtins.open(filename, mode) + self._closefp = True + self._mode = mode_code + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + self._mode = mode_code + else: + raise TypeError("filename must be a str, bytes, file or PathLike object") + + if self._mode == _MODE_READ: + raw = _compression.DecompressReader(self._fp, LZMADecompressor, + trailing_error=LZMAError, format=format, filters=filters) + self._buffer = io.BufferedReader(raw) + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. + """ + if self._mode == _MODE_CLOSED: + return + try: + if self._mode == _MODE_READ: + self._buffer.close() + self._buffer = None + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._buffer.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode == _MODE_READ + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + def peek(self, size=-1): + """Return buffered data without advancing the file position. + + Always returns at least one byte of data, unless at EOF. + The exact number of bytes returned is unspecified. + """ + self._check_can_read() + # Relies on the undocumented fact that BufferedReader.peek() always + # returns at least one byte (except at EOF) + return self._buffer.peek(size) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b"" if the file is already at EOF. + """ + self._check_can_read() + return self._buffer.read(size) + + def read1(self, size=-1): + """Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b"" if the file is at EOF. 
+ """ + self._check_can_read() + if size < 0: + size = io.DEFAULT_BUFFER_SIZE + return self._buffer.read1(size) + + def readline(self, size=-1): + """Read a line of uncompressed bytes from the file. + + The terminating newline (if present) is retained. If size is + non-negative, no more than size bytes will be read (in which + case the line may be incomplete). Returns b'' if already at EOF. + """ + self._check_can_read() + return self._buffer.readline(size) + + def write(self, data): + """Write a bytes object to the file. + + Returns the number of uncompressed bytes written, which is + always the length of data in bytes. Note that due to buffering, + the file on disk may not reflect the data written until close() + is called. + """ + self._check_can_write() + if isinstance(data, (bytes, bytearray)): + length = len(data) + else: + # accept any data that supports the buffer protocol + data = memoryview(data) + length = data.nbytes + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += length + return length + + def seek(self, offset, whence=io.SEEK_SET): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Possible values for whence are: + + 0: start of stream (default): offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + self._check_can_seek() + return self._buffer.seek(offset, whence) + + def tell(self): + """Return the current file position.""" + self._check_not_closed() + if self._mode == _MODE_READ: + return self._buffer.tell() + return self._pos + + +def open(filename, mode="rb", *, + format=None, check=-1, preset=None, filters=None, + encoding=None, errors=None, newline=None): + """Open an LZMA-compressed file in binary or text mode. + + filename can be either an actual file name (given as a str, bytes, + or PathLike object), in which case the named file is opened, or it + can be an existing file object to read from or write to. + + The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb", + "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text + mode. + + The format, check, preset and filters arguments specify the + compression settings, as for LZMACompressor, LZMADecompressor and + LZMAFile. + + For binary mode, this function is equivalent to the LZMAFile + constructor: LZMAFile(filename, mode, ...). In this case, the + encoding, errors and newline arguments must not be provided. + + For text mode, an LZMAFile object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error + handling behavior, and line ending(s). 
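+
+    For example, reading xz-compressed text might look like this (an
+    illustrative sketch; "archive.xz" is a hypothetical file name):
+
+        with open("archive.xz", "rt", encoding="utf-8") as f:
+            text = f.read()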
+ + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if errors is not None: + raise ValueError("Argument 'errors' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + lz_mode = mode.replace("t", "") + binary_file = LZMAFile(filename, lz_mode, format=format, check=check, + preset=preset, filters=filters) + + if "t" in mode: + encoding = io.text_encoding(encoding) + return io.TextIOWrapper(binary_file, encoding, errors, newline) + else: + return binary_file + + +def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None): + """Compress a block of data. + + Refer to LZMACompressor's docstring for a description of the + optional arguments *format*, *check*, *preset* and *filters*. + + For incremental compression, use an LZMACompressor instead. + """ + comp = LZMACompressor(format, check, preset, filters) + return comp.compress(data) + comp.flush() + + +def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None): + """Decompress a block of data. + + Refer to LZMADecompressor's docstring for a description of the + optional arguments *format*, *check* and *filters*. + + For incremental decompression, use an LZMADecompressor instead. + """ + results = [] + while True: + decomp = LZMADecompressor(format, memlimit, filters) + try: + res = decomp.decompress(data) + except LZMAError: + if results: + break # Leftover data is not a valid LZMA/XZ stream; ignore it. + else: + raise # Error on the first iteration; bail out. + results.append(res) + if not decomp.eof: + raise LZMAError("Compressed data ended before the " + "end-of-stream marker was reached") + data = decomp.unused_data + if not data: + break + return b"".join(results) diff --git a/evalkit_cambrian/lib/python3.10/mailcap.py b/evalkit_cambrian/lib/python3.10/mailcap.py new file mode 100644 index 0000000000000000000000000000000000000000..444c6408b54c6bf10fbb161e3ff099e88f6110af --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/mailcap.py @@ -0,0 +1,298 @@ +"""Mailcap file handling. See RFC 1524.""" + +import os +import warnings +import re + +__all__ = ["getcaps","findmatch"] + + +def lineno_sort_key(entry): + # Sort in ascending order, with unspecified entries at the end + if 'lineno' in entry: + return 0, entry['lineno'] + else: + return 1, 0 + +_find_unsafe = re.compile(r'[^\xa1-\U0010FFFF\w@+=:,./-]').search + +class UnsafeMailcapInput(Warning): + """Warning raised when refusing unsafe input""" + + +# Part 1: top-level interface. + +def getcaps(): + """Return a dictionary containing the mailcap database. + + The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain') + to a list of dictionaries corresponding to mailcap entries. The list + collects all the entries for that MIME type from all available mailcap + files. Each dictionary contains key-value pairs for that MIME type, + where the viewing command is stored with the key "view". 
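+
+    As an illustration, a mailcap line such as "video/mpeg; xmpeg %s" would
+    produce an entry roughly like {'video/mpeg': [{'view': 'xmpeg %s',
+    'lineno': 0}]} (the exact 'lineno' value depends on the order in which
+    the mailcap files are read).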
+ + """ + caps = {} + lineno = 0 + for mailcap in listmailcapfiles(): + try: + fp = open(mailcap, 'r') + except OSError: + continue + with fp: + morecaps, lineno = _readmailcapfile(fp, lineno) + for key, value in morecaps.items(): + if not key in caps: + caps[key] = value + else: + caps[key] = caps[key] + value + return caps + +def listmailcapfiles(): + """Return a list of all mailcap files found on the system.""" + # This is mostly a Unix thing, but we use the OS path separator anyway + if 'MAILCAPS' in os.environ: + pathstr = os.environ['MAILCAPS'] + mailcaps = pathstr.split(os.pathsep) + else: + if 'HOME' in os.environ: + home = os.environ['HOME'] + else: + # Don't bother with getpwuid() + home = '.' # Last resort + mailcaps = [home + '/.mailcap', '/etc/mailcap', + '/usr/etc/mailcap', '/usr/local/etc/mailcap'] + return mailcaps + + +# Part 2: the parser. +def readmailcapfile(fp): + """Read a mailcap file and return a dictionary keyed by MIME type.""" + warnings.warn('readmailcapfile is deprecated, use getcaps instead', + DeprecationWarning, 2) + caps, _ = _readmailcapfile(fp, None) + return caps + + +def _readmailcapfile(fp, lineno): + """Read a mailcap file and return a dictionary keyed by MIME type. + + Each MIME type is mapped to an entry consisting of a list of + dictionaries; the list will contain more than one such dictionary + if a given MIME type appears more than once in the mailcap file. + Each dictionary contains key-value pairs for that MIME type, where + the viewing command is stored with the key "view". + """ + caps = {} + while 1: + line = fp.readline() + if not line: break + # Ignore comments and blank lines + if line[0] == '#' or line.strip() == '': + continue + nextline = line + # Join continuation lines + while nextline[-2:] == '\\\n': + nextline = fp.readline() + if not nextline: nextline = '\n' + line = line[:-2] + nextline + # Parse the line + key, fields = parseline(line) + if not (key and fields): + continue + if lineno is not None: + fields['lineno'] = lineno + lineno += 1 + # Normalize the key + types = key.split('/') + for j in range(len(types)): + types[j] = types[j].strip() + key = '/'.join(types).lower() + # Update the database + if key in caps: + caps[key].append(fields) + else: + caps[key] = [fields] + return caps, lineno + +def parseline(line): + """Parse one entry in a mailcap file and return a dictionary. + + The viewing command is stored as the value with the key "view", + and the rest of the fields produce key-value pairs in the dict. + """ + fields = [] + i, n = 0, len(line) + while i < n: + field, i = parsefield(line, i, n) + fields.append(field) + i = i+1 # Skip semicolon + if len(fields) < 2: + return None, None + key, view, rest = fields[0], fields[1], fields[2:] + fields = {'view': view} + for field in rest: + i = field.find('=') + if i < 0: + fkey = field + fvalue = "" + else: + fkey = field[:i].strip() + fvalue = field[i+1:].strip() + if fkey in fields: + # Ignore it + pass + else: + fields[fkey] = fvalue + return key, fields + +def parsefield(line, i, n): + """Separate one key-value pair in a mailcap entry.""" + start = i + while i < n: + c = line[i] + if c == ';': + break + elif c == '\\': + i = i+2 + else: + i = i+1 + return line[start:i].strip(), i + + +# Part 3: using the database. + +def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]): + """Find a match for a mailcap entry. + + Return a tuple containing the command line, and the mailcap entry + used; (None, None) if no match is found. 
This may invoke the + 'test' command of several matching entries before deciding which + entry to use. + + """ + if _find_unsafe(filename): + msg = "Refusing to use mailcap with filename %r. Use a safe temporary filename." % (filename,) + warnings.warn(msg, UnsafeMailcapInput) + return None, None + entries = lookup(caps, MIMEtype, key) + # XXX This code should somehow check for the needsterminal flag. + for e in entries: + if 'test' in e: + test = subst(e['test'], filename, plist) + if test is None: + continue + if test and os.system(test) != 0: + continue + command = subst(e[key], MIMEtype, filename, plist) + if command is not None: + return command, e + return None, None + +def lookup(caps, MIMEtype, key=None): + entries = [] + if MIMEtype in caps: + entries = entries + caps[MIMEtype] + MIMEtypes = MIMEtype.split('/') + MIMEtype = MIMEtypes[0] + '/*' + if MIMEtype in caps: + entries = entries + caps[MIMEtype] + if key is not None: + entries = [e for e in entries if key in e] + entries = sorted(entries, key=lineno_sort_key) + return entries + +def subst(field, MIMEtype, filename, plist=[]): + # XXX Actually, this is Unix-specific + res = '' + i, n = 0, len(field) + while i < n: + c = field[i]; i = i+1 + if c != '%': + if c == '\\': + c = field[i:i+1]; i = i+1 + res = res + c + else: + c = field[i]; i = i+1 + if c == '%': + res = res + c + elif c == 's': + res = res + filename + elif c == 't': + if _find_unsafe(MIMEtype): + msg = "Refusing to substitute MIME type %r into a shell command." % (MIMEtype,) + warnings.warn(msg, UnsafeMailcapInput) + return None + res = res + MIMEtype + elif c == '{': + start = i + while i < n and field[i] != '}': + i = i+1 + name = field[start:i] + i = i+1 + param = findparam(name, plist) + if _find_unsafe(param): + msg = "Refusing to substitute parameter %r (%s) into a shell command" % (param, name) + warnings.warn(msg, UnsafeMailcapInput) + return None + res = res + param + # XXX To do: + # %n == number of parts if type is multipart/* + # %F == list of alternating type and filename for parts + else: + res = res + '%' + c + return res + +def findparam(name, plist): + name = name.lower() + '=' + n = len(name) + for p in plist: + if p[:n].lower() == name: + return p[n:] + return '' + + +# Part 4: test program. 
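+
+# Added illustration of the Part 3 interface above (the mailcap entry and the
+# file name here are made up):
+#
+#     caps = getcaps()
+#     command, entry = findmatch(caps, 'image/png', 'view', '/tmp/pic.png')
+#     # With an entry like "image/png; xv %s", command becomes "xv /tmp/pic.png"
+#     # and os.system(command) could then run the viewer.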
+ +def test(): + import sys + caps = getcaps() + if not sys.argv[1:]: + show(caps) + return + for i in range(1, len(sys.argv), 2): + args = sys.argv[i:i+2] + if len(args) < 2: + print("usage: mailcap [MIMEtype file] ...") + return + MIMEtype = args[0] + file = args[1] + command, e = findmatch(caps, MIMEtype, 'view', file) + if not command: + print("No viewer found for", type) + else: + print("Executing:", command) + sts = os.system(command) + sts = os.waitstatus_to_exitcode(sts) + if sts: + print("Exit status:", sts) + +def show(caps): + print("Mailcap files:") + for fn in listmailcapfiles(): print("\t" + fn) + print() + if not caps: caps = getcaps() + print("Mailcap entries:") + print() + ckeys = sorted(caps) + for type in ckeys: + print(type) + entries = caps[type] + for e in entries: + keys = sorted(e) + for k in keys: + print(" %-15s" % k, e[k]) + print() + +if __name__ == '__main__': + test() diff --git a/evalkit_cambrian/lib/python3.10/opcode.py b/evalkit_cambrian/lib/python3.10/opcode.py new file mode 100644 index 0000000000000000000000000000000000000000..37e88e92df70ecc58ea9f59bea40fa20a209f916 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/opcode.py @@ -0,0 +1,216 @@ + +""" +opcode module - potentially shared between dis and other modules which +operate on bytecodes (e.g. peephole optimizers). +""" + +__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs", + "haslocal", "hascompare", "hasfree", "opname", "opmap", + "HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"] + +# It's a chicken-and-egg I'm afraid: +# We're imported before _opcode's made. +# With exception unheeded +# (stack_effect is not needed) +# Both our chickens and eggs are allayed. +# --Larry Hastings, 2013/11/23 + +try: + from _opcode import stack_effect + __all__.append('stack_effect') +except ImportError: + pass + +cmp_op = ('<', '<=', '==', '!=', '>', '>=') + +hasconst = [] +hasname = [] +hasjrel = [] +hasjabs = [] +haslocal = [] +hascompare = [] +hasfree = [] +hasnargs = [] # unused + +opmap = {} +opname = ['<%r>' % (op,) for op in range(256)] + +def def_op(name, op): + opname[op] = name + opmap[name] = op + +def name_op(name, op): + def_op(name, op) + hasname.append(op) + +def jrel_op(name, op): + def_op(name, op) + hasjrel.append(op) + +def jabs_op(name, op): + def_op(name, op) + hasjabs.append(op) + +# Instruction opcodes for compiled code +# Blank lines correspond to available opcodes + +def_op('POP_TOP', 1) +def_op('ROT_TWO', 2) +def_op('ROT_THREE', 3) +def_op('DUP_TOP', 4) +def_op('DUP_TOP_TWO', 5) +def_op('ROT_FOUR', 6) + +def_op('NOP', 9) +def_op('UNARY_POSITIVE', 10) +def_op('UNARY_NEGATIVE', 11) +def_op('UNARY_NOT', 12) + +def_op('UNARY_INVERT', 15) +def_op('BINARY_MATRIX_MULTIPLY', 16) +def_op('INPLACE_MATRIX_MULTIPLY', 17) + +def_op('BINARY_POWER', 19) +def_op('BINARY_MULTIPLY', 20) + +def_op('BINARY_MODULO', 22) +def_op('BINARY_ADD', 23) +def_op('BINARY_SUBTRACT', 24) +def_op('BINARY_SUBSCR', 25) +def_op('BINARY_FLOOR_DIVIDE', 26) +def_op('BINARY_TRUE_DIVIDE', 27) +def_op('INPLACE_FLOOR_DIVIDE', 28) +def_op('INPLACE_TRUE_DIVIDE', 29) +def_op('GET_LEN', 30) +def_op('MATCH_MAPPING', 31) +def_op('MATCH_SEQUENCE', 32) +def_op('MATCH_KEYS', 33) +def_op('COPY_DICT_WITHOUT_KEYS', 34) + +def_op('WITH_EXCEPT_START', 49) +def_op('GET_AITER', 50) +def_op('GET_ANEXT', 51) +def_op('BEFORE_ASYNC_WITH', 52) + +def_op('END_ASYNC_FOR', 54) +def_op('INPLACE_ADD', 55) +def_op('INPLACE_SUBTRACT', 56) +def_op('INPLACE_MULTIPLY', 57) + +def_op('INPLACE_MODULO', 59) +def_op('STORE_SUBSCR', 60) 
+def_op('DELETE_SUBSCR', 61) +def_op('BINARY_LSHIFT', 62) +def_op('BINARY_RSHIFT', 63) +def_op('BINARY_AND', 64) +def_op('BINARY_XOR', 65) +def_op('BINARY_OR', 66) +def_op('INPLACE_POWER', 67) +def_op('GET_ITER', 68) +def_op('GET_YIELD_FROM_ITER', 69) +def_op('PRINT_EXPR', 70) +def_op('LOAD_BUILD_CLASS', 71) +def_op('YIELD_FROM', 72) +def_op('GET_AWAITABLE', 73) +def_op('LOAD_ASSERTION_ERROR', 74) +def_op('INPLACE_LSHIFT', 75) +def_op('INPLACE_RSHIFT', 76) +def_op('INPLACE_AND', 77) +def_op('INPLACE_XOR', 78) +def_op('INPLACE_OR', 79) + +def_op('LIST_TO_TUPLE', 82) +def_op('RETURN_VALUE', 83) +def_op('IMPORT_STAR', 84) +def_op('SETUP_ANNOTATIONS', 85) +def_op('YIELD_VALUE', 86) +def_op('POP_BLOCK', 87) + +def_op('POP_EXCEPT', 89) + +HAVE_ARGUMENT = 90 # Opcodes from here have an argument: + +name_op('STORE_NAME', 90) # Index in name list +name_op('DELETE_NAME', 91) # "" +def_op('UNPACK_SEQUENCE', 92) # Number of tuple items +jrel_op('FOR_ITER', 93) +def_op('UNPACK_EX', 94) +name_op('STORE_ATTR', 95) # Index in name list +name_op('DELETE_ATTR', 96) # "" +name_op('STORE_GLOBAL', 97) # "" +name_op('DELETE_GLOBAL', 98) # "" +def_op('ROT_N', 99) +def_op('LOAD_CONST', 100) # Index in const list +hasconst.append(100) +name_op('LOAD_NAME', 101) # Index in name list +def_op('BUILD_TUPLE', 102) # Number of tuple items +def_op('BUILD_LIST', 103) # Number of list items +def_op('BUILD_SET', 104) # Number of set items +def_op('BUILD_MAP', 105) # Number of dict entries +name_op('LOAD_ATTR', 106) # Index in name list +def_op('COMPARE_OP', 107) # Comparison operator +hascompare.append(107) +name_op('IMPORT_NAME', 108) # Index in name list +name_op('IMPORT_FROM', 109) # Index in name list +jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip +jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code +jabs_op('JUMP_IF_TRUE_OR_POP', 112) # "" +jabs_op('JUMP_ABSOLUTE', 113) # "" +jabs_op('POP_JUMP_IF_FALSE', 114) # "" +jabs_op('POP_JUMP_IF_TRUE', 115) # "" +name_op('LOAD_GLOBAL', 116) # Index in name list +def_op('IS_OP', 117) +def_op('CONTAINS_OP', 118) +def_op('RERAISE', 119) + +jabs_op('JUMP_IF_NOT_EXC_MATCH', 121) +jrel_op('SETUP_FINALLY', 122) # Distance to target address + +def_op('LOAD_FAST', 124) # Local variable number +haslocal.append(124) +def_op('STORE_FAST', 125) # Local variable number +haslocal.append(125) +def_op('DELETE_FAST', 126) # Local variable number +haslocal.append(126) + +def_op('GEN_START', 129) # Kind of generator/coroutine +def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3) +def_op('CALL_FUNCTION', 131) # #args +def_op('MAKE_FUNCTION', 132) # Flags +def_op('BUILD_SLICE', 133) # Number of items + +def_op('LOAD_CLOSURE', 135) +hasfree.append(135) +def_op('LOAD_DEREF', 136) +hasfree.append(136) +def_op('STORE_DEREF', 137) +hasfree.append(137) +def_op('DELETE_DEREF', 138) +hasfree.append(138) + +def_op('CALL_FUNCTION_KW', 141) # #args + #kwargs +def_op('CALL_FUNCTION_EX', 142) # Flags +jrel_op('SETUP_WITH', 143) +def_op('EXTENDED_ARG', 144) +EXTENDED_ARG = 144 +def_op('LIST_APPEND', 145) +def_op('SET_ADD', 146) +def_op('MAP_ADD', 147) +def_op('LOAD_CLASSDEREF', 148) +hasfree.append(148) + +def_op('MATCH_CLASS', 152) + +jrel_op('SETUP_ASYNC_WITH', 154) +def_op('FORMAT_VALUE', 155) +def_op('BUILD_CONST_KEY_MAP', 156) +def_op('BUILD_STRING', 157) + +name_op('LOAD_METHOD', 160) +def_op('CALL_METHOD', 161) +def_op('LIST_EXTEND', 162) +def_op('SET_UPDATE', 163) +def_op('DICT_MERGE', 164) +def_op('DICT_UPDATE', 165) + +del def_op, name_op, jrel_op, 
jabs_op diff --git a/evalkit_cambrian/lib/python3.10/pdb.py b/evalkit_cambrian/lib/python3.10/pdb.py new file mode 100644 index 0000000000000000000000000000000000000000..3bdb2ac36f719212f09c78c66473f1a3aea2ec07 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/pdb.py @@ -0,0 +1,1750 @@ +#! /usr/bin/env python3 + +""" +The Python Debugger Pdb +======================= + +To use the debugger in its simplest form: + + >>> import pdb + >>> pdb.run('') + +The debugger's prompt is '(Pdb) '. This will stop in the first +function call in . + +Alternatively, if a statement terminated with an unhandled exception, +you can use pdb's post-mortem facility to inspect the contents of the +traceback: + + >>> + + >>> import pdb + >>> pdb.pm() + +The commands recognized by the debugger are listed in the next +section. Most can be abbreviated as indicated; e.g., h(elp) means +that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', +nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in +square brackets. Alternatives in the command syntax are separated +by a vertical bar (|). + +A blank line repeats the previous command literally, except for +'list', where it lists the next 11 lines. + +Commands that the debugger doesn't recognize are assumed to be Python +statements and are executed in the context of the program being +debugged. Python statements can also be prefixed with an exclamation +point ('!'). This is a powerful way to inspect the program being +debugged; it is even possible to change variables or call functions. +When an exception occurs in such a statement, the exception name is +printed but the debugger's state is not changed. + +The debugger supports aliases, which can save typing. And aliases can +have parameters (see the alias help entry) which allows one a certain +level of adaptability to the context under examination. + +Multiple commands may be entered on a single line, separated by the +pair ';;'. No intelligence is applied to separating the commands; the +input is split at the first ';;', even if it is in the middle of a +quoted string. + +If a file ".pdbrc" exists in your home directory or in the current +directory, it is read in and executed as if it had been typed at the +debugger prompt. This is particularly useful for aliases. If both +files exist, the one in the home directory is read first and aliases +defined there can be overridden by the local file. This behavior can be +disabled by passing the "readrc=False" argument to the Pdb constructor. + +Aside from aliases, the debugger is not directly programmable; but it +is implemented as a class from which you can derive your own debugger +class, which you can make as fancy as you like. + + +Debugger commands +================= + +""" +# NOTE: the actual command documentation is collected from docstrings of the +# commands and is appended to __doc__ after the class has been defined. 
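+
+# An illustrative ~/.pdbrc (not part of this module) using the alias feature
+# described above; %1 is replaced by the first argument and %* by all of them:
+#
+#     alias dr pp dir(%1)
+#     alias loc pp %1.__dict__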
+ +import os +import io +import re +import sys +import cmd +import bdb +import dis +import code +import glob +import pprint +import signal +import inspect +import tokenize +import traceback +import linecache + + +class Restart(Exception): + """Causes a debugger to be restarted for the debugged python program.""" + pass + +__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace", + "post_mortem", "help"] + +def find_function(funcname, filename): + cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname)) + try: + fp = tokenize.open(filename) + except OSError: + return None + # consumer of this info expects the first line to be 1 + with fp: + for lineno, line in enumerate(fp, start=1): + if cre.match(line): + return funcname, filename, lineno + return None + +def lasti2lineno(code, lasti): + linestarts = list(dis.findlinestarts(code)) + linestarts.reverse() + for i, lineno in linestarts: + if lasti >= i: + return lineno + return 0 + + +class _rstr(str): + """String that doesn't quote its repr.""" + def __repr__(self): + return self + + +# Interaction prompt line will separate file and call info from code +# text using value of line_prefix string. A newline and arrow may +# be to your liking. You can set it once pdb is imported using the +# command "pdb.line_prefix = '\n% '". +# line_prefix = ': ' # Use this to get the old situation back +line_prefix = '\n-> ' # Probably a better default + +class Pdb(bdb.Bdb, cmd.Cmd): + + _previous_sigint_handler = None + + def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None, + nosigint=False, readrc=True): + bdb.Bdb.__init__(self, skip=skip) + cmd.Cmd.__init__(self, completekey, stdin, stdout) + sys.audit("pdb.Pdb") + if stdout: + self.use_rawinput = 0 + self.prompt = '(Pdb) ' + self.aliases = {} + self.displaying = {} + self.mainpyfile = '' + self._wait_for_mainpyfile = False + self.tb_lineno = {} + # Try to load readline if it exists + try: + import readline + # remove some common file name delimiters + readline.set_completer_delims(' \t\n`@#$%^&*()=+[{]}\\|;:\'",<>?') + except ImportError: + pass + self.allow_kbdint = False + self.nosigint = nosigint + + # Read ~/.pdbrc and ./.pdbrc + self.rcLines = [] + if readrc: + try: + with open(os.path.expanduser('~/.pdbrc')) as rcFile: + self.rcLines.extend(rcFile) + except OSError: + pass + try: + with open(".pdbrc") as rcFile: + self.rcLines.extend(rcFile) + except OSError: + pass + + self.commands = {} # associates a command list to breakpoint numbers + self.commands_doprompt = {} # for each bp num, tells if the prompt + # must be disp. after execing the cmd list + self.commands_silent = {} # for each bp num, tells if the stack trace + # must be disp. after execing the cmd list + self.commands_defining = False # True while in the process of defining + # a command list + self.commands_bnum = None # The breakpoint number for which we are + # defining a list + + def sigint_handler(self, signum, frame): + if self.allow_kbdint: + raise KeyboardInterrupt + self.message("\nProgram interrupted. 
(Use 'cont' to resume).") + self.set_step() + self.set_trace(frame) + + def reset(self): + bdb.Bdb.reset(self) + self.forget() + + def forget(self): + self.lineno = None + self.stack = [] + self.curindex = 0 + self.curframe = None + self.tb_lineno.clear() + + def setup(self, f, tb): + self.forget() + self.stack, self.curindex = self.get_stack(f, tb) + while tb: + # when setting up post-mortem debugging with a traceback, save all + # the original line numbers to be displayed along the current line + # numbers (which can be different, e.g. due to finally clauses) + lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti) + self.tb_lineno[tb.tb_frame] = lineno + tb = tb.tb_next + self.curframe = self.stack[self.curindex][0] + # The f_locals dictionary is updated from the actual frame + # locals whenever the .f_locals accessor is called, so we + # cache it here to ensure that modifications are not overwritten. + self.curframe_locals = self.curframe.f_locals + return self.execRcLines() + + # Can be executed earlier than 'setup' if desired + def execRcLines(self): + if not self.rcLines: + return + # local copy because of recursion + rcLines = self.rcLines + rcLines.reverse() + # execute every line only once + self.rcLines = [] + while rcLines: + line = rcLines.pop().strip() + if line and line[0] != '#': + if self.onecmd(line): + # if onecmd returns True, the command wants to exit + # from the interaction, save leftover rc lines + # to execute before next interaction + self.rcLines += reversed(rcLines) + return True + + # Override Bdb methods + + def user_call(self, frame, argument_list): + """This method is called when there is the remote possibility + that we ever need to stop in this function.""" + if self._wait_for_mainpyfile: + return + if self.stop_here(frame): + self.message('--Call--') + self.interaction(frame, None) + + def user_line(self, frame): + """This function is called when we stop or break at this line.""" + if self._wait_for_mainpyfile: + if (self.mainpyfile != self.canonic(frame.f_code.co_filename) + or frame.f_lineno <= 0): + return + self._wait_for_mainpyfile = False + if self.bp_commands(frame): + self.interaction(frame, None) + + def bp_commands(self, frame): + """Call every command that was set for the current active breakpoint + (if there is one). 
+ + Returns True if the normal interaction function must be called, + False otherwise.""" + # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit + if getattr(self, "currentbp", False) and \ + self.currentbp in self.commands: + currentbp = self.currentbp + self.currentbp = 0 + lastcmd_back = self.lastcmd + self.setup(frame, None) + for line in self.commands[currentbp]: + self.onecmd(line) + self.lastcmd = lastcmd_back + if not self.commands_silent[currentbp]: + self.print_stack_entry(self.stack[self.curindex]) + if self.commands_doprompt[currentbp]: + self._cmdloop() + self.forget() + return + return 1 + + def user_return(self, frame, return_value): + """This function is called when a return trap is set here.""" + if self._wait_for_mainpyfile: + return + frame.f_locals['__return__'] = return_value + self.message('--Return--') + self.interaction(frame, None) + + def user_exception(self, frame, exc_info): + """This function is called if an exception occurs, + but only if we are to stop at or just below this level.""" + if self._wait_for_mainpyfile: + return + exc_type, exc_value, exc_traceback = exc_info + frame.f_locals['__exception__'] = exc_type, exc_value + + # An 'Internal StopIteration' exception is an exception debug event + # issued by the interpreter when handling a subgenerator run with + # 'yield from' or a generator controlled by a for loop. No exception has + # actually occurred in this case. The debugger uses this debug event to + # stop when the debuggee is returning from such generators. + prefix = 'Internal ' if (not exc_traceback + and exc_type is StopIteration) else '' + self.message('%s%s' % (prefix, + traceback.format_exception_only(exc_type, exc_value)[-1].strip())) + self.interaction(frame, exc_traceback) + + # General interaction function + def _cmdloop(self): + while True: + try: + # keyboard interrupts allow for an easy way to cancel + # the current command, so allow them during interactive input + self.allow_kbdint = True + self.cmdloop() + self.allow_kbdint = False + break + except KeyboardInterrupt: + self.message('--KeyboardInterrupt--') + + # Called before loop, handles display expressions + def preloop(self): + displaying = self.displaying.get(self.curframe) + if displaying: + for expr, oldvalue in displaying.items(): + newvalue = self._getval_except(expr) + # check for identity first; this prevents custom __eq__ to + # be called at every loop, and also prevents instances whose + # fields are changed to be displayed + if newvalue is not oldvalue and newvalue != oldvalue: + displaying[expr] = newvalue + self.message('display %s: %r [old: %r]' % + (expr, newvalue, oldvalue)) + + def interaction(self, frame, traceback): + # Restore the previous signal handler at the Pdb prompt. + if Pdb._previous_sigint_handler: + try: + signal.signal(signal.SIGINT, Pdb._previous_sigint_handler) + except ValueError: # ValueError: signal only works in main thread + pass + else: + Pdb._previous_sigint_handler = None + if self.setup(frame, traceback): + # no interaction desired at this time (happens if .pdbrc contains + # a command like "continue") + self.forget() + return + self.print_stack_entry(self.stack[self.curindex]) + self._cmdloop() + self.forget() + + def displayhook(self, obj): + """Custom displayhook for the exec in default(), which prevents + assignment of the _ variable in the builtins. 
+ """ + # reproduce the behavior of the standard displayhook, not printing None + if obj is not None: + self.message(repr(obj)) + + def default(self, line): + if line[:1] == '!': line = line[1:] + locals = self.curframe_locals + globals = self.curframe.f_globals + try: + code = compile(line + '\n', '', 'single') + save_stdout = sys.stdout + save_stdin = sys.stdin + save_displayhook = sys.displayhook + try: + sys.stdin = self.stdin + sys.stdout = self.stdout + sys.displayhook = self.displayhook + exec(code, globals, locals) + finally: + sys.stdout = save_stdout + sys.stdin = save_stdin + sys.displayhook = save_displayhook + except: + self._error_exc() + + def precmd(self, line): + """Handle alias expansion and ';;' separator.""" + if not line.strip(): + return line + args = line.split() + while args[0] in self.aliases: + line = self.aliases[args[0]] + ii = 1 + for tmpArg in args[1:]: + line = line.replace("%" + str(ii), + tmpArg) + ii += 1 + line = line.replace("%*", ' '.join(args[1:])) + args = line.split() + # split into ';;' separated commands + # unless it's an alias command + if args[0] != 'alias': + marker = line.find(';;') + if marker >= 0: + # queue up everything after marker + next = line[marker+2:].lstrip() + self.cmdqueue.append(next) + line = line[:marker].rstrip() + return line + + def onecmd(self, line): + """Interpret the argument as though it had been typed in response + to the prompt. + + Checks whether this line is typed at the normal prompt or in + a breakpoint command list definition. + """ + if not self.commands_defining: + return cmd.Cmd.onecmd(self, line) + else: + return self.handle_command_def(line) + + def handle_command_def(self, line): + """Handles one command line during command list definition.""" + cmd, arg, line = self.parseline(line) + if not cmd: + return + if cmd == 'silent': + self.commands_silent[self.commands_bnum] = True + return # continue to handle other cmd def in the cmd list + elif cmd == 'end': + self.cmdqueue = [] + return 1 # end of cmd list + cmdlist = self.commands[self.commands_bnum] + if arg: + cmdlist.append(cmd+' '+arg) + else: + cmdlist.append(cmd) + # Determine if we must stop + try: + func = getattr(self, 'do_' + cmd) + except AttributeError: + func = self.default + # one of the resuming commands + if func.__name__ in self.commands_resuming: + self.commands_doprompt[self.commands_bnum] = False + self.cmdqueue = [] + return 1 + return + + # interface abstraction functions + + def message(self, msg): + print(msg, file=self.stdout) + + def error(self, msg): + print('***', msg, file=self.stdout) + + # Generic completion functions. Individual complete_foo methods can be + # assigned below to one of these functions. + + def _complete_location(self, text, line, begidx, endidx): + # Complete a file/module/function location for break/tbreak/clear. + if line.strip().endswith((':', ',')): + # Here comes a line number or a condition which we can't complete. + return [] + # First, try to find matching functions (i.e. expressions). + try: + ret = self._complete_expression(text, line, begidx, endidx) + except Exception: + ret = [] + # Then, try to complete file names as well. + globs = glob.glob(glob.escape(text) + '*') + for fn in globs: + if os.path.isdir(fn): + ret.append(fn + '/') + elif os.path.isfile(fn) and fn.lower().endswith(('.py', '.pyw')): + ret.append(fn + ':') + return ret + + def _complete_bpnumber(self, text, line, begidx, endidx): + # Complete a breakpoint number. 
(This would be more helpful if we could + # display additional info along with the completions, such as file/line + # of the breakpoint.) + return [str(i) for i, bp in enumerate(bdb.Breakpoint.bpbynumber) + if bp is not None and str(i).startswith(text)] + + def _complete_expression(self, text, line, begidx, endidx): + # Complete an arbitrary expression. + if not self.curframe: + return [] + # Collect globals and locals. It is usually not really sensible to also + # complete builtins, and they clutter the namespace quite heavily, so we + # leave them out. + ns = {**self.curframe.f_globals, **self.curframe_locals} + if '.' in text: + # Walk an attribute chain up to the last part, similar to what + # rlcompleter does. This will bail if any of the parts are not + # simple attribute access, which is what we want. + dotted = text.split('.') + try: + obj = ns[dotted[0]] + for part in dotted[1:-1]: + obj = getattr(obj, part) + except (KeyError, AttributeError): + return [] + prefix = '.'.join(dotted[:-1]) + '.' + return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])] + else: + # Complete a simple name. + return [n for n in ns.keys() if n.startswith(text)] + + # Command definitions, called by cmdloop() + # The argument is the remaining string on the command line + # Return true to exit from the command loop + + def do_commands(self, arg): + """commands [bpnumber] + (com) ... + (com) end + (Pdb) + + Specify a list of commands for breakpoint number bpnumber. + The commands themselves are entered on the following lines. + Type a line containing just 'end' to terminate the commands. + The commands are executed when the breakpoint is hit. + + To remove all commands from a breakpoint, type commands and + follow it immediately with end; that is, give no commands. + + With no bpnumber argument, commands refers to the last + breakpoint set. + + You can use breakpoint commands to start your program up + again. Simply use the continue command, or step, or any other + command that resumes execution. + + Specifying any command resuming execution (currently continue, + step, next, return, jump, quit and their abbreviations) + terminates the command list (as if that command was + immediately followed by end). This is because any time you + resume execution (even with a simple next or step), you may + encounter another breakpoint -- which could have its own + command list, leading to ambiguities about which list to + execute. + + If you use the 'silent' command in the command list, the usual + message about stopping at a breakpoint is not printed. This + may be desirable for breakpoints that are to print a specific + message and then continue. If none of the other commands + print anything, you will see no sign that the breakpoint was + reached. + """ + if not arg: + bnum = len(bdb.Breakpoint.bpbynumber) - 1 + else: + try: + bnum = int(arg) + except: + self.error("Usage: commands [bnum]\n ...\n end") + return + self.commands_bnum = bnum + # Save old definitions for the case of a keyboard interrupt. + if bnum in self.commands: + old_command_defs = (self.commands[bnum], + self.commands_doprompt[bnum], + self.commands_silent[bnum]) + else: + old_command_defs = None + self.commands[bnum] = [] + self.commands_doprompt[bnum] = True + self.commands_silent[bnum] = False + + prompt_back = self.prompt + self.prompt = '(com) ' + self.commands_defining = True + try: + self.cmdloop() + except KeyboardInterrupt: + # Restore old definitions. 
+ if old_command_defs: + self.commands[bnum] = old_command_defs[0] + self.commands_doprompt[bnum] = old_command_defs[1] + self.commands_silent[bnum] = old_command_defs[2] + else: + del self.commands[bnum] + del self.commands_doprompt[bnum] + del self.commands_silent[bnum] + self.error('command definition aborted, old commands restored') + finally: + self.commands_defining = False + self.prompt = prompt_back + + complete_commands = _complete_bpnumber + + def do_break(self, arg, temporary = 0): + """b(reak) [ ([filename:]lineno | function) [, condition] ] + Without argument, list all breaks. + + With a line number argument, set a break at this line in the + current file. With a function name, set a break at the first + executable line of that function. If a second argument is + present, it is a string specifying an expression which must + evaluate to true before the breakpoint is honored. + + The line number may be prefixed with a filename and a colon, + to specify a breakpoint in another file (probably one that + hasn't been loaded yet). The file is searched for on + sys.path; the .py suffix may be omitted. + """ + if not arg: + if self.breaks: # There's at least one + self.message("Num Type Disp Enb Where") + for bp in bdb.Breakpoint.bpbynumber: + if bp: + self.message(bp.bpformat()) + return + # parse arguments; comma has lowest precedence + # and cannot occur in filename + filename = None + lineno = None + cond = None + comma = arg.find(',') + if comma > 0: + # parse stuff after comma: "condition" + cond = arg[comma+1:].lstrip() + arg = arg[:comma].rstrip() + # parse stuff before comma: [filename:]lineno | function + colon = arg.rfind(':') + funcname = None + if colon >= 0: + filename = arg[:colon].rstrip() + f = self.lookupmodule(filename) + if not f: + self.error('%r not found from sys.path' % filename) + return + else: + filename = f + arg = arg[colon+1:].lstrip() + try: + lineno = int(arg) + except ValueError: + self.error('Bad lineno: %s' % arg) + return + else: + # no colon; can be lineno or function + try: + lineno = int(arg) + except ValueError: + try: + func = eval(arg, + self.curframe.f_globals, + self.curframe_locals) + except: + func = arg + try: + if hasattr(func, '__func__'): + func = func.__func__ + code = func.__code__ + #use co_name to identify the bkpt (function names + #could be aliased, but co_name is invariant) + funcname = code.co_name + lineno = code.co_firstlineno + filename = code.co_filename + except: + # last thing to try + (ok, filename, ln) = self.lineinfo(arg) + if not ok: + self.error('The specified object %r is not a function ' + 'or was not found along sys.path.' 
% arg) + return + funcname = ok # ok contains a function name + lineno = int(ln) + if not filename: + filename = self.defaultFile() + # Check for reasonable breakpoint + line = self.checkline(filename, lineno) + if line: + # now set the break point + err = self.set_break(filename, line, temporary, cond, funcname) + if err: + self.error(err) + else: + bp = self.get_breaks(filename, line)[-1] + self.message("Breakpoint %d at %s:%d" % + (bp.number, bp.file, bp.line)) + + # To be overridden in derived debuggers + def defaultFile(self): + """Produce a reasonable default.""" + filename = self.curframe.f_code.co_filename + if filename == '' and self.mainpyfile: + filename = self.mainpyfile + return filename + + do_b = do_break + + complete_break = _complete_location + complete_b = _complete_location + + def do_tbreak(self, arg): + """tbreak [ ([filename:]lineno | function) [, condition] ] + Same arguments as break, but sets a temporary breakpoint: it + is automatically deleted when first hit. + """ + self.do_break(arg, 1) + + complete_tbreak = _complete_location + + def lineinfo(self, identifier): + failed = (None, None, None) + # Input is identifier, may be in single quotes + idstring = identifier.split("'") + if len(idstring) == 1: + # not in single quotes + id = idstring[0].strip() + elif len(idstring) == 3: + # quoted + id = idstring[1].strip() + else: + return failed + if id == '': return failed + parts = id.split('.') + # Protection for derived debuggers + if parts[0] == 'self': + del parts[0] + if len(parts) == 0: + return failed + # Best first guess at file to look at + fname = self.defaultFile() + if len(parts) == 1: + item = parts[0] + else: + # More than one part. + # First is module, second is method/class + f = self.lookupmodule(parts[0]) + if f: + fname = f + item = parts[1] + answer = find_function(item, fname) + return answer or failed + + def checkline(self, filename, lineno): + """Check whether specified line seems to be executable. + + Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank + line or EOF). Warning: testing is not comprehensive. + """ + # this method should be callable before starting debugging, so default + # to "no globals" if there is no current frame + frame = getattr(self, 'curframe', None) + globs = frame.f_globals if frame else None + line = linecache.getline(filename, lineno, globs) + if not line: + self.message('End of file') + return 0 + line = line.strip() + # Don't allow setting breakpoint at a blank line + if (not line or (line[0] == '#') or + (line[:3] == '"""') or line[:3] == "'''"): + self.error('Blank or comment') + return 0 + return lineno + + def do_enable(self, arg): + """enable bpnumber [bpnumber ...] + Enables the breakpoints given as a space separated list of + breakpoint numbers. + """ + args = arg.split() + for i in args: + try: + bp = self.get_bpbynumber(i) + except ValueError as err: + self.error(err) + else: + bp.enable() + self.message('Enabled %s' % bp) + + complete_enable = _complete_bpnumber + + def do_disable(self, arg): + """disable bpnumber [bpnumber ...] + Disables the breakpoints given as a space separated list of + breakpoint numbers. Disabling a breakpoint means it cannot + cause the program to stop execution, but unlike clearing a + breakpoint, it remains in the list of breakpoints and can be + (re-)enabled. 
+ """ + args = arg.split() + for i in args: + try: + bp = self.get_bpbynumber(i) + except ValueError as err: + self.error(err) + else: + bp.disable() + self.message('Disabled %s' % bp) + + complete_disable = _complete_bpnumber + + def do_condition(self, arg): + """condition bpnumber [condition] + Set a new condition for the breakpoint, an expression which + must evaluate to true before the breakpoint is honored. If + condition is absent, any existing condition is removed; i.e., + the breakpoint is made unconditional. + """ + args = arg.split(' ', 1) + try: + cond = args[1] + except IndexError: + cond = None + try: + bp = self.get_bpbynumber(args[0].strip()) + except IndexError: + self.error('Breakpoint number expected') + except ValueError as err: + self.error(err) + else: + bp.cond = cond + if not cond: + self.message('Breakpoint %d is now unconditional.' % bp.number) + else: + self.message('New condition set for breakpoint %d.' % bp.number) + + complete_condition = _complete_bpnumber + + def do_ignore(self, arg): + """ignore bpnumber [count] + Set the ignore count for the given breakpoint number. If + count is omitted, the ignore count is set to 0. A breakpoint + becomes active when the ignore count is zero. When non-zero, + the count is decremented each time the breakpoint is reached + and the breakpoint is not disabled and any associated + condition evaluates to true. + """ + args = arg.split() + try: + count = int(args[1].strip()) + except: + count = 0 + try: + bp = self.get_bpbynumber(args[0].strip()) + except IndexError: + self.error('Breakpoint number expected') + except ValueError as err: + self.error(err) + else: + bp.ignore = count + if count > 0: + if count > 1: + countstr = '%d crossings' % count + else: + countstr = '1 crossing' + self.message('Will ignore next %s of breakpoint %d.' % + (countstr, bp.number)) + else: + self.message('Will stop next time breakpoint %d is reached.' + % bp.number) + + complete_ignore = _complete_bpnumber + + def do_clear(self, arg): + """cl(ear) filename:lineno\ncl(ear) [bpnumber [bpnumber...]] + With a space separated list of breakpoint numbers, clear + those breakpoints. Without argument, clear all breaks (but + first ask confirmation). With a filename:lineno argument, + clear all breaks at that line in that file. + """ + if not arg: + try: + reply = input('Clear all breaks? ') + except EOFError: + reply = 'no' + reply = reply.strip().lower() + if reply in ('y', 'yes'): + bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp] + self.clear_all_breaks() + for bp in bplist: + self.message('Deleted %s' % bp) + return + if ':' in arg: + # Make sure it works for "clear C:\foo\bar.py:12" + i = arg.rfind(':') + filename = arg[:i] + arg = arg[i+1:] + try: + lineno = int(arg) + except ValueError: + err = "Invalid line number (%s)" % arg + else: + bplist = self.get_breaks(filename, lineno)[:] + err = self.clear_break(filename, lineno) + if err: + self.error(err) + else: + for bp in bplist: + self.message('Deleted %s' % bp) + return + numberlist = arg.split() + for i in numberlist: + try: + bp = self.get_bpbynumber(i) + except ValueError as err: + self.error(err) + else: + self.clear_bpbynumber(i) + self.message('Deleted %s' % bp) + do_cl = do_clear # 'c' is already an abbreviation for 'continue' + + complete_clear = _complete_location + complete_cl = _complete_location + + def do_where(self, arg): + """w(here) + Print a stack trace, with the most recent frame at the bottom. 
+ An arrow indicates the "current frame", which determines the + context of most commands. 'bt' is an alias for this command. + """ + self.print_stack_trace() + do_w = do_where + do_bt = do_where + + def _select_frame(self, number): + assert 0 <= number < len(self.stack) + self.curindex = number + self.curframe = self.stack[self.curindex][0] + self.curframe_locals = self.curframe.f_locals + self.print_stack_entry(self.stack[self.curindex]) + self.lineno = None + + def do_up(self, arg): + """u(p) [count] + Move the current frame count (default one) levels up in the + stack trace (to an older frame). + """ + if self.curindex == 0: + self.error('Oldest frame') + return + try: + count = int(arg or 1) + except ValueError: + self.error('Invalid frame count (%s)' % arg) + return + if count < 0: + newframe = 0 + else: + newframe = max(0, self.curindex - count) + self._select_frame(newframe) + do_u = do_up + + def do_down(self, arg): + """d(own) [count] + Move the current frame count (default one) levels down in the + stack trace (to a newer frame). + """ + if self.curindex + 1 == len(self.stack): + self.error('Newest frame') + return + try: + count = int(arg or 1) + except ValueError: + self.error('Invalid frame count (%s)' % arg) + return + if count < 0: + newframe = len(self.stack) - 1 + else: + newframe = min(len(self.stack) - 1, self.curindex + count) + self._select_frame(newframe) + do_d = do_down + + def do_until(self, arg): + """unt(il) [lineno] + Without argument, continue execution until the line with a + number greater than the current one is reached. With a line + number, continue execution until a line with a number greater + or equal to that is reached. In both cases, also stop when + the current frame returns. + """ + if arg: + try: + lineno = int(arg) + except ValueError: + self.error('Error in argument: %r' % arg) + return + if lineno <= self.curframe.f_lineno: + self.error('"until" line number is smaller than current ' + 'line number') + return + else: + lineno = None + self.set_until(self.curframe, lineno) + return 1 + do_unt = do_until + + def do_step(self, arg): + """s(tep) + Execute the current line, stop at the first possible occasion + (either in a function that is called or in the current + function). + """ + self.set_step() + return 1 + do_s = do_step + + def do_next(self, arg): + """n(ext) + Continue execution until the next line in the current function + is reached or it returns. + """ + self.set_next(self.curframe) + return 1 + do_n = do_next + + def do_run(self, arg): + """run [args...] + Restart the debugged python program. If a string is supplied + it is split with "shlex", and the result is used as the new + sys.argv. History, breakpoints, actions and debugger options + are preserved. "restart" is an alias for "run". + """ + if arg: + import shlex + argv0 = sys.argv[0:1] + try: + sys.argv = shlex.split(arg) + except ValueError as e: + self.error('Cannot run %s: %s' % (arg, e)) + return + sys.argv[:0] = argv0 + # this is caught in the main debugger loop + raise Restart + + do_restart = do_run + + def do_return(self, arg): + """r(eturn) + Continue execution until the current function returns. + """ + self.set_return(self.curframe) + return 1 + do_r = do_return + + def do_continue(self, arg): + """c(ont(inue)) + Continue execution, only stop when a breakpoint is encountered. 
+ """ + if not self.nosigint: + try: + Pdb._previous_sigint_handler = \ + signal.signal(signal.SIGINT, self.sigint_handler) + except ValueError: + # ValueError happens when do_continue() is invoked from + # a non-main thread in which case we just continue without + # SIGINT set. Would printing a message here (once) make + # sense? + pass + self.set_continue() + return 1 + do_c = do_cont = do_continue + + def do_jump(self, arg): + """j(ump) lineno + Set the next line that will be executed. Only available in + the bottom-most frame. This lets you jump back and execute + code again, or jump forward to skip code that you don't want + to run. + + It should be noted that not all jumps are allowed -- for + instance it is not possible to jump into the middle of a + for loop or out of a finally clause. + """ + if self.curindex + 1 != len(self.stack): + self.error('You can only jump within the bottom frame') + return + try: + arg = int(arg) + except ValueError: + self.error("The 'jump' command requires a line number") + else: + try: + # Do the jump, fix up our copy of the stack, and display the + # new position + self.curframe.f_lineno = arg + self.stack[self.curindex] = self.stack[self.curindex][0], arg + self.print_stack_entry(self.stack[self.curindex]) + except ValueError as e: + self.error('Jump failed: %s' % e) + do_j = do_jump + + def do_debug(self, arg): + """debug code + Enter a recursive debugger that steps through the code + argument (which is an arbitrary expression or statement to be + executed in the current environment). + """ + sys.settrace(None) + globals = self.curframe.f_globals + locals = self.curframe_locals + p = Pdb(self.completekey, self.stdin, self.stdout) + p.prompt = "(%s) " % self.prompt.strip() + self.message("ENTERING RECURSIVE DEBUGGER") + try: + sys.call_tracing(p.run, (arg, globals, locals)) + except Exception: + self._error_exc() + self.message("LEAVING RECURSIVE DEBUGGER") + sys.settrace(self.trace_dispatch) + self.lastcmd = p.lastcmd + + complete_debug = _complete_expression + + def do_quit(self, arg): + """q(uit)\nexit + Quit from the debugger. The program being executed is aborted. + """ + self._user_requested_quit = True + self.set_quit() + return 1 + + do_q = do_quit + do_exit = do_quit + + def do_EOF(self, arg): + """EOF + Handles the receipt of EOF as a command. + """ + self.message('') + self._user_requested_quit = True + self.set_quit() + return 1 + + def do_args(self, arg): + """a(rgs) + Print the argument list of the current function. + """ + co = self.curframe.f_code + dict = self.curframe_locals + n = co.co_argcount + co.co_kwonlyargcount + if co.co_flags & inspect.CO_VARARGS: n = n+1 + if co.co_flags & inspect.CO_VARKEYWORDS: n = n+1 + for i in range(n): + name = co.co_varnames[i] + if name in dict: + self.message('%s = %r' % (name, dict[name])) + else: + self.message('%s = *** undefined ***' % (name,)) + do_a = do_args + + def do_retval(self, arg): + """retval + Print the return value for the last return of a function. 
+ """ + if '__return__' in self.curframe_locals: + self.message(repr(self.curframe_locals['__return__'])) + else: + self.error('Not yet returned!') + do_rv = do_retval + + def _getval(self, arg): + try: + return eval(arg, self.curframe.f_globals, self.curframe_locals) + except: + self._error_exc() + raise + + def _getval_except(self, arg, frame=None): + try: + if frame is None: + return eval(arg, self.curframe.f_globals, self.curframe_locals) + else: + return eval(arg, frame.f_globals, frame.f_locals) + except: + exc_info = sys.exc_info()[:2] + err = traceback.format_exception_only(*exc_info)[-1].strip() + return _rstr('** raised %s **' % err) + + def _error_exc(self): + exc_info = sys.exc_info()[:2] + self.error(traceback.format_exception_only(*exc_info)[-1].strip()) + + def _msg_val_func(self, arg, func): + try: + val = self._getval(arg) + except: + return # _getval() has displayed the error + try: + self.message(func(val)) + except: + self._error_exc() + + def do_p(self, arg): + """p expression + Print the value of the expression. + """ + self._msg_val_func(arg, repr) + + def do_pp(self, arg): + """pp expression + Pretty-print the value of the expression. + """ + self._msg_val_func(arg, pprint.pformat) + + complete_print = _complete_expression + complete_p = _complete_expression + complete_pp = _complete_expression + + def do_list(self, arg): + """l(ist) [first [,last] | .] + + List source code for the current file. Without arguments, + list 11 lines around the current line or continue the previous + listing. With . as argument, list 11 lines around the current + line. With one argument, list 11 lines starting at that line. + With two arguments, list the given range; if the second + argument is less than the first, it is a count. + + The current line in the current frame is indicated by "->". + If an exception is being debugged, the line where the + exception was originally raised or propagated is indicated by + ">>", if it differs from the current line. + """ + self.lastcmd = 'list' + last = None + if arg and arg != '.': + try: + if ',' in arg: + first, last = arg.split(',') + first = int(first.strip()) + last = int(last.strip()) + if last < first: + # assume it's a count + last = first + last + else: + first = int(arg.strip()) + first = max(1, first - 5) + except ValueError: + self.error('Error in argument: %r' % arg) + return + elif self.lineno is None or arg == '.': + first = max(1, self.curframe.f_lineno - 5) + else: + first = self.lineno + 1 + if last is None: + last = first + 10 + filename = self.curframe.f_code.co_filename + # gh-93696: stdlib frozen modules provide a useful __file__ + # this workaround can be removed with the closure of gh-89815 + if filename.startswith("

+ ''' % (bgcol, fgcol, title) + if prelude: + result = result + ''' +<tr bgcolor="%s"><td rowspan=2>%s</td> +<td colspan=2>%s</td></tr> +<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap) + else: + result = result + ''' +<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap) + + return result + '\n<td width="100%%">%s</td></tr></table>
' % contents + + def bigsection(self, title, *args): + """Format a section with a big heading.""" + title = '%s' % title + return self.section(title, *args) + + def preformat(self, text): + """Format literal preformatted text.""" + text = self.escape(text.expandtabs()) + return replace(text, '\n\n', '\n \n', '\n\n', '\n \n', + ' ', ' ', '\n', '
\n') + + def multicolumn(self, list, format, cols=4): + """Format a list of items into a multi-column list.""" + result = '' + rows = (len(list)+cols-1)//cols + for col in range(cols): + result = result + '' % (100//cols) + for i in range(rows*col, rows*col+rows): + if i < len(list): + result = result + format(list[i]) + '
<br>\n' + result = result + '</td>' + return '<table width="100%%" summary="list"><tr>%s</tr></table>
' % result + + def grey(self, text): return '%s' % text + + def namelink(self, name, *dicts): + """Make a link for an identifier, given name-to-URL mappings.""" + for dict in dicts: + if name in dict: + return '
%s' % (dict[name], name) + return name + + def classlink(self, object, modname): + """Make a link for a class.""" + name, module = object.__name__, sys.modules.get(object.__module__) + if hasattr(module, name) and getattr(module, name) is object: + return '%s' % ( + module.__name__, name, classname(object, modname)) + return classname(object, modname) + + def modulelink(self, object): + """Make a link for a module.""" + return '%s' % (object.__name__, object.__name__) + + def modpkglink(self, modpkginfo): + """Make a link for a module or package to display in an index.""" + name, path, ispackage, shadowed = modpkginfo + if shadowed: + return self.grey(name) + if path: + url = '%s.%s.html' % (path, name) + else: + url = '%s.html' % name + if ispackage: + text = '%s (package)' % name + else: + text = name + return '%s' % (url, text) + + def filelink(self, url, path): + """Make a link to source file.""" + return '%s' % (url, path) + + def markup(self, text, escape=None, funcs={}, classes={}, methods={}): + """Mark up some plain text, given a context of symbols to look for. + Each context dictionary maps object names to anchor names.""" + escape = escape or self.escape + results = [] + here = 0 + pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|' + r'RFC[- ]?(\d+)|' + r'PEP[- ]?(\d+)|' + r'(self\.)?(\w+))') + while True: + match = pattern.search(text, here) + if not match: break + start, end = match.span() + results.append(escape(text[here:start])) + + all, scheme, rfc, pep, selfdot, name = match.groups() + if scheme: + url = escape(all).replace('"', '"') + results.append('%s' % (url, url)) + elif rfc: + url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc) + results.append('%s' % (url, escape(all))) + elif pep: + url = 'https://www.python.org/dev/peps/pep-%04d/' % int(pep) + results.append('%s' % (url, escape(all))) + elif selfdot: + # Create a link for methods like 'self.method(...)' + # and use for attributes like 'self.attr' + if text[end:end+1] == '(': + results.append('self.' + self.namelink(name, methods)) + else: + results.append('self.%s' % name) + elif text[end:end+1] == '(': + results.append(self.namelink(name, methods, funcs, classes)) + else: + results.append(self.namelink(name, classes)) + here = end + results.append(escape(text[here:])) + return ''.join(results) + + # ---------------------------------------------- type-specific routines + + def formattree(self, tree, modname, parent=None): + """Produce HTML for a class tree as given by inspect.getclasstree().""" + result = '' + for entry in tree: + if type(entry) is type(()): + c, bases = entry + result = result + '

<dt><font face="helvetica, arial">' + result = result + self.classlink(c, modname) + if bases and bases != (parent,): + parents = [] + for base in bases: + parents.append(self.classlink(base, modname)) + result = result + '(' + ', '.join(parents) + ')' + result = result + '\n</font></dt>' + elif type(entry) is type([]): + result = result + '<dd>\n%s</dd>\n' % self.formattree( + entry, modname, c) + return '<dl>\n%s</dl>
\n' % result + + def docmodule(self, object, name=None, mod=None, *ignored): + """Produce HTML documentation for a module object.""" + name = object.__name__ # ignore the passed-in name + try: + all = object.__all__ + except AttributeError: + all = None + parts = name.split('.') + links = [] + for i in range(len(parts)-1): + links.append( + '%s' % + ('.'.join(parts[:i+1]), parts[i])) + linkedname = '.'.join(links + parts[-1:]) + head = '%s' % linkedname + try: + path = inspect.getabsfile(object) + url = urllib.parse.quote(path) + filelink = self.filelink(url, path) + except TypeError: + filelink = '(built-in)' + info = [] + if hasattr(object, '__version__'): + version = str(object.__version__) + if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': + version = version[11:-1].strip() + info.append('version %s' % self.escape(version)) + if hasattr(object, '__date__'): + info.append(self.escape(str(object.__date__))) + if info: + head = head + ' (%s)' % ', '.join(info) + docloc = self.getdocloc(object) + if docloc is not None: + docloc = '
<br><a href="%(docloc)s">Module Reference</a>' % locals() + else: + docloc = '' + result = self.heading( + head, '#ffffff', '#7799ee', + '<a href=".">index</a><br>
' + filelink + docloc) + + modules = inspect.getmembers(object, inspect.ismodule) + + classes, cdict = [], {} + for key, value in inspect.getmembers(object, _isclass): + # if __all__ exists, believe it. Otherwise use old heuristic. + if (all is not None or + (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + classes.append((key, value)) + cdict[key] = cdict[value] = '#' + key + for key, value in classes: + for base in value.__bases__: + key, modname = base.__name__, base.__module__ + module = sys.modules.get(modname) + if modname != name and module and hasattr(module, key): + if getattr(module, key) is base: + if not key in cdict: + cdict[key] = cdict[base] = modname + '.html#' + key + funcs, fdict = [], {} + for key, value in inspect.getmembers(object, inspect.isroutine): + # if __all__ exists, believe it. Otherwise use old heuristic. + if (all is not None or + inspect.isbuiltin(value) or inspect.getmodule(value) is object): + if visiblename(key, all, object): + funcs.append((key, value)) + fdict[key] = '#-' + key + if inspect.isfunction(value): fdict[value] = fdict[key] + data = [] + for key, value in inspect.getmembers(object, isdata): + if visiblename(key, all, object): + data.append((key, value)) + + doc = self.markup(getdoc(object), self.preformat, fdict, cdict) + doc = doc and '%s' % doc + result = result + '

<p>%s</p>

\n' % doc + + if hasattr(object, '__path__'): + modpkgs = [] + for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): + modpkgs.append((modname, name, ispkg, 0)) + modpkgs.sort() + contents = self.multicolumn(modpkgs, self.modpkglink) + result = result + self.bigsection( + 'Package Contents', '#ffffff', '#aa55cc', contents) + elif modules: + contents = self.multicolumn( + modules, lambda t: self.modulelink(t[1])) + result = result + self.bigsection( + 'Modules', '#ffffff', '#aa55cc', contents) + + if classes: + classlist = [value for (key, value) in classes] + contents = [ + self.formattree(inspect.getclasstree(classlist, 1), name)] + for key, value in classes: + contents.append(self.document(value, key, name, fdict, cdict)) + result = result + self.bigsection( + 'Classes', '#ffffff', '#ee77aa', ' '.join(contents)) + if funcs: + contents = [] + for key, value in funcs: + contents.append(self.document(value, key, name, fdict, cdict)) + result = result + self.bigsection( + 'Functions', '#ffffff', '#eeaa77', ' '.join(contents)) + if data: + contents = [] + for key, value in data: + contents.append(self.document(value, key)) + result = result + self.bigsection( + 'Data', '#ffffff', '#55aa55', '
\n'.join(contents)) + if hasattr(object, '__author__'): + contents = self.markup(str(object.__author__), self.preformat) + result = result + self.bigsection( + 'Author', '#ffffff', '#7799ee', contents) + if hasattr(object, '__credits__'): + contents = self.markup(str(object.__credits__), self.preformat) + result = result + self.bigsection( + 'Credits', '#ffffff', '#7799ee', contents) + + return result + + def docclass(self, object, name=None, mod=None, funcs={}, classes={}, + *ignored): + """Produce HTML documentation for a class object.""" + realname = object.__name__ + name = name or realname + bases = object.__bases__ + + contents = [] + push = contents.append + + # Cute little class to pump out a horizontal rule between sections. + class HorizontalRule: + def __init__(self): + self.needone = 0 + def maybe(self): + if self.needone: + push('
<hr>\n') + self.needone = 1 + hr = HorizontalRule() + + # List the mro, if non-trivial. + mro = deque(inspect.getmro(object)) + if len(mro) > 2: + hr.maybe() + push('<dl><dt>Method resolution order:</dt>\n') + for base in mro: + push('<dd>%s</dd>\n' % self.classlink(base, + object.__module__)) + push('</dl>
\n') + + def spill(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + try: + value = getattr(object, name) + except Exception: + # Some descriptors may meet a failure in their __get__. + # (bug #1785) + push(self.docdata(value, name, mod)) + else: + push(self.document(value, name, mod, + funcs, classes, mdict, object)) + push('\n') + return attrs + + def spilldescriptors(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + push(self.docdata(value, name, mod)) + return attrs + + def spilldata(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + base = self.docother(getattr(object, name), name, mod) + doc = getdoc(value) + if not doc: + push('
<dl><dt>%s</dl>\n' % base) + else: + doc = self.markup(getdoc(value), self.preformat, + funcs, classes, mdict) + doc = '<dd><tt>%s</tt>' % doc + push('<dl><dt>%s%s</dl>
\n' % (base, doc)) + push('\n') + return attrs + + attrs = [(name, kind, cls, value) + for name, kind, cls, value in classify_class_attrs(object) + if visiblename(name, obj=object)] + + mdict = {} + for key, kind, homecls, value in attrs: + mdict[key] = anchor = '#' + name + '-' + key + try: + value = getattr(object, name) + except Exception: + # Some descriptors may meet a failure in their __get__. + # (bug #1785) + pass + try: + # The value may not be hashable (e.g., a data attr with + # a dict or list value). + mdict[value] = anchor + except TypeError: + pass + + while attrs: + if mro: + thisclass = mro.popleft() + else: + thisclass = attrs[0][2] + attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) + + if object is not builtins.object and thisclass is builtins.object: + attrs = inherited + continue + elif thisclass is object: + tag = 'defined here' + else: + tag = 'inherited from %s' % self.classlink(thisclass, + object.__module__) + tag += ':
\n' + + sort_attributes(attrs, object) + + # Pump out the attrs, segregated by kind. + attrs = spill('Methods %s' % tag, attrs, + lambda t: t[1] == 'method') + attrs = spill('Class methods %s' % tag, attrs, + lambda t: t[1] == 'class method') + attrs = spill('Static methods %s' % tag, attrs, + lambda t: t[1] == 'static method') + attrs = spilldescriptors("Readonly properties %s" % tag, attrs, + lambda t: t[1] == 'readonly property') + attrs = spilldescriptors('Data descriptors %s' % tag, attrs, + lambda t: t[1] == 'data descriptor') + attrs = spilldata('Data and other attributes %s' % tag, attrs, + lambda t: t[1] == 'data') + assert attrs == [] + attrs = inherited + + contents = ''.join(contents) + + if name == realname: + title = 'class %s' % ( + name, realname) + else: + title = '%s = class %s' % ( + name, name, realname) + if bases: + parents = [] + for base in bases: + parents.append(self.classlink(base, object.__module__)) + title = title + '(%s)' % ', '.join(parents) + + decl = '' + try: + signature = inspect.signature(object) + except (ValueError, TypeError): + signature = None + if signature: + argspec = str(signature) + if argspec and argspec != '()': + decl = name + self.escape(argspec) + '\n\n' + + doc = getdoc(object) + if decl: + doc = decl + (doc or '') + doc = self.markup(doc, self.preformat, funcs, classes, mdict) + doc = doc and '%s
 
' % doc + + return self.section(title, '#000000', '#ffc8d8', contents, 3, doc) + + def formatvalue(self, object): + """Format an argument default value as text.""" + return self.grey('=' + self.repr(object)) + + def docroutine(self, object, name=None, mod=None, + funcs={}, classes={}, methods={}, cl=None): + """Produce HTML documentation for a function or method object.""" + realname = object.__name__ + name = name or realname + anchor = (cl and cl.__name__ or '') + '-' + name + note = '' + skipdocs = 0 + if _is_bound_method(object): + imclass = object.__self__.__class__ + if cl: + if imclass is not cl: + note = ' from ' + self.classlink(imclass, mod) + else: + if object.__self__ is not None: + note = ' method of %s instance' % self.classlink( + object.__self__.__class__, mod) + else: + note = ' unbound %s method' % self.classlink(imclass,mod) + + if (inspect.iscoroutinefunction(object) or + inspect.isasyncgenfunction(object)): + asyncqualifier = 'async ' + else: + asyncqualifier = '' + + if name == realname: + title = '%s' % (anchor, realname) + else: + if cl and inspect.getattr_static(cl, realname, []) is object: + reallink = '%s' % ( + cl.__name__ + '-' + realname, realname) + skipdocs = 1 + else: + reallink = realname + title = '%s = %s' % ( + anchor, name, reallink) + argspec = None + if inspect.isroutine(object): + try: + signature = inspect.signature(object) + except (ValueError, TypeError): + signature = None + if signature: + argspec = str(signature) + if realname == '': + title = '%s lambda ' % name + # XXX lambda's won't usually have func_annotations['return'] + # since the syntax doesn't support but it is possible. + # So removing parentheses isn't truly safe. + argspec = argspec[1:-1] # remove parentheses + if not argspec: + argspec = '(...)' + + decl = asyncqualifier + title + self.escape(argspec) + (note and + self.grey('%s' % note)) + + if skipdocs: + return '
<dl><dt>%s</dt></dl>\n' % decl + else: + doc = self.markup( + getdoc(object), self.preformat, funcs, classes, methods) + doc = doc and '<dd><tt>%s</tt></dd>' % doc + return '<dl><dt>%s</dt>%s</dl>
\n' % (decl, doc) + + def docdata(self, object, name=None, mod=None, cl=None): + """Produce html documentation for a data descriptor.""" + results = [] + push = results.append + + if name: + push('
<dl><dt><strong>%s</strong></dt>\n' % name) + doc = self.markup(getdoc(object), self.preformat) + if doc: + push('<dd><tt>%s</tt></dd>\n' % doc) + push('</dl>
\n') + + return ''.join(results) + + docproperty = docdata + + def docother(self, object, name=None, mod=None, *ignored): + """Produce HTML documentation for a data object.""" + lhs = name and '%s = ' % name or '' + return lhs + self.repr(object) + + def index(self, dir, shadowed=None): + """Generate an HTML index for a directory of modules.""" + modpkgs = [] + if shadowed is None: shadowed = {} + for importer, name, ispkg in pkgutil.iter_modules([dir]): + if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name): + # ignore a module if its name contains a surrogate character + continue + modpkgs.append((name, '', ispkg, name in shadowed)) + shadowed[name] = 1 + + modpkgs.sort() + contents = self.multicolumn(modpkgs, self.modpkglink) + return self.bigsection(dir, '#ffffff', '#ee77aa', contents) + +# -------------------------------------------- text documentation generator + +class TextRepr(Repr): + """Class for safely making a text representation of a Python object.""" + def __init__(self): + Repr.__init__(self) + self.maxlist = self.maxtuple = 20 + self.maxdict = 10 + self.maxstring = self.maxother = 100 + + def repr1(self, x, level): + if hasattr(type(x), '__name__'): + methodname = 'repr_' + '_'.join(type(x).__name__.split()) + if hasattr(self, methodname): + return getattr(self, methodname)(x, level) + return cram(stripid(repr(x)), self.maxother) + + def repr_string(self, x, level): + test = cram(x, self.maxstring) + testrepr = repr(test) + if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): + # Backslashes are only literal in the string and are never + # needed to make any special characters, so show a raw string. + return 'r' + testrepr[0] + test + testrepr[0] + return testrepr + + repr_str = repr_string + + def repr_instance(self, x, level): + try: + return cram(stripid(repr(x)), self.maxstring) + except: + return '<%s instance>' % x.__class__.__name__ + +class TextDoc(Doc): + """Formatter class for text documentation.""" + + # ------------------------------------------- text formatting utilities + + _repr_instance = TextRepr() + repr = _repr_instance.repr + + def bold(self, text): + """Format a string in bold by overstriking.""" + return ''.join(ch + '\b' + ch for ch in text) + + def indent(self, text, prefix=' '): + """Indent text by prepending a given prefix to each line.""" + if not text: return '' + lines = [prefix + line for line in text.split('\n')] + if lines: lines[-1] = lines[-1].rstrip() + return '\n'.join(lines) + + def section(self, title, contents): + """Format a section with a given heading.""" + clean_contents = self.indent(contents).rstrip() + return self.bold(title) + '\n' + clean_contents + '\n\n' + + # ---------------------------------------------- type-specific routines + + def formattree(self, tree, modname, parent=None, prefix=''): + """Render in text a class tree as returned by inspect.getclasstree().""" + result = '' + for entry in tree: + if type(entry) is type(()): + c, bases = entry + result = result + prefix + classname(c, modname) + if bases and bases != (parent,): + parents = (classname(c, modname) for c in bases) + result = result + '(%s)' % ', '.join(parents) + result = result + '\n' + elif type(entry) is type([]): + result = result + self.formattree( + entry, modname, c, prefix + ' ') + return result + + def docmodule(self, object, name=None, mod=None): + """Produce text documentation for a given module object.""" + name = object.__name__ # ignore the passed-in name + synop, desc = splitdoc(getdoc(object)) + result = self.section('NAME', name + 
(synop and ' - ' + synop)) + all = getattr(object, '__all__', None) + docloc = self.getdocloc(object) + if docloc is not None: + result = result + self.section('MODULE REFERENCE', docloc + """ + +The following documentation is automatically generated from the Python +source files. It may be incomplete, incorrect or include features that +are considered implementation detail and may vary between Python +implementations. When in doubt, consult the module reference at the +location listed above. +""") + + if desc: + result = result + self.section('DESCRIPTION', desc) + + classes = [] + for key, value in inspect.getmembers(object, _isclass): + # if __all__ exists, believe it. Otherwise use old heuristic. + if (all is not None + or (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + classes.append((key, value)) + funcs = [] + for key, value in inspect.getmembers(object, inspect.isroutine): + # if __all__ exists, believe it. Otherwise use old heuristic. + if (all is not None or + inspect.isbuiltin(value) or inspect.getmodule(value) is object): + if visiblename(key, all, object): + funcs.append((key, value)) + data = [] + for key, value in inspect.getmembers(object, isdata): + if visiblename(key, all, object): + data.append((key, value)) + + modpkgs = [] + modpkgs_names = set() + if hasattr(object, '__path__'): + for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): + modpkgs_names.add(modname) + if ispkg: + modpkgs.append(modname + ' (package)') + else: + modpkgs.append(modname) + + modpkgs.sort() + result = result + self.section( + 'PACKAGE CONTENTS', '\n'.join(modpkgs)) + + # Detect submodules as sometimes created by C extensions + submodules = [] + for key, value in inspect.getmembers(object, inspect.ismodule): + if value.__name__.startswith(name + '.') and key not in modpkgs_names: + submodules.append(key) + if submodules: + submodules.sort() + result = result + self.section( + 'SUBMODULES', '\n'.join(submodules)) + + if classes: + classlist = [value for key, value in classes] + contents = [self.formattree( + inspect.getclasstree(classlist, 1), name)] + for key, value in classes: + contents.append(self.document(value, key, name)) + result = result + self.section('CLASSES', '\n'.join(contents)) + + if funcs: + contents = [] + for key, value in funcs: + contents.append(self.document(value, key, name)) + result = result + self.section('FUNCTIONS', '\n'.join(contents)) + + if data: + contents = [] + for key, value in data: + contents.append(self.docother(value, key, name, maxlen=70)) + result = result + self.section('DATA', '\n'.join(contents)) + + if hasattr(object, '__version__'): + version = str(object.__version__) + if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': + version = version[11:-1].strip() + result = result + self.section('VERSION', version) + if hasattr(object, '__date__'): + result = result + self.section('DATE', str(object.__date__)) + if hasattr(object, '__author__'): + result = result + self.section('AUTHOR', str(object.__author__)) + if hasattr(object, '__credits__'): + result = result + self.section('CREDITS', str(object.__credits__)) + try: + file = inspect.getabsfile(object) + except TypeError: + file = '(built-in)' + result = result + self.section('FILE', file) + return result + + def docclass(self, object, name=None, mod=None, *ignored): + """Produce text documentation for a given class object.""" + realname = object.__name__ + name = name or realname + bases = object.__bases__ + + def makename(c, 
m=object.__module__): + return classname(c, m) + + if name == realname: + title = 'class ' + self.bold(realname) + else: + title = self.bold(name) + ' = class ' + realname + if bases: + parents = map(makename, bases) + title = title + '(%s)' % ', '.join(parents) + + contents = [] + push = contents.append + + try: + signature = inspect.signature(object) + except (ValueError, TypeError): + signature = None + if signature: + argspec = str(signature) + if argspec and argspec != '()': + push(name + argspec + '\n') + + doc = getdoc(object) + if doc: + push(doc + '\n') + + # List the mro, if non-trivial. + mro = deque(inspect.getmro(object)) + if len(mro) > 2: + push("Method resolution order:") + for base in mro: + push(' ' + makename(base)) + push('') + + # List the built-in subclasses, if any: + subclasses = sorted( + (str(cls.__name__) for cls in type.__subclasses__(object) + if not cls.__name__.startswith("_") and cls.__module__ == "builtins"), + key=str.lower + ) + no_of_subclasses = len(subclasses) + MAX_SUBCLASSES_TO_DISPLAY = 4 + if subclasses: + push("Built-in subclasses:") + for subclassname in subclasses[:MAX_SUBCLASSES_TO_DISPLAY]: + push(' ' + subclassname) + if no_of_subclasses > MAX_SUBCLASSES_TO_DISPLAY: + push(' ... and ' + + str(no_of_subclasses - MAX_SUBCLASSES_TO_DISPLAY) + + ' other subclasses') + push('') + + # Cute little class to pump out a horizontal rule between sections. + class HorizontalRule: + def __init__(self): + self.needone = 0 + def maybe(self): + if self.needone: + push('-' * 70) + self.needone = 1 + hr = HorizontalRule() + + def spill(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + try: + value = getattr(object, name) + except Exception: + # Some descriptors may meet a failure in their __get__. + # (bug #1785) + push(self.docdata(value, name, mod)) + else: + push(self.document(value, + name, mod, object)) + return attrs + + def spilldescriptors(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + push(self.docdata(value, name, mod)) + return attrs + + def spilldata(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + doc = getdoc(value) + try: + obj = getattr(object, name) + except AttributeError: + obj = homecls.__dict__[name] + push(self.docother(obj, name, mod, maxlen=70, doc=doc) + + '\n') + return attrs + + attrs = [(name, kind, cls, value) + for name, kind, cls, value in classify_class_attrs(object) + if visiblename(name, obj=object)] + + while attrs: + if mro: + thisclass = mro.popleft() + else: + thisclass = attrs[0][2] + attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) + + if object is not builtins.object and thisclass is builtins.object: + attrs = inherited + continue + elif thisclass is object: + tag = "defined here" + else: + tag = "inherited from %s" % classname(thisclass, + object.__module__) + + sort_attributes(attrs, object) + + # Pump out the attrs, segregated by kind. 
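+            # Each spill* helper below formats the attributes matching its predicate and returns the remainder for the next call.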
+ attrs = spill("Methods %s:\n" % tag, attrs, + lambda t: t[1] == 'method') + attrs = spill("Class methods %s:\n" % tag, attrs, + lambda t: t[1] == 'class method') + attrs = spill("Static methods %s:\n" % tag, attrs, + lambda t: t[1] == 'static method') + attrs = spilldescriptors("Readonly properties %s:\n" % tag, attrs, + lambda t: t[1] == 'readonly property') + attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs, + lambda t: t[1] == 'data descriptor') + attrs = spilldata("Data and other attributes %s:\n" % tag, attrs, + lambda t: t[1] == 'data') + + assert attrs == [] + attrs = inherited + + contents = '\n'.join(contents) + if not contents: + return title + '\n' + return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n' + + def formatvalue(self, object): + """Format an argument default value as text.""" + return '=' + self.repr(object) + + def docroutine(self, object, name=None, mod=None, cl=None): + """Produce text documentation for a function or method object.""" + realname = object.__name__ + name = name or realname + note = '' + skipdocs = 0 + if _is_bound_method(object): + imclass = object.__self__.__class__ + if cl: + if imclass is not cl: + note = ' from ' + classname(imclass, mod) + else: + if object.__self__ is not None: + note = ' method of %s instance' % classname( + object.__self__.__class__, mod) + else: + note = ' unbound %s method' % classname(imclass,mod) + + if (inspect.iscoroutinefunction(object) or + inspect.isasyncgenfunction(object)): + asyncqualifier = 'async ' + else: + asyncqualifier = '' + + if name == realname: + title = self.bold(realname) + else: + if cl and inspect.getattr_static(cl, realname, []) is object: + skipdocs = 1 + title = self.bold(name) + ' = ' + realname + argspec = None + + if inspect.isroutine(object): + try: + signature = inspect.signature(object) + except (ValueError, TypeError): + signature = None + if signature: + argspec = str(signature) + if realname == '': + title = self.bold(name) + ' lambda ' + # XXX lambda's won't usually have func_annotations['return'] + # since the syntax doesn't support but it is possible. + # So removing parentheses isn't truly safe. + argspec = argspec[1:-1] # remove parentheses + if not argspec: + argspec = '(...)' + decl = asyncqualifier + title + argspec + note + + if skipdocs: + return decl + '\n' + else: + doc = getdoc(object) or '' + return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n') + + def docdata(self, object, name=None, mod=None, cl=None): + """Produce text documentation for a data descriptor.""" + results = [] + push = results.append + + if name: + push(self.bold(name)) + push('\n') + doc = getdoc(object) or '' + if doc: + push(self.indent(doc)) + push('\n') + return ''.join(results) + + docproperty = docdata + + def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None): + """Produce text documentation for a data object.""" + repr = self.repr(object) + if maxlen: + line = (name and name + ' = ' or '') + repr + chop = maxlen - len(line) + if chop < 0: repr = repr[:chop] + '...' 
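+        # Rebuild the display line with the (possibly truncated) repr, using overstrike bold for the name.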
+ line = (name and self.bold(name) + ' = ' or '') + repr + if not doc: + doc = getdoc(object) + if doc: + line += '\n' + self.indent(str(doc)) + '\n' + return line + +class _PlainTextDoc(TextDoc): + """Subclass of TextDoc which overrides string styling""" + def bold(self, text): + return text + +# --------------------------------------------------------- user interfaces + +def pager(text): + """The first time this is called, determine what kind of pager to use.""" + global pager + pager = getpager() + pager(text) + +def getpager(): + """Decide what method to use for paging through text.""" + if not hasattr(sys.stdin, "isatty"): + return plainpager + if not hasattr(sys.stdout, "isatty"): + return plainpager + if not sys.stdin.isatty() or not sys.stdout.isatty(): + return plainpager + use_pager = os.environ.get('MANPAGER') or os.environ.get('PAGER') + if use_pager: + if sys.platform == 'win32': # pipes completely broken in Windows + return lambda text: tempfilepager(plain(text), use_pager) + elif os.environ.get('TERM') in ('dumb', 'emacs'): + return lambda text: pipepager(plain(text), use_pager) + else: + return lambda text: pipepager(text, use_pager) + if os.environ.get('TERM') in ('dumb', 'emacs'): + return plainpager + if sys.platform == 'win32': + return lambda text: tempfilepager(plain(text), 'more <') + if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: + return lambda text: pipepager(text, 'less') + + import tempfile + (fd, filename) = tempfile.mkstemp() + os.close(fd) + try: + if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: + return lambda text: pipepager(text, 'more') + else: + return ttypager + finally: + os.unlink(filename) + +def plain(text): + """Remove boldface formatting from text.""" + return re.sub('.\b', '', text) + +def pipepager(text, cmd): + """Page through text by feeding it to another program.""" + import subprocess + proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, + errors='backslashreplace') + try: + with proc.stdin as pipe: + try: + pipe.write(text) + except KeyboardInterrupt: + # We've hereby abandoned whatever text hasn't been written, + # but the pager is still in control of the terminal. + pass + except OSError: + pass # Ignore broken pipes caused by quitting the pager program. + while True: + try: + proc.wait() + break + except KeyboardInterrupt: + # Ignore ctl-c like the pager itself does. Otherwise the pager is + # left running and the terminal is in raw mode and unusable. 
+ pass + +def tempfilepager(text, cmd): + """Page through text by invoking a program on a temporary file.""" + import tempfile + with tempfile.TemporaryDirectory() as tempdir: + filename = os.path.join(tempdir, 'pydoc.out') + with open(filename, 'w', errors='backslashreplace', + encoding=os.device_encoding(0) if + sys.platform == 'win32' else None + ) as file: + file.write(text) + os.system(cmd + ' "' + filename + '"') + +def _escape_stdout(text): + # Escape non-encodable characters to avoid encoding errors later + encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8' + return text.encode(encoding, 'backslashreplace').decode(encoding) + +def ttypager(text): + """Page through text on a text terminal.""" + lines = plain(_escape_stdout(text)).split('\n') + try: + import tty + fd = sys.stdin.fileno() + old = tty.tcgetattr(fd) + tty.setcbreak(fd) + getchar = lambda: sys.stdin.read(1) + except (ImportError, AttributeError, io.UnsupportedOperation): + tty = None + getchar = lambda: sys.stdin.readline()[:-1][:1] + + try: + try: + h = int(os.environ.get('LINES', 0)) + except ValueError: + h = 0 + if h <= 1: + h = 25 + r = inc = h - 1 + sys.stdout.write('\n'.join(lines[:inc]) + '\n') + while lines[r:]: + sys.stdout.write('-- more --') + sys.stdout.flush() + c = getchar() + + if c in ('q', 'Q'): + sys.stdout.write('\r \r') + break + elif c in ('\r', '\n'): + sys.stdout.write('\r \r' + lines[r] + '\n') + r = r + 1 + continue + if c in ('b', 'B', '\x1b'): + r = r - inc - inc + if r < 0: r = 0 + sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n') + r = r + inc + + finally: + if tty: + tty.tcsetattr(fd, tty.TCSAFLUSH, old) + +def plainpager(text): + """Simply print unformatted text. This is the ultimate fallback.""" + sys.stdout.write(plain(_escape_stdout(text))) + +def describe(thing): + """Produce a short description of the given thing.""" + if inspect.ismodule(thing): + if thing.__name__ in sys.builtin_module_names: + return 'built-in module ' + thing.__name__ + if hasattr(thing, '__path__'): + return 'package ' + thing.__name__ + else: + return 'module ' + thing.__name__ + if inspect.isbuiltin(thing): + return 'built-in function ' + thing.__name__ + if inspect.isgetsetdescriptor(thing): + return 'getset descriptor %s.%s.%s' % ( + thing.__objclass__.__module__, thing.__objclass__.__name__, + thing.__name__) + if inspect.ismemberdescriptor(thing): + return 'member descriptor %s.%s.%s' % ( + thing.__objclass__.__module__, thing.__objclass__.__name__, + thing.__name__) + if _isclass(thing): + return 'class ' + thing.__name__ + if inspect.isfunction(thing): + return 'function ' + thing.__name__ + if inspect.ismethod(thing): + return 'method ' + thing.__name__ + return type(thing).__name__ + +def locate(path, forceload=0): + """Locate an object by name or dotted path, importing as necessary.""" + parts = [part for part in path.split('.') if part] + module, n = None, 0 + while n < len(parts): + nextmodule = safeimport('.'.join(parts[:n+1]), forceload) + if nextmodule: module, n = nextmodule, n + 1 + else: break + if module: + object = module + else: + object = builtins + for part in parts[n:]: + try: + object = getattr(object, part) + except AttributeError: + return None + return object + +# --------------------------------------- interactive interpreter interface + +text = TextDoc() +plaintext = _PlainTextDoc() +html = HTMLDoc() + +def resolve(thing, forceload=0): + """Given an object or a path to an object, get the object and its name.""" + if isinstance(thing, str): + object = locate(thing, 
forceload) + if object is None: + raise ImportError('''\ +No Python documentation found for %r. +Use help() to get the interactive help utility. +Use help(str) for help on the str class.''' % thing) + return object, thing + else: + name = getattr(thing, '__name__', None) + return thing, name if isinstance(name, str) else None + +def render_doc(thing, title='Python Library Documentation: %s', forceload=0, + renderer=None): + """Render text documentation, given an object or a path to an object.""" + if renderer is None: + renderer = text + object, name = resolve(thing, forceload) + desc = describe(object) + module = inspect.getmodule(object) + if name and '.' in name: + desc += ' in ' + name[:name.rfind('.')] + elif module and module is not object: + desc += ' in module ' + module.__name__ + + if not (inspect.ismodule(object) or + _isclass(object) or + inspect.isroutine(object) or + inspect.isdatadescriptor(object) or + _getdoc(object)): + # If the passed object is a piece of data or an instance, + # document its available methods instead of its value. + if hasattr(object, '__origin__'): + object = object.__origin__ + else: + object = type(object) + desc += ' object' + return title % desc + '\n\n' + renderer.document(object, name) + +def doc(thing, title='Python Library Documentation: %s', forceload=0, + output=None): + """Display text documentation, given an object or a path to an object.""" + try: + if output is None: + pager(render_doc(thing, title, forceload)) + else: + output.write(render_doc(thing, title, forceload, plaintext)) + except (ImportError, ErrorDuringImport) as value: + print(value) + +def writedoc(thing, forceload=0): + """Write HTML documentation to a file in the current directory.""" + try: + object, name = resolve(thing, forceload) + page = html.page(describe(object), html.document(object, name)) + with open(name + '.html', 'w', encoding='utf-8') as file: + file.write(page) + print('wrote', name + '.html') + except (ImportError, ErrorDuringImport) as value: + print(value) + +def writedocs(dir, pkgpath='', done=None): + """Write out HTML documentation for all modules in a directory tree.""" + if done is None: done = {} + for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath): + writedoc(modname) + return + +class Helper: + + # These dictionaries map a topic name to either an alias, or a tuple + # (label, seealso-items). The "label" is the label of the corresponding + # section in the .rst file under Doc/ and an index into the dictionary + # in pydoc_data/topics.py. + # + # CAUTION: if you change one of these dictionaries, be sure to adapt the + # list of needed labels in Doc/tools/extensions/pyspecific.py and + # regenerate the pydoc_data/topics.py file by running + # make pydoc-topics + # in Doc/ and copying the output file into the Lib/ directory. 
+ + keywords = { + 'False': '', + 'None': '', + 'True': '', + 'and': 'BOOLEAN', + 'as': 'with', + 'assert': ('assert', ''), + 'async': ('async', ''), + 'await': ('await', ''), + 'break': ('break', 'while for'), + 'class': ('class', 'CLASSES SPECIALMETHODS'), + 'continue': ('continue', 'while for'), + 'def': ('function', ''), + 'del': ('del', 'BASICMETHODS'), + 'elif': 'if', + 'else': ('else', 'while for'), + 'except': 'try', + 'finally': 'try', + 'for': ('for', 'break continue while'), + 'from': 'import', + 'global': ('global', 'nonlocal NAMESPACES'), + 'if': ('if', 'TRUTHVALUE'), + 'import': ('import', 'MODULES'), + 'in': ('in', 'SEQUENCEMETHODS'), + 'is': 'COMPARISON', + 'lambda': ('lambda', 'FUNCTIONS'), + 'nonlocal': ('nonlocal', 'global NAMESPACES'), + 'not': 'BOOLEAN', + 'or': 'BOOLEAN', + 'pass': ('pass', ''), + 'raise': ('raise', 'EXCEPTIONS'), + 'return': ('return', 'FUNCTIONS'), + 'try': ('try', 'EXCEPTIONS'), + 'while': ('while', 'break continue if TRUTHVALUE'), + 'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'), + 'yield': ('yield', ''), + } + # Either add symbols to this dictionary or to the symbols dictionary + # directly: Whichever is easier. They are merged later. + _strprefixes = [p + q for p in ('b', 'f', 'r', 'u') for q in ("'", '"')] + _symbols_inverse = { + 'STRINGS' : ("'", "'''", '"', '"""', *_strprefixes), + 'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&', + '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'), + 'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'), + 'UNARY' : ('-', '~'), + 'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=', + '^=', '<<=', '>>=', '**=', '//='), + 'BITWISE' : ('<<', '>>', '&', '|', '^', '~'), + 'COMPLEX' : ('j', 'J') + } + symbols = { + '%': 'OPERATORS FORMATTING', + '**': 'POWER', + ',': 'TUPLES LISTS FUNCTIONS', + '.': 'ATTRIBUTES FLOAT MODULES OBJECTS', + '...': 'ELLIPSIS', + ':': 'SLICINGS DICTIONARYLITERALS', + '@': 'def class', + '\\': 'STRINGS', + '_': 'PRIVATENAMES', + '__': 'PRIVATENAMES SPECIALMETHODS', + '`': 'BACKQUOTES', + '(': 'TUPLES FUNCTIONS CALLS', + ')': 'TUPLES FUNCTIONS CALLS', + '[': 'LISTS SUBSCRIPTS SLICINGS', + ']': 'LISTS SUBSCRIPTS SLICINGS' + } + for topic, symbols_ in _symbols_inverse.items(): + for symbol in symbols_: + topics = symbols.get(symbol, topic) + if topic not in topics: + topics = topics + ' ' + topic + symbols[symbol] = topics + + topics = { + 'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS ' + 'FUNCTIONS CLASSES MODULES FILES inspect'), + 'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS ' + 'FORMATTING TYPES'), + 'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'), + 'FORMATTING': ('formatstrings', 'OPERATORS'), + 'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS ' + 'FORMATTING TYPES'), + 'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'), + 'INTEGER': ('integers', 'int range'), + 'FLOAT': ('floating', 'float math'), + 'COMPLEX': ('imaginary', 'complex cmath'), + 'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'), + 'MAPPINGS': 'DICTIONARIES', + 'FUNCTIONS': ('typesfunctions', 'def TYPES'), + 'METHODS': ('typesmethods', 'class def CLASSES TYPES'), + 'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'), + 'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'), + 'FRAMEOBJECTS': 'TYPES', + 'TRACEBACKS': 'TYPES', + 'NONE': ('bltin-null-object', ''), + 'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'), + 'SPECIALATTRIBUTES': ('specialattrs', ''), + 'CLASSES': ('types', 'class 
SPECIALMETHODS PRIVATENAMES'), + 'MODULES': ('typesmodules', 'import'), + 'PACKAGES': 'import', + 'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN ' + 'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER ' + 'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES ' + 'LISTS DICTIONARIES'), + 'OPERATORS': 'EXPRESSIONS', + 'PRECEDENCE': 'EXPRESSIONS', + 'OBJECTS': ('objects', 'TYPES'), + 'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS ' + 'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS ' + 'NUMBERMETHODS CLASSES'), + 'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'), + 'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'), + 'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'), + 'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS ' + 'SPECIALMETHODS'), + 'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'), + 'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT ' + 'SPECIALMETHODS'), + 'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'), + 'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'), + 'DYNAMICFEATURES': ('dynamic-features', ''), + 'SCOPING': 'NAMESPACES', + 'FRAMES': 'NAMESPACES', + 'EXCEPTIONS': ('exceptions', 'try except finally raise'), + 'CONVERSIONS': ('conversions', ''), + 'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'), + 'SPECIALIDENTIFIERS': ('id-classes', ''), + 'PRIVATENAMES': ('atom-identifiers', ''), + 'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS ' + 'LISTLITERALS DICTIONARYLITERALS'), + 'TUPLES': 'SEQUENCES', + 'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'), + 'LISTS': ('typesseq-mutable', 'LISTLITERALS'), + 'LISTLITERALS': ('lists', 'LISTS LITERALS'), + 'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'), + 'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'), + 'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'), + 'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'), + 'SLICINGS': ('slicings', 'SEQUENCEMETHODS'), + 'CALLS': ('calls', 'EXPRESSIONS'), + 'POWER': ('power', 'EXPRESSIONS'), + 'UNARY': ('unary', 'EXPRESSIONS'), + 'BINARY': ('binary', 'EXPRESSIONS'), + 'SHIFTING': ('shifting', 'EXPRESSIONS'), + 'BITWISE': ('bitwise', 'EXPRESSIONS'), + 'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'), + 'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'), + 'ASSERTION': 'assert', + 'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'), + 'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'), + 'DELETION': 'del', + 'RETURNING': 'return', + 'IMPORTING': 'import', + 'CONDITIONAL': 'if', + 'LOOPING': ('compound', 'for while break continue'), + 'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'), + 'DEBUGGING': ('debugger', 'pdb'), + 'CONTEXTMANAGERS': ('context-managers', 'with'), + } + + def __init__(self, input=None, output=None): + self._input = input + self._output = output + + @property + def input(self): + return self._input or sys.stdin + + @property + def output(self): + return self._output or sys.stdout + + def __repr__(self): + if inspect.stack()[1][3] == '?': + self() + return '' + return '<%s.%s instance>' % (self.__class__.__module__, + self.__class__.__qualname__) + + _GoInteractive = object() + def __call__(self, request=_GoInteractive): + if request is not self._GoInteractive: + self.help(request) + else: + self.intro() + self.interact() + self.output.write(''' +You are now leaving help and returning to the Python interpreter. 
+If you want to ask for help on a particular object directly from the +interpreter, you can type "help(object)". Executing "help('string')" +has the same effect as typing a particular string at the help> prompt. +''') + + def interact(self): + self.output.write('\n') + while True: + try: + request = self.getline('help> ') + if not request: break + except (KeyboardInterrupt, EOFError): + break + request = request.strip() + + # Make sure significant trailing quoting marks of literals don't + # get deleted while cleaning input + if (len(request) > 2 and request[0] == request[-1] in ("'", '"') + and request[0] not in request[1:-1]): + request = request[1:-1] + if request.lower() in ('q', 'quit'): break + if request == 'help': + self.intro() + else: + self.help(request) + + def getline(self, prompt): + """Read one line, using input() when appropriate.""" + if self.input is sys.stdin: + return input(prompt) + else: + self.output.write(prompt) + self.output.flush() + return self.input.readline() + + def help(self, request): + if type(request) is type(''): + request = request.strip() + if request == 'keywords': self.listkeywords() + elif request == 'symbols': self.listsymbols() + elif request == 'topics': self.listtopics() + elif request == 'modules': self.listmodules() + elif request[:8] == 'modules ': + self.listmodules(request.split()[1]) + elif request in self.symbols: self.showsymbol(request) + elif request in ['True', 'False', 'None']: + # special case these keywords since they are objects too + doc(eval(request), 'Help on %s:') + elif request in self.keywords: self.showtopic(request) + elif request in self.topics: self.showtopic(request) + elif request: doc(request, 'Help on %s:', output=self._output) + else: doc(str, 'Help on %s:', output=self._output) + elif isinstance(request, Helper): self() + else: doc(request, 'Help on %s:', output=self._output) + self.output.write('\n') + + def intro(self): + self.output.write(''' +Welcome to Python {0}'s help utility! + +If this is your first time using Python, you should definitely check out +the tutorial on the internet at https://docs.python.org/{0}/tutorial/. + +Enter the name of any module, keyword, or topic to get help on writing +Python programs and using Python modules. To quit this help utility and +return to the interpreter, just type "quit". + +To get a list of available modules, keywords, symbols, or topics, type +"modules", "keywords", "symbols", or "topics". Each module also comes +with a one-line summary of what it does; to list the modules whose name +or summary contain a given string such as "spam", type "modules spam". +'''.format('%d.%d' % sys.version_info[:2])) + + def list(self, items, columns=4, width=80): + items = list(sorted(items)) + colw = width // columns + rows = (len(items) + columns - 1) // columns + for row in range(rows): + for col in range(columns): + i = col * rows + row + if i < len(items): + self.output.write(items[i]) + if col < columns - 1: + self.output.write(' ' + ' ' * (colw - 1 - len(items[i]))) + self.output.write('\n') + + def listkeywords(self): + self.output.write(''' +Here is a list of the Python keywords. Enter any keyword to get more help. + +''') + self.list(self.keywords.keys()) + + def listsymbols(self): + self.output.write(''' +Here is a list of the punctuation symbols which Python assigns special meaning +to. Enter any symbol to get more help. + +''') + self.list(self.symbols.keys()) + + def listtopics(self): + self.output.write(''' +Here is a list of available topics. 
Enter any topic name to get more help. + +''') + self.list(self.topics.keys()) + + def showtopic(self, topic, more_xrefs=''): + try: + import pydoc_data.topics + except ImportError: + self.output.write(''' +Sorry, topic and keyword documentation is not available because the +module "pydoc_data.topics" could not be found. +''') + return + target = self.topics.get(topic, self.keywords.get(topic)) + if not target: + self.output.write('no documentation found for %s\n' % repr(topic)) + return + if type(target) is type(''): + return self.showtopic(target, more_xrefs) + + label, xrefs = target + try: + doc = pydoc_data.topics.topics[label] + except KeyError: + self.output.write('no documentation found for %s\n' % repr(topic)) + return + doc = doc.strip() + '\n' + if more_xrefs: + xrefs = (xrefs or '') + ' ' + more_xrefs + if xrefs: + import textwrap + text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n' + wrapped_text = textwrap.wrap(text, 72) + doc += '\n%s\n' % '\n'.join(wrapped_text) + pager(doc) + + def _gettopic(self, topic, more_xrefs=''): + """Return unbuffered tuple of (topic, xrefs). + + If an error occurs here, the exception is caught and displayed by + the url handler. + + This function duplicates the showtopic method but returns its + result directly so it can be formatted for display in an html page. + """ + try: + import pydoc_data.topics + except ImportError: + return(''' +Sorry, topic and keyword documentation is not available because the +module "pydoc_data.topics" could not be found. +''' , '') + target = self.topics.get(topic, self.keywords.get(topic)) + if not target: + raise ValueError('could not find topic') + if isinstance(target, str): + return self._gettopic(target, more_xrefs) + label, xrefs = target + doc = pydoc_data.topics.topics[label] + if more_xrefs: + xrefs = (xrefs or '') + ' ' + more_xrefs + return doc, xrefs + + def showsymbol(self, symbol): + target = self.symbols[symbol] + topic, _, xrefs = target.partition(' ') + self.showtopic(topic, xrefs) + + def listmodules(self, key=''): + if key: + self.output.write(''' +Here is a list of modules whose name or summary contains '{}'. +If there are any, enter a module name to get more help. + +'''.format(key)) + apropos(key) + else: + self.output.write(''' +Please wait a moment while I gather a list of all available modules... + +''') + modules = {} + def callback(path, modname, desc, modules=modules): + if modname and modname[-9:] == '.__init__': + modname = modname[:-9] + ' (package)' + if modname.find('.') < 0: + modules[modname] = 1 + def onerror(modname): + callback(None, modname, None) + ModuleScanner().run(callback, onerror=onerror) + self.list(modules.keys()) + self.output.write(''' +Enter any module name to get more help. Or, type "modules spam" to search +for modules whose name or summary contain the string "spam". 
+''') + +help = Helper() + +class ModuleScanner: + """An interruptible scanner that searches module synopses.""" + + def run(self, callback, key=None, completer=None, onerror=None): + if key: key = key.lower() + self.quit = False + seen = {} + + for modname in sys.builtin_module_names: + if modname != '__main__': + seen[modname] = 1 + if key is None: + callback(None, modname, '') + else: + name = __import__(modname).__doc__ or '' + desc = name.split('\n')[0] + name = modname + ' - ' + desc + if name.lower().find(key) >= 0: + callback(None, modname, desc) + + for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror): + if self.quit: + break + + if key is None: + callback(None, modname, '') + else: + try: + spec = pkgutil._get_spec(importer, modname) + except SyntaxError: + # raised by tests for bad coding cookies or BOM + continue + loader = spec.loader + if hasattr(loader, 'get_source'): + try: + source = loader.get_source(modname) + except Exception: + if onerror: + onerror(modname) + continue + desc = source_synopsis(io.StringIO(source)) or '' + if hasattr(loader, 'get_filename'): + path = loader.get_filename(modname) + else: + path = None + else: + try: + module = importlib._bootstrap._load(spec) + except ImportError: + if onerror: + onerror(modname) + continue + desc = module.__doc__.splitlines()[0] if module.__doc__ else '' + path = getattr(module,'__file__',None) + name = modname + ' - ' + desc + if name.lower().find(key) >= 0: + callback(path, modname, desc) + + if completer: + completer() + +def apropos(key): + """Print all the one-line module summaries that contain a substring.""" + def callback(path, modname, desc): + if modname[-9:] == '.__init__': + modname = modname[:-9] + ' (package)' + print(modname, desc and '- ' + desc) + def onerror(modname): + pass + with warnings.catch_warnings(): + warnings.filterwarnings('ignore') # ignore problems during import + ModuleScanner().run(callback, key, onerror=onerror) + +# --------------------------------------- enhanced web browser interface + +def _start_server(urlhandler, hostname, port): + """Start an HTTP server thread on a specific port. + + Start an HTML/text server thread, so HTML or text documents can be + browsed dynamically and interactively with a web browser. Example use: + + >>> import time + >>> import pydoc + + Define a URL handler. To determine what the client is asking + for, check the URL and content_type. + + Then get or generate some text or HTML code and return it. + + >>> def my_url_handler(url, content_type): + ... text = 'the URL sent was: (%s, %s)' % (url, content_type) + ... return text + + Start server thread on port 0. + If you use port 0, the server will pick a random port number. + You can then use serverthread.port to get the port number. + + >>> port = 0 + >>> serverthread = pydoc._start_server(my_url_handler, port) + + Check that the server is really started. If it is, open browser + and get first page. Use serverthread.url as the starting page. + + >>> if serverthread.serving: + ... import webbrowser + + The next two lines are commented out so a browser doesn't open if + doctest is run on this module. + + #... webbrowser.open(serverthread.url) + #True + + Let the server do its thing. We just need to monitor its status. + Use time.sleep so the loop doesn't hog the CPU. + + >>> starttime = time.monotonic() + >>> timeout = 1 #seconds + + This is a short timeout for testing purposes. + + >>> while serverthread.serving: + ... time.sleep(.01) + ... 
if serverthread.serving and time.monotonic() - starttime > timeout: + ... serverthread.stop() + ... break + + Print any errors that may have occurred. + + >>> print(serverthread.error) + None + """ + import http.server + import email.message + import select + import threading + + class DocHandler(http.server.BaseHTTPRequestHandler): + + def do_GET(self): + """Process a request from an HTML browser. + + The URL received is in self.path. + Get an HTML page from self.urlhandler and send it. + """ + if self.path.endswith('.css'): + content_type = 'text/css' + else: + content_type = 'text/html' + self.send_response(200) + self.send_header('Content-Type', '%s; charset=UTF-8' % content_type) + self.end_headers() + self.wfile.write(self.urlhandler( + self.path, content_type).encode('utf-8')) + + def log_message(self, *args): + # Don't log messages. + pass + + class DocServer(http.server.HTTPServer): + + def __init__(self, host, port, callback): + self.host = host + self.address = (self.host, port) + self.callback = callback + self.base.__init__(self, self.address, self.handler) + self.quit = False + + def serve_until_quit(self): + while not self.quit: + rd, wr, ex = select.select([self.socket.fileno()], [], [], 1) + if rd: + self.handle_request() + self.server_close() + + def server_activate(self): + self.base.server_activate(self) + if self.callback: + self.callback(self) + + class ServerThread(threading.Thread): + + def __init__(self, urlhandler, host, port): + self.urlhandler = urlhandler + self.host = host + self.port = int(port) + threading.Thread.__init__(self) + self.serving = False + self.error = None + + def run(self): + """Start the server.""" + try: + DocServer.base = http.server.HTTPServer + DocServer.handler = DocHandler + DocHandler.MessageClass = email.message.Message + DocHandler.urlhandler = staticmethod(self.urlhandler) + docsvr = DocServer(self.host, self.port, self.ready) + self.docserver = docsvr + docsvr.serve_until_quit() + except Exception as e: + self.error = e + + def ready(self, server): + self.serving = True + self.host = server.host + self.port = server.server_port + self.url = 'http://%s:%d/' % (self.host, self.port) + + def stop(self): + """Stop the server and this thread nicely""" + self.docserver.quit = True + self.join() + # explicitly break a reference cycle: DocServer.callback + # has indirectly a reference to ServerThread. + self.docserver = None + self.serving = False + self.url = None + + thread = ServerThread(urlhandler, hostname, port) + thread.start() + # Wait until thread.serving is True to make sure we are + # really up before returning. + while not thread.error and not thread.serving: + time.sleep(.01) + return thread + + +def _url_handler(url, content_type="text/html"): + """The pydoc url handler for use with the pydoc server. + + If the content_type is 'text/css', the _pydoc.css style + sheet is read and returned if it exits. + + If the content_type is 'text/html', then the result of + get_html_page(url) is returned. + """ + class _HTMLDoc(HTMLDoc): + + def page(self, title, contents): + """Format an HTML page.""" + css_path = "pydoc_data/_pydoc.css" + css_link = ( + '' % + css_path) + return '''\ + +Pydoc: %s + +%s%s
%s
+''' % (title, css_link, html_navbar(), contents)
+
+
+    html = _HTMLDoc()
+
+    def html_navbar():
+        version = html.escape("%s [%s, %s]" % (platform.python_version(),
+                                               platform.python_build()[0],
+                                               platform.python_compiler()))
+        return """
+            <div style='float:left'>
+                Python %s<br>%s
+            </div>
+            <div style='float:right'>
+                <div style='text-align:center'>
+                  <a href="index.html">Module Index</a>
+                  : <a href="topics.html">Topics</a>
+                  : <a href="keywords.html">Keywords</a>
+                </div>
+                <div>
+                    <form action="get" style='display:inline;'>
+                      <input type=text name=key size=15>
+                      <input type=submit value="Get">
+                    </form>&nbsp;
+                    <form action="search" style='display:inline;'>
+                      <input type=text name=key size=15>
+                      <input type=submit value="Search">
+                    </form>
+                </div>
+            </div>
+ """ % (version, html.escape(platform.platform(terse=True))) + + def html_index(): + """Module Index page.""" + + def bltinlink(name): + return '%s' % (name, name) + + heading = html.heading( + 'Index of Modules', + '#ffffff', '#7799ee') + names = [name for name in sys.builtin_module_names + if name != '__main__'] + contents = html.multicolumn(names, bltinlink) + contents = [heading, '

' + html.bigsection( + 'Built-in Modules', '#ffffff', '#ee77aa', contents)] + + seen = {} + for dir in sys.path: + contents.append(html.index(dir, seen)) + + contents.append( + '

pydoc by Ka-Ping Yee' + '<ping@lfw.org>') + return 'Index of Modules', ''.join(contents) + + def html_search(key): + """Search results page.""" + # scan for modules + search_result = [] + + def callback(path, modname, desc): + if modname[-9:] == '.__init__': + modname = modname[:-9] + ' (package)' + search_result.append((modname, desc and '- ' + desc)) + + with warnings.catch_warnings(): + warnings.filterwarnings('ignore') # ignore problems during import + def onerror(modname): + pass + ModuleScanner().run(callback, key, onerror=onerror) + + # format page + def bltinlink(name): + return '%s' % (name, name) + + results = [] + heading = html.heading( + 'Search Results', + '#ffffff', '#7799ee') + for name, desc in search_result: + results.append(bltinlink(name) + desc) + contents = heading + html.bigsection( + 'key = %s' % key, '#ffffff', '#ee77aa', '
'.join(results)) + return 'Search Results', contents + + def html_topics(): + """Index of topic texts available.""" + + def bltinlink(name): + return '%s' % (name, name) + + heading = html.heading( + 'INDEX', + '#ffffff', '#7799ee') + names = sorted(Helper.topics.keys()) + + contents = html.multicolumn(names, bltinlink) + contents = heading + html.bigsection( + 'Topics', '#ffffff', '#ee77aa', contents) + return 'Topics', contents + + def html_keywords(): + """Index of keywords.""" + heading = html.heading( + 'INDEX', + '#ffffff', '#7799ee') + names = sorted(Helper.keywords.keys()) + + def bltinlink(name): + return '%s' % (name, name) + + contents = html.multicolumn(names, bltinlink) + contents = heading + html.bigsection( + 'Keywords', '#ffffff', '#ee77aa', contents) + return 'Keywords', contents + + def html_topicpage(topic): + """Topic or keyword help page.""" + buf = io.StringIO() + htmlhelp = Helper(buf, buf) + contents, xrefs = htmlhelp._gettopic(topic) + if topic in htmlhelp.keywords: + title = 'KEYWORD' + else: + title = 'TOPIC' + heading = html.heading( + '%s' % title, + '#ffffff', '#7799ee') + contents = '

<pre>%s</pre>
' % html.markup(contents) + contents = html.bigsection(topic , '#ffffff','#ee77aa', contents) + if xrefs: + xrefs = sorted(xrefs.split()) + + def bltinlink(name): + return '%s' % (name, name) + + xrefs = html.multicolumn(xrefs, bltinlink) + xrefs = html.section('Related help topics: ', + '#ffffff', '#ee77aa', xrefs) + return ('%s %s' % (title, topic), + ''.join((heading, contents, xrefs))) + + def html_getobj(url): + obj = locate(url, forceload=1) + if obj is None and url != 'None': + raise ValueError('could not find object') + title = describe(obj) + content = html.document(obj, url) + return title, content + + def html_error(url, exc): + heading = html.heading( + 'Error', + '#ffffff', '#7799ee') + contents = '
'.join(html.escape(line) for line in + format_exception_only(type(exc), exc)) + contents = heading + html.bigsection(url, '#ffffff', '#bb0000', + contents) + return "Error - %s" % url, contents + + def get_html_page(url): + """Generate an HTML page for url.""" + complete_url = url + if url.endswith('.html'): + url = url[:-5] + try: + if url in ("", "index"): + title, content = html_index() + elif url == "topics": + title, content = html_topics() + elif url == "keywords": + title, content = html_keywords() + elif '=' in url: + op, _, url = url.partition('=') + if op == "search?key": + title, content = html_search(url) + elif op == "topic?key": + # try topics first, then objects. + try: + title, content = html_topicpage(url) + except ValueError: + title, content = html_getobj(url) + elif op == "get?key": + # try objects first, then topics. + if url in ("", "index"): + title, content = html_index() + else: + try: + title, content = html_getobj(url) + except ValueError: + title, content = html_topicpage(url) + else: + raise ValueError('bad pydoc url') + else: + title, content = html_getobj(url) + except Exception as exc: + # Catch any errors and display them in an error page. + title, content = html_error(complete_url, exc) + return html.page(title, content) + + if url.startswith('/'): + url = url[1:] + if content_type == 'text/css': + path_here = os.path.dirname(os.path.realpath(__file__)) + css_path = os.path.join(path_here, url) + with open(css_path) as fp: + return ''.join(fp.readlines()) + elif content_type == 'text/html': + return get_html_page(url) + # Errors outside the url handler are caught by the server. + raise TypeError('unknown content type %r for url %s' % (content_type, url)) + + +def browse(port=0, *, open_browser=True, hostname='localhost'): + """Start the enhanced pydoc web server and open a web browser. + + Use port '0' to start the server on an arbitrary port. + Set open_browser to False to suppress opening a browser. + """ + import webbrowser + serverthread = _start_server(_url_handler, hostname, port) + if serverthread.error: + print(serverthread.error) + return + if serverthread.serving: + server_help_msg = 'Server commands: [b]rowser, [q]uit' + if open_browser: + webbrowser.open(serverthread.url) + try: + print('Server ready at', serverthread.url) + print(server_help_msg) + while serverthread.serving: + cmd = input('server> ') + cmd = cmd.lower() + if cmd == 'q': + break + elif cmd == 'b': + webbrowser.open(serverthread.url) + else: + print(server_help_msg) + except (KeyboardInterrupt, EOFError): + print() + finally: + if serverthread.serving: + serverthread.stop() + print('Server stopped') + + +# -------------------------------------------------- command-line interface + +def ispath(x): + return isinstance(x, str) and x.find(os.sep) >= 0 + +def _get_revised_path(given_path, argv0): + """Ensures current directory is on returned path, and argv0 directory is not + + Exception: argv0 dir is left alone if it's also pydoc's directory. + + Returns a new path entry list, or None if no adjustment is needed. + """ + # Scripts may get the current directory in their path by default if they're + # run with the -m switch, or directly from the current directory. + # The interactive prompt also allows imports from the current directory. 
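+    # Illustrative sketch, not part of the original source, using made-up
+    # paths: with given_path = ['/opt/tools', '/usr/lib/python3.10'] and
+    # argv0 = '/opt/tools/mytool.py', the current directory is absent, so the
+    # function returns [os.getcwd(), '/usr/lib/python3.10']: the working
+    # directory is prepended and the script's own directory is dropped
+    # (unless that directory is also pydoc's stdlib directory, which is
+    # deliberately left in place).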
+
+    # Accordingly, if the current directory is already present, don't make
+    # any changes to the given_path
+    if '' in given_path or os.curdir in given_path or os.getcwd() in given_path:
+        return None
+
+    # Otherwise, add the current directory to the given path, and remove the
+    # script directory (as long as the latter isn't also pydoc's directory).
+    stdlib_dir = os.path.dirname(__file__)
+    script_dir = os.path.dirname(argv0)
+    revised_path = given_path.copy()
+    if script_dir in given_path and not os.path.samefile(script_dir, stdlib_dir):
+        revised_path.remove(script_dir)
+    revised_path.insert(0, os.getcwd())
+    return revised_path
+
+
+# Note: the tests only cover _get_revised_path, not _adjust_cli_path itself
+def _adjust_cli_sys_path():
+    """Ensures current directory is on sys.path, and __main__ directory is not.
+
+    Exception: __main__ dir is left alone if it's also pydoc's directory.
+    """
+    revised_path = _get_revised_path(sys.path, sys.argv[0])
+    if revised_path is not None:
+        sys.path[:] = revised_path
+
+
+def cli():
+    """Command-line interface (looks at sys.argv to decide what to do)."""
+    import getopt
+    class BadUsage(Exception): pass
+
+    _adjust_cli_sys_path()
+
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'bk:n:p:w')
+        writing = False
+        start_server = False
+        open_browser = False
+        port = 0
+        hostname = 'localhost'
+        for opt, val in opts:
+            if opt == '-b':
+                start_server = True
+                open_browser = True
+            if opt == '-k':
+                apropos(val)
+                return
+            if opt == '-p':
+                start_server = True
+                port = val
+            if opt == '-w':
+                writing = True
+            if opt == '-n':
+                start_server = True
+                hostname = val
+
+        if start_server:
+            browse(port, hostname=hostname, open_browser=open_browser)
+            return
+
+        if not args: raise BadUsage
+        for arg in args:
+            if ispath(arg) and not os.path.exists(arg):
+                print('file %r does not exist' % arg)
+                break
+            try:
+                if ispath(arg) and os.path.isfile(arg):
+                    arg = importfile(arg)
+                if writing:
+                    if ispath(arg) and os.path.isdir(arg):
+                        writedocs(arg)
+                    else:
+                        writedoc(arg)
+                else:
+                    help.help(arg)
+            except ErrorDuringImport as value:
+                print(value)
+
+    except (getopt.error, BadUsage):
+        cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
+        print("""pydoc - the Python documentation tool
+
+{cmd} <name> ...
+    Show text documentation on something.  <name> may be the name of a
+    Python keyword, topic, function, module, or package, or a dotted
+    reference to a class or function within a module or module in a
+    package.  If <name> contains a '{sep}', it is used as the path to a
+    Python source file to document. If name is 'keywords', 'topics',
+    or 'modules', a listing of these things is displayed.
+
+{cmd} -k <keyword>
+    Search for a keyword in the synopsis lines of all available modules.
+
+{cmd} -n <hostname>
+    Start an HTTP server with the given hostname (default: localhost).
+
+{cmd} -p <port>
+    Start an HTTP server on the given port on the local machine.  Port
+    number 0 can be used to get an arbitrary unused port.
+
+{cmd} -b
+    Start an HTTP server on an arbitrary unused port and open a web browser
+    to interactively browse documentation.  This option can be used in
+    combination with -n and/or -p.
+
+{cmd} -w <name> ...
+    Write out the HTML documentation for a module to a file in the current
+    directory.  If <name> contains a '{sep}', it is treated as a filename; if
+    it names a directory, documentation is written for all the contents.
+""".format(cmd=cmd, sep=os.sep)) + +if __name__ == '__main__': + cli() diff --git a/evalkit_cambrian/lib/python3.10/random.py b/evalkit_cambrian/lib/python3.10/random.py new file mode 100644 index 0000000000000000000000000000000000000000..1310a2d9d0e07104ee8b67a0efc2e004bfe62277 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/random.py @@ -0,0 +1,930 @@ +"""Random variable generators. + + bytes + ----- + uniform bytes (values between 0 and 255) + + integers + -------- + uniform within range + + sequences + --------- + pick random element + pick random sample + pick weighted random sample + generate random permutation + + distributions on the real line: + ------------------------------ + uniform + triangular + normal (Gaussian) + lognormal + negative exponential + gamma + beta + pareto + Weibull + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + +General notes on the underlying Mersenne Twister core generator: + +* The period is 2**19937-1. +* It is one of the most extensively tested generators in existence. +* The random() method is implemented in C, executes in a single Python step, + and is, therefore, threadsafe. + +""" + +# Translated by Guido van Rossum from C source provided by +# Adrian Baddeley. Adapted by Raymond Hettinger for use with +# the Mersenne Twister and os.urandom() core generators. + +from warnings import warn as _warn +from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil +from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin +from math import tau as TWOPI, floor as _floor, isfinite as _isfinite +from os import urandom as _urandom +from _collections_abc import Set as _Set, Sequence as _Sequence +from operator import index as _index +from itertools import accumulate as _accumulate, repeat as _repeat +from bisect import bisect as _bisect +import os as _os +import _random + +try: + # hashlib is pretty heavy to load, try lean internal module first + from _sha512 import sha512 as _sha512 +except ImportError: + # fallback to official implementation + from hashlib import sha512 as _sha512 + +__all__ = [ + "Random", + "SystemRandom", + "betavariate", + "choice", + "choices", + "expovariate", + "gammavariate", + "gauss", + "getrandbits", + "getstate", + "lognormvariate", + "normalvariate", + "paretovariate", + "randbytes", + "randint", + "random", + "randrange", + "sample", + "seed", + "setstate", + "shuffle", + "triangular", + "uniform", + "vonmisesvariate", + "weibullvariate", +] + +NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0) +LOG4 = _log(4.0) +SG_MAGICCONST = 1.0 + _log(4.5) +BPF = 53 # Number of bits in a float +RECIP_BPF = 2 ** -BPF +_ONE = 1 + + +class Random(_random.Random): + """Random number generator base class used by bound module functions. + + Used to instantiate instances of Random to get generators that don't + share state. + + Class Random can also be subclassed if you want to use a different basic + generator of your own devising: in that case, override the following + methods: random(), seed(), getstate(), and setstate(). + Optionally, implement a getrandbits() method so that randrange() + can cover arbitrarily large ranges. + + """ + + VERSION = 3 # used by getstate/setstate + + def __init__(self, x=None): + """Initialize an instance. + + Optional argument x controls seeding, as for Random.seed(). + """ + + self.seed(x) + self.gauss_next = None + + def seed(self, a=None, version=2): + """Initialize internal state from a seed. 
+ + The only supported seed types are None, int, float, + str, bytes, and bytearray. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. + + If *a* is an int, all bits are used. + + For version 2 (the default), all of the bits are used if *a* is a str, + bytes, or bytearray. For version 1 (provided for reproducing random + sequences from older versions of Python), the algorithm for str and + bytes generates a narrower range of seeds. + + """ + + if version == 1 and isinstance(a, (str, bytes)): + a = a.decode('latin-1') if isinstance(a, bytes) else a + x = ord(a[0]) << 7 if a else 0 + for c in map(ord, a): + x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF + x ^= len(a) + a = -2 if x == -1 else x + + elif version == 2 and isinstance(a, (str, bytes, bytearray)): + if isinstance(a, str): + a = a.encode() + a = int.from_bytes(a + _sha512(a).digest(), 'big') + + elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)): + _warn('Seeding based on hashing is deprecated\n' + 'since Python 3.9 and will be removed in a subsequent ' + 'version. The only \n' + 'supported seed types are: None, ' + 'int, float, str, bytes, and bytearray.', + DeprecationWarning, 2) + + super().seed(a) + self.gauss_next = None + + def getstate(self): + """Return internal state; can be passed to setstate() later.""" + return self.VERSION, super().getstate(), self.gauss_next + + def setstate(self, state): + """Restore internal state from object returned by getstate().""" + version = state[0] + if version == 3: + version, internalstate, self.gauss_next = state + super().setstate(internalstate) + elif version == 2: + version, internalstate, self.gauss_next = state + # In version 2, the state was saved as signed ints, which causes + # inconsistencies between 32/64-bit systems. The state is + # really unsigned 32-bit ints, so we convert negative ints from + # version 2 to positive longs for version 3. + try: + internalstate = tuple(x % (2 ** 32) for x in internalstate) + except ValueError as e: + raise TypeError from e + super().setstate(internalstate) + else: + raise ValueError("state with version %s passed to " + "Random.setstate() of version %s" % + (version, self.VERSION)) + + + ## ------------------------------------------------------- + ## ---- Methods below this point do not need to be overridden or extended + ## ---- when subclassing for the purpose of using a different core generator. + + + ## -------------------- pickle support ------------------- + + # Issue 17489: Since __reduce__ was defined to fix #759889 this is no + # longer called; we leave it here because it has been here since random was + # rewritten back in 2001 and why risk breaking something. + def __getstate__(self): # for pickle + return self.getstate() + + def __setstate__(self, state): # for pickle + self.setstate(state) + + def __reduce__(self): + return self.__class__, (), self.getstate() + + + ## ---- internal support method for evenly distributed integers ---- + + def __init_subclass__(cls, /, **kwargs): + """Control how subclasses generate random integers. + + The algorithm a subclass can use depends on the random() and/or + getrandbits() implementation available to it and determines + whether it can generate random integers from arbitrarily large + ranges. 
+        """
+
+        for c in cls.__mro__:
+            if '_randbelow' in c.__dict__:
+                # just inherit it
+                break
+            if 'getrandbits' in c.__dict__:
+                cls._randbelow = cls._randbelow_with_getrandbits
+                break
+            if 'random' in c.__dict__:
+                cls._randbelow = cls._randbelow_without_getrandbits
+                break
+
+    def _randbelow_with_getrandbits(self, n):
+        "Return a random int in the range [0,n).  Returns 0 if n==0."
+
+        if not n:
+            return 0
+        getrandbits = self.getrandbits
+        k = n.bit_length()  # don't use (n-1) here because n can be 1
+        r = getrandbits(k)  # 0 <= r < 2**k
+        while r >= n:
+            r = getrandbits(k)
+        return r
+
+    def _randbelow_without_getrandbits(self, n, maxsize=1<<BPF):
+        """Return a random int in the range [0,n).  Returns 0 if n==0.
+
+        The implementation does not use getrandbits, but only random.
+        """
+
+        random = self.random
+        if n >= maxsize:
+            _warn("Underlying random() generator does not supply \n"
+                "enough bits to choose from a population range this large.\n"
+                "To remove the range limitation, add a getrandbits() method.")
+            return _floor(random() * n)
+        if n == 0:
+            return 0
+        rem = maxsize % n
+        limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
+        r = random()
+        while r >= limit:
+            r = random()
+        return _floor(r * maxsize) % n
+
+    _randbelow = _randbelow_with_getrandbits
+
+
+    ## --------------------------------------------------------
+    ## ---- Methods below this point generate custom distributions
+    ## ---- based on the methods defined above.  They do not
+    ## ---- directly touch the underlying generator and only
+    ## ---- access randomness through the methods:  random(),
+    ## ---- getrandbits(), or _randbelow().
+
+
+    ## -------------------- bytes methods ---------------------
+
+    def randbytes(self, n):
+        """Generate n random bytes."""
+        return self.getrandbits(n * 8).to_bytes(n, 'little')
+
+
+    ## -------------------- integer methods  -------------------
+
+    def randrange(self, start, stop=None, step=_ONE):
+        """Choose a random item from range(start, stop[, step]).
+
+        This fixes the problem with randint() which includes the
+        endpoint; in Python this is usually not what you want.
+
+        """
+
+        # This code is a bit messy to make it fast for the
+        # common case while still doing adequate error checking.
+        try:
+            istart = _index(start)
+        except TypeError:
+            istart = int(start)
+            if istart != start:
+                _warn('randrange() will raise TypeError in the future',
+                      DeprecationWarning, 2)
+                raise ValueError("non-integer arg 1 for randrange()")
+            _warn('non-integer arguments to randrange() have been deprecated '
+                  'since Python 3.10 and will be removed in a subsequent '
+                  'version',
+                  DeprecationWarning, 2)
+        if stop is None:
+            # We don't check for "step != 1" because it hasn't been
+            # type checked and converted to an integer yet.
+            if step is not _ONE:
+                raise TypeError('Missing a non-None stop argument')
+            if istart > 0:
+                return self._randbelow(istart)
+            raise ValueError("empty range for randrange()")
+
+        # stop argument supplied.
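+        # Illustrative note, not part of the original source: the branch above
+        # handles the one-argument form, and the code below the two- and
+        # three-argument forms, mirroring range() semantics, e.g.
+        #     Random(1).randrange(10)        # one of 0..9
+        #     Random(1).randrange(5, 10)     # one of 5..9
+        #     Random(1).randrange(0, 10, 3)  # one of 0, 3, 6, 9
+        # Every form ultimately draws from _randbelow(), which resamples
+        # out-of-range values so the result stays uniform.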
+ try: + istop = _index(stop) + except TypeError: + istop = int(stop) + if istop != stop: + _warn('randrange() will raise TypeError in the future', + DeprecationWarning, 2) + raise ValueError("non-integer stop for randrange()") + _warn('non-integer arguments to randrange() have been deprecated ' + 'since Python 3.10 and will be removed in a subsequent ' + 'version', + DeprecationWarning, 2) + width = istop - istart + try: + istep = _index(step) + except TypeError: + istep = int(step) + if istep != step: + _warn('randrange() will raise TypeError in the future', + DeprecationWarning, 2) + raise ValueError("non-integer step for randrange()") + _warn('non-integer arguments to randrange() have been deprecated ' + 'since Python 3.10 and will be removed in a subsequent ' + 'version', + DeprecationWarning, 2) + # Fast path. + if istep == 1: + if width > 0: + return istart + self._randbelow(width) + raise ValueError("empty range for randrange() (%d, %d, %d)" % (istart, istop, width)) + + # Non-unit step argument supplied. + if istep > 0: + n = (width + istep - 1) // istep + elif istep < 0: + n = (width + istep + 1) // istep + else: + raise ValueError("zero step for randrange()") + if n <= 0: + raise ValueError("empty range for randrange()") + return istart + istep * self._randbelow(n) + + def randint(self, a, b): + """Return random integer in range [a, b], including both end points. + """ + + return self.randrange(a, b+1) + + + ## -------------------- sequence methods ------------------- + + def choice(self, seq): + """Choose a random element from a non-empty sequence.""" + # raises IndexError if seq is empty + return seq[self._randbelow(len(seq))] + + def shuffle(self, x, random=None): + """Shuffle list x in place, and return None. + + Optional argument random is a 0-argument function returning a + random float in [0.0, 1.0); if it is the default None, the + standard random.random will be used. + + """ + + if random is None: + randbelow = self._randbelow + for i in reversed(range(1, len(x))): + # pick an element in x[:i+1] with which to exchange x[i] + j = randbelow(i + 1) + x[i], x[j] = x[j], x[i] + else: + _warn('The *random* parameter to shuffle() has been deprecated\n' + 'since Python 3.9 and will be removed in a subsequent ' + 'version.', + DeprecationWarning, 2) + floor = _floor + for i in reversed(range(1, len(x))): + # pick an element in x[:i+1] with which to exchange x[i] + j = floor(random() * (i + 1)) + x[i], x[j] = x[j], x[i] + + def sample(self, population, k, *, counts=None): + """Chooses k unique random elements from a population sequence or set. + + Returns a new list containing elements from the population while + leaving the original population unchanged. The resulting list is + in selection order so that all sub-slices will also be valid random + samples. This allows raffle winners (the sample) to be partitioned + into grand prize and second place winners (the subslices). + + Members of the population need not be hashable or unique. If the + population contains repeats, then each occurrence is a possible + selection in the sample. + + Repeated elements can be specified one at a time or with the optional + counts parameter. For example: + + sample(['red', 'blue'], counts=[4, 2], k=5) + + is equivalent to: + + sample(['red', 'red', 'red', 'red', 'blue', 'blue'], k=5) + + To choose a sample from a range of integers, use range() for the + population argument. 
This is especially fast and space efficient + for sampling from a large population: + + sample(range(10000000), 60) + + """ + + # Sampling without replacement entails tracking either potential + # selections (the pool) in a list or previous selections in a set. + + # When the number of selections is small compared to the + # population, then tracking selections is efficient, requiring + # only a small set and an occasional reselection. For + # a larger number of selections, the pool tracking method is + # preferred since the list takes less space than the + # set and it doesn't suffer from frequent reselections. + + # The number of calls to _randbelow() is kept at or near k, the + # theoretical minimum. This is important because running time + # is dominated by _randbelow() and because it extracts the + # least entropy from the underlying random number generators. + + # Memory requirements are kept to the smaller of a k-length + # set or an n-length list. + + # There are other sampling algorithms that do not require + # auxiliary memory, but they were rejected because they made + # too many calls to _randbelow(), making them slower and + # causing them to eat more entropy than necessary. + + if not isinstance(population, _Sequence): + if isinstance(population, _Set): + _warn('Sampling from a set deprecated\n' + 'since Python 3.9 and will be removed in a subsequent version.', + DeprecationWarning, 2) + population = tuple(population) + else: + raise TypeError("Population must be a sequence. For dicts or sets, use sorted(d).") + n = len(population) + if counts is not None: + cum_counts = list(_accumulate(counts)) + if len(cum_counts) != n: + raise ValueError('The number of counts does not match the population') + total = cum_counts.pop() + if not isinstance(total, int): + raise TypeError('Counts must be integers') + if total <= 0: + raise ValueError('Total of counts must be greater than zero') + selections = self.sample(range(total), k=k) + bisect = _bisect + return [population[bisect(cum_counts, s)] for s in selections] + randbelow = self._randbelow + if not 0 <= k <= n: + raise ValueError("Sample larger than population or is negative") + result = [None] * k + setsize = 21 # size of a small set minus size of an empty list + if k > 5: + setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets + if n <= setsize: + # An n-length list is smaller than a k-length set. + # Invariant: non-selected at pool[0 : n-i] + pool = list(population) + for i in range(k): + j = randbelow(n - i) + result[i] = pool[j] + pool[j] = pool[n - i - 1] # move non-selected item into vacancy + else: + selected = set() + selected_add = selected.add + for i in range(k): + j = randbelow(n) + while j in selected: + j = randbelow(n) + selected_add(j) + result[i] = population[j] + return result + + def choices(self, population, weights=None, *, cum_weights=None, k=1): + """Return a k sized list of population elements chosen with replacement. + + If the relative weights or cumulative weights are not specified, + the selections are made with equal probability. 
+ + """ + random = self.random + n = len(population) + if cum_weights is None: + if weights is None: + floor = _floor + n += 0.0 # convert to float for a small speed improvement + return [population[floor(random() * n)] for i in _repeat(None, k)] + try: + cum_weights = list(_accumulate(weights)) + except TypeError: + if not isinstance(weights, int): + raise + k = weights + raise TypeError( + f'The number of choices must be a keyword argument: {k=}' + ) from None + elif weights is not None: + raise TypeError('Cannot specify both weights and cumulative weights') + if len(cum_weights) != n: + raise ValueError('The number of weights does not match the population') + total = cum_weights[-1] + 0.0 # convert to float + if total <= 0.0: + raise ValueError('Total of weights must be greater than zero') + if not _isfinite(total): + raise ValueError('Total of weights must be finite') + bisect = _bisect + hi = n - 1 + return [population[bisect(cum_weights, random() * total, 0, hi)] + for i in _repeat(None, k)] + + + ## -------------------- real-valued distributions ------------------- + + def uniform(self, a, b): + "Get a random number in the range [a, b) or [a, b] depending on rounding." + return a + (b - a) * self.random() + + def triangular(self, low=0.0, high=1.0, mode=None): + """Triangular distribution. + + Continuous distribution bounded by given lower and upper limits, + and having a given mode value in-between. + + http://en.wikipedia.org/wiki/Triangular_distribution + + """ + u = self.random() + try: + c = 0.5 if mode is None else (mode - low) / (high - low) + except ZeroDivisionError: + return low + if u > c: + u = 1.0 - u + c = 1.0 - c + low, high = high, low + return low + (high - low) * _sqrt(u * c) + + def normalvariate(self, mu, sigma): + """Normal distribution. + + mu is the mean, and sigma is the standard deviation. + + """ + # Uses Kinderman and Monahan method. Reference: Kinderman, + # A.J. and Monahan, J.F., "Computer generation of random + # variables using the ratio of uniform deviates", ACM Trans + # Math Software, 3, (1977), pp257-260. + + random = self.random + while True: + u1 = random() + u2 = 1.0 - random() + z = NV_MAGICCONST * (u1 - 0.5) / u2 + zz = z * z / 4.0 + if zz <= -_log(u2): + break + return mu + z * sigma + + def gauss(self, mu, sigma): + """Gaussian distribution. + + mu is the mean, and sigma is the standard deviation. This is + slightly faster than the normalvariate() function. + + Not thread-safe without a lock around calls. + + """ + # When x and y are two variables from [0, 1), uniformly + # distributed, then + # + # cos(2*pi*x)*sqrt(-2*log(1-y)) + # sin(2*pi*x)*sqrt(-2*log(1-y)) + # + # are two *independent* variables with normal distribution + # (mu = 0, sigma = 1). + # (Lambert Meertens) + # (corrected version; bug discovered by Mike Miller, fixed by LM) + + # Multithreading note: When two threads call this function + # simultaneously, it is possible that they will receive the + # same return value. The window is very small though. To + # avoid this, you have to use a lock around all calls. (I + # didn't want to slow this down in the serial case by using a + # lock here.) + + random = self.random + z = self.gauss_next + self.gauss_next = None + if z is None: + x2pi = random() * TWOPI + g2rad = _sqrt(-2.0 * _log(1.0 - random())) + z = _cos(x2pi) * g2rad + self.gauss_next = _sin(x2pi) * g2rad + + return mu + z * sigma + + def lognormvariate(self, mu, sigma): + """Log normal distribution. 
+ + If you take the natural logarithm of this distribution, you'll get a + normal distribution with mean mu and standard deviation sigma. + mu can have any value, and sigma must be greater than zero. + + """ + return _exp(self.normalvariate(mu, sigma)) + + def expovariate(self, lambd): + """Exponential distribution. + + lambd is 1.0 divided by the desired mean. It should be + nonzero. (The parameter would be called "lambda", but that is + a reserved word in Python.) Returned values range from 0 to + positive infinity if lambd is positive, and from negative + infinity to 0 if lambd is negative. + + """ + # lambd: rate lambd = 1/mean + # ('lambda' is a Python reserved word) + + # we use 1-random() instead of random() to preclude the + # possibility of taking the log of zero. + return -_log(1.0 - self.random()) / lambd + + def vonmisesvariate(self, mu, kappa): + """Circular data distribution. + + mu is the mean angle, expressed in radians between 0 and 2*pi, and + kappa is the concentration parameter, which must be greater than or + equal to zero. If kappa is equal to zero, this distribution reduces + to a uniform random angle over the range 0 to 2*pi. + + """ + # Based upon an algorithm published in: Fisher, N.I., + # "Statistical Analysis of Circular Data", Cambridge + # University Press, 1993. + + # Thanks to Magnus Kessler for a correction to the + # implementation of step 4. + + random = self.random + if kappa <= 1e-6: + return TWOPI * random() + + s = 0.5 / kappa + r = s + _sqrt(1.0 + s * s) + + while True: + u1 = random() + z = _cos(_pi * u1) + + d = z / (r + z) + u2 = random() + if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): + break + + q = 1.0 / r + f = (q + z) / (1.0 + q * z) + u3 = random() + if u3 > 0.5: + theta = (mu + _acos(f)) % TWOPI + else: + theta = (mu - _acos(f)) % TWOPI + + return theta + + def gammavariate(self, alpha, beta): + """Gamma distribution. Not the gamma function! + + Conditions on the parameters are alpha > 0 and beta > 0. + + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + + """ + # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 + + # Warning: a few older sources define the gamma distribution in terms + # of alpha > -1.0 + if alpha <= 0.0 or beta <= 0.0: + raise ValueError('gammavariate: alpha and beta must be > 0.0') + + random = self.random + if alpha > 1.0: + + # Uses R.C.H. Cheng, "The generation of Gamma + # variables with non-integral shape parameters", + # Applied Statistics, (1977), 26, No. 1, p71-74 + + ainv = _sqrt(2.0 * alpha - 1.0) + bbb = alpha - LOG4 + ccc = alpha + ainv + + while True: + u1 = random() + if not 1e-7 < u1 < 0.9999999: + continue + u2 = 1.0 - random() + v = _log(u1 / (1.0 - u1)) / ainv + x = alpha * _exp(v) + z = u1 * u1 * u2 + r = bbb + ccc * v - x + if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z): + return x * beta + + elif alpha == 1.0: + # expovariate(1/beta) + return -_log(1.0 - random()) * beta + + else: + # alpha is between 0 and 1 (exclusive) + # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle + while True: + u = random() + b = (_e + alpha) / _e + p = b * u + if p <= 1.0: + x = p ** (1.0 / alpha) + else: + x = -_log((b - p) / alpha) + u1 = random() + if p > 1.0: + if u1 <= x ** (alpha - 1.0): + break + elif u1 <= _exp(-x): + break + return x * beta + + def betavariate(self, alpha, beta): + """Beta distribution. 
+ + Conditions on the parameters are alpha > 0 and beta > 0. + Returned values range between 0 and 1. + + """ + ## See + ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html + ## for Ivan Frohne's insightful analysis of why the original implementation: + ## + ## def betavariate(self, alpha, beta): + ## # Discrete Event Simulation in C, pp 87-88. + ## + ## y = self.expovariate(alpha) + ## z = self.expovariate(1.0/beta) + ## return z/(y+z) + ## + ## was dead wrong, and how it probably got that way. + + # This version due to Janne Sinkkonen, and matches all the std + # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). + y = self.gammavariate(alpha, 1.0) + if y: + return y / (y + self.gammavariate(beta, 1.0)) + return 0.0 + + def paretovariate(self, alpha): + """Pareto distribution. alpha is the shape parameter.""" + # Jain, pg. 495 + + u = 1.0 - self.random() + return u ** (-1.0 / alpha) + + def weibullvariate(self, alpha, beta): + """Weibull distribution. + + alpha is the scale parameter and beta is the shape parameter. + + """ + # Jain, pg. 499; bug fix courtesy Bill Arms + + u = 1.0 - self.random() + return alpha * (-_log(u)) ** (1.0 / beta) + + +## ------------------------------------------------------------------ +## --------------- Operating System Random Source ------------------ + + +class SystemRandom(Random): + """Alternate random number generator using sources provided + by the operating system (such as /dev/urandom on Unix or + CryptGenRandom on Windows). + + Not available on all systems (see os.urandom() for details). + + """ + + def random(self): + """Get the next random number in the range [0.0, 1.0).""" + return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF + + def getrandbits(self, k): + """getrandbits(k) -> x. Generates an int with k random bits.""" + if k < 0: + raise ValueError('number of bits must be non-negative') + numbytes = (k + 7) // 8 # bits / 8 and rounded up + x = int.from_bytes(_urandom(numbytes), 'big') + return x >> (numbytes * 8 - k) # trim excess bits + + def randbytes(self, n): + """Generate n random bytes.""" + # os.urandom(n) fails with ValueError for n < 0 + # and returns an empty bytes string for n == 0. + return _urandom(n) + + def seed(self, *args, **kwds): + "Stub method. Not used for a system random number generator." + return None + + def _notimplemented(self, *args, **kwds): + "Method should not be called for a system random number generator." + raise NotImplementedError('System entropy source does not have state.') + getstate = setstate = _notimplemented + + +# ---------------------------------------------------------------------- +# Create one instance, seeded from current time, and export its methods +# as module-level functions. The functions share state across all uses +# (both in the user's code and in the Python libraries), but that's fine +# for most programs and is easier for the casual user than making them +# instantiate their own Random() instance. 
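+# Illustrative sketch, not part of the original source: callers that need a
+# reproducible or isolated stream usually construct their own generator
+# instead of relying on the shared module-level instance created below, e.g.
+#
+#     rng = Random(42)               # independent state, fixed seed
+#     rng.sample(range(100), k=3)    # leaves the module-wide state untouched
+#     SystemRandom().randbytes(16)   # OS entropy; seed() is a stub and
+#                                    # getstate()/setstate() are unsupported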
+ +_inst = Random() +seed = _inst.seed +random = _inst.random +uniform = _inst.uniform +triangular = _inst.triangular +randint = _inst.randint +choice = _inst.choice +randrange = _inst.randrange +sample = _inst.sample +shuffle = _inst.shuffle +choices = _inst.choices +normalvariate = _inst.normalvariate +lognormvariate = _inst.lognormvariate +expovariate = _inst.expovariate +vonmisesvariate = _inst.vonmisesvariate +gammavariate = _inst.gammavariate +gauss = _inst.gauss +betavariate = _inst.betavariate +paretovariate = _inst.paretovariate +weibullvariate = _inst.weibullvariate +getstate = _inst.getstate +setstate = _inst.setstate +getrandbits = _inst.getrandbits +randbytes = _inst.randbytes + + +## ------------------------------------------------------ +## ----------------- test program ----------------------- + +def _test_generator(n, func, args): + from statistics import stdev, fmean as mean + from time import perf_counter + + t0 = perf_counter() + data = [func(*args) for i in _repeat(None, n)] + t1 = perf_counter() + + xbar = mean(data) + sigma = stdev(data, xbar) + low = min(data) + high = max(data) + + print(f'{t1 - t0:.3f} sec, {n} times {func.__name__}') + print('avg %g, stddev %g, min %g, max %g\n' % (xbar, sigma, low, high)) + + +def _test(N=2000): + _test_generator(N, random, ()) + _test_generator(N, normalvariate, (0.0, 1.0)) + _test_generator(N, lognormvariate, (0.0, 1.0)) + _test_generator(N, vonmisesvariate, (0.0, 1.0)) + _test_generator(N, gammavariate, (0.01, 1.0)) + _test_generator(N, gammavariate, (0.1, 1.0)) + _test_generator(N, gammavariate, (0.1, 2.0)) + _test_generator(N, gammavariate, (0.5, 1.0)) + _test_generator(N, gammavariate, (0.9, 1.0)) + _test_generator(N, gammavariate, (1.0, 1.0)) + _test_generator(N, gammavariate, (2.0, 1.0)) + _test_generator(N, gammavariate, (20.0, 1.0)) + _test_generator(N, gammavariate, (200.0, 1.0)) + _test_generator(N, gauss, (0.0, 1.0)) + _test_generator(N, betavariate, (3.0, 3.0)) + _test_generator(N, triangular, (0.0, 1.0, 1.0 / 3.0)) + + +## ------------------------------------------------------ +## ------------------ fork support --------------------- + +if hasattr(_os, "fork"): + _os.register_at_fork(after_in_child=_inst.seed) + + +if __name__ == '__main__': + _test() diff --git a/evalkit_cambrian/lib/python3.10/runpy.py b/evalkit_cambrian/lib/python3.10/runpy.py new file mode 100644 index 0000000000000000000000000000000000000000..c7d3d8caad1611ed52f1be8d517ad2ac906f04db --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/runpy.py @@ -0,0 +1,321 @@ +"""runpy.py - locating and running Python code using the module namespace + +Provides support for locating and running Python scripts using the Python +module namespace instead of the native filesystem. + +This allows Python code to play nicely with non-filesystem based PEP 302 +importers when locating support scripts as well as when importing modules. 
+""" +# Written by Nick Coghlan +# to implement PEP 338 (Executing Modules as Scripts) + + +import sys +import importlib.machinery # importlib first so we can test #15386 via -m +import importlib.util +import io +import types +import os + +__all__ = [ + "run_module", "run_path", +] + +class _TempModule(object): + """Temporarily replace a module in sys.modules with an empty namespace""" + def __init__(self, mod_name): + self.mod_name = mod_name + self.module = types.ModuleType(mod_name) + self._saved_module = [] + + def __enter__(self): + mod_name = self.mod_name + try: + self._saved_module.append(sys.modules[mod_name]) + except KeyError: + pass + sys.modules[mod_name] = self.module + return self + + def __exit__(self, *args): + if self._saved_module: + sys.modules[self.mod_name] = self._saved_module[0] + else: + del sys.modules[self.mod_name] + self._saved_module = [] + +class _ModifiedArgv0(object): + def __init__(self, value): + self.value = value + self._saved_value = self._sentinel = object() + + def __enter__(self): + if self._saved_value is not self._sentinel: + raise RuntimeError("Already preserving saved value") + self._saved_value = sys.argv[0] + sys.argv[0] = self.value + + def __exit__(self, *args): + self.value = self._sentinel + sys.argv[0] = self._saved_value + +# TODO: Replace these helpers with importlib._bootstrap_external functions. +def _run_code(code, run_globals, init_globals=None, + mod_name=None, mod_spec=None, + pkg_name=None, script_name=None): + """Helper to run code in nominated namespace""" + if init_globals is not None: + run_globals.update(init_globals) + if mod_spec is None: + loader = None + fname = script_name + cached = None + else: + loader = mod_spec.loader + fname = mod_spec.origin + cached = mod_spec.cached + if pkg_name is None: + pkg_name = mod_spec.parent + run_globals.update(__name__ = mod_name, + __file__ = fname, + __cached__ = cached, + __doc__ = None, + __loader__ = loader, + __package__ = pkg_name, + __spec__ = mod_spec) + exec(code, run_globals) + return run_globals + +def _run_module_code(code, init_globals=None, + mod_name=None, mod_spec=None, + pkg_name=None, script_name=None): + """Helper to run code in new namespace with sys modified""" + fname = script_name if mod_spec is None else mod_spec.origin + with _TempModule(mod_name) as temp_module, _ModifiedArgv0(fname): + mod_globals = temp_module.module.__dict__ + _run_code(code, mod_globals, init_globals, + mod_name, mod_spec, pkg_name, script_name) + # Copy the globals of the temporary module, as they + # may be cleared when the temporary module goes away + return mod_globals.copy() + +# Helper to get the full name, spec and code for a module +def _get_module_details(mod_name, error=ImportError): + if mod_name.startswith("."): + raise error("Relative module names not supported") + pkg_name, _, _ = mod_name.rpartition(".") + if pkg_name: + # Try importing the parent to avoid catching initialization errors + try: + __import__(pkg_name) + except ImportError as e: + # If the parent or higher ancestor package is missing, let the + # error be raised by find_spec() below and then be caught. But do + # not allow other errors to be caught. 
+ if e.name is None or (e.name != pkg_name and + not pkg_name.startswith(e.name + ".")): + raise + # Warn if the module has already been imported under its normal name + existing = sys.modules.get(mod_name) + if existing is not None and not hasattr(existing, "__path__"): + from warnings import warn + msg = "{mod_name!r} found in sys.modules after import of " \ + "package {pkg_name!r}, but prior to execution of " \ + "{mod_name!r}; this may result in unpredictable " \ + "behaviour".format(mod_name=mod_name, pkg_name=pkg_name) + warn(RuntimeWarning(msg)) + + try: + spec = importlib.util.find_spec(mod_name) + except (ImportError, AttributeError, TypeError, ValueError) as ex: + # This hack fixes an impedance mismatch between pkgutil and + # importlib, where the latter raises other errors for cases where + # pkgutil previously raised ImportError + msg = "Error while finding module specification for {!r} ({}: {})" + if mod_name.endswith(".py"): + msg += (f". Try using '{mod_name[:-3]}' instead of " + f"'{mod_name}' as the module name.") + raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex + if spec is None: + raise error("No module named %s" % mod_name) + if spec.submodule_search_locations is not None: + if mod_name == "__main__" or mod_name.endswith(".__main__"): + raise error("Cannot use package as __main__ module") + try: + pkg_main_name = mod_name + ".__main__" + return _get_module_details(pkg_main_name, error) + except error as e: + if mod_name not in sys.modules: + raise # No module loaded; being a package is irrelevant + raise error(("%s; %r is a package and cannot " + + "be directly executed") %(e, mod_name)) + loader = spec.loader + if loader is None: + raise error("%r is a namespace package and cannot be executed" + % mod_name) + try: + code = loader.get_code(mod_name) + except ImportError as e: + raise error(format(e)) from e + if code is None: + raise error("No code object available for %s" % mod_name) + return mod_name, spec, code + +class _Error(Exception): + """Error that _run_module_as_main() should report without a traceback""" + +# XXX ncoghlan: Should this be documented and made public? +# (Current thoughts: don't repeat the mistake that lead to its +# creation when run_module() no longer met the needs of +# mainmodule.c, but couldn't be changed because it was public) +def _run_module_as_main(mod_name, alter_argv=True): + """Runs the designated module in the __main__ namespace + + Note that the executed module will have full access to the + __main__ namespace. If this is not desirable, the run_module() + function should be used to run the module code in a fresh namespace. + + At the very least, these variables in __main__ will be overwritten: + __name__ + __file__ + __cached__ + __loader__ + __package__ + """ + try: + if alter_argv or mod_name != "__main__": # i.e. -m switch + mod_name, mod_spec, code = _get_module_details(mod_name, _Error) + else: # i.e. directory or zipfile execution + mod_name, mod_spec, code = _get_main_module_details(_Error) + except _Error as exc: + msg = "%s: %s" % (sys.executable, exc) + sys.exit(msg) + main_globals = sys.modules["__main__"].__dict__ + if alter_argv: + sys.argv[0] = mod_spec.origin + return _run_code(code, main_globals, None, + "__main__", mod_spec) + +def run_module(mod_name, init_globals=None, + run_name=None, alter_sys=False): + """Execute a module's code without importing it. + + mod_name -- an absolute module name or package name. 
+
+       Optional arguments:
+       init_globals -- dictionary used to pre-populate the module's
+       globals dictionary before the code is executed.
+
+       run_name -- if not None, this will be used for setting __name__;
+       otherwise, __name__ will be set to mod_name + '.__main__' if the
+       named module is a package and to just mod_name otherwise.
+
+       alter_sys -- if True, sys.argv[0] is updated with the value of
+       __file__ and sys.modules[__name__] is updated with a temporary
+       module object for the module being executed. Both are
+       restored to their original values before the function returns.
+
+       Returns the resulting module globals dictionary.
+    """
+    mod_name, mod_spec, code = _get_module_details(mod_name)
+    if run_name is None:
+        run_name = mod_name
+    if alter_sys:
+        return _run_module_code(code, init_globals, run_name, mod_spec)
+    else:
+        # Leave the sys module alone
+        return _run_code(code, {}, init_globals, run_name, mod_spec)
+
+def _get_main_module_details(error=ImportError):
+    # Helper that gives a nicer error message when attempting to
+    # execute a zipfile or directory by invoking __main__.py
+    # Also moves the standard __main__ out of the way so that the
+    # preexisting __loader__ entry doesn't cause issues
+    main_name = "__main__"
+    saved_main = sys.modules[main_name]
+    del sys.modules[main_name]
+    try:
+        return _get_module_details(main_name)
+    except ImportError as exc:
+        if main_name in str(exc):
+            raise error("can't find %r module in %r" %
+                        (main_name, sys.path[0])) from exc
+        raise
+    finally:
+        sys.modules[main_name] = saved_main
+
+
+def _get_code_from_file(run_name, fname):
+    # Check for a compiled file first
+    from pkgutil import read_code
+    decoded_path = os.path.abspath(os.fsdecode(fname))
+    with io.open_code(decoded_path) as f:
+        code = read_code(f)
+    if code is None:
+        # That didn't work, so try it as normal source code
+        with io.open_code(decoded_path) as f:
+            code = compile(f.read(), fname, 'exec')
+    return code, fname
+
+def run_path(path_name, init_globals=None, run_name=None):
+    """Execute code located at the specified filesystem location.
+
+       path_name -- filesystem location of a Python script, zipfile,
+       or directory containing a top level __main__.py script.
+
+       Optional arguments:
+       init_globals -- dictionary used to pre-populate the module's
+       globals dictionary before the code is executed.
+
+       run_name -- if not None, this will be used to set __name__;
+       otherwise, '<run_path>' will be used for __name__.
+
+       Returns the resulting module globals dictionary.
+    """
+    if run_name is None:
+        run_name = "<run_path>"
+    pkg_name = run_name.rpartition(".")[0]
+    from pkgutil import get_importer
+    importer = get_importer(path_name)
+    # Trying to avoid importing imp so as to not consume the deprecation warning.
+    is_NullImporter = False
+    if type(importer).__module__ == 'imp':
+        if type(importer).__name__ == 'NullImporter':
+            is_NullImporter = True
+    if isinstance(importer, type(None)) or is_NullImporter:
+        # Not a valid sys.path entry, so run the code directly
+        # execfile() doesn't help as we want to allow compiled files
+        code, fname = _get_code_from_file(run_name, path_name)
+        return _run_module_code(code, init_globals, run_name,
+                                pkg_name=pkg_name, script_name=fname)
+    else:
+        # Finder is defined for path, so add it to
+        # the start of sys.path
+        sys.path.insert(0, path_name)
+        try:
+            # Here's where things are a little different from the run_module
+            # case. There, we only had to replace the module in sys while the
+            # code was running and doing so was somewhat optional.
+            # Here, we have no choice and we have to remove it even while we
+            # read the code. If we don't do this, a __loader__ attribute in the
+            # existing __main__ module may prevent location of the new module.
+            mod_name, mod_spec, code = _get_main_module_details()
+            with _TempModule(run_name) as temp_module, \
+                 _ModifiedArgv0(path_name):
+                mod_globals = temp_module.module.__dict__
+                return _run_code(code, mod_globals, init_globals,
+                                    run_name, mod_spec, pkg_name).copy()
+        finally:
+            try:
+                sys.path.remove(path_name)
+            except ValueError:
+                pass
+
+
+if __name__ == "__main__":
+    # Run the module specified as the next command line argument
+    if len(sys.argv) < 2:
+        print("No module specified for execution", file=sys.stderr)
+    else:
+        del sys.argv[0] # Make the requested module sys.argv[0]
+        _run_module_as_main(sys.argv[0])
diff --git a/evalkit_cambrian/lib/python3.10/sched.py b/evalkit_cambrian/lib/python3.10/sched.py
new file mode 100644
index 0000000000000000000000000000000000000000..14613cf29874da539490accd7d83abce51499964
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/sched.py
@@ -0,0 +1,167 @@
+"""A generally useful event scheduler class.
+
+Each instance of this class manages its own queue.
+No multi-threading is implied; you are supposed to hack that
+yourself, or use a single instance per application.
+
+Each instance is parametrized with two functions, one that is
+supposed to return the current time, one that is supposed to
+implement a delay. You can implement real-time scheduling by
+substituting time and sleep from built-in module time, or you can
+implement simulated time by writing your own functions. This can
+also be used to integrate scheduling with STDWIN events; the delay
+function is allowed to modify the queue. Time can be expressed as
+integers or floating point numbers, as long as it is consistent.
+
+Events are specified by tuples (time, priority, action, argument, kwargs).
+As in UNIX, lower priority numbers mean higher priority; in this
+way the queue can be maintained as a priority queue. Execution of the
+event means calling the action function, passing it the argument
+sequence in "argument" (remember that in Python, multiple function
+arguments are packed in a sequence) and keyword parameters in "kwargs".
+The action function may be an instance method so it
+has another way to reference private data (besides global variables).
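+
+A minimal usage sketch (assuming the default real-time behaviour, i.e. a
+monotonic clock for timefunc and time.sleep for delayfunc):
+
+    import sched, time
+
+    s = sched.scheduler(time.monotonic, time.sleep)
+    s.enter(5, 1, print, argument=('five seconds have passed',))
+    s.enter(10, 1, print, argument=('ten seconds have passed',))
+    s.run()                      # blocks until the queue is empty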
+""" + +import time +import heapq +from collections import namedtuple +from itertools import count +import threading +from time import monotonic as _time + +__all__ = ["scheduler"] + +Event = namedtuple('Event', 'time, priority, sequence, action, argument, kwargs') +Event.time.__doc__ = ('''Numeric type compatible with the return value of the +timefunc function passed to the constructor.''') +Event.priority.__doc__ = ('''Events scheduled for the same time will be executed +in the order of their priority.''') +Event.sequence.__doc__ = ('''A continually increasing sequence number that + separates events if time and priority are equal.''') +Event.action.__doc__ = ('''Executing the event means executing +action(*argument, **kwargs)''') +Event.argument.__doc__ = ('''argument is a sequence holding the positional +arguments for the action.''') +Event.kwargs.__doc__ = ('''kwargs is a dictionary holding the keyword +arguments for the action.''') + +_sentinel = object() + +class scheduler: + + def __init__(self, timefunc=_time, delayfunc=time.sleep): + """Initialize a new instance, passing the time and delay + functions""" + self._queue = [] + self._lock = threading.RLock() + self.timefunc = timefunc + self.delayfunc = delayfunc + self._sequence_generator = count() + + def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel): + """Enter a new event in the queue at an absolute time. + + Returns an ID for the event which can be used to remove it, + if necessary. + + """ + if kwargs is _sentinel: + kwargs = {} + + with self._lock: + event = Event(time, priority, next(self._sequence_generator), + action, argument, kwargs) + heapq.heappush(self._queue, event) + return event # The ID + + def enter(self, delay, priority, action, argument=(), kwargs=_sentinel): + """A variant that specifies the time as a relative time. + + This is actually the more commonly used interface. + + """ + time = self.timefunc() + delay + return self.enterabs(time, priority, action, argument, kwargs) + + def cancel(self, event): + """Remove an event from the queue. + + This must be presented the ID as returned by enter(). + If the event is not in the queue, this raises ValueError. + + """ + with self._lock: + self._queue.remove(event) + heapq.heapify(self._queue) + + def empty(self): + """Check whether the queue is empty.""" + with self._lock: + return not self._queue + + def run(self, blocking=True): + """Execute events until the queue is empty. + If blocking is False executes the scheduled events due to + expire soonest (if any) and then return the deadline of the + next scheduled call in the scheduler. + + When there is a positive delay until the first event, the + delay function is called and the event is left in the queue; + otherwise, the event is removed from the queue and executed + (its action function is called, passing it the argument). If + the delay function returns prematurely, it is simply + restarted. + + It is legal for both the delay function and the action + function to modify the queue or to raise an exception; + exceptions are not caught but the scheduler's state remains + well-defined so run() may be called again. + + A questionable hack is added to allow other threads to run: + just after an event is executed, a delay of 0 is executed, to + avoid monopolizing the CPU when other threads are also + runnable. 
+ + """ + # localize variable access to minimize overhead + # and to improve thread safety + lock = self._lock + q = self._queue + delayfunc = self.delayfunc + timefunc = self.timefunc + pop = heapq.heappop + while True: + with lock: + if not q: + break + (time, priority, sequence, action, + argument, kwargs) = q[0] + now = timefunc() + if time > now: + delay = True + else: + delay = False + pop(q) + if delay: + if not blocking: + return time - now + delayfunc(time - now) + else: + action(*argument, **kwargs) + delayfunc(0) # Let other threads run + + @property + def queue(self): + """An ordered list of upcoming events. + + Events are named tuples with fields for: + time, priority, action, arguments, kwargs + + """ + # Use heapq to sort the queue rather than using 'sorted(self._queue)'. + # With heapq, two events scheduled at the same time will show in + # the actual order they would be retrieved. + with self._lock: + events = self._queue[:] + return list(map(heapq.heappop, [events]*len(events))) diff --git a/evalkit_cambrian/lib/python3.10/signal.py b/evalkit_cambrian/lib/python3.10/signal.py new file mode 100644 index 0000000000000000000000000000000000000000..50b215b29d2fadf6ccc38e860242d04e5d946fb5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/signal.py @@ -0,0 +1,92 @@ +import _signal +from _signal import * +from enum import IntEnum as _IntEnum + +_globals = globals() + +_IntEnum._convert_( + 'Signals', __name__, + lambda name: + name.isupper() + and (name.startswith('SIG') and not name.startswith('SIG_')) + or name.startswith('CTRL_')) + +_IntEnum._convert_( + 'Handlers', __name__, + lambda name: name in ('SIG_DFL', 'SIG_IGN')) + +if 'pthread_sigmask' in _globals: + _IntEnum._convert_( + 'Sigmasks', __name__, + lambda name: name in ('SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK')) + + +def _int_to_enum(value, enum_klass): + """Convert a numeric value to an IntEnum member. + If it's not a known member, return the numeric value itself. + """ + try: + return enum_klass(value) + except ValueError: + return value + + +def _enum_to_int(value): + """Convert an IntEnum member to a numeric value. + If it's not an IntEnum member return the value itself. + """ + try: + return int(value) + except (ValueError, TypeError): + return value + + +# Similar to functools.wraps(), but only assign __doc__. +# __module__ should be preserved, +# __name__ and __qualname__ are already fine, +# __annotations__ is not set. 
+def _wraps(wrapped): + def decorator(wrapper): + wrapper.__doc__ = wrapped.__doc__ + return wrapper + return decorator + +@_wraps(_signal.signal) +def signal(signalnum, handler): + handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler)) + return _int_to_enum(handler, Handlers) + + +@_wraps(_signal.getsignal) +def getsignal(signalnum): + handler = _signal.getsignal(signalnum) + return _int_to_enum(handler, Handlers) + + +if 'pthread_sigmask' in _globals: + @_wraps(_signal.pthread_sigmask) + def pthread_sigmask(how, mask): + sigs_set = _signal.pthread_sigmask(how, mask) + return set(_int_to_enum(x, Signals) for x in sigs_set) + + +if 'sigpending' in _globals: + @_wraps(_signal.sigpending) + def sigpending(): + return {_int_to_enum(x, Signals) for x in _signal.sigpending()} + + +if 'sigwait' in _globals: + @_wraps(_signal.sigwait) + def sigwait(sigset): + retsig = _signal.sigwait(sigset) + return _int_to_enum(retsig, Signals) + + +if 'valid_signals' in _globals: + @_wraps(_signal.valid_signals) + def valid_signals(): + return {_int_to_enum(x, Signals) for x in _signal.valid_signals()} + + +del _globals, _wraps diff --git a/evalkit_cambrian/lib/python3.10/sndhdr.py b/evalkit_cambrian/lib/python3.10/sndhdr.py new file mode 100644 index 0000000000000000000000000000000000000000..96595c6974468213e0a93414af95f4981bb609c5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/sndhdr.py @@ -0,0 +1,257 @@ +"""Routines to help recognizing sound files. + +Function whathdr() recognizes various types of sound file headers. +It understands almost all headers that SOX can decode. + +The return tuple contains the following items, in this order: +- file type (as SOX understands it) +- sampling rate (0 if unknown or hard to decode) +- number of channels (0 if unknown or hard to decode) +- number of frames in the file (-1 if unknown or hard to decode) +- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW + +If the file doesn't have a recognizable type, it returns None. +If the file can't be opened, OSError is raised. + +To compute the total time, divide the number of frames by the +sampling rate (a frame contains a sample for each channel). + +Function what() calls whathdr(). (It used to also use some +heuristics for raw data, but this doesn't work very well.) + +Finally, the function test() is a simple main program that calls +what() for all files mentioned on the argument list. For directory +arguments it calls what() for all files in that directory. Default +argument is "." (testing all files in the current directory). The +option -r tells it to recurse down directories found inside +explicitly given directories. +""" + +# The file structure is top-down except that the test program and its +# subroutine come last. 
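+#
+# A minimal usage sketch (the file name below is a placeholder, not a file
+# shipped with this module):
+#
+#     import sndhdr
+#     info = sndhdr.what('example.wav')     # SndHeaders namedtuple or None
+#     if info is not None:
+#         print(info.filetype, info.framerate, info.nchannels)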
+ +__all__ = ['what', 'whathdr'] + +from collections import namedtuple + +SndHeaders = namedtuple('SndHeaders', + 'filetype framerate nchannels nframes sampwidth') + +SndHeaders.filetype.__doc__ = ("""The value for type indicates the data type +and will be one of the strings 'aifc', 'aiff', 'au','hcom', +'sndr', 'sndt', 'voc', 'wav', '8svx', 'sb', 'ub', or 'ul'.""") +SndHeaders.framerate.__doc__ = ("""The sampling_rate will be either the actual +value or 0 if unknown or difficult to decode.""") +SndHeaders.nchannels.__doc__ = ("""The number of channels or 0 if it cannot be +determined or if the value is difficult to decode.""") +SndHeaders.nframes.__doc__ = ("""The value for frames will be either the number +of frames or -1.""") +SndHeaders.sampwidth.__doc__ = ("""Either the sample size in bits or +'A' for A-LAW or 'U' for u-LAW.""") + +def what(filename): + """Guess the type of a sound file.""" + res = whathdr(filename) + return res + + +def whathdr(filename): + """Recognize sound headers.""" + with open(filename, 'rb') as f: + h = f.read(512) + for tf in tests: + res = tf(h, f) + if res: + return SndHeaders(*res) + return None + + +#-----------------------------------# +# Subroutines per sound header type # +#-----------------------------------# + +tests = [] + +def test_aifc(h, f): + import aifc + if not h.startswith(b'FORM'): + return None + if h[8:12] == b'AIFC': + fmt = 'aifc' + elif h[8:12] == b'AIFF': + fmt = 'aiff' + else: + return None + f.seek(0) + try: + a = aifc.open(f, 'r') + except (EOFError, aifc.Error): + return None + return (fmt, a.getframerate(), a.getnchannels(), + a.getnframes(), 8 * a.getsampwidth()) + +tests.append(test_aifc) + + +def test_au(h, f): + if h.startswith(b'.snd'): + func = get_long_be + elif h[:4] in (b'\0ds.', b'dns.'): + func = get_long_le + else: + return None + filetype = 'au' + hdr_size = func(h[4:8]) + data_size = func(h[8:12]) + encoding = func(h[12:16]) + rate = func(h[16:20]) + nchannels = func(h[20:24]) + sample_size = 1 # default + if encoding == 1: + sample_bits = 'U' + elif encoding == 2: + sample_bits = 8 + elif encoding == 3: + sample_bits = 16 + sample_size = 2 + else: + sample_bits = '?' 
+ frame_size = sample_size * nchannels + if frame_size: + nframe = data_size / frame_size + else: + nframe = -1 + return filetype, rate, nchannels, nframe, sample_bits + +tests.append(test_au) + + +def test_hcom(h, f): + if h[65:69] != b'FSSD' or h[128:132] != b'HCOM': + return None + divisor = get_long_be(h[144:148]) + if divisor: + rate = 22050 / divisor + else: + rate = 0 + return 'hcom', rate, 1, -1, 8 + +tests.append(test_hcom) + + +def test_voc(h, f): + if not h.startswith(b'Creative Voice File\032'): + return None + sbseek = get_short_le(h[20:22]) + rate = 0 + if 0 <= sbseek < 500 and h[sbseek] == 1: + ratecode = 256 - h[sbseek+4] + if ratecode: + rate = int(1000000.0 / ratecode) + return 'voc', rate, 1, -1, 8 + +tests.append(test_voc) + + +def test_wav(h, f): + import wave + # 'RIFF' 'WAVE' 'fmt ' + if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ': + return None + f.seek(0) + try: + w = wave.open(f, 'r') + except (EOFError, wave.Error): + return None + return ('wav', w.getframerate(), w.getnchannels(), + w.getnframes(), 8*w.getsampwidth()) + +tests.append(test_wav) + + +def test_8svx(h, f): + if not h.startswith(b'FORM') or h[8:12] != b'8SVX': + return None + # Should decode it to get #channels -- assume always 1 + return '8svx', 0, 1, 0, 8 + +tests.append(test_8svx) + + +def test_sndt(h, f): + if h.startswith(b'SOUND'): + nsamples = get_long_le(h[8:12]) + rate = get_short_le(h[20:22]) + return 'sndt', rate, 1, nsamples, 8 + +tests.append(test_sndt) + + +def test_sndr(h, f): + if h.startswith(b'\0\0'): + rate = get_short_le(h[2:4]) + if 4000 <= rate <= 25000: + return 'sndr', rate, 1, -1, 8 + +tests.append(test_sndr) + + +#-------------------------------------------# +# Subroutines to extract numbers from bytes # +#-------------------------------------------# + +def get_long_be(b): + return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3] + +def get_long_le(b): + return (b[3] << 24) | (b[2] << 16) | (b[1] << 8) | b[0] + +def get_short_be(b): + return (b[0] << 8) | b[1] + +def get_short_le(b): + return (b[1] << 8) | b[0] + + +#--------------------# +# Small test program # +#--------------------# + +def test(): + import sys + recursive = 0 + if sys.argv[1:] and sys.argv[1] == '-r': + del sys.argv[1:2] + recursive = 1 + try: + if sys.argv[1:]: + testall(sys.argv[1:], recursive, 1) + else: + testall(['.'], recursive, 1) + except KeyboardInterrupt: + sys.stderr.write('\n[Interrupted]\n') + sys.exit(1) + +def testall(list, recursive, toplevel): + import sys + import os + for filename in list: + if os.path.isdir(filename): + print(filename + '/:', end=' ') + if recursive or toplevel: + print('recursing down:') + import glob + names = glob.glob(os.path.join(glob.escape(filename), '*')) + testall(names, recursive, 0) + else: + print('*** directory (use -r) ***') + else: + print(filename + ':', end=' ') + sys.stdout.flush() + try: + print(what(filename)) + except OSError: + print('*** not found ***') + +if __name__ == '__main__': + test() diff --git a/evalkit_cambrian/lib/python3.10/socketserver.py b/evalkit_cambrian/lib/python3.10/socketserver.py new file mode 100644 index 0000000000000000000000000000000000000000..0d9583d56a4d742aa04b075426cdb66b781ef0c5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/socketserver.py @@ -0,0 +1,844 @@ +"""Generic socket server classes. 
+
+This module tries to capture the various aspects of defining a server:
+
+For socket-based servers:
+
+- address family:
+        - AF_INET{,6}: IP (Internet Protocol) sockets (default)
+        - AF_UNIX: Unix domain sockets
+        - others, e.g. AF_DECNET are conceivable (see <socket.h>)
+- socket type:
+        - SOCK_STREAM (reliable stream, e.g. TCP)
+        - SOCK_DGRAM (datagrams, e.g. UDP)
+
+For request-based servers (including socket-based):
+
+- client address verification before further looking at the request
+        (This is actually a hook for any processing that needs to look
+         at the request before anything else, e.g. logging)
+- how to handle multiple requests:
+        - synchronous (one request is handled at a time)
+        - forking (each request is handled by a new process)
+        - threading (each request is handled by a new thread)
+
+The classes in this module favor the server type that is simplest to
+write: a synchronous TCP/IP server. This is bad class design, but
+saves some typing. (There's also the issue that a deep class hierarchy
+slows down method lookups.)
+
+There are five classes in an inheritance diagram, four of which represent
+synchronous servers of four types:
+
+        +------------+
+        | BaseServer |
+        +------------+
+              |
+              v
+        +-----------+        +------------------+
+        | TCPServer |------->| UnixStreamServer |
+        +-----------+        +------------------+
+              |
+              v
+        +-----------+        +--------------------+
+        | UDPServer |------->| UnixDatagramServer |
+        +-----------+        +--------------------+
+
+Note that UnixDatagramServer derives from UDPServer, not from
+UnixStreamServer -- the only difference between an IP and a Unix
+stream server is the address family, which is simply repeated in both
+unix server classes.
+
+Forking and threading versions of each type of server can be created
+using the ForkingMixIn and ThreadingMixIn mix-in classes. For
+instance, a threading UDP server class is created as follows:
+
+        class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+
+The Mix-in class must come first, since it overrides a method defined
+in UDPServer! Setting the various member variables also changes
+the behavior of the underlying server mechanism.
+
+To implement a service, you must derive a class from
+BaseRequestHandler and redefine its handle() method. You can then run
+various versions of the service by combining one of the server classes
+with your request handler class.
+
+The request handler class must be different for datagram or stream
+services. This can be hidden by using the request handler
+subclasses StreamRequestHandler or DatagramRequestHandler.
+
+Of course, you still have to use your head!
+
+For instance, it makes no sense to use a forking server if the service
+contains state in memory that can be modified by requests (since the
+modifications in the child process would never reach the initial state
+kept in the parent process and passed to each child). In this case,
+you can use a threading server, but you will probably have to use
+locks to prevent two requests that come in nearly simultaneously from
+applying conflicting changes to the server state.
+
+On the other hand, if you are building e.g. an HTTP server, where all
+data is stored externally (e.g. in the file system), a synchronous
+class will essentially render the service "deaf" while one request is
+being handled -- which may be for a very long time if a client is slow
+to read all the data it has requested. Here a threading or forking
+server is appropriate.
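+
+As a concrete sketch (class and variable names here are only illustrative),
+a small threading echo server built from the classes below might look like:
+
+        class EchoHandler(StreamRequestHandler):
+            def handle(self):
+                line = self.rfile.readline()
+                self.wfile.write(line)
+
+        with ThreadingTCPServer(('127.0.0.1', 0), EchoHandler) as server:
+            server.serve_forever()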
+ +In some cases, it may be appropriate to process part of a request +synchronously, but to finish processing in a forked child depending on +the request data. This can be implemented by using a synchronous +server and doing an explicit fork in the request handler class +handle() method. + +Another approach to handling multiple simultaneous requests in an +environment that supports neither threads nor fork (or where these are +too expensive or inappropriate for the service) is to maintain an +explicit table of partially finished requests and to use a selector to +decide which request to work on next (or whether to handle a new +incoming request). This is particularly important for stream services +where each client can potentially be connected for a long time (if +threads or subprocesses cannot be used). + +Future work: +- Standard classes for Sun RPC (which uses either UDP or TCP) +- Standard mix-in classes to implement various authentication + and encryption schemes + +XXX Open problems: +- What to do with out-of-band data? + +BaseServer: +- split generic "request" functionality out into BaseServer class. + Copyright (C) 2000 Luke Kenneth Casson Leighton + + example: read entries from a SQL database (requires overriding + get_request() to return a table entry from the database). + entry is processed by a RequestHandlerClass. + +""" + +# Author of the BaseServer patch: Luke Kenneth Casson Leighton + +__version__ = "0.4" + + +import socket +import selectors +import os +import sys +import threading +from io import BufferedIOBase +from time import monotonic as time + +__all__ = ["BaseServer", "TCPServer", "UDPServer", + "ThreadingUDPServer", "ThreadingTCPServer", + "BaseRequestHandler", "StreamRequestHandler", + "DatagramRequestHandler", "ThreadingMixIn"] +if hasattr(os, "fork"): + __all__.extend(["ForkingUDPServer","ForkingTCPServer", "ForkingMixIn"]) +if hasattr(socket, "AF_UNIX"): + __all__.extend(["UnixStreamServer","UnixDatagramServer", + "ThreadingUnixStreamServer", + "ThreadingUnixDatagramServer"]) + +# poll/select have the advantage of not requiring any extra file descriptor, +# contrarily to epoll/kqueue (also, they require a single syscall). +if hasattr(selectors, 'PollSelector'): + _ServerSelector = selectors.PollSelector +else: + _ServerSelector = selectors.SelectSelector + + +class BaseServer: + + """Base class for server classes. + + Methods for the caller: + + - __init__(server_address, RequestHandlerClass) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you do not use serve_forever() + - fileno() -> int # for selector + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - server_close() + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - service_actions() + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - allow_reuse_address + + Instance variables: + + - RequestHandlerClass + - socket + + """ + + timeout = None + + def __init__(self, server_address, RequestHandlerClass): + """Constructor. 
May be extended, do not override.""" + self.server_address = server_address + self.RequestHandlerClass = RequestHandlerClass + self.__is_shut_down = threading.Event() + self.__shutdown_request = False + + def server_activate(self): + """Called by constructor to activate the server. + + May be overridden. + + """ + pass + + def serve_forever(self, poll_interval=0.5): + """Handle one request at a time until shutdown. + + Polls for shutdown every poll_interval seconds. Ignores + self.timeout. If you need to do periodic tasks, do them in + another thread. + """ + self.__is_shut_down.clear() + try: + # XXX: Consider using another file descriptor or connecting to the + # socket to wake this up instead of polling. Polling reduces our + # responsiveness to a shutdown request and wastes cpu at all other + # times. + with _ServerSelector() as selector: + selector.register(self, selectors.EVENT_READ) + + while not self.__shutdown_request: + ready = selector.select(poll_interval) + # bpo-35017: shutdown() called during select(), exit immediately. + if self.__shutdown_request: + break + if ready: + self._handle_request_noblock() + + self.service_actions() + finally: + self.__shutdown_request = False + self.__is_shut_down.set() + + def shutdown(self): + """Stops the serve_forever loop. + + Blocks until the loop has finished. This must be called while + serve_forever() is running in another thread, or it will + deadlock. + """ + self.__shutdown_request = True + self.__is_shut_down.wait() + + def service_actions(self): + """Called by the serve_forever() loop. + + May be overridden by a subclass / Mixin to implement any code that + needs to be run during the loop. + """ + pass + + # The distinction between handling, getting, processing and finishing a + # request is fairly arbitrary. Remember: + # + # - handle_request() is the top-level call. It calls selector.select(), + # get_request(), verify_request() and process_request() + # - get_request() is different for stream or datagram sockets + # - process_request() is the place that may fork a new process or create a + # new thread to finish the request + # - finish_request() instantiates the request handler class; this + # constructor will handle the request all by itself + + def handle_request(self): + """Handle one request, possibly blocking. + + Respects self.timeout. + """ + # Support people who used socket.settimeout() to escape + # handle_request before self.timeout was available. + timeout = self.socket.gettimeout() + if timeout is None: + timeout = self.timeout + elif self.timeout is not None: + timeout = min(timeout, self.timeout) + if timeout is not None: + deadline = time() + timeout + + # Wait until a request arrives or the timeout expires - the loop is + # necessary to accommodate early wakeups due to EINTR. + with _ServerSelector() as selector: + selector.register(self, selectors.EVENT_READ) + + while True: + ready = selector.select(timeout) + if ready: + return self._handle_request_noblock() + else: + if timeout is not None: + timeout = deadline - time() + if timeout < 0: + return self.handle_timeout() + + def _handle_request_noblock(self): + """Handle one request, without blocking. + + I assume that selector.select() has returned that the socket is + readable before this function was called, so there should be no risk of + blocking in get_request(). 
+ """ + try: + request, client_address = self.get_request() + except OSError: + return + if self.verify_request(request, client_address): + try: + self.process_request(request, client_address) + except Exception: + self.handle_error(request, client_address) + self.shutdown_request(request) + except: + self.shutdown_request(request) + raise + else: + self.shutdown_request(request) + + def handle_timeout(self): + """Called if no new request arrives within self.timeout. + + Overridden by ForkingMixIn. + """ + pass + + def verify_request(self, request, client_address): + """Verify the request. May be overridden. + + Return True if we should proceed with this request. + + """ + return True + + def process_request(self, request, client_address): + """Call finish_request. + + Overridden by ForkingMixIn and ThreadingMixIn. + + """ + self.finish_request(request, client_address) + self.shutdown_request(request) + + def server_close(self): + """Called to clean-up the server. + + May be overridden. + + """ + pass + + def finish_request(self, request, client_address): + """Finish one request by instantiating RequestHandlerClass.""" + self.RequestHandlerClass(request, client_address, self) + + def shutdown_request(self, request): + """Called to shutdown and close an individual request.""" + self.close_request(request) + + def close_request(self, request): + """Called to clean up an individual request.""" + pass + + def handle_error(self, request, client_address): + """Handle an error gracefully. May be overridden. + + The default is to print a traceback and continue. + + """ + print('-'*40, file=sys.stderr) + print('Exception occurred during processing of request from', + client_address, file=sys.stderr) + import traceback + traceback.print_exc() + print('-'*40, file=sys.stderr) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.server_close() + + +class TCPServer(BaseServer): + + """Base class for various socket-based server classes. + + Defaults to synchronous IP stream (i.e., TCP). + + Methods for the caller: + + - __init__(server_address, RequestHandlerClass, bind_and_activate=True) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you don't use serve_forever() + - fileno() -> int # for selector + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - request_queue_size (only for stream sockets) + - allow_reuse_address + + Instance variables: + + - server_address + - RequestHandlerClass + - socket + + """ + + address_family = socket.AF_INET + + socket_type = socket.SOCK_STREAM + + request_queue_size = 5 + + allow_reuse_address = False + + def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True): + """Constructor. May be extended, do not override.""" + BaseServer.__init__(self, server_address, RequestHandlerClass) + self.socket = socket.socket(self.address_family, + self.socket_type) + if bind_and_activate: + try: + self.server_bind() + self.server_activate() + except: + self.server_close() + raise + + def server_bind(self): + """Called by constructor to bind the socket. 
+
+        May be overridden.
+
+        """
+        if self.allow_reuse_address:
+            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.socket.bind(self.server_address)
+        self.server_address = self.socket.getsockname()
+
+    def server_activate(self):
+        """Called by constructor to activate the server.
+
+        May be overridden.
+
+        """
+        self.socket.listen(self.request_queue_size)
+
+    def server_close(self):
+        """Called to clean-up the server.
+
+        May be overridden.
+
+        """
+        self.socket.close()
+
+    def fileno(self):
+        """Return socket file number.
+
+        Interface required by selector.
+
+        """
+        return self.socket.fileno()
+
+    def get_request(self):
+        """Get the request and client address from the socket.
+
+        May be overridden.
+
+        """
+        return self.socket.accept()
+
+    def shutdown_request(self, request):
+        """Called to shutdown and close an individual request."""
+        try:
+            #explicitly shutdown. socket.close() merely releases
+            #the socket and waits for GC to perform the actual close.
+            request.shutdown(socket.SHUT_WR)
+        except OSError:
+            pass #some platforms may raise ENOTCONN here
+        self.close_request(request)
+
+    def close_request(self, request):
+        """Called to clean up an individual request."""
+        request.close()
+
+
+class UDPServer(TCPServer):
+
+    """UDP server class."""
+
+    allow_reuse_address = False
+
+    socket_type = socket.SOCK_DGRAM
+
+    max_packet_size = 8192
+
+    def get_request(self):
+        data, client_addr = self.socket.recvfrom(self.max_packet_size)
+        return (data, self.socket), client_addr
+
+    def server_activate(self):
+        # No need to call listen() for UDP.
+        pass
+
+    def shutdown_request(self, request):
+        # No need to shutdown anything.
+        self.close_request(request)
+
+    def close_request(self, request):
+        # No need to close anything.
+        pass
+
+if hasattr(os, "fork"):
+    class ForkingMixIn:
+        """Mix-in class to handle each request in a new process."""
+
+        timeout = 300
+        active_children = None
+        max_children = 40
+        # If true, server_close() waits until all child processes complete.
+        block_on_close = True
+
+        def collect_children(self, *, blocking=False):
+            """Internal routine to wait for children that have exited."""
+            if self.active_children is None:
+                return
+
+            # If we're above the max number of children, wait and reap them until
+            # we go back below threshold. Note that we use waitpid(-1) below to be
+            # able to collect children in size(<n>) syscalls instead
+            # of size(): the downside is that this might reap children
+            # which we didn't spawn, which is why we only resort to this when we're
+            # above max_children.
+            while len(self.active_children) >= self.max_children:
+                try:
+                    pid, _ = os.waitpid(-1, 0)
+                    self.active_children.discard(pid)
+                except ChildProcessError:
+                    # we don't have any children, we're done
+                    self.active_children.clear()
+                except OSError:
+                    break
+
+            # Now reap all defunct children.
+            for pid in self.active_children.copy():
+                try:
+                    flags = 0 if blocking else os.WNOHANG
+                    pid, _ = os.waitpid(pid, flags)
+                    # if the child hasn't exited yet, pid will be 0 and ignored by
+                    # discard() below
+                    self.active_children.discard(pid)
+                except ChildProcessError:
+                    # someone else reaped it
+                    self.active_children.discard(pid)
+                except OSError:
+                    pass
+
+        def handle_timeout(self):
+            """Wait for zombies after self.timeout seconds of inactivity.
+
+            May be extended, do not override.
+            """
+            self.collect_children()
+
+        def service_actions(self):
+            """Collect the zombie child processes regularly in the ForkingMixIn.
+
+            service_actions is called in the BaseServer's serve_forever loop.
+ """ + self.collect_children() + + def process_request(self, request, client_address): + """Fork a new subprocess to process the request.""" + pid = os.fork() + if pid: + # Parent process + if self.active_children is None: + self.active_children = set() + self.active_children.add(pid) + self.close_request(request) + return + else: + # Child process. + # This must never return, hence os._exit()! + status = 1 + try: + self.finish_request(request, client_address) + status = 0 + except Exception: + self.handle_error(request, client_address) + finally: + try: + self.shutdown_request(request) + finally: + os._exit(status) + + def server_close(self): + super().server_close() + self.collect_children(blocking=self.block_on_close) + + +class _Threads(list): + """ + Joinable list of all non-daemon threads. + """ + def append(self, thread): + self.reap() + if thread.daemon: + return + super().append(thread) + + def pop_all(self): + self[:], result = [], self[:] + return result + + def join(self): + for thread in self.pop_all(): + thread.join() + + def reap(self): + self[:] = (thread for thread in self if thread.is_alive()) + + +class _NoThreads: + """ + Degenerate version of _Threads. + """ + def append(self, thread): + pass + + def join(self): + pass + + +class ThreadingMixIn: + """Mix-in class to handle each request in a new thread.""" + + # Decides how threads will act upon termination of the + # main process + daemon_threads = False + # If true, server_close() waits until all non-daemonic threads terminate. + block_on_close = True + # Threads object + # used by server_close() to wait for all threads completion. + _threads = _NoThreads() + + def process_request_thread(self, request, client_address): + """Same as in BaseServer but as a thread. + + In addition, exception handling is done here. + + """ + try: + self.finish_request(request, client_address) + except Exception: + self.handle_error(request, client_address) + finally: + self.shutdown_request(request) + + def process_request(self, request, client_address): + """Start a new thread to process the request.""" + if self.block_on_close: + vars(self).setdefault('_threads', _Threads()) + t = threading.Thread(target = self.process_request_thread, + args = (request, client_address)) + t.daemon = self.daemon_threads + self._threads.append(t) + t.start() + + def server_close(self): + super().server_close() + self._threads.join() + + +if hasattr(os, "fork"): + class ForkingUDPServer(ForkingMixIn, UDPServer): pass + class ForkingTCPServer(ForkingMixIn, TCPServer): pass + +class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass +class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass + +if hasattr(socket, 'AF_UNIX'): + + class UnixStreamServer(TCPServer): + address_family = socket.AF_UNIX + + class UnixDatagramServer(UDPServer): + address_family = socket.AF_UNIX + + class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass + + class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass + +class BaseRequestHandler: + + """Base class for request handler classes. + + This class is instantiated for each request to be handled. The + constructor sets the instance variables request, client_address + and server, and then calls the handle() method. To implement a + specific service, all you need to do is to derive a class which + defines a handle() method. 
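+
+    A minimal sketch for a datagram service (handler name is illustrative;
+    with a UDPServer, self.request is a (data, socket) pair):
+
+        class UpperCaseHandler(BaseRequestHandler):
+            def handle(self):
+                data, sock = self.request
+                sock.sendto(data.upper(), self.client_address)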
+ + The handle() method can find the request as self.request, the + client address as self.client_address, and the server (in case it + needs access to per-server information) as self.server. Since a + separate instance is created for each request, the handle() method + can define other arbitrary instance variables. + + """ + + def __init__(self, request, client_address, server): + self.request = request + self.client_address = client_address + self.server = server + self.setup() + try: + self.handle() + finally: + self.finish() + + def setup(self): + pass + + def handle(self): + pass + + def finish(self): + pass + + +# The following two classes make it possible to use the same service +# class for stream or datagram servers. +# Each class sets up these instance variables: +# - rfile: a file object from which receives the request is read +# - wfile: a file object to which the reply is written +# When the handle() method returns, wfile is flushed properly + + +class StreamRequestHandler(BaseRequestHandler): + + """Define self.rfile and self.wfile for stream sockets.""" + + # Default buffer sizes for rfile, wfile. + # We default rfile to buffered because otherwise it could be + # really slow for large data (a getc() call per byte); we make + # wfile unbuffered because (a) often after a write() we want to + # read and we need to flush the line; (b) big writes to unbuffered + # files are typically optimized by stdio even when big reads + # aren't. + rbufsize = -1 + wbufsize = 0 + + # A timeout to apply to the request socket, if not None. + timeout = None + + # Disable nagle algorithm for this socket, if True. + # Use only when wbufsize != 0, to avoid small packets. + disable_nagle_algorithm = False + + def setup(self): + self.connection = self.request + if self.timeout is not None: + self.connection.settimeout(self.timeout) + if self.disable_nagle_algorithm: + self.connection.setsockopt(socket.IPPROTO_TCP, + socket.TCP_NODELAY, True) + self.rfile = self.connection.makefile('rb', self.rbufsize) + if self.wbufsize == 0: + self.wfile = _SocketWriter(self.connection) + else: + self.wfile = self.connection.makefile('wb', self.wbufsize) + + def finish(self): + if not self.wfile.closed: + try: + self.wfile.flush() + except socket.error: + # A final socket error may have occurred here, such as + # the local error ECONNABORTED. + pass + self.wfile.close() + self.rfile.close() + +class _SocketWriter(BufferedIOBase): + """Simple writable BufferedIOBase implementation for a socket + + Does not hold data in a buffer, avoiding any need to call flush().""" + + def __init__(self, sock): + self._sock = sock + + def writable(self): + return True + + def write(self, b): + self._sock.sendall(b) + with memoryview(b) as view: + return view.nbytes + + def fileno(self): + return self._sock.fileno() + +class DatagramRequestHandler(BaseRequestHandler): + + """Define self.rfile and self.wfile for datagram sockets.""" + + def setup(self): + from io import BytesIO + self.packet, self.socket = self.request + self.rfile = BytesIO(self.packet) + self.wfile = BytesIO() + + def finish(self): + self.socket.sendto(self.wfile.getvalue(), self.client_address) diff --git a/evalkit_cambrian/lib/python3.10/string.py b/evalkit_cambrian/lib/python3.10/string.py new file mode 100644 index 0000000000000000000000000000000000000000..489777b10c25df7ea1c53444f0df800022af56b4 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/string.py @@ -0,0 +1,280 @@ +"""A collection of string constants. 
+ +Public module variables: + +whitespace -- a string containing all ASCII whitespace +ascii_lowercase -- a string containing all ASCII lowercase letters +ascii_uppercase -- a string containing all ASCII uppercase letters +ascii_letters -- a string containing all ASCII letters +digits -- a string containing all ASCII decimal digits +hexdigits -- a string containing all ASCII hexadecimal digits +octdigits -- a string containing all ASCII octal digits +punctuation -- a string containing all ASCII punctuation characters +printable -- a string containing all ASCII characters considered printable + +""" + +__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords", + "digits", "hexdigits", "octdigits", "printable", "punctuation", + "whitespace", "Formatter", "Template"] + +import _string + +# Some strings for ctype-style character classification +whitespace = ' \t\n\r\v\f' +ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz' +ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' +ascii_letters = ascii_lowercase + ascii_uppercase +digits = '0123456789' +hexdigits = digits + 'abcdef' + 'ABCDEF' +octdigits = '01234567' +punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" +printable = digits + ascii_letters + punctuation + whitespace + +# Functions which aren't available as string methods. + +# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def". +def capwords(s, sep=None): + """capwords(s [,sep]) -> string + + Split the argument into words using split, capitalize each + word using capitalize, and join the capitalized words using + join. If the optional second argument sep is absent or None, + runs of whitespace characters are replaced by a single space + and leading and trailing whitespace are removed, otherwise + sep is used to split and join the words. + + """ + return (sep or ' ').join(x.capitalize() for x in s.split(sep)) + + +#################################################################### +import re as _re +from collections import ChainMap as _ChainMap + +_sentinel_dict = {} + +class Template: + """A string class for supporting $-substitutions.""" + + delimiter = '$' + # r'[a-z]' matches to non-ASCII letters when used with IGNORECASE, but + # without the ASCII flag. We can't add re.ASCII to flags because of + # backward compatibility. So we use the ?a local flag and [a-z] pattern. 
+    # See https://bugs.python.org/issue31672
+    idpattern = r'(?a:[_a-z][_a-z0-9]*)'
+    braceidpattern = None
+    flags = _re.IGNORECASE
+
+    def __init_subclass__(cls):
+        super().__init_subclass__()
+        if 'pattern' in cls.__dict__:
+            pattern = cls.pattern
+        else:
+            delim = _re.escape(cls.delimiter)
+            id = cls.idpattern
+            bid = cls.braceidpattern or cls.idpattern
+            pattern = fr"""
+            {delim}(?:
+              (?P<escaped>{delim})  |   # Escape sequence of two delimiters
+              (?P<named>{id})       |   # delimiter and a Python identifier
+              {{(?P<braced>{bid})}} |   # delimiter and a braced identifier
+              (?P<invalid>)             # Other ill-formed delimiter exprs
+            )
+            """
+        cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
+
+    def __init__(self, template):
+        self.template = template
+
+    # Search for $$, $identifier, ${identifier}, and any bare $'s
+
+    def _invalid(self, mo):
+        i = mo.start('invalid')
+        lines = self.template[:i].splitlines(keepends=True)
+        if not lines:
+            colno = 1
+            lineno = 1
+        else:
+            colno = i - len(''.join(lines[:-1]))
+            lineno = len(lines)
+        raise ValueError('Invalid placeholder in string: line %d, col %d' %
+                         (lineno, colno))
+
+    def substitute(self, mapping=_sentinel_dict, /, **kws):
+        if mapping is _sentinel_dict:
+            mapping = kws
+        elif kws:
+            mapping = _ChainMap(kws, mapping)
+        # Helper function for .sub()
+        def convert(mo):
+            # Check the most common path first.
+            named = mo.group('named') or mo.group('braced')
+            if named is not None:
+                return str(mapping[named])
+            if mo.group('escaped') is not None:
+                return self.delimiter
+            if mo.group('invalid') is not None:
+                self._invalid(mo)
+            raise ValueError('Unrecognized named group in pattern',
+                             self.pattern)
+        return self.pattern.sub(convert, self.template)
+
+    def safe_substitute(self, mapping=_sentinel_dict, /, **kws):
+        if mapping is _sentinel_dict:
+            mapping = kws
+        elif kws:
+            mapping = _ChainMap(kws, mapping)
+        # Helper function for .sub()
+        def convert(mo):
+            named = mo.group('named') or mo.group('braced')
+            if named is not None:
+                try:
+                    return str(mapping[named])
+                except KeyError:
+                    return mo.group()
+            if mo.group('escaped') is not None:
+                return self.delimiter
+            if mo.group('invalid') is not None:
+                return mo.group()
+            raise ValueError('Unrecognized named group in pattern',
+                             self.pattern)
+        return self.pattern.sub(convert, self.template)
+
+# Initialize Template.pattern. __init_subclass__() is automatically called
+# only for subclasses, not for the Template class itself.
+Template.__init_subclass__()
+
+
+########################################################################
+# the Formatter class
+# see PEP 3101 for details and purpose of this class
+
+# The hard parts are reused from the C implementation. They're exposed as "_"
+# prefixed methods of str.
+
+# The overall parser is implemented in _string.formatter_parser.
+# The field name parser is implemented in _string.formatter_field_name_split + +class Formatter: + def format(self, format_string, /, *args, **kwargs): + return self.vformat(format_string, args, kwargs) + + def vformat(self, format_string, args, kwargs): + used_args = set() + result, _ = self._vformat(format_string, args, kwargs, used_args, 2) + self.check_unused_args(used_args, args, kwargs) + return result + + def _vformat(self, format_string, args, kwargs, used_args, recursion_depth, + auto_arg_index=0): + if recursion_depth < 0: + raise ValueError('Max string recursion exceeded') + result = [] + for literal_text, field_name, format_spec, conversion in \ + self.parse(format_string): + + # output the literal text + if literal_text: + result.append(literal_text) + + # if there's a field, output it + if field_name is not None: + # this is some markup, find the object and do + # the formatting + + # handle arg indexing when empty field_names are given. + if field_name == '': + if auto_arg_index is False: + raise ValueError('cannot switch from manual field ' + 'specification to automatic field ' + 'numbering') + field_name = str(auto_arg_index) + auto_arg_index += 1 + elif field_name.isdigit(): + if auto_arg_index: + raise ValueError('cannot switch from manual field ' + 'specification to automatic field ' + 'numbering') + # disable auto arg incrementing, if it gets + # used later on, then an exception will be raised + auto_arg_index = False + + # given the field_name, find the object it references + # and the argument it came from + obj, arg_used = self.get_field(field_name, args, kwargs) + used_args.add(arg_used) + + # do any conversion on the resulting object + obj = self.convert_field(obj, conversion) + + # expand the format spec, if needed + format_spec, auto_arg_index = self._vformat( + format_spec, args, kwargs, + used_args, recursion_depth-1, + auto_arg_index=auto_arg_index) + + # format the object and append to the result + result.append(self.format_field(obj, format_spec)) + + return ''.join(result), auto_arg_index + + + def get_value(self, key, args, kwargs): + if isinstance(key, int): + return args[key] + else: + return kwargs[key] + + + def check_unused_args(self, used_args, args, kwargs): + pass + + + def format_field(self, value, format_spec): + return format(value, format_spec) + + + def convert_field(self, value, conversion): + # do any conversion on the resulting object + if conversion is None: + return value + elif conversion == 's': + return str(value) + elif conversion == 'r': + return repr(value) + elif conversion == 'a': + return ascii(value) + raise ValueError("Unknown conversion specifier {0!s}".format(conversion)) + + + # returns an iterable that contains tuples of the form: + # (literal_text, field_name, format_spec, conversion) + # literal_text can be zero length + # field_name can be None, in which case there's no + # object to format and output + # if field_name is not None, it is looked up, formatted + # with format_spec and conversion and then used + def parse(self, format_string): + return _string.formatter_parser(format_string) + + + # given a field_name, find the object it references. + # field_name: the field being looked up, e.g. 
"0.name" + # or "lookup[3]" + # used_args: a set of which args have been used + # args, kwargs: as passed in to vformat + def get_field(self, field_name, args, kwargs): + first, rest = _string.formatter_field_name_split(field_name) + + obj = self.get_value(first, args, kwargs) + + # loop through the rest of the field_name, doing + # getattr or getitem as needed + for is_attr, i in rest: + if is_attr: + obj = getattr(obj, i) + else: + obj = obj[i] + + return obj, first diff --git a/evalkit_cambrian/lib/python3.10/textwrap.py b/evalkit_cambrian/lib/python3.10/textwrap.py new file mode 100644 index 0000000000000000000000000000000000000000..841de9baecf5d8a497b26d1648081d69a4612e5e --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/textwrap.py @@ -0,0 +1,494 @@ +"""Text wrapping and filling. +""" + +# Copyright (C) 1999-2001 Gregory P. Ward. +# Copyright (C) 2002, 2003 Python Software Foundation. +# Written by Greg Ward + +import re + +__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten'] + +# Hardcode the recognized whitespace characters to the US-ASCII +# whitespace characters. The main reason for doing this is that +# some Unicode spaces (like \u00a0) are non-breaking whitespaces. +_whitespace = '\t\n\x0b\x0c\r ' + +class TextWrapper: + """ + Object for wrapping/filling text. The public interface consists of + the wrap() and fill() methods; the other methods are just there for + subclasses to override in order to tweak the default behaviour. + If you want to completely replace the main wrapping algorithm, + you'll probably have to override _wrap_chunks(). + + Several instance attributes control various aspects of wrapping: + width (default: 70) + the maximum width of wrapped lines (unless break_long_words + is false) + initial_indent (default: "") + string that will be prepended to the first line of wrapped + output. Counts towards the line's width. + subsequent_indent (default: "") + string that will be prepended to all lines save the first + of wrapped output; also counts towards each line's width. + expand_tabs (default: true) + Expand tabs in input text to spaces before further processing. + Each tab will become 0 .. 'tabsize' spaces, depending on its position + in its line. If false, each tab is treated as a single character. + tabsize (default: 8) + Expand tabs in input text to 0 .. 'tabsize' spaces, unless + 'expand_tabs' is false. + replace_whitespace (default: true) + Replace all whitespace characters in the input text by spaces + after tab expansion. Note that if expand_tabs is false and + replace_whitespace is true, every tab will be converted to a + single space! + fix_sentence_endings (default: false) + Ensure that sentence-ending punctuation is always followed + by two spaces. Off by default because the algorithm is + (unavoidably) imperfect. + break_long_words (default: true) + Break words longer than 'width'. If false, those words will not + be broken, and some lines might be longer than 'width'. + break_on_hyphens (default: true) + Allow breaking hyphenated words. If true, wrapping will occur + preferably on whitespaces and right after hyphens part of + compound words. + drop_whitespace (default: true) + Drop leading and trailing whitespace from lines. + max_lines (default: None) + Truncate wrapped lines. + placeholder (default: ' [...]') + Append to the last line of truncated text. 
+ """ + + unicode_whitespace_trans = {} + uspace = ord(' ') + for x in _whitespace: + unicode_whitespace_trans[ord(x)] = uspace + + # This funky little regex is just the trick for splitting + # text up into word-wrappable chunks. E.g. + # "Hello there -- you goof-ball, use the -b option!" + # splits into + # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! + # (after stripping out empty strings). + word_punct = r'[\w!"\'&.,?]' + letter = r'[^\d\W]' + whitespace = r'[%s]' % re.escape(_whitespace) + nowhitespace = '[^' + whitespace[1:] + wordsep_re = re.compile(r''' + ( # any whitespace + %(ws)s+ + | # em-dash between words + (?<=%(wp)s) -{2,} (?=\w) + | # word, possibly hyphenated + %(nws)s+? (?: + # hyphenated word + -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-)) + (?= %(lt)s -? %(lt)s) + | # end of word + (?=%(ws)s|\Z) + | # em-dash + (?<=%(wp)s) (?=-{2,}\w) + ) + )''' % {'wp': word_punct, 'lt': letter, + 'ws': whitespace, 'nws': nowhitespace}, + re.VERBOSE) + del word_punct, letter, nowhitespace + + # This less funky little regex just split on recognized spaces. E.g. + # "Hello there -- you goof-ball, use the -b option!" + # splits into + # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/ + wordsep_simple_re = re.compile(r'(%s+)' % whitespace) + del whitespace + + # XXX this is not locale- or charset-aware -- string.lowercase + # is US-ASCII only (and therefore English-only) + sentence_end_re = re.compile(r'[a-z]' # lowercase letter + r'[\.\!\?]' # sentence-ending punct. + r'[\"\']?' # optional end-of-quote + r'\Z') # end of chunk + + def __init__(self, + width=70, + initial_indent="", + subsequent_indent="", + expand_tabs=True, + replace_whitespace=True, + fix_sentence_endings=False, + break_long_words=True, + drop_whitespace=True, + break_on_hyphens=True, + tabsize=8, + *, + max_lines=None, + placeholder=' [...]'): + self.width = width + self.initial_indent = initial_indent + self.subsequent_indent = subsequent_indent + self.expand_tabs = expand_tabs + self.replace_whitespace = replace_whitespace + self.fix_sentence_endings = fix_sentence_endings + self.break_long_words = break_long_words + self.drop_whitespace = drop_whitespace + self.break_on_hyphens = break_on_hyphens + self.tabsize = tabsize + self.max_lines = max_lines + self.placeholder = placeholder + + + # -- Private methods ----------------------------------------------- + # (possibly useful for subclasses to override) + + def _munge_whitespace(self, text): + """_munge_whitespace(text : string) -> string + + Munge whitespace in text: expand tabs and convert all other + whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" + becomes " foo bar baz". + """ + if self.expand_tabs: + text = text.expandtabs(self.tabsize) + if self.replace_whitespace: + text = text.translate(self.unicode_whitespace_trans) + return text + + + def _split(self, text): + """_split(text : string) -> [string] + + Split the text to wrap into indivisible chunks. Chunks are + not quite the same as words; see _wrap_chunks() for full + details. As an example, the text + Look, goof-ball -- use the -b option! + breaks into the following chunks: + 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', 'option!' + if break_on_hyphens is True, or in: + 'Look,', ' ', 'goof-ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', option!' + otherwise. 
+ """ + if self.break_on_hyphens is True: + chunks = self.wordsep_re.split(text) + else: + chunks = self.wordsep_simple_re.split(text) + chunks = [c for c in chunks if c] + return chunks + + def _fix_sentence_endings(self, chunks): + """_fix_sentence_endings(chunks : [string]) + + Correct for sentence endings buried in 'chunks'. Eg. when the + original text contains "... foo.\\nBar ...", munge_whitespace() + and split() will convert that to [..., "foo.", " ", "Bar", ...] + which has one too few spaces; this method simply changes the one + space to two. + """ + i = 0 + patsearch = self.sentence_end_re.search + while i < len(chunks)-1: + if chunks[i+1] == " " and patsearch(chunks[i]): + chunks[i+1] = " " + i += 2 + else: + i += 1 + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + """_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + """ + # Figure out when indent is larger than the specified width, and make + # sure at least one character is stripped off on every pass + if width < 1: + space_left = 1 + else: + space_left = width - cur_len + + # If we're allowed to break long words, then do so: put as much + # of the next chunk onto the current line as will fit. + if self.break_long_words: + end = space_left + chunk = reversed_chunks[-1] + if self.break_on_hyphens and len(chunk) > space_left: + # break after last hyphen, but only if there are + # non-hyphens before it + hyphen = chunk.rfind('-', 0, space_left) + if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]): + end = hyphen + 1 + cur_line.append(chunk[:end]) + reversed_chunks[-1] = chunk[end:] + + # Otherwise, we have to preserve the long word intact. Only add + # it to the current line if there's nothing already there -- + # that minimizes how much we violate the width constraint. + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + # If we're not allowed to break long words, and there's already + # text on the current line, do nothing. Next time through the + # main loop of _wrap_chunks(), we'll wind up here again, but + # cur_len will be zero, so the next line will be entirely + # devoted to the long word that we can't handle right now. + + def _wrap_chunks(self, chunks): + """_wrap_chunks(chunks : [string]) -> [string] + + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". + Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + """ + lines = [] + if self.width <= 0: + raise ValueError("invalid width %r (must be > 0)" % self.width) + if self.max_lines is not None: + if self.max_lines > 1: + indent = self.subsequent_indent + else: + indent = self.initial_indent + if len(indent) + len(self.placeholder.lstrip()) > self.width: + raise ValueError("placeholder too large for max width") + + # Arrange in reverse order so items can be efficiently popped + # from a stack of chucks. + chunks.reverse() + + while chunks: + + # Start the list of chunks that will make up the current line. 
+ # cur_len is just the length of all the chunks in cur_line. + cur_line = [] + cur_len = 0 + + # Figure out which static string will prefix this line. + if lines: + indent = self.subsequent_indent + else: + indent = self.initial_indent + + # Maximum width for this line. + width = self.width - len(indent) + + # First chunk on line is whitespace -- drop it, unless this + # is the very beginning of the text (ie. no lines started yet). + if self.drop_whitespace and chunks[-1].strip() == '' and lines: + del chunks[-1] + + while chunks: + l = len(chunks[-1]) + + # Can at least squeeze this chunk onto the current line. + if cur_len + l <= width: + cur_line.append(chunks.pop()) + cur_len += l + + # Nope, this line is full. + else: + break + + # The current line is full, and the next chunk is too big to + # fit on *any* line (not just this one). + if chunks and len(chunks[-1]) > width: + self._handle_long_word(chunks, cur_line, cur_len, width) + cur_len = sum(map(len, cur_line)) + + # If the last chunk on this line is all whitespace, drop it. + if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': + cur_len -= len(cur_line[-1]) + del cur_line[-1] + + if cur_line: + if (self.max_lines is None or + len(lines) + 1 < self.max_lines or + (not chunks or + self.drop_whitespace and + len(chunks) == 1 and + not chunks[0].strip()) and cur_len <= width): + # Convert current line back to a string and store it in + # list of all lines (return value). + lines.append(indent + ''.join(cur_line)) + else: + while cur_line: + if (cur_line[-1].strip() and + cur_len + len(self.placeholder) <= width): + cur_line.append(self.placeholder) + lines.append(indent + ''.join(cur_line)) + break + cur_len -= len(cur_line[-1]) + del cur_line[-1] + else: + if lines: + prev_line = lines[-1].rstrip() + if (len(prev_line) + len(self.placeholder) <= + self.width): + lines[-1] = prev_line + self.placeholder + break + lines.append(indent + self.placeholder.lstrip()) + break + + return lines + + def _split_chunks(self, text): + text = self._munge_whitespace(text) + return self._split(text) + + # -- Public interface ---------------------------------------------- + + def wrap(self, text): + """wrap(text : string) -> [string] + + Reformat the single paragraph in 'text' so it fits in lines of + no more than 'self.width' columns, and return a list of wrapped + lines. Tabs in 'text' are expanded with string.expandtabs(), + and all other whitespace characters (including newline) are + converted to space. + """ + chunks = self._split_chunks(text) + if self.fix_sentence_endings: + self._fix_sentence_endings(chunks) + return self._wrap_chunks(chunks) + + def fill(self, text): + """fill(text : string) -> string + + Reformat the single paragraph in 'text' to fit in lines of no + more than 'self.width' columns, and return a new string + containing the entire wrapped paragraph. + """ + return "\n".join(self.wrap(text)) + + +# -- Convenience interface --------------------------------------------- + +def wrap(text, width=70, **kwargs): + """Wrap a single paragraph of text, returning a list of wrapped lines. + + Reformat the single paragraph in 'text' so it fits in lines of no + more than 'width' columns, and return a list of wrapped lines. By + default, tabs in 'text' are expanded with string.expandtabs(), and + all other whitespace characters (including newline) are converted to + space. See TextWrapper class for available keyword args to customize + wrapping behaviour. 
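    A rough doctest-style sketch (illustrative inputs; outputs assume the
    default settings):

        >>> wrap("The quick brown fox jumps over the lazy dog", width=15)
        ['The quick brown', 'fox jumps over', 'the lazy dog']
        >>> wrap("extraordinarily long", width=8)   # long words are broken
        ['extraord', 'inarily', 'long']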
+ """ + w = TextWrapper(width=width, **kwargs) + return w.wrap(text) + +def fill(text, width=70, **kwargs): + """Fill a single paragraph of text, returning a new string. + + Reformat the single paragraph in 'text' to fit in lines of no more + than 'width' columns, and return a new string containing the entire + wrapped paragraph. As with wrap(), tabs are expanded and other + whitespace characters converted to space. See TextWrapper class for + available keyword args to customize wrapping behaviour. + """ + w = TextWrapper(width=width, **kwargs) + return w.fill(text) + +def shorten(text, width, **kwargs): + """Collapse and truncate the given text to fit in the given width. + + The text first has its whitespace collapsed. If it then fits in + the *width*, it is returned as is. Otherwise, as many words + as possible are joined and then the placeholder is appended:: + + >>> textwrap.shorten("Hello world!", width=12) + 'Hello world!' + >>> textwrap.shorten("Hello world!", width=11) + 'Hello [...]' + """ + w = TextWrapper(width=width, max_lines=1, **kwargs) + return w.fill(' '.join(text.strip().split())) + + +# -- Loosely related functionality ------------------------------------- + +_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE) +_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE) + +def dedent(text): + """Remove any common leading whitespace from every line in `text`. + + This can be used to make triple-quoted strings line up with the left + edge of the display, while still presenting them in the source code + in indented form. + + Note that tabs and spaces are both treated as whitespace, but they + are not equal: the lines " hello" and "\\thello" are + considered to have no common leading whitespace. + + Entirely blank lines are normalized to a newline character. + """ + # Look for the longest leading string of spaces and tabs common to + # all lines. + margin = None + text = _whitespace_only_re.sub('', text) + indents = _leading_whitespace_re.findall(text) + for indent in indents: + if margin is None: + margin = indent + + # Current line more deeply indented than previous winner: + # no change (previous winner is still on top). + elif indent.startswith(margin): + pass + + # Current line consistent with and no deeper than previous winner: + # it's the new winner. + elif margin.startswith(indent): + margin = indent + + # Find the largest common whitespace between current line and previous + # winner. + else: + for i, (x, y) in enumerate(zip(margin, indent)): + if x != y: + margin = margin[:i] + break + + # sanity check (testing/debugging only) + if 0 and margin: + for line in text.split("\n"): + assert not line or line.startswith(margin), \ + "line = %r, margin = %r" % (line, margin) + + if margin: + text = re.sub(r'(?m)^' + margin, '', text) + return text + + +def indent(text, prefix, predicate=None): + """Adds 'prefix' to the beginning of selected lines in 'text'. + + If 'predicate' is provided, 'prefix' will only be added to the lines + where 'predicate(line)' is True. If 'predicate' is not provided, + it will default to adding 'prefix' to all non-empty lines that do not + consist solely of whitespace characters. 
+ """ + if predicate is None: + def predicate(line): + return line.strip() + + def prefixed_lines(): + for line in text.splitlines(True): + yield (prefix + line if predicate(line) else line) + return ''.join(prefixed_lines()) + + +if __name__ == "__main__": + #print dedent("\tfoo\n\tbar") + #print dedent(" \thello there\n \t how are you?") + print(dedent("Hello there.\n This is indented.")) diff --git a/evalkit_cambrian/lib/python3.10/this.py b/evalkit_cambrian/lib/python3.10/this.py new file mode 100644 index 0000000000000000000000000000000000000000..e68dd3ff39b04ff857420b98889bee590b344024 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/this.py @@ -0,0 +1,28 @@ +s = """Gur Mra bs Clguba, ol Gvz Crgref + +Ornhgvshy vf orggre guna htyl. +Rkcyvpvg vf orggre guna vzcyvpvg. +Fvzcyr vf orggre guna pbzcyrk. +Pbzcyrk vf orggre guna pbzcyvpngrq. +Syng vf orggre guna arfgrq. +Fcnefr vf orggre guna qrafr. +Ernqnovyvgl pbhagf. +Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf. +Nygubhtu cenpgvpnyvgl orngf chevgl. +Reebef fubhyq arire cnff fvyragyl. +Hayrff rkcyvpvgyl fvyraprq. +Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff. +Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg. +Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu. +Abj vf orggre guna arire. +Nygubhtu arire vf bsgra orggre guna *evtug* abj. +Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn. +Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn. +Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!""" + +d = {} +for c in (65, 97): + for i in range(26): + d[chr(i+c)] = chr((i+13) % 26 + c) + +print("".join([d.get(c, c) for c in s])) diff --git a/evalkit_cambrian/lib/python3.10/token.py b/evalkit_cambrian/lib/python3.10/token.py new file mode 100644 index 0000000000000000000000000000000000000000..9d0c0bf0fb0368e15170336a0c60b17f5fa4da40 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/token.py @@ -0,0 +1,137 @@ +"""Token constants.""" +# Auto-generated by Tools/scripts/generate_token.py + +__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF'] + +ENDMARKER = 0 +NAME = 1 +NUMBER = 2 +STRING = 3 +NEWLINE = 4 +INDENT = 5 +DEDENT = 6 +LPAR = 7 +RPAR = 8 +LSQB = 9 +RSQB = 10 +COLON = 11 +COMMA = 12 +SEMI = 13 +PLUS = 14 +MINUS = 15 +STAR = 16 +SLASH = 17 +VBAR = 18 +AMPER = 19 +LESS = 20 +GREATER = 21 +EQUAL = 22 +DOT = 23 +PERCENT = 24 +LBRACE = 25 +RBRACE = 26 +EQEQUAL = 27 +NOTEQUAL = 28 +LESSEQUAL = 29 +GREATEREQUAL = 30 +TILDE = 31 +CIRCUMFLEX = 32 +LEFTSHIFT = 33 +RIGHTSHIFT = 34 +DOUBLESTAR = 35 +PLUSEQUAL = 36 +MINEQUAL = 37 +STAREQUAL = 38 +SLASHEQUAL = 39 +PERCENTEQUAL = 40 +AMPEREQUAL = 41 +VBAREQUAL = 42 +CIRCUMFLEXEQUAL = 43 +LEFTSHIFTEQUAL = 44 +RIGHTSHIFTEQUAL = 45 +DOUBLESTAREQUAL = 46 +DOUBLESLASH = 47 +DOUBLESLASHEQUAL = 48 +AT = 49 +ATEQUAL = 50 +RARROW = 51 +ELLIPSIS = 52 +COLONEQUAL = 53 +OP = 54 +AWAIT = 55 +ASYNC = 56 +TYPE_IGNORE = 57 +TYPE_COMMENT = 58 +SOFT_KEYWORD = 59 +# These aren't used by the C tokenizer but are needed for tokenize.py +ERRORTOKEN = 60 +COMMENT = 61 +NL = 62 +ENCODING = 63 +N_TOKENS = 64 +# Special definitions for cooperation with parser +NT_OFFSET = 256 + +tok_name = {value: name + for name, value in globals().items() + if isinstance(value, int) and not name.startswith('_')} +__all__.extend(tok_name.values()) + +EXACT_TOKEN_TYPES = { + '!=': NOTEQUAL, + '%': PERCENT, + '%=': PERCENTEQUAL, + '&': AMPER, + '&=': AMPEREQUAL, + '(': LPAR, + ')': RPAR, + '*': STAR, + '**': DOUBLESTAR, + '**=': 
DOUBLESTAREQUAL, + '*=': STAREQUAL, + '+': PLUS, + '+=': PLUSEQUAL, + ',': COMMA, + '-': MINUS, + '-=': MINEQUAL, + '->': RARROW, + '.': DOT, + '...': ELLIPSIS, + '/': SLASH, + '//': DOUBLESLASH, + '//=': DOUBLESLASHEQUAL, + '/=': SLASHEQUAL, + ':': COLON, + ':=': COLONEQUAL, + ';': SEMI, + '<': LESS, + '<<': LEFTSHIFT, + '<<=': LEFTSHIFTEQUAL, + '<=': LESSEQUAL, + '=': EQUAL, + '==': EQEQUAL, + '>': GREATER, + '>=': GREATEREQUAL, + '>>': RIGHTSHIFT, + '>>=': RIGHTSHIFTEQUAL, + '@': AT, + '@=': ATEQUAL, + '[': LSQB, + ']': RSQB, + '^': CIRCUMFLEX, + '^=': CIRCUMFLEXEQUAL, + '{': LBRACE, + '|': VBAR, + '|=': VBAREQUAL, + '}': RBRACE, + '~': TILDE, +} + +def ISTERMINAL(x): + return x < NT_OFFSET + +def ISNONTERMINAL(x): + return x >= NT_OFFSET + +def ISEOF(x): + return x == ENDMARKER diff --git a/evalkit_cambrian/lib/python3.10/turtle.py b/evalkit_cambrian/lib/python3.10/turtle.py new file mode 100644 index 0000000000000000000000000000000000000000..d287c15543528a8c24609ea1ef7b479ffc82d35f --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/turtle.py @@ -0,0 +1,4141 @@ +# +# turtle.py: a Tkinter based turtle graphics module for Python +# Version 1.1b - 4. 5. 2009 +# +# Copyright (C) 2006 - 2010 Gregor Lingl +# email: glingl@aon.at +# +# This software is provided 'as-is', without any express or implied +# warranty. In no event will the authors be held liable for any damages +# arising from the use of this software. +# +# Permission is granted to anyone to use this software for any purpose, +# including commercial applications, and to alter it and redistribute it +# freely, subject to the following restrictions: +# +# 1. The origin of this software must not be misrepresented; you must not +# claim that you wrote the original software. If you use this software +# in a product, an acknowledgment in the product documentation would be +# appreciated but is not required. +# 2. Altered source versions must be plainly marked as such, and must not be +# misrepresented as being the original software. +# 3. This notice may not be removed or altered from any source distribution. + + +""" +Turtle graphics is a popular way for introducing programming to +kids. It was part of the original Logo programming language developed +by Wally Feurzig and Seymour Papert in 1966. + +Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it +the command turtle.forward(15), and it moves (on-screen!) 15 pixels in +the direction it is facing, drawing a line as it moves. Give it the +command turtle.right(25), and it rotates in-place 25 degrees clockwise. + +By combining together these and similar commands, intricate shapes and +pictures can easily be drawn. + +----- turtle.py + +This module is an extended reimplementation of turtle.py from the +Python standard distribution up to Python 2.5. (See: https://www.python.org) + +It tries to keep the merits of turtle.py and to be (nearly) 100% +compatible with it. This means in the first place to enable the +learning programmer to use all the commands, classes and methods +interactively when using the module from within IDLE run with +the -n switch. + +Roughly it has the following features added: + +- Better animation of the turtle movements, especially of turning the + turtle. So the turtles can more easily be used as a visual feedback + instrument by the (beginning) programmer. + +- Different turtle shapes, gif-images as turtle shapes, user defined + and user controllable turtle shapes, among them compound + (multicolored) shapes. 
Turtle shapes can be stretched and tilted, which + makes turtles very versatile geometrical objects. + +- Fine control over turtle movement and screen updates via delay(), + and enhanced tracer() and speed() methods. + +- Aliases for the most commonly used commands, like fd for forward etc., + following the early Logo traditions. This reduces the boring work of + typing long sequences of commands, which often occur in a natural way + when kids try to program fancy pictures on their first encounter with + turtle graphics. + +- Turtles now have an undo()-method with configurable undo-buffer. + +- Some simple commands/methods for creating event driven programs + (mouse-, key-, timer-events). Especially useful for programming games. + +- A scrollable Canvas class. The default scrollable Canvas can be + extended interactively as needed while playing around with the turtle(s). + +- A TurtleScreen class with methods controlling background color or + background image, window and canvas size and other properties of the + TurtleScreen. + +- There is a method, setworldcoordinates(), to install a user defined + coordinate-system for the TurtleScreen. + +- The implementation uses a 2-vector class named Vec2D, derived from tuple. + This class is public, so it can be imported by the application programmer, + which makes certain types of computations very natural and compact. + +- Appearance of the TurtleScreen and the Turtles at startup/import can be + configured by means of a turtle.cfg configuration file. + The default configuration mimics the appearance of the old turtle module. + +- If configured appropriately the module reads in docstrings from a docstring + dictionary in some different language, supplied separately and replaces + the English ones by those read in. There is a utility function + write_docstringdict() to write a dictionary with the original (English) + docstrings to disc, so it can serve as a template for translations. + +Behind the scenes there are some features included with possible +extensions in mind. These will be commented and documented elsewhere. + +""" + +_ver = "turtle 1.1b- - for Python 3.1 - 4. 5. 
2009" + +# print(_ver) + +import tkinter as TK +import types +import math +import time +import inspect +import sys + +from os.path import isfile, split, join +from copy import deepcopy +from tkinter import simpledialog + +_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen', + 'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D'] +_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye', + 'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas', + 'getshapes', 'listen', 'mainloop', 'mode', 'numinput', + 'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer', + 'register_shape', 'resetscreen', 'screensize', 'setup', + 'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update', + 'window_height', 'window_width'] +_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk', + 'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color', + 'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd', + 'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly', + 'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown', + 'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd', + 'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position', + 'pu', 'radians', 'right', 'reset', 'resizemode', 'rt', + 'seth', 'setheading', 'setpos', 'setposition', 'settiltangle', + 'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle', + 'speed', 'st', 'stamp', 'tilt', 'tiltangle', 'towards', + 'turtlesize', 'undo', 'undobufferentries', 'up', 'width', + 'write', 'xcor', 'ycor'] +_tg_utilities = ['write_docstringdict', 'done'] + +__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions + + _tg_utilities + ['Terminator']) # + _math_functions) + +_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos', + 'pu', 'rt', 'seth', 'setpos', 'setposition', 'st', + 'turtlesize', 'up', 'width'] + +_CFG = {"width" : 0.5, # Screen + "height" : 0.75, + "canvwidth" : 400, + "canvheight": 300, + "leftright": None, + "topbottom": None, + "mode": "standard", # TurtleScreen + "colormode": 1.0, + "delay": 10, + "undobuffersize": 1000, # RawTurtle + "shape": "classic", + "pencolor" : "black", + "fillcolor" : "black", + "resizemode" : "noresize", + "visible" : True, + "language": "english", # docstrings + "exampleturtle": "turtle", + "examplescreen": "screen", + "title": "Python Turtle Graphics", + "using_IDLE": False + } + +def config_dict(filename): + """Convert content of config-file into dictionary.""" + with open(filename, "r") as f: + cfglines = f.readlines() + cfgdict = {} + for line in cfglines: + line = line.strip() + if not line or line.startswith("#"): + continue + try: + key, value = line.split("=") + except ValueError: + print("Bad line in config-file %s:\n%s" % (filename,line)) + continue + key = key.strip() + value = value.strip() + if value in ["True", "False", "None", "''", '""']: + value = eval(value) + else: + try: + if "." in value: + value = float(value) + else: + value = int(value) + except ValueError: + pass # value need not be converted + cfgdict[key] = value + return cfgdict + +def readconfig(cfgdict): + """Read config-files, change configuration-dict accordingly. + + If there is a turtle.cfg file in the current working directory, + read it from there. 
If this contains an importconfig-value, + say 'myway', construct filename turtle_mayway.cfg else use + turtle.cfg and read it from the import-directory, where + turtle.py is located. + Update configuration dictionary first according to config-file, + in the import directory, then according to config-file in the + current working directory. + If no config-file is found, the default configuration is used. + """ + default_cfg = "turtle.cfg" + cfgdict1 = {} + cfgdict2 = {} + if isfile(default_cfg): + cfgdict1 = config_dict(default_cfg) + if "importconfig" in cfgdict1: + default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"] + try: + head, tail = split(__file__) + cfg_file2 = join(head, default_cfg) + except Exception: + cfg_file2 = "" + if isfile(cfg_file2): + cfgdict2 = config_dict(cfg_file2) + _CFG.update(cfgdict2) + _CFG.update(cfgdict1) + +try: + readconfig(_CFG) +except Exception: + print ("No configfile read, reason unknown") + + +class Vec2D(tuple): + """A 2 dimensional vector class, used as a helper class + for implementing turtle graphics. + May be useful for turtle graphics programs also. + Derived from tuple, so a vector is a tuple! + + Provides (for a, b vectors, k number): + a+b vector addition + a-b vector subtraction + a*b inner product + k*a and a*k multiplication with scalar + |a| absolute value of a + a.rotate(angle) rotation + """ + def __new__(cls, x, y): + return tuple.__new__(cls, (x, y)) + def __add__(self, other): + return Vec2D(self[0]+other[0], self[1]+other[1]) + def __mul__(self, other): + if isinstance(other, Vec2D): + return self[0]*other[0]+self[1]*other[1] + return Vec2D(self[0]*other, self[1]*other) + def __rmul__(self, other): + if isinstance(other, int) or isinstance(other, float): + return Vec2D(self[0]*other, self[1]*other) + return NotImplemented + def __sub__(self, other): + return Vec2D(self[0]-other[0], self[1]-other[1]) + def __neg__(self): + return Vec2D(-self[0], -self[1]) + def __abs__(self): + return math.hypot(*self) + def rotate(self, angle): + """rotate self counterclockwise by angle + """ + perp = Vec2D(-self[1], self[0]) + angle = math.radians(angle) + c, s = math.cos(angle), math.sin(angle) + return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s) + def __getnewargs__(self): + return (self[0], self[1]) + def __repr__(self): + return "(%.2f,%.2f)" % self + + +############################################################################## +### From here up to line : Tkinter - Interface for turtle.py ### +### May be replaced by an interface to some different graphics toolkit ### +############################################################################## + +## helper functions for Scrolled Canvas, to forward Canvas-methods +## to ScrolledCanvas class + +def __methodDict(cls, _dict): + """helper function for Scrolled Canvas""" + baseList = list(cls.__bases__) + baseList.reverse() + for _super in baseList: + __methodDict(_super, _dict) + for key, value in cls.__dict__.items(): + if type(value) == types.FunctionType: + _dict[key] = value + +def __methods(cls): + """helper function for Scrolled Canvas""" + _dict = {} + __methodDict(cls, _dict) + return _dict.keys() + +__stringBody = ( + 'def %(method)s(self, *args, **kw): return ' + + 'self.%(attribute)s.%(method)s(*args, **kw)') + +def __forwardmethods(fromClass, toClass, toPart, exclude = ()): + ### MANY CHANGES ### + _dict_1 = {} + __methodDict(toClass, _dict_1) + _dict = {} + mfc = __methods(fromClass) + for ex in _dict_1.keys(): + if ex[:1] == '_' or ex[-1:] == '_' or ex in exclude or ex in 
mfc: + pass + else: + _dict[ex] = _dict_1[ex] + + for method, func in _dict.items(): + d = {'method': method, 'func': func} + if isinstance(toPart, str): + execString = \ + __stringBody % {'method' : method, 'attribute' : toPart} + exec(execString, d) + setattr(fromClass, method, d[method]) ### NEWU! + + +class ScrolledCanvas(TK.Frame): + """Modeled after the scrolled canvas class from Grayons's Tkinter book. + + Used as the default canvas, which pops up automatically when + using turtle graphics functions or the Turtle class. + """ + def __init__(self, master, width=500, height=350, + canvwidth=600, canvheight=500): + TK.Frame.__init__(self, master, width=width, height=height) + self._rootwindow = self.winfo_toplevel() + self.width, self.height = width, height + self.canvwidth, self.canvheight = canvwidth, canvheight + self.bg = "white" + self._canvas = TK.Canvas(master, width=width, height=height, + bg=self.bg, relief=TK.SUNKEN, borderwidth=2) + self.hscroll = TK.Scrollbar(master, command=self._canvas.xview, + orient=TK.HORIZONTAL) + self.vscroll = TK.Scrollbar(master, command=self._canvas.yview) + self._canvas.configure(xscrollcommand=self.hscroll.set, + yscrollcommand=self.vscroll.set) + self.rowconfigure(0, weight=1, minsize=0) + self.columnconfigure(0, weight=1, minsize=0) + self._canvas.grid(padx=1, in_ = self, pady=1, row=0, + column=0, rowspan=1, columnspan=1, sticky='news') + self.vscroll.grid(padx=1, in_ = self, pady=1, row=0, + column=1, rowspan=1, columnspan=1, sticky='news') + self.hscroll.grid(padx=1, in_ = self, pady=1, row=1, + column=0, rowspan=1, columnspan=1, sticky='news') + self.reset() + self._rootwindow.bind('', self.onResize) + + def reset(self, canvwidth=None, canvheight=None, bg = None): + """Adjust canvas and scrollbars according to given canvas size.""" + if canvwidth: + self.canvwidth = canvwidth + if canvheight: + self.canvheight = canvheight + if bg: + self.bg = bg + self._canvas.config(bg=bg, + scrollregion=(-self.canvwidth//2, -self.canvheight//2, + self.canvwidth//2, self.canvheight//2)) + self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) / + self.canvwidth) + self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) / + self.canvheight) + self.adjustScrolls() + + + def adjustScrolls(self): + """ Adjust scrollbars according to window- and canvas-size. + """ + cwidth = self._canvas.winfo_width() + cheight = self._canvas.winfo_height() + self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth) + self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight) + if cwidth < self.canvwidth or cheight < self.canvheight: + self.hscroll.grid(padx=1, in_ = self, pady=1, row=1, + column=0, rowspan=1, columnspan=1, sticky='news') + self.vscroll.grid(padx=1, in_ = self, pady=1, row=0, + column=1, rowspan=1, columnspan=1, sticky='news') + else: + self.hscroll.grid_forget() + self.vscroll.grid_forget() + + def onResize(self, event): + """self-explanatory""" + self.adjustScrolls() + + def bbox(self, *args): + """ 'forward' method, which canvas itself has inherited... + """ + return self._canvas.bbox(*args) + + def cget(self, *args, **kwargs): + """ 'forward' method, which canvas itself has inherited... + """ + return self._canvas.cget(*args, **kwargs) + + def config(self, *args, **kwargs): + """ 'forward' method, which canvas itself has inherited... + """ + self._canvas.config(*args, **kwargs) + + def bind(self, *args, **kwargs): + """ 'forward' method, which canvas itself has inherited... 
+ """ + self._canvas.bind(*args, **kwargs) + + def unbind(self, *args, **kwargs): + """ 'forward' method, which canvas itself has inherited... + """ + self._canvas.unbind(*args, **kwargs) + + def focus_force(self): + """ 'forward' method, which canvas itself has inherited... + """ + self._canvas.focus_force() + +__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas') + + +class _Root(TK.Tk): + """Root class for Screen based on Tkinter.""" + def __init__(self): + TK.Tk.__init__(self) + + def setupcanvas(self, width, height, cwidth, cheight): + self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight) + self._canvas.pack(expand=1, fill="both") + + def _getcanvas(self): + return self._canvas + + def set_geometry(self, width, height, startx, starty): + self.geometry("%dx%d%+d%+d"%(width, height, startx, starty)) + + def ondestroy(self, destroy): + self.wm_protocol("WM_DELETE_WINDOW", destroy) + + def win_width(self): + return self.winfo_screenwidth() + + def win_height(self): + return self.winfo_screenheight() + +Canvas = TK.Canvas + + +class TurtleScreenBase(object): + """Provide the basic graphics functionality. + Interface between Tkinter and turtle.py. + + To port turtle.py to some different graphics toolkit + a corresponding TurtleScreenBase class has to be implemented. + """ + + def _blankimage(self): + """return a blank image object + """ + img = TK.PhotoImage(width=1, height=1, master=self.cv) + img.blank() + return img + + def _image(self, filename): + """return an image object containing the + imagedata from a gif-file named filename. + """ + return TK.PhotoImage(file=filename, master=self.cv) + + def __init__(self, cv): + self.cv = cv + if isinstance(cv, ScrolledCanvas): + w = self.cv.canvwidth + h = self.cv.canvheight + else: # expected: ordinary TK.Canvas + w = int(self.cv.cget("width")) + h = int(self.cv.cget("height")) + self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 )) + self.canvwidth = w + self.canvheight = h + self.xscale = self.yscale = 1.0 + + def _createpoly(self): + """Create an invisible polygon item on canvas self.cv) + """ + return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="") + + def _drawpoly(self, polyitem, coordlist, fill=None, + outline=None, width=None, top=False): + """Configure polygonitem polyitem according to provided + arguments: + coordlist is sequence of coordinates + fill is filling color + outline is outline color + top is a boolean value, which specifies if polyitem + will be put on top of the canvas' displaylist so it + will not be covered by other items. + """ + cl = [] + for x, y in coordlist: + cl.append(x * self.xscale) + cl.append(-y * self.yscale) + self.cv.coords(polyitem, *cl) + if fill is not None: + self.cv.itemconfigure(polyitem, fill=fill) + if outline is not None: + self.cv.itemconfigure(polyitem, outline=outline) + if width is not None: + self.cv.itemconfigure(polyitem, width=width) + if top: + self.cv.tag_raise(polyitem) + + def _createline(self): + """Create an invisible line item on canvas self.cv) + """ + return self.cv.create_line(0, 0, 0, 0, fill="", width=2, + capstyle = TK.ROUND) + + def _drawline(self, lineitem, coordlist=None, + fill=None, width=None, top=False): + """Configure lineitem according to provided arguments: + coordlist is sequence of coordinates + fill is drawing color + width is width of drawn line. + top is a boolean value, which specifies if polyitem + will be put on top of the canvas' displaylist so it + will not be covered by other items. 
+ """ + if coordlist is not None: + cl = [] + for x, y in coordlist: + cl.append(x * self.xscale) + cl.append(-y * self.yscale) + self.cv.coords(lineitem, *cl) + if fill is not None: + self.cv.itemconfigure(lineitem, fill=fill) + if width is not None: + self.cv.itemconfigure(lineitem, width=width) + if top: + self.cv.tag_raise(lineitem) + + def _delete(self, item): + """Delete graphics item from canvas. + If item is"all" delete all graphics items. + """ + self.cv.delete(item) + + def _update(self): + """Redraw graphics items on canvas + """ + self.cv.update() + + def _delay(self, delay): + """Delay subsequent canvas actions for delay ms.""" + self.cv.after(delay) + + def _iscolorstring(self, color): + """Check if the string color is a legal Tkinter color string. + """ + try: + rgb = self.cv.winfo_rgb(color) + ok = True + except TK.TclError: + ok = False + return ok + + def _bgcolor(self, color=None): + """Set canvas' backgroundcolor if color is not None, + else return backgroundcolor.""" + if color is not None: + self.cv.config(bg = color) + self._update() + else: + return self.cv.cget("bg") + + def _write(self, pos, txt, align, font, pencolor): + """Write txt at pos in canvas with specified font + and color. + Return text item and x-coord of right bottom corner + of text's bounding box.""" + x, y = pos + x = x * self.xscale + y = y * self.yscale + anchor = {"left":"sw", "center":"s", "right":"se" } + item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align], + fill = pencolor, font = font) + x0, y0, x1, y1 = self.cv.bbox(item) + return item, x1-1 + +## def _dot(self, pos, size, color): +## """may be implemented for some other graphics toolkit""" + + def _onclick(self, item, fun, num=1, add=None): + """Bind fun to mouse-click event on turtle. + fun must be a function with two arguments, the coordinates + of the clicked point on the canvas. + num, the number of the mouse-button defaults to 1 + """ + if fun is None: + self.cv.tag_unbind(item, "" % num) + else: + def eventfun(event): + x, y = (self.cv.canvasx(event.x)/self.xscale, + -self.cv.canvasy(event.y)/self.yscale) + fun(x, y) + self.cv.tag_bind(item, "" % num, eventfun, add) + + def _onrelease(self, item, fun, num=1, add=None): + """Bind fun to mouse-button-release event on turtle. + fun must be a function with two arguments, the coordinates + of the point on the canvas where mouse button is released. + num, the number of the mouse-button defaults to 1 + + If a turtle is clicked, first _onclick-event will be performed, + then _onscreensclick-event. + """ + if fun is None: + self.cv.tag_unbind(item, "" % num) + else: + def eventfun(event): + x, y = (self.cv.canvasx(event.x)/self.xscale, + -self.cv.canvasy(event.y)/self.yscale) + fun(x, y) + self.cv.tag_bind(item, "" % num, + eventfun, add) + + def _ondrag(self, item, fun, num=1, add=None): + """Bind fun to mouse-move-event (with pressed mouse button) on turtle. + fun must be a function with two arguments, the coordinates of the + actual mouse position on the canvas. + num, the number of the mouse-button defaults to 1 + + Every sequence of mouse-move-events on a turtle is preceded by a + mouse-click event on that turtle. 
+ """ + if fun is None: + self.cv.tag_unbind(item, "" % num) + else: + def eventfun(event): + try: + x, y = (self.cv.canvasx(event.x)/self.xscale, + -self.cv.canvasy(event.y)/self.yscale) + fun(x, y) + except Exception: + pass + self.cv.tag_bind(item, "" % num, eventfun, add) + + def _onscreenclick(self, fun, num=1, add=None): + """Bind fun to mouse-click event on canvas. + fun must be a function with two arguments, the coordinates + of the clicked point on the canvas. + num, the number of the mouse-button defaults to 1 + + If a turtle is clicked, first _onclick-event will be performed, + then _onscreensclick-event. + """ + if fun is None: + self.cv.unbind("" % num) + else: + def eventfun(event): + x, y = (self.cv.canvasx(event.x)/self.xscale, + -self.cv.canvasy(event.y)/self.yscale) + fun(x, y) + self.cv.bind("" % num, eventfun, add) + + def _onkeyrelease(self, fun, key): + """Bind fun to key-release event of key. + Canvas must have focus. See method listen + """ + if fun is None: + self.cv.unbind("" % key, None) + else: + def eventfun(event): + fun() + self.cv.bind("" % key, eventfun) + + def _onkeypress(self, fun, key=None): + """If key is given, bind fun to key-press event of key. + Otherwise bind fun to any key-press. + Canvas must have focus. See method listen. + """ + if fun is None: + if key is None: + self.cv.unbind("", None) + else: + self.cv.unbind("" % key, None) + else: + def eventfun(event): + fun() + if key is None: + self.cv.bind("", eventfun) + else: + self.cv.bind("" % key, eventfun) + + def _listen(self): + """Set focus on canvas (in order to collect key-events) + """ + self.cv.focus_force() + + def _ontimer(self, fun, t): + """Install a timer, which calls fun after t milliseconds. + """ + if t == 0: + self.cv.after_idle(fun) + else: + self.cv.after(t, fun) + + def _createimage(self, image): + """Create and return image item on canvas. + """ + return self.cv.create_image(0, 0, image=image) + + def _drawimage(self, item, pos, image): + """Configure image item as to draw image object + at position (x,y) on canvas) + """ + x, y = pos + self.cv.coords(item, (x * self.xscale, -y * self.yscale)) + self.cv.itemconfig(item, image=image) + + def _setbgpic(self, item, image): + """Configure image item as to draw image object + at center of canvas. Set item to the first item + in the displaylist, so it will be drawn below + any other item .""" + self.cv.itemconfig(item, image=image) + self.cv.tag_lower(item) + + def _type(self, item): + """Return 'line' or 'polygon' or 'image' depending on + type of item. 
+ """ + return self.cv.type(item) + + def _pointlist(self, item): + """returns list of coordinate-pairs of points of item + Example (for insiders): + >>> from turtle import * + >>> getscreen()._pointlist(getturtle().turtle._item) + [(0.0, 9.9999999999999982), (0.0, -9.9999999999999982), + (9.9999999999999982, 0.0)] + >>> """ + cl = self.cv.coords(item) + pl = [(cl[i], -cl[i+1]) for i in range(0, len(cl), 2)] + return pl + + def _setscrollregion(self, srx1, sry1, srx2, sry2): + self.cv.config(scrollregion=(srx1, sry1, srx2, sry2)) + + def _rescale(self, xscalefactor, yscalefactor): + items = self.cv.find_all() + for item in items: + coordinates = list(self.cv.coords(item)) + newcoordlist = [] + while coordinates: + x, y = coordinates[:2] + newcoordlist.append(x * xscalefactor) + newcoordlist.append(y * yscalefactor) + coordinates = coordinates[2:] + self.cv.coords(item, *newcoordlist) + + def _resize(self, canvwidth=None, canvheight=None, bg=None): + """Resize the canvas the turtles are drawing on. Does + not alter the drawing window. + """ + # needs amendment + if not isinstance(self.cv, ScrolledCanvas): + return self.canvwidth, self.canvheight + if canvwidth is canvheight is bg is None: + return self.cv.canvwidth, self.cv.canvheight + if canvwidth is not None: + self.canvwidth = canvwidth + if canvheight is not None: + self.canvheight = canvheight + self.cv.reset(canvwidth, canvheight, bg) + + def _window_size(self): + """ Return the width and height of the turtle window. + """ + width = self.cv.winfo_width() + if width <= 1: # the window isn't managed by a geometry manager + width = self.cv['width'] + height = self.cv.winfo_height() + if height <= 1: # the window isn't managed by a geometry manager + height = self.cv['height'] + return width, height + + def mainloop(self): + """Starts event loop - calling Tkinter's mainloop function. + + No argument. + + Must be last statement in a turtle graphics program. + Must NOT be used if a script is run from within IDLE in -n mode + (No subprocess) - for interactive use of turtle graphics. + + Example (for a TurtleScreen instance named screen): + >>> screen.mainloop() + + """ + self.cv.tk.mainloop() + + def textinput(self, title, prompt): + """Pop up a dialog window for input of a string. + + Arguments: title is the title of the dialog window, + prompt is a text mostly describing what information to input. + + Return the string input + If the dialog is canceled, return None. + + Example (for a TurtleScreen instance named screen): + >>> screen.textinput("NIM", "Name of first player:") + + """ + return simpledialog.askstring(title, prompt, parent=self.cv) + + def numinput(self, title, prompt, default=None, minval=None, maxval=None): + """Pop up a dialog window for input of a number. + + Arguments: title is the title of the dialog window, + prompt is a text mostly describing what numerical information to input. + default: default value + minval: minimum value for input + maxval: maximum value for input + + The number input must be in the range minval .. maxval if these are + given. If not, a hint is issued and the dialog remains open for + correction. Return the number input. + If the dialog is canceled, return None. 
+ + Example (for a TurtleScreen instance named screen): + >>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000) + + """ + return simpledialog.askfloat(title, prompt, initialvalue=default, + minvalue=minval, maxvalue=maxval, + parent=self.cv) + + +############################################################################## +### End of Tkinter - interface ### +############################################################################## + + +class Terminator (Exception): + """Will be raised in TurtleScreen.update, if _RUNNING becomes False. + + This stops execution of a turtle graphics script. + Main purpose: use in the Demo-Viewer turtle.Demo.py. + """ + pass + + +class TurtleGraphicsError(Exception): + """Some TurtleGraphics Error + """ + + +class Shape(object): + """Data structure modeling shapes. + + attribute _type is one of "polygon", "image", "compound" + attribute _data is - depending on _type a poygon-tuple, + an image or a list constructed using the addcomponent method. + """ + def __init__(self, type_, data=None): + self._type = type_ + if type_ == "polygon": + if isinstance(data, list): + data = tuple(data) + elif type_ == "image": + if isinstance(data, str): + if data.lower().endswith(".gif") and isfile(data): + data = TurtleScreen._image(data) + # else data assumed to be Photoimage + elif type_ == "compound": + data = [] + else: + raise TurtleGraphicsError("There is no shape type %s" % type_) + self._data = data + + def addcomponent(self, poly, fill, outline=None): + """Add component to a shape of type compound. + + Arguments: poly is a polygon, i. e. a tuple of number pairs. + fill is the fillcolor of the component, + outline is the outline color of the component. + + call (for a Shapeobject namend s): + -- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue") + + Example: + >>> poly = ((0,0),(10,-5),(0,10),(-10,-5)) + >>> s = Shape("compound") + >>> s.addcomponent(poly, "red", "blue") + >>> # .. add more components and then use register_shape() + """ + if self._type != "compound": + raise TurtleGraphicsError("Cannot add component to %s Shape" + % self._type) + if outline is None: + outline = fill + self._data.append([poly, fill, outline]) + + +class Tbuffer(object): + """Ring buffer used as undobuffer for RawTurtle objects.""" + def __init__(self, bufsize=10): + self.bufsize = bufsize + self.buffer = [[None]] * bufsize + self.ptr = -1 + self.cumulate = False + def reset(self, bufsize=None): + if bufsize is None: + for i in range(self.bufsize): + self.buffer[i] = [None] + else: + self.bufsize = bufsize + self.buffer = [[None]] * bufsize + self.ptr = -1 + def push(self, item): + if self.bufsize > 0: + if not self.cumulate: + self.ptr = (self.ptr + 1) % self.bufsize + self.buffer[self.ptr] = item + else: + self.buffer[self.ptr].append(item) + def pop(self): + if self.bufsize > 0: + item = self.buffer[self.ptr] + if item is None: + return None + else: + self.buffer[self.ptr] = [None] + self.ptr = (self.ptr - 1) % self.bufsize + return (item) + def nr_of_items(self): + return self.bufsize - self.buffer.count([None]) + def __repr__(self): + return str(self.buffer) + " " + str(self.ptr) + + + +class TurtleScreen(TurtleScreenBase): + """Provides screen oriented methods like bgcolor etc. + + Only relies upon the methods of TurtleScreenBase and NOT + upon components of the underlying graphics toolkit - + which is Tkinter in this case. 
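    A minimal construction sketch (illustrative; an ordinary Tkinter Canvas
    works as well as a ScrolledCanvas):

        >>> import tkinter
        >>> cv = tkinter.Canvas(tkinter.Tk(), width=500, height=350)
        >>> screen = TurtleScreen(cv)
        >>> screen.bgcolor("orange")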
+ """ + _RUNNING = True + + def __init__(self, cv, mode=_CFG["mode"], + colormode=_CFG["colormode"], delay=_CFG["delay"]): + TurtleScreenBase.__init__(self, cv) + + self._shapes = { + "arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))), + "turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7), + (-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6), + (-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6), + (5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10), + (2,14))), + "circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88), + (5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51), + (-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0), + (-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09), + (-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51), + (5.88,-8.09), (8.09,-5.88), (9.51,-3.09))), + "square" : Shape("polygon", ((10,-10), (10,10), (-10,10), + (-10,-10))), + "triangle" : Shape("polygon", ((10,-5.77), (0,11.55), + (-10,-5.77))), + "classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))), + "blank" : Shape("image", self._blankimage()) + } + + self._bgpics = {"nopic" : ""} + + self._mode = mode + self._delayvalue = delay + self._colormode = _CFG["colormode"] + self._keys = [] + self.clear() + if sys.platform == 'darwin': + # Force Turtle window to the front on OS X. This is needed because + # the Turtle window will show behind the Terminal window when you + # start the demo from the command line. + rootwindow = cv.winfo_toplevel() + rootwindow.call('wm', 'attributes', '.', '-topmost', '1') + rootwindow.call('wm', 'attributes', '.', '-topmost', '0') + + def clear(self): + """Delete all drawings and all turtles from the TurtleScreen. + + No argument. + + Reset empty TurtleScreen to its initial state: white background, + no backgroundimage, no eventbindings and tracing on. + + Example (for a TurtleScreen instance named screen): + >>> screen.clear() + + Note: this method is not available as function. + """ + self._delayvalue = _CFG["delay"] + self._colormode = _CFG["colormode"] + self._delete("all") + self._bgpic = self._createimage("") + self._bgpicname = "nopic" + self._tracing = 1 + self._updatecounter = 0 + self._turtles = [] + self.bgcolor("white") + for btn in 1, 2, 3: + self.onclick(None, btn) + self.onkeypress(None) + for key in self._keys[:]: + self.onkey(None, key) + self.onkeypress(None, key) + Turtle._pen = None + + def mode(self, mode=None): + """Set turtle-mode ('standard', 'logo' or 'world') and perform reset. + + Optional argument: + mode -- one of the strings 'standard', 'logo' or 'world' + + Mode 'standard' is compatible with turtle.py. + Mode 'logo' is compatible with most Logo-Turtle-Graphics. + Mode 'world' uses userdefined 'worldcoordinates'. *Attention*: in + this mode angles appear distorted if x/y unit-ratio doesn't equal 1. + If mode is not given, return the current mode. 
+ + Mode Initial turtle heading positive angles + ------------|-------------------------|------------------- + 'standard' to the right (east) counterclockwise + 'logo' upward (north) clockwise + + Examples: + >>> mode('logo') # resets turtle heading to north + >>> mode() + 'logo' + """ + if mode is None: + return self._mode + mode = mode.lower() + if mode not in ["standard", "logo", "world"]: + raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode) + self._mode = mode + if mode in ["standard", "logo"]: + self._setscrollregion(-self.canvwidth//2, -self.canvheight//2, + self.canvwidth//2, self.canvheight//2) + self.xscale = self.yscale = 1.0 + self.reset() + + def setworldcoordinates(self, llx, lly, urx, ury): + """Set up a user defined coordinate-system. + + Arguments: + llx -- a number, x-coordinate of lower left corner of canvas + lly -- a number, y-coordinate of lower left corner of canvas + urx -- a number, x-coordinate of upper right corner of canvas + ury -- a number, y-coordinate of upper right corner of canvas + + Set up user coodinat-system and switch to mode 'world' if necessary. + This performs a screen.reset. If mode 'world' is already active, + all drawings are redrawn according to the new coordinates. + + But ATTENTION: in user-defined coordinatesystems angles may appear + distorted. (see Screen.mode()) + + Example (for a TurtleScreen instance named screen): + >>> screen.setworldcoordinates(-10,-0.5,50,1.5) + >>> for _ in range(36): + ... left(10) + ... forward(0.5) + """ + if self.mode() != "world": + self.mode("world") + xspan = float(urx - llx) + yspan = float(ury - lly) + wx, wy = self._window_size() + self.screensize(wx-20, wy-20) + oldxscale, oldyscale = self.xscale, self.yscale + self.xscale = self.canvwidth / xspan + self.yscale = self.canvheight / yspan + srx1 = llx * self.xscale + sry1 = -ury * self.yscale + srx2 = self.canvwidth + srx1 + sry2 = self.canvheight + sry1 + self._setscrollregion(srx1, sry1, srx2, sry2) + self._rescale(self.xscale/oldxscale, self.yscale/oldyscale) + self.update() + + def register_shape(self, name, shape=None): + """Adds a turtle shape to TurtleScreen's shapelist. + + Arguments: + (1) name is the name of a gif-file and shape is None. + Installs the corresponding image shape. + !! Image-shapes DO NOT rotate when turning the turtle, + !! so they do not display the heading of the turtle! + (2) name is an arbitrary string and shape is a tuple + of pairs of coordinates. Installs the corresponding + polygon shape + (3) name is an arbitrary string and shape is a + (compound) Shape object. Installs the corresponding + compound shape. + To use a shape, you have to issue the command shape(shapename). + + call: register_shape("turtle.gif") + --or: register_shape("tri", ((0,0), (10,10), (-10,10))) + + Example (for a TurtleScreen instance named screen): + >>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3))) + + """ + if shape is None: + # image + if name.lower().endswith(".gif"): + shape = Shape("image", self._image(name)) + else: + raise TurtleGraphicsError("Bad arguments for register_shape.\n" + + "Use help(register_shape)" ) + elif isinstance(shape, tuple): + shape = Shape("polygon", shape) + ## else shape assumed to be Shape-instance + self._shapes[name] = shape + + def _colorstr(self, color): + """Return color string corresponding to args. + + Argument may be a string or a tuple of three + numbers corresponding to actual colormode, + i.e. in the range 0<=n<=colormode. 
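        A rough doctest-style sketch (illustrative, for a TurtleScreen
        instance named screen):

        >>> screen._colorstr((0.5, 0, 0.5))      # colormode 1.0 (default)
        '#800080'
        >>> screen.colormode(255)
        >>> screen._colorstr((64, 128, 255))
        '#4080ff'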
+ + If the argument doesn't represent a color, + an error is raised. + """ + if len(color) == 1: + color = color[0] + if isinstance(color, str): + if self._iscolorstring(color) or color == "": + return color + else: + raise TurtleGraphicsError("bad color string: %s" % str(color)) + try: + r, g, b = color + except (TypeError, ValueError): + raise TurtleGraphicsError("bad color arguments: %s" % str(color)) + if self._colormode == 1.0: + r, g, b = [round(255.0*x) for x in (r, g, b)] + if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)): + raise TurtleGraphicsError("bad color sequence: %s" % str(color)) + return "#%02x%02x%02x" % (r, g, b) + + def _color(self, cstr): + if not cstr.startswith("#"): + return cstr + if len(cstr) == 7: + cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)] + elif len(cstr) == 4: + cl = [16*int(cstr[h], 16) for h in cstr[1:]] + else: + raise TurtleGraphicsError("bad colorstring: %s" % cstr) + return tuple(c * self._colormode/255 for c in cl) + + def colormode(self, cmode=None): + """Return the colormode or set it to 1.0 or 255. + + Optional argument: + cmode -- one of the values 1.0 or 255 + + r, g, b values of colortriples have to be in range 0..cmode. + + Example (for a TurtleScreen instance named screen): + >>> screen.colormode() + 1.0 + >>> screen.colormode(255) + >>> pencolor(240,160,80) + """ + if cmode is None: + return self._colormode + if cmode == 1.0: + self._colormode = float(cmode) + elif cmode == 255: + self._colormode = int(cmode) + + def reset(self): + """Reset all Turtles on the Screen to their initial state. + + No argument. + + Example (for a TurtleScreen instance named screen): + >>> screen.reset() + """ + for turtle in self._turtles: + turtle._setmode(self._mode) + turtle.reset() + + def turtles(self): + """Return the list of turtles on the screen. + + Example (for a TurtleScreen instance named screen): + >>> screen.turtles() + [] + """ + return self._turtles + + def bgcolor(self, *args): + """Set or return backgroundcolor of the TurtleScreen. + + Arguments (if given): a color string or three numbers + in the range 0..colormode or a 3-tuple of such numbers. + + Example (for a TurtleScreen instance named screen): + >>> screen.bgcolor("orange") + >>> screen.bgcolor() + 'orange' + >>> screen.bgcolor(0.5,0,0.5) + >>> screen.bgcolor() + '#800080' + """ + if args: + color = self._colorstr(args) + else: + color = None + color = self._bgcolor(color) + if color is not None: + color = self._color(color) + return color + + def tracer(self, n=None, delay=None): + """Turns turtle animation on/off and set delay for update drawings. + + Optional arguments: + n -- nonnegative integer + delay -- nonnegative integer + + If n is given, only each n-th regular screen update is really performed. + (Can be used to accelerate the drawing of complex graphics.) + Second arguments sets delay value (see RawTurtle.delay()) + + Example (for a TurtleScreen instance named screen): + >>> screen.tracer(8, 25) + >>> dist = 2 + >>> for i in range(200): + ... fd(dist) + ... rt(90) + ... dist += 2 + """ + if n is None: + return self._tracing + self._tracing = int(n) + self._updatecounter = 0 + if delay is not None: + self._delayvalue = int(delay) + if self._tracing: + self.update() + + def delay(self, delay=None): + """ Return or set the drawing delay in milliseconds. 
+ + Optional argument: + delay -- positive integer + + Example (for a TurtleScreen instance named screen): + >>> screen.delay(15) + >>> screen.delay() + 15 + """ + if delay is None: + return self._delayvalue + self._delayvalue = int(delay) + + def _incrementudc(self): + """Increment update counter.""" + if not TurtleScreen._RUNNING: + TurtleScreen._RUNNING = True + raise Terminator + if self._tracing > 0: + self._updatecounter += 1 + self._updatecounter %= self._tracing + + def update(self): + """Perform a TurtleScreen update. + """ + tracing = self._tracing + self._tracing = True + for t in self.turtles(): + t._update_data() + t._drawturtle() + self._tracing = tracing + self._update() + + def window_width(self): + """ Return the width of the turtle window. + + Example (for a TurtleScreen instance named screen): + >>> screen.window_width() + 640 + """ + return self._window_size()[0] + + def window_height(self): + """ Return the height of the turtle window. + + Example (for a TurtleScreen instance named screen): + >>> screen.window_height() + 480 + """ + return self._window_size()[1] + + def getcanvas(self): + """Return the Canvas of this TurtleScreen. + + No argument. + + Example (for a Screen instance named screen): + >>> cv = screen.getcanvas() + >>> cv + + """ + return self.cv + + def getshapes(self): + """Return a list of names of all currently available turtle shapes. + + No argument. + + Example (for a TurtleScreen instance named screen): + >>> screen.getshapes() + ['arrow', 'blank', 'circle', ... , 'turtle'] + """ + return sorted(self._shapes.keys()) + + def onclick(self, fun, btn=1, add=None): + """Bind fun to mouse-click event on canvas. + + Arguments: + fun -- a function with two arguments, the coordinates of the + clicked point on the canvas. + btn -- the number of the mouse-button, defaults to 1 + + Example (for a TurtleScreen instance named screen) + + >>> screen.onclick(goto) + >>> # Subsequently clicking into the TurtleScreen will + >>> # make the turtle move to the clicked point. + >>> screen.onclick(None) + """ + self._onscreenclick(fun, btn, add) + + def onkey(self, fun, key): + """Bind fun to key-release event of key. + + Arguments: + fun -- a function with no arguments + key -- a string: key (e.g. "a") or key-symbol (e.g. "space") + + In order to be able to register key-events, TurtleScreen + must have focus. (See method listen.) + + Example (for a TurtleScreen instance named screen): + + >>> def f(): + ... fd(50) + ... lt(60) + ... + >>> screen.onkey(f, "Up") + >>> screen.listen() + + Subsequently the turtle can be moved by repeatedly pressing + the up-arrow key, consequently drawing a hexagon + + """ + if fun is None: + if key in self._keys: + self._keys.remove(key) + elif key not in self._keys: + self._keys.append(key) + self._onkeyrelease(fun, key) + + def onkeypress(self, fun, key=None): + """Bind fun to key-press event of key if key is given, + or to any key-press-event if no key is given. + + Arguments: + fun -- a function with no arguments + key -- a string: key (e.g. "a") or key-symbol (e.g. "space") + + In order to be able to register key-events, TurtleScreen + must have focus. (See method listen.) + + Example (for a TurtleScreen instance named screen + and a Turtle instance named turtle): + + >>> def f(): + ... fd(50) + ... lt(60) + ... + >>> screen.onkeypress(f, "Up") + >>> screen.listen() + + Subsequently the turtle can be moved by repeatedly pressing + the up-arrow key, or by keeping pressed the up-arrow key. + consequently drawing a hexagon. 
+ """ + if fun is None: + if key in self._keys: + self._keys.remove(key) + elif key is not None and key not in self._keys: + self._keys.append(key) + self._onkeypress(fun, key) + + def listen(self, xdummy=None, ydummy=None): + """Set focus on TurtleScreen (in order to collect key-events) + + No arguments. + Dummy arguments are provided in order + to be able to pass listen to the onclick method. + + Example (for a TurtleScreen instance named screen): + >>> screen.listen() + """ + self._listen() + + def ontimer(self, fun, t=0): + """Install a timer, which calls fun after t milliseconds. + + Arguments: + fun -- a function with no arguments. + t -- a number >= 0 + + Example (for a TurtleScreen instance named screen): + + >>> running = True + >>> def f(): + ... if running: + ... fd(50) + ... lt(60) + ... screen.ontimer(f, 250) + ... + >>> f() # makes the turtle marching around + >>> running = False + """ + self._ontimer(fun, t) + + def bgpic(self, picname=None): + """Set background image or return name of current backgroundimage. + + Optional argument: + picname -- a string, name of a gif-file or "nopic". + + If picname is a filename, set the corresponding image as background. + If picname is "nopic", delete backgroundimage, if present. + If picname is None, return the filename of the current backgroundimage. + + Example (for a TurtleScreen instance named screen): + >>> screen.bgpic() + 'nopic' + >>> screen.bgpic("landscape.gif") + >>> screen.bgpic() + 'landscape.gif' + """ + if picname is None: + return self._bgpicname + if picname not in self._bgpics: + self._bgpics[picname] = self._image(picname) + self._setbgpic(self._bgpic, self._bgpics[picname]) + self._bgpicname = picname + + def screensize(self, canvwidth=None, canvheight=None, bg=None): + """Resize the canvas the turtles are drawing on. + + Optional arguments: + canvwidth -- positive integer, new width of canvas in pixels + canvheight -- positive integer, new height of canvas in pixels + bg -- colorstring or color-tuple, new backgroundcolor + If no arguments are given, return current (canvaswidth, canvasheight) + + Do not alter the drawing window. To observe hidden parts of + the canvas use the scrollbars. (Can make visible those parts + of a drawing, which were outside the canvas before!) + + Example (for a Turtle instance named turtle): + >>> turtle.screensize(2000,1500) + >>> # e.g. to search for an erroneously escaped turtle ;-) + """ + return self._resize(canvwidth, canvheight, bg) + + onscreenclick = onclick + resetscreen = reset + clearscreen = clear + addshape = register_shape + onkeyrelease = onkey + +class TNavigator(object): + """Navigation part of the RawTurtle. + Implements methods for turtle movement. + """ + START_ORIENTATION = { + "standard": Vec2D(1.0, 0.0), + "world" : Vec2D(1.0, 0.0), + "logo" : Vec2D(0.0, 1.0) } + DEFAULT_MODE = "standard" + DEFAULT_ANGLEOFFSET = 0 + DEFAULT_ANGLEORIENT = 1 + + def __init__(self, mode=DEFAULT_MODE): + self._angleOffset = self.DEFAULT_ANGLEOFFSET + self._angleOrient = self.DEFAULT_ANGLEORIENT + self._mode = mode + self.undobuffer = None + self.degrees() + self._mode = None + self._setmode(mode) + TNavigator.reset(self) + + def reset(self): + """reset turtle to its initial values + + Will be overwritten by parent class + """ + self._position = Vec2D(0.0, 0.0) + self._orient = TNavigator.START_ORIENTATION[self._mode] + + def _setmode(self, mode=None): + """Set turtle-mode to 'standard', 'world' or 'logo'. 
+ """ + if mode is None: + return self._mode + if mode not in ["standard", "logo", "world"]: + return + self._mode = mode + if mode in ["standard", "world"]: + self._angleOffset = 0 + self._angleOrient = 1 + else: # mode == "logo": + self._angleOffset = self._fullcircle/4. + self._angleOrient = -1 + + def _setDegreesPerAU(self, fullcircle): + """Helper function for degrees() and radians()""" + self._fullcircle = fullcircle + self._degreesPerAU = 360/fullcircle + if self._mode == "standard": + self._angleOffset = 0 + else: + self._angleOffset = fullcircle/4. + + def degrees(self, fullcircle=360.0): + """ Set angle measurement units to degrees. + + Optional argument: + fullcircle - a number + + Set angle measurement units, i. e. set number + of 'degrees' for a full circle. Default value is + 360 degrees. + + Example (for a Turtle instance named turtle): + >>> turtle.left(90) + >>> turtle.heading() + 90 + + Change angle measurement unit to grad (also known as gon, + grade, or gradian and equals 1/100-th of the right angle.) + >>> turtle.degrees(400.0) + >>> turtle.heading() + 100 + + """ + self._setDegreesPerAU(fullcircle) + + def radians(self): + """ Set the angle measurement units to radians. + + No arguments. + + Example (for a Turtle instance named turtle): + >>> turtle.heading() + 90 + >>> turtle.radians() + >>> turtle.heading() + 1.5707963267948966 + """ + self._setDegreesPerAU(math.tau) + + def _go(self, distance): + """move turtle forward by specified distance""" + ende = self._position + self._orient * distance + self._goto(ende) + + def _rotate(self, angle): + """Turn turtle counterclockwise by specified angle if angle > 0.""" + angle *= self._degreesPerAU + self._orient = self._orient.rotate(angle) + + def _goto(self, end): + """move turtle to position end.""" + self._position = end + + def forward(self, distance): + """Move the turtle forward by the specified distance. + + Aliases: forward | fd + + Argument: + distance -- a number (integer or float) + + Move the turtle forward by the specified distance, in the direction + the turtle is headed. + + Example (for a Turtle instance named turtle): + >>> turtle.position() + (0.00, 0.00) + >>> turtle.forward(25) + >>> turtle.position() + (25.00,0.00) + >>> turtle.forward(-75) + >>> turtle.position() + (-50.00,0.00) + """ + self._go(distance) + + def back(self, distance): + """Move the turtle backward by distance. + + Aliases: back | backward | bk + + Argument: + distance -- a number + + Move the turtle backward by distance, opposite to the direction the + turtle is headed. Do not change the turtle's heading. + + Example (for a Turtle instance named turtle): + >>> turtle.position() + (0.00, 0.00) + >>> turtle.backward(30) + >>> turtle.position() + (-30.00, 0.00) + """ + self._go(-distance) + + def right(self, angle): + """Turn turtle right by angle units. + + Aliases: right | rt + + Argument: + angle -- a number (integer or float) + + Turn turtle right by angle units. (Units are by default degrees, + but can be set via the degrees() and radians() functions.) + Angle orientation depends on mode. (See this.) + + Example (for a Turtle instance named turtle): + >>> turtle.heading() + 22.0 + >>> turtle.right(45) + >>> turtle.heading() + 337.0 + """ + self._rotate(-angle) + + def left(self, angle): + """Turn turtle left by angle units. + + Aliases: left | lt + + Argument: + angle -- a number (integer or float) + + Turn turtle left by angle units. (Units are by default degrees, + but can be set via the degrees() and radians() functions.) 
+ Angle orientation depends on mode. (See this.) + + Example (for a Turtle instance named turtle): + >>> turtle.heading() + 22.0 + >>> turtle.left(45) + >>> turtle.heading() + 67.0 + """ + self._rotate(angle) + + def pos(self): + """Return the turtle's current location (x,y), as a Vec2D-vector. + + Aliases: pos | position + + No arguments. + + Example (for a Turtle instance named turtle): + >>> turtle.pos() + (0.00, 240.00) + """ + return self._position + + def xcor(self): + """ Return the turtle's x coordinate. + + No arguments. + + Example (for a Turtle instance named turtle): + >>> reset() + >>> turtle.left(60) + >>> turtle.forward(100) + >>> print(turtle.xcor()) + 50.0 + """ + return self._position[0] + + def ycor(self): + """ Return the turtle's y coordinate + --- + No arguments. + + Example (for a Turtle instance named turtle): + >>> reset() + >>> turtle.left(60) + >>> turtle.forward(100) + >>> print(turtle.ycor()) + 86.6025403784 + """ + return self._position[1] + + + def goto(self, x, y=None): + """Move turtle to an absolute position. + + Aliases: setpos | setposition | goto + + Arguments: + x -- a number or a pair/vector of numbers + y -- a number or None + + call: goto(x, y) # two coordinates + --or: goto((x, y)) # a pair (tuple) of coordinates + --or: goto(vec) # e.g. as returned by pos() + + Move turtle to an absolute position. If the pen is down, + a line will be drawn. The turtle's orientation does not change. + + Example (for a Turtle instance named turtle): + >>> tp = turtle.pos() + >>> tp + (0.00, 0.00) + >>> turtle.setpos(60,30) + >>> turtle.pos() + (60.00,30.00) + >>> turtle.setpos((20,80)) + >>> turtle.pos() + (20.00,80.00) + >>> turtle.setpos(tp) + >>> turtle.pos() + (0.00,0.00) + """ + if y is None: + self._goto(Vec2D(*x)) + else: + self._goto(Vec2D(x, y)) + + def home(self): + """Move turtle to the origin - coordinates (0,0). + + No arguments. + + Move turtle to the origin - coordinates (0,0) and set its + heading to its start-orientation (which depends on mode). + + Example (for a Turtle instance named turtle): + >>> turtle.home() + """ + self.goto(0, 0) + self.setheading(0) + + def setx(self, x): + """Set the turtle's first coordinate to x + + Argument: + x -- a number (integer or float) + + Set the turtle's first coordinate to x, leave second coordinate + unchanged. + + Example (for a Turtle instance named turtle): + >>> turtle.position() + (0.00, 240.00) + >>> turtle.setx(10) + >>> turtle.position() + (10.00, 240.00) + """ + self._goto(Vec2D(x, self._position[1])) + + def sety(self, y): + """Set the turtle's second coordinate to y + + Argument: + y -- a number (integer or float) + + Set the turtle's second coordinate to y, leave first coordinate + unchanged. + + Example (for a Turtle instance named turtle): + >>> turtle.position() + (0.00, 40.00) + >>> turtle.sety(-10) + >>> turtle.position() + (0.00, -10.00) + """ + self._goto(Vec2D(self._position[0], y)) + + def distance(self, x, y=None): + """Return the distance from the turtle to (x,y) in turtle step units. + + Arguments: + x -- a number or a pair/vector of numbers or a turtle instance + y -- a number or None + + call: distance(x, y) # two coordinates + --or: distance((x, y)) # a pair (tuple) of coordinates + --or: distance(vec) # e.g.
as returned by pos() + --or: distance(mypen) # where mypen is another turtle + + Example (for a Turtle instance named turtle): + >>> turtle.pos() + (0.00, 0.00) + >>> turtle.distance(30,40) + 50.0 + >>> pen = Turtle() + >>> pen.forward(77) + >>> turtle.distance(pen) + 77.0 + """ + if y is not None: + pos = Vec2D(x, y) + if isinstance(x, Vec2D): + pos = x + elif isinstance(x, tuple): + pos = Vec2D(*x) + elif isinstance(x, TNavigator): + pos = x._position + return abs(pos - self._position) + + def towards(self, x, y=None): + """Return the angle of the line from the turtle's position to (x, y). + + Arguments: + x -- a number or a pair/vector of numbers or a turtle instance + y -- a number or None + + call: towards(x, y) # two coordinates + --or: towards((x, y)) # a pair (tuple) of coordinates + --or: towards(vec) # e.g. as returned by pos() + --or: towards(mypen) # where mypen is another turtle + + Return the angle between the line from turtle-position to the position + specified by x, y and the turtle's start orientation. (Depends on + mode - "standard" or "logo".) + + Example (for a Turtle instance named turtle): + >>> turtle.pos() + (10.00, 10.00) + >>> turtle.towards(0,0) + 225.0 + """ + if y is not None: + pos = Vec2D(x, y) + if isinstance(x, Vec2D): + pos = x + elif isinstance(x, tuple): + pos = Vec2D(*x) + elif isinstance(x, TNavigator): + pos = x._position + x, y = pos - self._position + result = round(math.degrees(math.atan2(y, x)), 10) % 360.0 + result /= self._degreesPerAU + return (self._angleOffset + self._angleOrient*result) % self._fullcircle + + def heading(self): + """ Return the turtle's current heading. + + No arguments. + + Example (for a Turtle instance named turtle): + >>> turtle.left(67) + >>> turtle.heading() + 67.0 + """ + x, y = self._orient + result = round(math.degrees(math.atan2(y, x)), 10) % 360.0 + result /= self._degreesPerAU + return (self._angleOffset + self._angleOrient*result) % self._fullcircle + + def setheading(self, to_angle): + """Set the orientation of the turtle to to_angle. + + Aliases: setheading | seth + + Argument: + to_angle -- a number (integer or float) + + Set the orientation of the turtle to to_angle. + Here are some common directions in degrees: + + standard mode: logo mode: + -------------------|-------------------- + 0 - east 0 - north + 90 - north 90 - east + 180 - west 180 - south + 270 - south 270 - west + + Example (for a Turtle instance named turtle): + >>> turtle.setheading(90) + >>> turtle.heading() + 90 + """ + angle = (to_angle - self.heading())*self._angleOrient + full = self._fullcircle + angle = (angle+full/2.)%full - full/2. + self._rotate(angle) + + def circle(self, radius, extent = None, steps = None): + """ Draw a circle with given radius. + + Arguments: + radius -- a number + extent (optional) -- a number + steps (optional) -- an integer + + Draw a circle with given radius. The center is radius units left + of the turtle; extent - an angle - determines which part of the + circle is drawn. If extent is not given, draw the entire circle. + If extent is not a full circle, one endpoint of the arc is the + current pen position. Draw the arc in counterclockwise direction + if radius is positive, otherwise in clockwise direction. Finally + the direction of the turtle is changed by the amount of extent. + + As the circle is approximated by an inscribed regular polygon, + steps determines the number of steps to use. If not given, + it will be calculated automatically. May be used to draw regular + polygons.
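+ + If steps is omitted, it is derived from radius and extent as in the + implementation below: roughly 1 + int(min(11 + abs(radius)/6, 59) * extent/fullcircle) + segments.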
+ + call: circle(radius) # full circle + --or: circle(radius, extent) # arc + --or: circle(radius, extent, steps) + --or: circle(radius, steps=6) # 6-sided polygon + + Example (for a Turtle instance named turtle): + >>> turtle.circle(50) + >>> turtle.circle(120, 180) # semicircle + """ + if self.undobuffer: + self.undobuffer.push(["seq"]) + self.undobuffer.cumulate = True + speed = self.speed() + if extent is None: + extent = self._fullcircle + if steps is None: + frac = abs(extent)/self._fullcircle + steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac) + w = 1.0 * extent / steps + w2 = 0.5 * w + l = 2.0 * radius * math.sin(math.radians(w2)*self._degreesPerAU) + if radius < 0: + l, w, w2 = -l, -w, -w2 + tr = self._tracer() + dl = self._delay() + if speed == 0: + self._tracer(0, 0) + else: + self.speed(0) + self._rotate(w2) + for i in range(steps): + self.speed(speed) + self._go(l) + self.speed(0) + self._rotate(w) + self._rotate(-w2) + if speed == 0: + self._tracer(tr, dl) + self.speed(speed) + if self.undobuffer: + self.undobuffer.cumulate = False + +## three dummy methods to be implemented by child class: + + def speed(self, s=0): + """dummy method - to be overwritten by child class""" + def _tracer(self, a=None, b=None): + """dummy method - to be overwritten by child class""" + def _delay(self, n=None): + """dummy method - to be overwritten by child class""" + + fd = forward + bk = back + backward = back + rt = right + lt = left + position = pos + setpos = goto + setposition = goto + seth = setheading + + +class TPen(object): + """Drawing part of the RawTurtle. + Implements drawing properties. + """ + def __init__(self, resizemode=_CFG["resizemode"]): + self._resizemode = resizemode # or "user" or "noresize" + self.undobuffer = None + TPen._reset(self) + + def _reset(self, pencolor=_CFG["pencolor"], + fillcolor=_CFG["fillcolor"]): + self._pensize = 1 + self._shown = True + self._pencolor = pencolor + self._fillcolor = fillcolor + self._drawing = True + self._speed = 3 + self._stretchfactor = (1., 1.) + self._shearfactor = 0. + self._tilt = 0. + self._shapetrafo = (1., 0., 0., 1.) + self._outlinewidth = 1 + + def resizemode(self, rmode=None): + """Set resizemode to one of the values: "auto", "user", "noresize". + + (Optional) Argument: + rmode -- one of the strings "auto", "user", "noresize" + + Different resizemodes have the following effects: + - "auto" adapts the appearance of the turtle + corresponding to the value of pensize. + - "user" adapts the appearance of the turtle according to the + values of stretchfactor and outlinewidth (outline), + which are set by shapesize() + - "noresize" no adaption of the turtle's appearance takes place. + If no argument is given, return current resizemode. + resizemode("user") is called by a call of shapesize with arguments. + + + Examples (for a Turtle instance named turtle): + >>> turtle.resizemode("noresize") + >>> turtle.resizemode() + 'noresize' + """ + if rmode is None: + return self._resizemode + rmode = rmode.lower() + if rmode in ["auto", "user", "noresize"]: + self.pen(resizemode=rmode) + + def pensize(self, width=None): + """Set or return the line thickness. + + Aliases: pensize | width + + Argument: + width -- positive number + + Set the line thickness to width or return it. If resizemode is set + to "auto" and turtleshape is a polygon, that polygon is drawn with + the same line thickness. If no argument is given, current pensize + is returned. 
+ + Example (for a Turtle instance named turtle): + >>> turtle.pensize() + 1 + >>> turtle.pensize(10) # from here on lines of width 10 are drawn + """ + if width is None: + return self._pensize + self.pen(pensize=width) + + + def penup(self): + """Pull the pen up -- no drawing when moving. + + Aliases: penup | pu | up + + No argument + + Example (for a Turtle instance named turtle): + >>> turtle.penup() + """ + if not self._drawing: + return + self.pen(pendown=False) + + def pendown(self): + """Pull the pen down -- drawing when moving. + + Aliases: pendown | pd | down + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.pendown() + """ + if self._drawing: + return + self.pen(pendown=True) + + def isdown(self): + """Return True if pen is down, False if it's up. + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.penup() + >>> turtle.isdown() + False + >>> turtle.pendown() + >>> turtle.isdown() + True + """ + return self._drawing + + def speed(self, speed=None): + """ Return or set the turtle's speed. + + Optional argument: + speed -- an integer in the range 0..10 or a speedstring (see below) + + Set the turtle's speed to an integer value in the range 0 .. 10. + If no argument is given: return current speed. + + If input is a number greater than 10 or smaller than 0.5, + speed is set to 0. + Speedstrings are mapped to speedvalues in the following way: + 'fastest' : 0 + 'fast' : 10 + 'normal' : 6 + 'slow' : 3 + 'slowest' : 1 + speeds from 1 to 10 enforce increasingly faster animation of + line drawing and turtle turning. + + Attention: + speed = 0 : *no* animation takes place. forward/back makes turtle jump + and likewise left/right make the turtle turn instantly. + + Example (for a Turtle instance named turtle): + >>> turtle.speed(3) + """ + speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 } + if speed is None: + return self._speed + if speed in speeds: + speed = speeds[speed] + elif 0.5 < speed < 10.5: + speed = int(round(speed)) + else: + speed = 0 + self.pen(speed=speed) + + def color(self, *args): + """Return or set the pencolor and fillcolor. + + Arguments: + Several input formats are allowed. + They use 0, 1, 2, or 3 arguments as follows: + + color() + Return the current pencolor and the current fillcolor + as a pair of color specification strings as are returned + by pencolor and fillcolor. + color(colorstring), color((r,g,b)), color(r,g,b) + inputs as in pencolor, set both, fillcolor and pencolor, + to the given value. + color(colorstring1, colorstring2), + color((r1,g1,b1), (r2,g2,b2)) + equivalent to pencolor(colorstring1) and fillcolor(colorstring2) + and analogously, if the other input format is used. + + If turtleshape is a polygon, outline and interior of that polygon + is drawn with the newly set colors. + For more info see: pencolor, fillcolor + + Example (for a Turtle instance named turtle): + >>> turtle.color('red', 'green') + >>> turtle.color() + ('red', 'green') + >>> colormode(255) + >>> color((40, 80, 120), (160, 200, 240)) + >>> color() + ('#285078', '#a0c8f0') + """ + if args: + l = len(args) + if l == 1: + pcolor = fcolor = args[0] + elif l == 2: + pcolor, fcolor = args + elif l == 3: + pcolor = fcolor = args + pcolor = self._colorstr(pcolor) + fcolor = self._colorstr(fcolor) + self.pen(pencolor=pcolor, fillcolor=fcolor) + else: + return self._color(self._pencolor), self._color(self._fillcolor) + + def pencolor(self, *args): + """ Return or set the pencolor. 
+ + Arguments: + Four input formats are allowed: + - pencolor() + Return the current pencolor as color specification string, + possibly in hex-number format (see example). + May be used as input to another color/pencolor/fillcolor call. + - pencolor(colorstring) + s is a Tk color specification string, such as "red" or "yellow" + - pencolor((r, g, b)) + *a tuple* of r, g, and b, which represent, an RGB color, + and each of r, g, and b are in the range 0..colormode, + where colormode is either 1.0 or 255 + - pencolor(r, g, b) + r, g, and b represent an RGB color, and each of r, g, and b + are in the range 0..colormode + + If turtleshape is a polygon, the outline of that polygon is drawn + with the newly set pencolor. + + Example (for a Turtle instance named turtle): + >>> turtle.pencolor('brown') + >>> tup = (0.2, 0.8, 0.55) + >>> turtle.pencolor(tup) + >>> turtle.pencolor() + '#33cc8c' + """ + if args: + color = self._colorstr(args) + if color == self._pencolor: + return + self.pen(pencolor=color) + else: + return self._color(self._pencolor) + + def fillcolor(self, *args): + """ Return or set the fillcolor. + + Arguments: + Four input formats are allowed: + - fillcolor() + Return the current fillcolor as color specification string, + possibly in hex-number format (see example). + May be used as input to another color/pencolor/fillcolor call. + - fillcolor(colorstring) + s is a Tk color specification string, such as "red" or "yellow" + - fillcolor((r, g, b)) + *a tuple* of r, g, and b, which represent, an RGB color, + and each of r, g, and b are in the range 0..colormode, + where colormode is either 1.0 or 255 + - fillcolor(r, g, b) + r, g, and b represent an RGB color, and each of r, g, and b + are in the range 0..colormode + + If turtleshape is a polygon, the interior of that polygon is drawn + with the newly set fillcolor. + + Example (for a Turtle instance named turtle): + >>> turtle.fillcolor('violet') + >>> col = turtle.pencolor() + >>> turtle.fillcolor(col) + >>> turtle.fillcolor(0, .5, 0) + """ + if args: + color = self._colorstr(args) + if color == self._fillcolor: + return + self.pen(fillcolor=color) + else: + return self._color(self._fillcolor) + + def showturtle(self): + """Makes the turtle visible. + + Aliases: showturtle | st + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.hideturtle() + >>> turtle.showturtle() + """ + self.pen(shown=True) + + def hideturtle(self): + """Makes the turtle invisible. + + Aliases: hideturtle | ht + + No argument. + + It's a good idea to do this while you're in the + middle of a complicated drawing, because hiding + the turtle speeds up the drawing observably. + + Example (for a Turtle instance named turtle): + >>> turtle.hideturtle() + """ + self.pen(shown=False) + + def isvisible(self): + """Return True if the Turtle is shown, False if it's hidden. + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.hideturtle() + >>> print turtle.isvisible(): + False + """ + return self._shown + + def pen(self, pen=None, **pendict): + """Return or set the pen's attributes. + + Arguments: + pen -- a dictionary with some or all of the below listed keys. + **pendict -- one or more keyword-arguments with the below + listed keys as keywords. 
+ + Return or set the pen's attributes in a 'pen-dictionary' + with the following key/value pairs: + "shown" : True/False + "pendown" : True/False + "pencolor" : color-string or color-tuple + "fillcolor" : color-string or color-tuple + "pensize" : positive number + "speed" : number in range 0..10 + "resizemode" : "auto" or "user" or "noresize" + "stretchfactor": (positive number, positive number) + "shearfactor": number + "outline" : positive number + "tilt" : number + + This dictionary can be used as argument for a subsequent + pen()-call to restore the former pen-state. Moreover one + or more of these attributes can be provided as keyword-arguments. + This can be used to set several pen attributes in one statement. + + + Examples (for a Turtle instance named turtle): + >>> turtle.pen(fillcolor="black", pencolor="red", pensize=10) + >>> turtle.pen() + {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, + 'pencolor': 'red', 'pendown': True, 'fillcolor': 'black', + 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} + >>> penstate=turtle.pen() + >>> turtle.color("yellow","") + >>> turtle.penup() + >>> turtle.pen() + {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, + 'pencolor': 'yellow', 'pendown': False, 'fillcolor': '', + 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} + >>> p.pen(penstate, fillcolor="green") + >>> p.pen() + {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, + 'pencolor': 'red', 'pendown': True, 'fillcolor': 'green', + 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} + """ + _pd = {"shown" : self._shown, + "pendown" : self._drawing, + "pencolor" : self._pencolor, + "fillcolor" : self._fillcolor, + "pensize" : self._pensize, + "speed" : self._speed, + "resizemode" : self._resizemode, + "stretchfactor" : self._stretchfactor, + "shearfactor" : self._shearfactor, + "outline" : self._outlinewidth, + "tilt" : self._tilt + } + + if not (pen or pendict): + return _pd + + if isinstance(pen, dict): + p = pen + else: + p = {} + p.update(pendict) + + _p_buf = {} + for key in p: + _p_buf[key] = _pd[key] + + if self.undobuffer: + self.undobuffer.push(("pen", _p_buf)) + + newLine = False + if "pendown" in p: + if self._drawing != p["pendown"]: + newLine = True + if "pencolor" in p: + if isinstance(p["pencolor"], tuple): + p["pencolor"] = self._colorstr((p["pencolor"],)) + if self._pencolor != p["pencolor"]: + newLine = True + if "pensize" in p: + if self._pensize != p["pensize"]: + newLine = True + if newLine: + self._newLine() + if "pendown" in p: + self._drawing = p["pendown"] + if "pencolor" in p: + self._pencolor = p["pencolor"] + if "pensize" in p: + self._pensize = p["pensize"] + if "fillcolor" in p: + if isinstance(p["fillcolor"], tuple): + p["fillcolor"] = self._colorstr((p["fillcolor"],)) + self._fillcolor = p["fillcolor"] + if "speed" in p: + self._speed = p["speed"] + if "resizemode" in p: + self._resizemode = p["resizemode"] + if "stretchfactor" in p: + sf = p["stretchfactor"] + if isinstance(sf, (int, float)): + sf = (sf, sf) + self._stretchfactor = sf + if "shearfactor" in p: + self._shearfactor = p["shearfactor"] + if "outline" in p: + self._outlinewidth = p["outline"] + if "shown" in p: + self._shown = p["shown"] + if "tilt" in p: + self._tilt = p["tilt"] + if "stretchfactor" in p or "tilt" in p or "shearfactor" in p: + scx, scy = self._stretchfactor + shf = self._shearfactor + sa, ca = math.sin(self._tilt), math.cos(self._tilt) + self._shapetrafo = ( scx*ca, scy*(shf*ca + sa), + -scx*sa, scy*(ca - 
shf*sa)) + self._update() + +## three dummy methods to be implemented by child class: + + def _newLine(self, usePos = True): + """dummy method - to be overwritten by child class""" + def _update(self, count=True, forced=False): + """dummy method - to be overwritten by child class""" + def _color(self, args): + """dummy method - to be overwritten by child class""" + def _colorstr(self, args): + """dummy method - to be overwritten by child class""" + + width = pensize + up = penup + pu = penup + pd = pendown + down = pendown + st = showturtle + ht = hideturtle + + +class _TurtleImage(object): + """Helper class: Datatype to store Turtle attributes + """ + + def __init__(self, screen, shapeIndex): + self.screen = screen + self._type = None + self._setshape(shapeIndex) + + def _setshape(self, shapeIndex): + screen = self.screen + self.shapeIndex = shapeIndex + if self._type == "polygon" == screen._shapes[shapeIndex]._type: + return + if self._type == "image" == screen._shapes[shapeIndex]._type: + return + if self._type in ["image", "polygon"]: + screen._delete(self._item) + elif self._type == "compound": + for item in self._item: + screen._delete(item) + self._type = screen._shapes[shapeIndex]._type + if self._type == "polygon": + self._item = screen._createpoly() + elif self._type == "image": + self._item = screen._createimage(screen._shapes["blank"]._data) + elif self._type == "compound": + self._item = [screen._createpoly() for item in + screen._shapes[shapeIndex]._data] + + +class RawTurtle(TPen, TNavigator): + """Animation part of the RawTurtle. + Puts RawTurtle upon a TurtleScreen and provides tools for + its animation. + """ + screens = [] + + def __init__(self, canvas=None, + shape=_CFG["shape"], + undobuffersize=_CFG["undobuffersize"], + visible=_CFG["visible"]): + if isinstance(canvas, _Screen): + self.screen = canvas + elif isinstance(canvas, TurtleScreen): + if canvas not in RawTurtle.screens: + RawTurtle.screens.append(canvas) + self.screen = canvas + elif isinstance(canvas, (ScrolledCanvas, Canvas)): + for screen in RawTurtle.screens: + if screen.cv == canvas: + self.screen = screen + break + else: + self.screen = TurtleScreen(canvas) + RawTurtle.screens.append(self.screen) + else: + raise TurtleGraphicsError("bad canvas argument %s" % canvas) + + screen = self.screen + TNavigator.__init__(self, screen.mode()) + TPen.__init__(self) + screen._turtles.append(self) + self.drawingLineItem = screen._createline() + self.turtle = _TurtleImage(screen, shape) + self._poly = None + self._creatingPoly = False + self._fillitem = self._fillpath = None + self._shown = visible + self._hidden_from_screen = False + self.currentLineItem = screen._createline() + self.currentLine = [self._position] + self.items = [self.currentLineItem] + self.stampItems = [] + self._undobuffersize = undobuffersize + self.undobuffer = Tbuffer(undobuffersize) + self._update() + + def reset(self): + """Delete the turtle's drawings and restore its default values. + + No argument. + + Delete the turtle's drawings from the screen, re-center the turtle + and set variables to the default values. + + Example (for a Turtle instance named turtle): + >>> turtle.position() + (0.00,-22.00) + >>> turtle.heading() + 100.0 + >>> turtle.reset() + >>> turtle.position() + (0.00,0.00) + >>> turtle.heading() + 0.0 + """ + TNavigator.reset(self) + TPen._reset(self) + self._clear() + self._drawturtle() + self._update() + + def setundobuffer(self, size): + """Set or disable undobuffer. 
+ + Argument: + size -- an integer or None + + If size is an integer an empty undobuffer of given size is installed. + Size gives the maximum number of turtle-actions that can be undone + by the undo() function. + If size is None, no undobuffer is present. + + Example (for a Turtle instance named turtle): + >>> turtle.setundobuffer(42) + """ + if size is None or size <= 0: + self.undobuffer = None + else: + self.undobuffer = Tbuffer(size) + + def undobufferentries(self): + """Return count of entries in the undobuffer. + + No argument. + + Example (for a Turtle instance named turtle): + >>> while undobufferentries(): + ... undo() + """ + if self.undobuffer is None: + return 0 + return self.undobuffer.nr_of_items() + + def _clear(self): + """Delete all of pen's drawings""" + self._fillitem = self._fillpath = None + for item in self.items: + self.screen._delete(item) + self.currentLineItem = self.screen._createline() + self.currentLine = [] + if self._drawing: + self.currentLine.append(self._position) + self.items = [self.currentLineItem] + self.clearstamps() + self.setundobuffer(self._undobuffersize) + + + def clear(self): + """Delete the turtle's drawings from the screen. Do not move turtle. + + No arguments. + + Delete the turtle's drawings from the screen. Do not move turtle. + State and position of the turtle as well as drawings of other + turtles are not affected. + + Examples (for a Turtle instance named turtle): + >>> turtle.clear() + """ + self._clear() + self._update() + + def _update_data(self): + self.screen._incrementudc() + if self.screen._updatecounter != 0: + return + if len(self.currentLine)>1: + self.screen._drawline(self.currentLineItem, self.currentLine, + self._pencolor, self._pensize) + + def _update(self): + """Perform a Turtle-data update. + """ + screen = self.screen + if screen._tracing == 0: + return + elif screen._tracing == 1: + self._update_data() + self._drawturtle() + screen._update() # TurtleScreenBase + screen._delay(screen._delayvalue) # TurtleScreenBase + else: + self._update_data() + if screen._updatecounter == 0: + for t in screen.turtles(): + t._drawturtle() + screen._update() + + def _tracer(self, flag=None, delay=None): + """Turns turtle animation on/off and set delay for update drawings. + + Optional arguments: + n -- nonnegative integer + delay -- nonnegative integer + + If n is given, only each n-th regular screen update is really performed. + (Can be used to accelerate the drawing of complex graphics.) + Second arguments sets delay value (see RawTurtle.delay()) + + Example (for a Turtle instance named turtle): + >>> turtle.tracer(8, 25) + >>> dist = 2 + >>> for i in range(200): + ... turtle.fd(dist) + ... turtle.rt(90) + ... dist += 2 + """ + return self.screen.tracer(flag, delay) + + def _color(self, args): + return self.screen._color(args) + + def _colorstr(self, args): + return self.screen._colorstr(args) + + def _cc(self, args): + """Convert colortriples to hexstrings. + """ + if isinstance(args, str): + return args + try: + r, g, b = args + except (TypeError, ValueError): + raise TurtleGraphicsError("bad color arguments: %s" % str(args)) + if self.screen._colormode == 1.0: + r, g, b = [round(255.0*x) for x in (r, g, b)] + if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)): + raise TurtleGraphicsError("bad color sequence: %s" % str(args)) + return "#%02x%02x%02x" % (r, g, b) + + def clone(self): + """Create and return a clone of the turtle. + + No argument. 
+ + Create and return a clone of the turtle with same position, heading + and turtle properties. + + Example (for a Turtle instance named mick): + mick = Turtle() + joe = mick.clone() + """ + screen = self.screen + self._newLine(self._drawing) + + turtle = self.turtle + self.screen = None + self.turtle = None # too make self deepcopy-able + + q = deepcopy(self) + + self.screen = screen + self.turtle = turtle + + q.screen = screen + q.turtle = _TurtleImage(screen, self.turtle.shapeIndex) + + screen._turtles.append(q) + ttype = screen._shapes[self.turtle.shapeIndex]._type + if ttype == "polygon": + q.turtle._item = screen._createpoly() + elif ttype == "image": + q.turtle._item = screen._createimage(screen._shapes["blank"]._data) + elif ttype == "compound": + q.turtle._item = [screen._createpoly() for item in + screen._shapes[self.turtle.shapeIndex]._data] + q.currentLineItem = screen._createline() + q._update() + return q + + def shape(self, name=None): + """Set turtle shape to shape with given name / return current shapename. + + Optional argument: + name -- a string, which is a valid shapename + + Set turtle shape to shape with given name or, if name is not given, + return name of current shape. + Shape with name must exist in the TurtleScreen's shape dictionary. + Initially there are the following polygon shapes: + 'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'. + To learn about how to deal with shapes see Screen-method register_shape. + + Example (for a Turtle instance named turtle): + >>> turtle.shape() + 'arrow' + >>> turtle.shape("turtle") + >>> turtle.shape() + 'turtle' + """ + if name is None: + return self.turtle.shapeIndex + if not name in self.screen.getshapes(): + raise TurtleGraphicsError("There is no shape named %s" % name) + self.turtle._setshape(name) + self._update() + + def shapesize(self, stretch_wid=None, stretch_len=None, outline=None): + """Set/return turtle's stretchfactors/outline. Set resizemode to "user". + + Optional arguments: + stretch_wid : positive number + stretch_len : positive number + outline : positive number + + Return or set the pen's attributes x/y-stretchfactors and/or outline. + Set resizemode to "user". + If and only if resizemode is set to "user", the turtle will be displayed + stretched according to its stretchfactors: + stretch_wid is stretchfactor perpendicular to orientation + stretch_len is stretchfactor in direction of turtles orientation. + outline determines the width of the shapes's outline. + + Examples (for a Turtle instance named turtle): + >>> turtle.resizemode("user") + >>> turtle.shapesize(5, 5, 12) + >>> turtle.shapesize(outline=8) + """ + if stretch_wid is stretch_len is outline is None: + stretch_wid, stretch_len = self._stretchfactor + return stretch_wid, stretch_len, self._outlinewidth + if stretch_wid == 0 or stretch_len == 0: + raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero") + if stretch_wid is not None: + if stretch_len is None: + stretchfactor = stretch_wid, stretch_wid + else: + stretchfactor = stretch_wid, stretch_len + elif stretch_len is not None: + stretchfactor = self._stretchfactor[0], stretch_len + else: + stretchfactor = self._stretchfactor + if outline is None: + outline = self._outlinewidth + self.pen(resizemode="user", + stretchfactor=stretchfactor, outline=outline) + + def shearfactor(self, shear=None): + """Set or return the current shearfactor. 
+ + Optional argument: shear -- number, tangent of the shear angle + + Shear the turtleshape according to the given shearfactor shear, + which is the tangent of the shear angle. DO NOT change the + turtle's heading (direction of movement). + If shear is not given: return the current shearfactor, i. e. the + tangent of the shear angle, by which lines parallel to the + heading of the turtle are sheared. + + Examples (for a Turtle instance named turtle): + >>> turtle.shape("circle") + >>> turtle.shapesize(5,2) + >>> turtle.shearfactor(0.5) + >>> turtle.shearfactor() + >>> 0.5 + """ + if shear is None: + return self._shearfactor + self.pen(resizemode="user", shearfactor=shear) + + def settiltangle(self, angle): + """Rotate the turtleshape to point in the specified direction + + Argument: angle -- number + + Rotate the turtleshape to point in the direction specified by angle, + regardless of its current tilt-angle. DO NOT change the turtle's + heading (direction of movement). + + + Examples (for a Turtle instance named turtle): + >>> turtle.shape("circle") + >>> turtle.shapesize(5,2) + >>> turtle.settiltangle(45) + >>> stamp() + >>> turtle.fd(50) + >>> turtle.settiltangle(-45) + >>> stamp() + >>> turtle.fd(50) + """ + tilt = -angle * self._degreesPerAU * self._angleOrient + tilt = math.radians(tilt) % math.tau + self.pen(resizemode="user", tilt=tilt) + + def tiltangle(self, angle=None): + """Set or return the current tilt-angle. + + Optional argument: angle -- number + + Rotate the turtleshape to point in the direction specified by angle, + regardless of its current tilt-angle. DO NOT change the turtle's + heading (direction of movement). + If angle is not given: return the current tilt-angle, i. e. the angle + between the orientation of the turtleshape and the heading of the + turtle (its direction of movement). + + (Incorrectly marked as deprecated since Python 3.1, it is really + settiltangle that is deprecated.) + + Examples (for a Turtle instance named turtle): + >>> turtle.shape("circle") + >>> turtle.shapesize(5,2) + >>> turtle.tilt(45) + >>> turtle.tiltangle() + """ + if angle is None: + tilt = -math.degrees(self._tilt) * self._angleOrient + return (tilt / self._degreesPerAU) % self._fullcircle + else: + self.settiltangle(angle) + + def tilt(self, angle): + """Rotate the turtleshape by angle. + + Argument: + angle - a number + + Rotate the turtleshape by angle from its current tilt-angle, + but do NOT change the turtle's heading (direction of movement). + + Examples (for a Turtle instance named turtle): + >>> turtle.shape("circle") + >>> turtle.shapesize(5,2) + >>> turtle.tilt(30) + >>> turtle.fd(50) + >>> turtle.tilt(30) + >>> turtle.fd(50) + """ + self.settiltangle(angle + self.tiltangle()) + + def shapetransform(self, t11=None, t12=None, t21=None, t22=None): + """Set or return the current transformation matrix of the turtle shape. + + Optional arguments: t11, t12, t21, t22 -- numbers. + + If none of the matrix elements are given, return the transformation + matrix. + Otherwise set the given elements and transform the turtleshape + according to the matrix consisting of first row t11, t12 and + second row t21, 22. + Modify stretchfactor, shearfactor and tiltangle according to the + given matrix. 
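+ + The decomposition used below recovers the tilt angle as atan2(-t21, t11) + and then derives stretchfactor and shearfactor from the rotated matrix.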
+ + Examples (for a Turtle instance named turtle): + >>> turtle.shape("square") + >>> turtle.shapesize(4,2) + >>> turtle.shearfactor(-0.5) + >>> turtle.shapetransform() + (4.0, -1.0, -0.0, 2.0) + """ + if t11 is t12 is t21 is t22 is None: + return self._shapetrafo + m11, m12, m21, m22 = self._shapetrafo + if t11 is not None: m11 = t11 + if t12 is not None: m12 = t12 + if t21 is not None: m21 = t21 + if t22 is not None: m22 = t22 + if m11 * m22 - m12 * m21 == 0: + raise TurtleGraphicsError("Bad shape transform matrix: must not be singular") + self._shapetrafo = (m11, m12, m21, m22) + alfa = math.atan2(-m21, m11) % math.tau + sa, ca = math.sin(alfa), math.cos(alfa) + a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22, + sa*m11 + ca*m21, sa*m12 + ca*m22) + self._stretchfactor = a11, a22 + self._shearfactor = a12/a22 + self._tilt = alfa + self.pen(resizemode="user") + + + def _polytrafo(self, poly): + """Computes transformed polygon shapes from a shape + according to current position and heading. + """ + screen = self.screen + p0, p1 = self._position + e0, e1 = self._orient + e = Vec2D(e0, e1 * screen.yscale / screen.xscale) + e0, e1 = (1.0 / abs(e)) * e + return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale) + for (x, y) in poly] + + def get_shapepoly(self): + """Return the current shape polygon as tuple of coordinate pairs. + + No argument. + + Examples (for a Turtle instance named turtle): + >>> turtle.shape("square") + >>> turtle.shapetransform(4, -1, 0, 2) + >>> turtle.get_shapepoly() + ((50, -20), (30, 20), (-50, 20), (-30, -20)) + + """ + shape = self.screen._shapes[self.turtle.shapeIndex] + if shape._type == "polygon": + return self._getshapepoly(shape._data, shape._type == "compound") + # else return None + + def _getshapepoly(self, polygon, compound=False): + """Calculate transformed shape polygon according to resizemode + and shapetransform.
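+ Each vertex (x, y) of the polygon is mapped to (t11*x + t12*y, t21*x + t22*y); + in "auto" mode the matrix is a uniform scale derived from the pensize, and + in "noresize" mode the polygon is returned unchanged.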
+ """ + if self._resizemode == "user" or compound: + t11, t12, t21, t22 = self._shapetrafo + elif self._resizemode == "auto": + l = max(1, self._pensize/5.0) + t11, t12, t21, t22 = l, 0, 0, l + elif self._resizemode == "noresize": + return polygon + return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon) + + def _drawturtle(self): + """Manages the correct rendering of the turtle with respect to + its shape, resizemode, stretch and tilt etc.""" + screen = self.screen + shape = screen._shapes[self.turtle.shapeIndex] + ttype = shape._type + titem = self.turtle._item + if self._shown and screen._updatecounter == 0 and screen._tracing > 0: + self._hidden_from_screen = False + tshape = shape._data + if ttype == "polygon": + if self._resizemode == "noresize": w = 1 + elif self._resizemode == "auto": w = self._pensize + else: w =self._outlinewidth + shape = self._polytrafo(self._getshapepoly(tshape)) + fc, oc = self._fillcolor, self._pencolor + screen._drawpoly(titem, shape, fill=fc, outline=oc, + width=w, top=True) + elif ttype == "image": + screen._drawimage(titem, self._position, tshape) + elif ttype == "compound": + for item, (poly, fc, oc) in zip(titem, tshape): + poly = self._polytrafo(self._getshapepoly(poly, True)) + screen._drawpoly(item, poly, fill=self._cc(fc), + outline=self._cc(oc), width=self._outlinewidth, top=True) + else: + if self._hidden_from_screen: + return + if ttype == "polygon": + screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), "", "") + elif ttype == "image": + screen._drawimage(titem, self._position, + screen._shapes["blank"]._data) + elif ttype == "compound": + for item in titem: + screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), "", "") + self._hidden_from_screen = True + +############################## stamp stuff ############################### + + def stamp(self): + """Stamp a copy of the turtleshape onto the canvas and return its id. + + No argument. + + Stamp a copy of the turtle shape onto the canvas at the current + turtle position. Return a stamp_id for that stamp, which can be + used to delete it by calling clearstamp(stamp_id). 
+ + Example (for a Turtle instance named turtle): + >>> turtle.color("blue") + >>> turtle.stamp() + 13 + >>> turtle.fd(50) + """ + screen = self.screen + shape = screen._shapes[self.turtle.shapeIndex] + ttype = shape._type + tshape = shape._data + if ttype == "polygon": + stitem = screen._createpoly() + if self._resizemode == "noresize": w = 1 + elif self._resizemode == "auto": w = self._pensize + else: w =self._outlinewidth + shape = self._polytrafo(self._getshapepoly(tshape)) + fc, oc = self._fillcolor, self._pencolor + screen._drawpoly(stitem, shape, fill=fc, outline=oc, + width=w, top=True) + elif ttype == "image": + stitem = screen._createimage("") + screen._drawimage(stitem, self._position, tshape) + elif ttype == "compound": + stitem = [] + for element in tshape: + item = screen._createpoly() + stitem.append(item) + stitem = tuple(stitem) + for item, (poly, fc, oc) in zip(stitem, tshape): + poly = self._polytrafo(self._getshapepoly(poly, True)) + screen._drawpoly(item, poly, fill=self._cc(fc), + outline=self._cc(oc), width=self._outlinewidth, top=True) + self.stampItems.append(stitem) + self.undobuffer.push(("stamp", stitem)) + return stitem + + def _clearstamp(self, stampid): + """does the work for clearstamp() and clearstamps() + """ + if stampid in self.stampItems: + if isinstance(stampid, tuple): + for subitem in stampid: + self.screen._delete(subitem) + else: + self.screen._delete(stampid) + self.stampItems.remove(stampid) + # Delete stampitem from undobuffer if necessary + # if clearstamp is called directly. + item = ("stamp", stampid) + buf = self.undobuffer + if item not in buf.buffer: + return + index = buf.buffer.index(item) + buf.buffer.remove(item) + if index <= buf.ptr: + buf.ptr = (buf.ptr - 1) % buf.bufsize + buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None]) + + def clearstamp(self, stampid): + """Delete stamp with given stampid + + Argument: + stampid - an integer, must be return value of previous stamp() call. + + Example (for a Turtle instance named turtle): + >>> turtle.color("blue") + >>> astamp = turtle.stamp() + >>> turtle.fd(50) + >>> turtle.clearstamp(astamp) + """ + self._clearstamp(stampid) + self._update() + + def clearstamps(self, n=None): + """Delete all or first/last n of turtle's stamps. + + Optional argument: + n -- an integer + + If n is None, delete all of pen's stamps, + else if n > 0 delete first n stamps + else if n < 0 delete last n stamps. + + Example (for a Turtle instance named turtle): + >>> for i in range(8): + ... turtle.stamp(); turtle.fd(30) + ... + >>> turtle.clearstamps(2) + >>> turtle.clearstamps(-2) + >>> turtle.clearstamps() + """ + if n is None: + toDelete = self.stampItems[:] + elif n >= 0: + toDelete = self.stampItems[:n] + else: + toDelete = self.stampItems[n:] + for item in toDelete: + self._clearstamp(item) + self._update() + + def _goto(self, end): + """Move the pen to the point end, thereby drawing a line + if pen is down. All other methods for turtle movement depend + on this one. 
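+ + With tracing on and a nonzero speed the move is animated in small hops; + the number of hops grows with the distance and shrinks as the speed + setting increases. If an undobuffer is present, an entry describing the + move is pushed first.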
+ """ + ## Version with undo-stuff + go_modes = ( self._drawing, + self._pencolor, + self._pensize, + isinstance(self._fillpath, list)) + screen = self.screen + undo_entry = ("go", self._position, end, go_modes, + (self.currentLineItem, + self.currentLine[:], + screen._pointlist(self.currentLineItem), + self.items[:]) + ) + if self.undobuffer: + self.undobuffer.push(undo_entry) + start = self._position + if self._speed and screen._tracing == 1: + diff = (end-start) + diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2 + nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed)) + delta = diff * (1.0/nhops) + for n in range(1, nhops): + if n == 1: + top = True + else: + top = False + self._position = start + delta * n + if self._drawing: + screen._drawline(self.drawingLineItem, + (start, self._position), + self._pencolor, self._pensize, top) + self._update() + if self._drawing: + screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)), + fill="", width=self._pensize) + # Turtle now at end, + if self._drawing: # now update currentLine + self.currentLine.append(end) + if isinstance(self._fillpath, list): + self._fillpath.append(end) + ###### vererbung!!!!!!!!!!!!!!!!!!!!!! + self._position = end + if self._creatingPoly: + self._poly.append(end) + if len(self.currentLine) > 42: # 42! answer to the ultimate question + # of life, the universe and everything + self._newLine() + self._update() #count=True) + + def _undogoto(self, entry): + """Reverse a _goto. Used for undo() + """ + old, new, go_modes, coodata = entry + drawing, pc, ps, filling = go_modes + cLI, cL, pl, items = coodata + screen = self.screen + if abs(self._position - new) > 0.5: + print ("undogoto: HALLO-DA-STIMMT-WAS-NICHT!") + # restore former situation + self.currentLineItem = cLI + self.currentLine = cL + + if pl == [(0, 0), (0, 0)]: + usepc = "" + else: + usepc = pc + screen._drawline(cLI, pl, fill=usepc, width=ps) + + todelete = [i for i in self.items if (i not in items) and + (screen._type(i) == "line")] + for i in todelete: + screen._delete(i) + self.items.remove(i) + + start = old + if self._speed and screen._tracing == 1: + diff = old - new + diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2 + nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed)) + delta = diff * (1.0/nhops) + for n in range(1, nhops): + if n == 1: + top = True + else: + top = False + self._position = new + delta * n + if drawing: + screen._drawline(self.drawingLineItem, + (start, self._position), + pc, ps, top) + self._update() + if drawing: + screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)), + fill="", width=ps) + # Turtle now at position old, + self._position = old + ## if undo is done during creating a polygon, the last vertex + ## will be deleted. if the polygon is entirely deleted, + ## creatingPoly will be set to False. + ## Polygons created before the last one will not be affected by undo() + if self._creatingPoly: + if len(self._poly) > 0: + self._poly.pop() + if self._poly == []: + self._creatingPoly = False + self._poly = None + if filling: + if self._fillpath == []: + self._fillpath = None + print("Unwahrscheinlich in _undogoto!") + elif self._fillpath is not None: + self._fillpath.pop() + self._update() #count=True) + + def _rotate(self, angle): + """Turns pen clockwise by angle. 
+ """ + if self.undobuffer: + self.undobuffer.push(("rot", angle, self._degreesPerAU)) + angle *= self._degreesPerAU + neworient = self._orient.rotate(angle) + tracing = self.screen._tracing + if tracing == 1 and self._speed > 0: + anglevel = 3.0 * self._speed + steps = 1 + int(abs(angle)/anglevel) + delta = 1.0*angle/steps + for _ in range(steps): + self._orient = self._orient.rotate(delta) + self._update() + self._orient = neworient + self._update() + + def _newLine(self, usePos=True): + """Closes current line item and starts a new one. + Remark: if current line became too long, animation + performance (via _drawline) slowed down considerably. + """ + if len(self.currentLine) > 1: + self.screen._drawline(self.currentLineItem, self.currentLine, + self._pencolor, self._pensize) + self.currentLineItem = self.screen._createline() + self.items.append(self.currentLineItem) + else: + self.screen._drawline(self.currentLineItem, top=True) + self.currentLine = [] + if usePos: + self.currentLine = [self._position] + + def filling(self): + """Return fillstate (True if filling, False else). + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.begin_fill() + >>> if turtle.filling(): + ... turtle.pensize(5) + ... else: + ... turtle.pensize(3) + """ + return isinstance(self._fillpath, list) + + def begin_fill(self): + """Called just before drawing a shape to be filled. + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.color("black", "red") + >>> turtle.begin_fill() + >>> turtle.circle(60) + >>> turtle.end_fill() + """ + if not self.filling(): + self._fillitem = self.screen._createpoly() + self.items.append(self._fillitem) + self._fillpath = [self._position] + self._newLine() + if self.undobuffer: + self.undobuffer.push(("beginfill", self._fillitem)) + self._update() + + + def end_fill(self): + """Fill the shape drawn after the call begin_fill(). + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.color("black", "red") + >>> turtle.begin_fill() + >>> turtle.circle(60) + >>> turtle.end_fill() + """ + if self.filling(): + if len(self._fillpath) > 2: + self.screen._drawpoly(self._fillitem, self._fillpath, + fill=self._fillcolor) + if self.undobuffer: + self.undobuffer.push(("dofill", self._fillitem)) + self._fillitem = self._fillpath = None + self._update() + + def dot(self, size=None, *color): + """Draw a dot with diameter size, using color. + + Optional arguments: + size -- an integer >= 1 (if given) + color -- a colorstring or a numeric color tuple + + Draw a circular dot with diameter size, using color. + If size is not given, the maximum of pensize+4 and 2*pensize is used. 
+ + Example (for a Turtle instance named turtle): + >>> turtle.dot() + >>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50) + """ + if not color: + if isinstance(size, (str, tuple)): + color = self._colorstr(size) + size = self._pensize + max(self._pensize, 4) + else: + color = self._pencolor + if not size: + size = self._pensize + max(self._pensize, 4) + else: + if size is None: + size = self._pensize + max(self._pensize, 4) + color = self._colorstr(color) + if hasattr(self.screen, "_dot"): + item = self.screen._dot(self._position, size, color) + self.items.append(item) + if self.undobuffer: + self.undobuffer.push(("dot", item)) + else: + pen = self.pen() + if self.undobuffer: + self.undobuffer.push(["seq"]) + self.undobuffer.cumulate = True + try: + if self.resizemode() == 'auto': + self.ht() + self.pendown() + self.pensize(size) + self.pencolor(color) + self.forward(0) + finally: + self.pen(pen) + if self.undobuffer: + self.undobuffer.cumulate = False + + def _write(self, txt, align, font): + """Performs the writing for write() + """ + item, end = self.screen._write(self._position, txt, align, font, + self._pencolor) + self._update() + self.items.append(item) + if self.undobuffer: + self.undobuffer.push(("wri", item)) + return end + + def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")): + """Write text at the current turtle position. + + Arguments: + arg -- info, which is to be written to the TurtleScreen + move (optional) -- True/False + align (optional) -- one of the strings "left", "center" or right" + font (optional) -- a triple (fontname, fontsize, fonttype) + + Write text - the string representation of arg - at the current + turtle position according to align ("left", "center" or right") + and with the given font. + If move is True, the pen is moved to the bottom-right corner + of the text. By default, move is False. + + Example (for a Turtle instance named turtle): + >>> turtle.write('Home = ', True, align="center") + >>> turtle.write((0,0), True) + """ + if self.undobuffer: + self.undobuffer.push(["seq"]) + self.undobuffer.cumulate = True + end = self._write(str(arg), align.lower(), font) + if move: + x, y = self.pos() + self.setpos(end, y) + if self.undobuffer: + self.undobuffer.cumulate = False + + def begin_poly(self): + """Start recording the vertices of a polygon. + + No argument. + + Start recording the vertices of a polygon. Current turtle position + is first point of polygon. + + Example (for a Turtle instance named turtle): + >>> turtle.begin_poly() + """ + self._poly = [self._position] + self._creatingPoly = True + + def end_poly(self): + """Stop recording the vertices of a polygon. + + No argument. + + Stop recording the vertices of a polygon. Current turtle position is + last point of polygon. This will be connected with the first point. + + Example (for a Turtle instance named turtle): + >>> turtle.end_poly() + """ + self._creatingPoly = False + + def get_poly(self): + """Return the lastly recorded polygon. + + No argument. + + Example (for a Turtle instance named turtle): + >>> p = turtle.get_poly() + >>> turtle.register_shape("myFavouriteShape", p) + """ + ## check if there is any poly? + if self._poly is not None: + return tuple(self._poly) + + def getscreen(self): + """Return the TurtleScreen object, the turtle is drawing on. + + No argument. + + Return the TurtleScreen object, the turtle is drawing on. + So TurtleScreen-methods can be called for that object. 
+ + Example (for a Turtle instance named turtle): + >>> ts = turtle.getscreen() + >>> ts + + >>> ts.bgcolor("pink") + """ + return self.screen + + def getturtle(self): + """Return the Turtleobject itself. + + No argument. + + Only reasonable use: as a function to return the 'anonymous turtle': + + Example: + >>> pet = getturtle() + >>> pet.fd(50) + >>> pet + + >>> turtles() + [] + """ + return self + + getpen = getturtle + + + ################################################################ + ### screen oriented methods recurring to methods of TurtleScreen + ################################################################ + + def _delay(self, delay=None): + """Set delay value which determines speed of turtle animation. + """ + return self.screen.delay(delay) + + def onclick(self, fun, btn=1, add=None): + """Bind fun to mouse-click event on this turtle on canvas. + + Arguments: + fun -- a function with two arguments, to which will be assigned + the coordinates of the clicked point on the canvas. + btn -- number of the mouse-button defaults to 1 (left mouse button). + add -- True or False. If True, new binding will be added, otherwise + it will replace a former binding. + + Example for the anonymous turtle, i. e. the procedural way: + + >>> def turn(x, y): + ... left(360) + ... + >>> onclick(turn) # Now clicking into the turtle will turn it. + >>> onclick(None) # event-binding will be removed + """ + self.screen._onclick(self.turtle._item, fun, btn, add) + self._update() + + def onrelease(self, fun, btn=1, add=None): + """Bind fun to mouse-button-release event on this turtle on canvas. + + Arguments: + fun -- a function with two arguments, to which will be assigned + the coordinates of the clicked point on the canvas. + btn -- number of the mouse-button defaults to 1 (left mouse button). + + Example (for a MyTurtle instance named joe): + >>> class MyTurtle(Turtle): + ... def glow(self,x,y): + ... self.fillcolor("red") + ... def unglow(self,x,y): + ... self.fillcolor("") + ... + >>> joe = MyTurtle() + >>> joe.onclick(joe.glow) + >>> joe.onrelease(joe.unglow) + + Clicking on joe turns fillcolor red, unclicking turns it to + transparent. + """ + self.screen._onrelease(self.turtle._item, fun, btn, add) + self._update() + + def ondrag(self, fun, btn=1, add=None): + """Bind fun to mouse-move event on this turtle on canvas. + + Arguments: + fun -- a function with two arguments, to which will be assigned + the coordinates of the clicked point on the canvas. + btn -- number of the mouse-button defaults to 1 (left mouse button). + + Every sequence of mouse-move-events on a turtle is preceded by a + mouse-click event on that turtle. + + Example (for a Turtle instance named turtle): + >>> turtle.ondrag(turtle.goto) + + Subsequently clicking and dragging a Turtle will move it + across the screen thereby producing handdrawings (if pen is + down). 
+ """ + self.screen._ondrag(self.turtle._item, fun, btn, add) + + + def _undo(self, action, data): + """Does the main part of the work for undo() + """ + if self.undobuffer is None: + return + if action == "rot": + angle, degPAU = data + self._rotate(-angle*degPAU/self._degreesPerAU) + dummy = self.undobuffer.pop() + elif action == "stamp": + stitem = data[0] + self.clearstamp(stitem) + elif action == "go": + self._undogoto(data) + elif action in ["wri", "dot"]: + item = data[0] + self.screen._delete(item) + self.items.remove(item) + elif action == "dofill": + item = data[0] + self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)), + fill="", outline="") + elif action == "beginfill": + item = data[0] + self._fillitem = self._fillpath = None + if item in self.items: + self.screen._delete(item) + self.items.remove(item) + elif action == "pen": + TPen.pen(self, data[0]) + self.undobuffer.pop() + + def undo(self): + """undo (repeatedly) the last turtle action. + + No argument. + + undo (repeatedly) the last turtle action. + Number of available undo actions is determined by the size of + the undobuffer. + + Example (for a Turtle instance named turtle): + >>> for i in range(4): + ... turtle.fd(50); turtle.lt(80) + ... + >>> for i in range(8): + ... turtle.undo() + ... + """ + if self.undobuffer is None: + return + item = self.undobuffer.pop() + action = item[0] + data = item[1:] + if action == "seq": + while data: + item = data.pop() + self._undo(item[0], item[1:]) + else: + self._undo(action, data) + + turtlesize = shapesize + +RawPen = RawTurtle + +### Screen - Singleton ######################## + +def Screen(): + """Return the singleton screen object. + If none exists at the moment, create a new one and return it, + else return the existing one.""" + if Turtle._screen is None: + Turtle._screen = _Screen() + return Turtle._screen + +class _Screen(TurtleScreen): + + _root = None + _canvas = None + _title = _CFG["title"] + + def __init__(self): + # XXX there is no need for this code to be conditional, + # as there will be only a single _Screen instance, anyway + # XXX actually, the turtle demo is injecting root window, + # so perhaps the conditional creation of a root should be + # preserved (perhaps by passing it as an optional parameter) + if _Screen._root is None: + _Screen._root = self._root = _Root() + self._root.title(_Screen._title) + self._root.ondestroy(self._destroy) + if _Screen._canvas is None: + width = _CFG["width"] + height = _CFG["height"] + canvwidth = _CFG["canvwidth"] + canvheight = _CFG["canvheight"] + leftright = _CFG["leftright"] + topbottom = _CFG["topbottom"] + self._root.setupcanvas(width, height, canvwidth, canvheight) + _Screen._canvas = self._root._getcanvas() + TurtleScreen.__init__(self, _Screen._canvas) + self.setup(width, height, leftright, topbottom) + + def setup(self, width=_CFG["width"], height=_CFG["height"], + startx=_CFG["leftright"], starty=_CFG["topbottom"]): + """ Set the size and position of the main window. + + Arguments: + width: as integer a size in pixels, as float a fraction of the screen. + Default is 50% of screen. + height: as integer the height in pixels, as float a fraction of the + screen. Default is 75% of screen. + startx: if positive, starting position in pixels from the left + edge of the screen, if negative from the right edge + Default, startx=None is to center window horizontally. 
+ starty: if positive, starting position in pixels from the top + edge of the screen, if negative from the bottom edge + Default, starty=None is to center window vertically. + + Examples (for a Screen instance named screen): + >>> screen.setup (width=200, height=200, startx=0, starty=0) + + sets window to 200x200 pixels, in upper left of screen + + >>> screen.setup(width=.75, height=0.5, startx=None, starty=None) + + sets window to 75% of screen by 50% of screen and centers + """ + if not hasattr(self._root, "set_geometry"): + return + sw = self._root.win_width() + sh = self._root.win_height() + if isinstance(width, float) and 0 <= width <= 1: + width = sw*width + if startx is None: + startx = (sw - width) / 2 + if isinstance(height, float) and 0 <= height <= 1: + height = sh*height + if starty is None: + starty = (sh - height) / 2 + self._root.set_geometry(width, height, startx, starty) + self.update() + + def title(self, titlestring): + """Set title of turtle-window + + Argument: + titlestring -- a string, to appear in the titlebar of the + turtle graphics window. + + This is a method of Screen-class. Not available for TurtleScreen- + objects. + + Example (for a Screen instance named screen): + >>> screen.title("Welcome to the turtle-zoo!") + """ + if _Screen._root is not None: + _Screen._root.title(titlestring) + _Screen._title = titlestring + + def _destroy(self): + root = self._root + if root is _Screen._root: + Turtle._pen = None + Turtle._screen = None + _Screen._root = None + _Screen._canvas = None + TurtleScreen._RUNNING = False + root.destroy() + + def bye(self): + """Shut the turtlegraphics window. + + Example (for a TurtleScreen instance named screen): + >>> screen.bye() + """ + self._destroy() + + def exitonclick(self): + """Go into mainloop until the mouse is clicked. + + No arguments. + + Bind bye() method to mouseclick on TurtleScreen. + If "using_IDLE" - value in configuration dictionary is False + (default value), enter mainloop. + If IDLE with -n switch (no subprocess) is used, this value should be + set to True in turtle.cfg. In this case IDLE's mainloop + is active also for the client script. + + This is a method of the Screen-class and not available for + TurtleScreen instances. + + Example (for a Screen instance named screen): + >>> screen.exitonclick() + + """ + def exitGracefully(x, y): + """Screen.bye() with two dummy-parameters""" + self.bye() + self.onclick(exitGracefully) + if _CFG["using_IDLE"]: + return + try: + mainloop() + except AttributeError: + exit(0) + +class Turtle(RawTurtle): + """RawTurtle auto-creating (scrolled) canvas. + + When a Turtle object is created or a function derived from some + Turtle method is called a TurtleScreen object is automatically created. + """ + _pen = None + _screen = None + + def __init__(self, + shape=_CFG["shape"], + undobuffersize=_CFG["undobuffersize"], + visible=_CFG["visible"]): + if Turtle._screen is None: + Turtle._screen = Screen() + RawTurtle.__init__(self, Turtle._screen, + shape=shape, + undobuffersize=undobuffersize, + visible=visible) + +Pen = Turtle + +def write_docstringdict(filename="turtle_docstringdict"): + """Create and write docstring-dictionary to file. + + Optional argument: + filename -- a string, used as filename + default value is turtle_docstringdict + + Has to be called explicitly, (not used by the turtle-graphics classes) + The docstring dictionary will be written to the Python script .py + It is intended to serve as a template for translation of the docstrings + into different languages. 
+ """ + docsdict = {} + + for methodname in _tg_screen_functions: + key = "_Screen."+methodname + docsdict[key] = eval(key).__doc__ + for methodname in _tg_turtle_functions: + key = "Turtle."+methodname + docsdict[key] = eval(key).__doc__ + + with open("%s.py" % filename,"w") as f: + keys = sorted(x for x in docsdict + if x.split('.')[1] not in _alias_list) + f.write('docsdict = {\n\n') + for key in keys[:-1]: + f.write('%s :\n' % repr(key)) + f.write(' """%s\n""",\n\n' % docsdict[key]) + key = keys[-1] + f.write('%s :\n' % repr(key)) + f.write(' """%s\n"""\n\n' % docsdict[key]) + f.write("}\n") + f.close() + +def read_docstrings(lang): + """Read in docstrings from lang-specific docstring dictionary. + + Transfer docstrings, translated to lang, from a dictionary-file + to the methods of classes Screen and Turtle and - in revised form - + to the corresponding functions. + """ + modname = "turtle_docstringdict_%(language)s" % {'language':lang.lower()} + module = __import__(modname) + docsdict = module.docsdict + for key in docsdict: + try: +# eval(key).im_func.__doc__ = docsdict[key] + eval(key).__doc__ = docsdict[key] + except Exception: + print("Bad docstring-entry: %s" % key) + +_LANGUAGE = _CFG["language"] + +try: + if _LANGUAGE != "english": + read_docstrings(_LANGUAGE) +except ImportError: + print("Cannot find docsdict for", _LANGUAGE) +except Exception: + print ("Unknown Error when trying to import %s-docstring-dictionary" % + _LANGUAGE) + + +def getmethparlist(ob): + """Get strings describing the arguments for the given object + + Returns a pair of strings representing function parameter lists + including parenthesis. The first string is suitable for use in + function definition and the second is suitable for use in function + call. The "self" parameter is not included. + """ + defText = callText = "" + # bit of a hack for methods - turn it into a function + # but we drop the "self" param. + # Try and build one for Python defined functions + args, varargs, varkw = inspect.getargs(ob.__code__) + items2 = args[1:] + realArgs = args[1:] + defaults = ob.__defaults__ or [] + defaults = ["=%r" % (value,) for value in defaults] + defaults = [""] * (len(realArgs)-len(defaults)) + defaults + items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)] + if varargs is not None: + items1.append("*" + varargs) + items2.append("*" + varargs) + if varkw is not None: + items1.append("**" + varkw) + items2.append("**" + varkw) + defText = ", ".join(items1) + defText = "(%s)" % defText + callText = ", ".join(items2) + callText = "(%s)" % callText + return defText, callText + +def _turtle_docrevise(docstr): + """To reduce docstrings from RawTurtle class for functions + """ + import re + if docstr is None: + return None + turtlename = _CFG["exampleturtle"] + newdocstr = docstr.replace("%s." % turtlename,"") + parexp = re.compile(r' \(.+ %s\):' % turtlename) + newdocstr = parexp.sub(":", newdocstr) + return newdocstr + +def _screen_docrevise(docstr): + """To reduce docstrings from TurtleScreen class for functions + """ + import re + if docstr is None: + return None + screenname = _CFG["examplescreen"] + newdocstr = docstr.replace("%s." % screenname,"") + parexp = re.compile(r' \(.+ %s\):' % screenname) + newdocstr = parexp.sub(":", newdocstr) + return newdocstr + +## The following mechanism makes all methods of RawTurtle and Turtle available +## as functions. So we can enhance, change, add, delete methods to these +## classes and do not need to change anything here. 
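+## As an illustration (a rough sketch, not the literal generated source): for a
+## Turtle method such as forward(distance), the template below expands into a
+## module-level function roughly equivalent to
+##
+##     def forward(distance):
+##         if Turtle._pen is None:
+##             if not TurtleScreen._RUNNING:
+##                 TurtleScreen._RUNNING = True
+##                 raise Terminator
+##             Turtle._pen = Turtle()
+##         try:
+##             return Turtle._pen.forward(distance)
+##         except TK.TclError:
+##             if not TurtleScreen._RUNNING:
+##                 TurtleScreen._RUNNING = True
+##                 raise Terminator
+##             raise
+##
+## so calling forward(50) at module level creates the anonymous turtle on
+## first use and then simply delegates to the bound method.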
+ +__func_body = """\ +def {name}{paramslist}: + if {obj} is None: + if not TurtleScreen._RUNNING: + TurtleScreen._RUNNING = True + raise Terminator + {obj} = {init} + try: + return {obj}.{name}{argslist} + except TK.TclError: + if not TurtleScreen._RUNNING: + TurtleScreen._RUNNING = True + raise Terminator + raise +""" + +def _make_global_funcs(functions, cls, obj, init, docrevise): + for methodname in functions: + method = getattr(cls, methodname) + pl1, pl2 = getmethparlist(method) + if pl1 == "": + print(">>>>>>", pl1, pl2) + continue + defstr = __func_body.format(obj=obj, init=init, name=methodname, + paramslist=pl1, argslist=pl2) + exec(defstr, globals()) + globals()[methodname].__doc__ = docrevise(method.__doc__) + +_make_global_funcs(_tg_screen_functions, _Screen, + 'Turtle._screen', 'Screen()', _screen_docrevise) +_make_global_funcs(_tg_turtle_functions, Turtle, + 'Turtle._pen', 'Turtle()', _turtle_docrevise) + + +done = mainloop + +if __name__ == "__main__": + def switchpen(): + if isdown(): + pu() + else: + pd() + + def demo1(): + """Demo of old turtle.py - module""" + reset() + tracer(True) + up() + backward(100) + down() + # draw 3 squares; the last filled + width(3) + for i in range(3): + if i == 2: + begin_fill() + for _ in range(4): + forward(20) + left(90) + if i == 2: + color("maroon") + end_fill() + up() + forward(30) + down() + width(1) + color("black") + # move out of the way + tracer(False) + up() + right(90) + forward(100) + right(90) + forward(100) + right(180) + down() + # some text + write("startstart", 1) + write("start", 1) + color("red") + # staircase + for i in range(5): + forward(20) + left(90) + forward(20) + right(90) + # filled staircase + tracer(True) + begin_fill() + for i in range(5): + forward(20) + left(90) + forward(20) + right(90) + end_fill() + # more text + + def demo2(): + """Demo of some new features.""" + speed(1) + st() + pensize(3) + setheading(towards(0, 0)) + radius = distance(0, 0)/2.0 + rt(90) + for _ in range(18): + switchpen() + circle(radius, 10) + write("wait a moment...") + while undobufferentries(): + undo() + reset() + lt(90) + colormode(255) + laenge = 10 + pencolor("green") + pensize(3) + lt(180) + for i in range(-2, 16): + if i > 0: + begin_fill() + fillcolor(255-15*i, 0, 15*i) + for _ in range(3): + fd(laenge) + lt(120) + end_fill() + laenge += 10 + lt(15) + speed((speed()+1)%12) + #end_fill() + + lt(120) + pu() + fd(70) + rt(30) + pd() + color("red","yellow") + speed(0) + begin_fill() + for _ in range(4): + circle(50, 90) + rt(90) + fd(30) + rt(90) + end_fill() + lt(90) + pu() + fd(30) + pd() + shape("turtle") + + tri = getturtle() + tri.resizemode("auto") + turtle = Turtle() + turtle.resizemode("auto") + turtle.shape("turtle") + turtle.reset() + turtle.left(90) + turtle.speed(0) + turtle.up() + turtle.goto(280, 40) + turtle.lt(30) + turtle.down() + turtle.speed(6) + turtle.color("blue","orange") + turtle.pensize(2) + tri.speed(6) + setheading(towards(turtle)) + count = 1 + while tri.distance(turtle) > 4: + turtle.fd(3.5) + turtle.lt(0.6) + tri.setheading(tri.towards(turtle)) + tri.fd(4) + if count % 20 == 0: + turtle.stamp() + tri.stamp() + switchpen() + count += 1 + tri.write("CAUGHT! 
", font=("Arial", 16, "bold"), align="right") + tri.pencolor("black") + tri.pencolor("red") + + def baba(xdummy, ydummy): + clearscreen() + bye() + + time.sleep(2) + + while undobufferentries(): + tri.undo() + turtle.undo() + tri.fd(50) + tri.write(" Click me!", font = ("Courier", 12, "bold") ) + tri.onclick(baba, 1) + + demo1() + demo2() + exitonclick() diff --git a/evalkit_cambrian/lib/python3.10/uuid.py b/evalkit_cambrian/lib/python3.10/uuid.py new file mode 100644 index 0000000000000000000000000000000000000000..fe9f87b79457fac74f7dee5643a01870dd3fd8e9 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/uuid.py @@ -0,0 +1,733 @@ +r"""UUID objects (universally unique identifiers) according to RFC 4122. + +This module provides immutable UUID objects (class UUID) and the functions +uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5 +UUIDs as specified in RFC 4122. + +If all you want is a unique ID, you should probably call uuid1() or uuid4(). +Note that uuid1() may compromise privacy since it creates a UUID containing +the computer's network address. uuid4() creates a random UUID. + +Typical usage: + + >>> import uuid + + # make a UUID based on the host ID and current time + >>> uuid.uuid1() # doctest: +SKIP + UUID('a8098c1a-f86e-11da-bd1a-00112444be1e') + + # make a UUID using an MD5 hash of a namespace UUID and a name + >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org') + UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e') + + # make a random UUID + >>> uuid.uuid4() # doctest: +SKIP + UUID('16fd2706-8baf-433b-82eb-8c7fada847da') + + # make a UUID using a SHA-1 hash of a namespace UUID and a name + >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org') + UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d') + + # make a UUID from a string of hex digits (braces and hyphens ignored) + >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}') + + # convert a UUID to a string of hex digits in standard form + >>> str(x) + '00010203-0405-0607-0809-0a0b0c0d0e0f' + + # get the raw 16 bytes of the UUID + >>> x.bytes + b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' + + # make a UUID from a 16-byte string + >>> uuid.UUID(bytes=x.bytes) + UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') +""" + +import os +import sys + +from enum import Enum + + +__author__ = 'Ka-Ping Yee ' + +# The recognized platforms - known behaviors +if sys.platform in ('win32', 'darwin'): + _AIX = _LINUX = False +else: + import platform + _platform_system = platform.system() + _AIX = _platform_system == 'AIX' + _LINUX = _platform_system == 'Linux' + +_MAC_DELIM = b':' +_MAC_OMITS_LEADING_ZEROES = False +if _AIX: + _MAC_DELIM = b'.' + _MAC_OMITS_LEADING_ZEROES = True + +RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ + 'reserved for NCS compatibility', 'specified in RFC 4122', + 'reserved for Microsoft compatibility', 'reserved for future definition'] + +int_ = int # The built-in int type +bytes_ = bytes # The built-in bytes type + + +class SafeUUID(Enum): + safe = 0 + unsafe = -1 + unknown = None + + +class UUID: + """Instances of the UUID class represent UUIDs as specified in RFC 4122. + UUID objects are immutable, hashable, and usable as dictionary keys. + Converting a UUID to a string with str() yields something in the form + '12345678-1234-1234-1234-123456789abc'. 
The UUID constructor accepts + five possible forms: a similar string of hexadecimal digits, or a tuple + of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and + 48-bit values respectively) as an argument named 'fields', or a string + of 16 bytes (with all the integer fields in big-endian order) as an + argument named 'bytes', or a string of 16 bytes (with the first three + fields in little-endian order) as an argument named 'bytes_le', or a + single 128-bit integer as an argument named 'int'. + + UUIDs have these read-only attributes: + + bytes the UUID as a 16-byte string (containing the six + integer fields in big-endian byte order) + + bytes_le the UUID as a 16-byte string (with time_low, time_mid, + and time_hi_version in little-endian byte order) + + fields a tuple of the six integer fields of the UUID, + which are also available as six individual attributes + and two derived attributes: + + time_low the first 32 bits of the UUID + time_mid the next 16 bits of the UUID + time_hi_version the next 16 bits of the UUID + clock_seq_hi_variant the next 8 bits of the UUID + clock_seq_low the next 8 bits of the UUID + node the last 48 bits of the UUID + + time the 60-bit timestamp + clock_seq the 14-bit sequence number + + hex the UUID as a 32-character hexadecimal string + + int the UUID as a 128-bit integer + + urn the UUID as a URN as specified in RFC 4122 + + variant the UUID variant (one of the constants RESERVED_NCS, + RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE) + + version the UUID version number (1 through 5, meaningful only + when the variant is RFC_4122) + + is_safe An enum indicating whether the UUID has been generated in + a way that is safe for multiprocessing applications, via + uuid_generate_time_safe(3). + """ + + __slots__ = ('int', 'is_safe', '__weakref__') + + def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, + int=None, version=None, + *, is_safe=SafeUUID.unknown): + r"""Create a UUID from either a string of 32 hexadecimal digits, + a string of 16 bytes as the 'bytes' argument, a string of 16 bytes + in little-endian order as the 'bytes_le' argument, a tuple of six + integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version, + 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as + the 'fields' argument, or a single 128-bit integer as the 'int' + argument. When a string of hex digits is given, curly braces, + hyphens, and a URN prefix are all optional. For example, these + expressions all yield the same UUID: + + UUID('{12345678-1234-5678-1234-567812345678}') + UUID('12345678123456781234567812345678') + UUID('urn:uuid:12345678-1234-5678-1234-567812345678') + UUID(bytes='\x12\x34\x56\x78'*4) + UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' + + '\x12\x34\x56\x78\x12\x34\x56\x78') + UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678)) + UUID(int=0x12345678123456781234567812345678) + + Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must + be given. The 'version' argument is optional; if given, the resulting + UUID will have its variant and version set according to RFC 4122, + overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. + + is_safe is an enum exposed as an attribute on the instance. It + indicates whether the UUID has been generated in a way that is safe + for multiprocessing applications, via uuid_generate_time_safe(3). 
+ """ + + if [hex, bytes, bytes_le, fields, int].count(None) != 4: + raise TypeError('one of the hex, bytes, bytes_le, fields, ' + 'or int arguments must be given') + if hex is not None: + hex = hex.replace('urn:', '').replace('uuid:', '') + hex = hex.strip('{}').replace('-', '') + if len(hex) != 32: + raise ValueError('badly formed hexadecimal UUID string') + int = int_(hex, 16) + if bytes_le is not None: + if len(bytes_le) != 16: + raise ValueError('bytes_le is not a 16-char string') + bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] + + bytes_le[8-1:6-1:-1] + bytes_le[8:]) + if bytes is not None: + if len(bytes) != 16: + raise ValueError('bytes is not a 16-char string') + assert isinstance(bytes, bytes_), repr(bytes) + int = int_.from_bytes(bytes, byteorder='big') + if fields is not None: + if len(fields) != 6: + raise ValueError('fields is not a 6-tuple') + (time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node) = fields + if not 0 <= time_low < 1<<32: + raise ValueError('field 1 out of range (need a 32-bit value)') + if not 0 <= time_mid < 1<<16: + raise ValueError('field 2 out of range (need a 16-bit value)') + if not 0 <= time_hi_version < 1<<16: + raise ValueError('field 3 out of range (need a 16-bit value)') + if not 0 <= clock_seq_hi_variant < 1<<8: + raise ValueError('field 4 out of range (need an 8-bit value)') + if not 0 <= clock_seq_low < 1<<8: + raise ValueError('field 5 out of range (need an 8-bit value)') + if not 0 <= node < 1<<48: + raise ValueError('field 6 out of range (need a 48-bit value)') + clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low + int = ((time_low << 96) | (time_mid << 80) | + (time_hi_version << 64) | (clock_seq << 48) | node) + if int is not None: + if not 0 <= int < 1<<128: + raise ValueError('int is out of range (need a 128-bit value)') + if version is not None: + if not 1 <= version <= 5: + raise ValueError('illegal version number') + # Set the variant to RFC 4122. + int &= ~(0xc000 << 48) + int |= 0x8000 << 48 + # Set the version number. + int &= ~(0xf000 << 64) + int |= version << 76 + object.__setattr__(self, 'int', int) + object.__setattr__(self, 'is_safe', is_safe) + + def __getstate__(self): + d = {'int': self.int} + if self.is_safe != SafeUUID.unknown: + # is_safe is a SafeUUID instance. Return just its value, so that + # it can be un-pickled in older Python versions without SafeUUID. + d['is_safe'] = self.is_safe.value + return d + + def __setstate__(self, state): + object.__setattr__(self, 'int', state['int']) + # is_safe was added in 3.7; it is also omitted when it is "unknown" + object.__setattr__(self, 'is_safe', + SafeUUID(state['is_safe']) + if 'is_safe' in state else SafeUUID.unknown) + + def __eq__(self, other): + if isinstance(other, UUID): + return self.int == other.int + return NotImplemented + + # Q. What's the value of being able to sort UUIDs? + # A. Use them as keys in a B-Tree or similar mapping. 
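+    # A small illustrative example: because the rich comparisons below
+    # delegate to the 128-bit 'int' value, sorting UUIDs is plain integer
+    # ordering, e.g.
+    #
+    #     >>> sorted([UUID(int=3), UUID(int=1), UUID(int=2)])[0].int
+    #     1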
+ + def __lt__(self, other): + if isinstance(other, UUID): + return self.int < other.int + return NotImplemented + + def __gt__(self, other): + if isinstance(other, UUID): + return self.int > other.int + return NotImplemented + + def __le__(self, other): + if isinstance(other, UUID): + return self.int <= other.int + return NotImplemented + + def __ge__(self, other): + if isinstance(other, UUID): + return self.int >= other.int + return NotImplemented + + def __hash__(self): + return hash(self.int) + + def __int__(self): + return self.int + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, str(self)) + + def __setattr__(self, name, value): + raise TypeError('UUID objects are immutable') + + def __str__(self): + hex = '%032x' % self.int + return '%s-%s-%s-%s-%s' % ( + hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:]) + + @property + def bytes(self): + return self.int.to_bytes(16, 'big') + + @property + def bytes_le(self): + bytes = self.bytes + return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + + bytes[8:]) + + @property + def fields(self): + return (self.time_low, self.time_mid, self.time_hi_version, + self.clock_seq_hi_variant, self.clock_seq_low, self.node) + + @property + def time_low(self): + return self.int >> 96 + + @property + def time_mid(self): + return (self.int >> 80) & 0xffff + + @property + def time_hi_version(self): + return (self.int >> 64) & 0xffff + + @property + def clock_seq_hi_variant(self): + return (self.int >> 56) & 0xff + + @property + def clock_seq_low(self): + return (self.int >> 48) & 0xff + + @property + def time(self): + return (((self.time_hi_version & 0x0fff) << 48) | + (self.time_mid << 32) | self.time_low) + + @property + def clock_seq(self): + return (((self.clock_seq_hi_variant & 0x3f) << 8) | + self.clock_seq_low) + + @property + def node(self): + return self.int & 0xffffffffffff + + @property + def hex(self): + return '%032x' % self.int + + @property + def urn(self): + return 'urn:uuid:' + str(self) + + @property + def variant(self): + if not self.int & (0x8000 << 48): + return RESERVED_NCS + elif not self.int & (0x4000 << 48): + return RFC_4122 + elif not self.int & (0x2000 << 48): + return RESERVED_MICROSOFT + else: + return RESERVED_FUTURE + + @property + def version(self): + # The version bits are only meaningful for RFC 4122 UUIDs. + if self.variant == RFC_4122: + return int((self.int >> 76) & 0xf) + + +def _get_command_stdout(command, *args): + import io, os, shutil, subprocess + + try: + path_dirs = os.environ.get('PATH', os.defpath).split(os.pathsep) + path_dirs.extend(['/sbin', '/usr/sbin']) + executable = shutil.which(command, path=os.pathsep.join(path_dirs)) + if executable is None: + return None + # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output + # on stderr (Note: we don't have an example where the words we search + # for are actually localized, but in theory some system could do so.) + env = dict(os.environ) + env['LC_ALL'] = 'C' + # Empty strings will be quoted by popen so we should just ommit it + if args != ('',): + command = (executable, *args) + else: + command = (executable,) + proc = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + env=env) + if not proc: + return None + stdout, stderr = proc.communicate() + return io.BytesIO(stdout) + except (OSError, subprocess.SubprocessError): + return None + + +# For MAC (a.k.a. 
IEEE 802, or EUI-48) addresses, the second least significant +# bit of the first octet signifies whether the MAC address is universally (0) +# or locally (1) administered. Network cards from hardware manufacturers will +# always be universally administered to guarantee global uniqueness of the MAC +# address, but any particular machine may have other interfaces which are +# locally administered. An example of the latter is the bridge interface to +# the Touch Bar on MacBook Pros. +# +# This bit works out to be the 42nd bit counting from 1 being the least +# significant, or 1<<41. We'll prefer universally administered MAC addresses +# over locally administered ones since the former are globally unique, but +# we'll return the first of the latter found if that's all the machine has. +# +# See https://en.wikipedia.org/wiki/MAC_address#Universal_vs._local + +def _is_universal(mac): + return not (mac & (1 << 41)) + + +def _find_mac_near_keyword(command, args, keywords, get_word_index): + """Searches a command's output for a MAC address near a keyword. + + Each line of words in the output is case-insensitively searched for + any of the given keywords. Upon a match, get_word_index is invoked + to pick a word from the line, given the index of the match. For + example, lambda i: 0 would get the first word on the line, while + lambda i: i - 1 would get the word preceding the keyword. + """ + stdout = _get_command_stdout(command, args) + if stdout is None: + return None + + first_local_mac = None + for line in stdout: + words = line.lower().rstrip().split() + for i in range(len(words)): + if words[i] in keywords: + try: + word = words[get_word_index(i)] + mac = int(word.replace(_MAC_DELIM, b''), 16) + except (ValueError, IndexError): + # Virtual interfaces, such as those provided by + # VPNs, do not have a colon-delimited MAC address + # as expected, but a 16-byte HWAddr separated by + # dashes. These should be ignored in favor of a + # real MAC address + pass + else: + if _is_universal(mac): + return mac + first_local_mac = first_local_mac or mac + return first_local_mac or None + + +def _parse_mac(word): + # Accept 'HH:HH:HH:HH:HH:HH' MAC address (ex: '52:54:00:9d:0e:67'), + # but reject IPv6 address (ex: 'fe80::5054:ff:fe9' or '123:2:3:4:5:6:7:8'). + # + # Virtual interfaces, such as those provided by VPNs, do not have a + # colon-delimited MAC address as expected, but a 16-byte HWAddr separated + # by dashes. These should be ignored in favor of a real MAC address + parts = word.split(_MAC_DELIM) + if len(parts) != 6: + return + if _MAC_OMITS_LEADING_ZEROES: + # (Only) on AIX the macaddr value given is not prefixed by 0, e.g. + # en0 1500 link#2 fa.bc.de.f7.62.4 110854824 0 160133733 0 0 + # not + # en0 1500 link#2 fa.bc.de.f7.62.04 110854824 0 160133733 0 0 + if not all(1 <= len(part) <= 2 for part in parts): + return + hexstr = b''.join(part.rjust(2, b'0') for part in parts) + else: + if not all(len(part) == 2 for part in parts): + return + hexstr = b''.join(parts) + try: + return int(hexstr, 16) + except ValueError: + return + + +def _find_mac_under_heading(command, args, heading): + """Looks for a MAC address under a heading in a command's output. + + The first line of words in the output is searched for the given + heading. Words at the same word index as the heading in subsequent + lines are then examined to see if they look like MAC addresses. 
+ """ + stdout = _get_command_stdout(command, args) + if stdout is None: + return None + + keywords = stdout.readline().rstrip().split() + try: + column_index = keywords.index(heading) + except ValueError: + return None + + first_local_mac = None + for line in stdout: + words = line.rstrip().split() + try: + word = words[column_index] + except IndexError: + continue + + mac = _parse_mac(word) + if mac is None: + continue + if _is_universal(mac): + return mac + if first_local_mac is None: + first_local_mac = mac + + return first_local_mac + + +# The following functions call external programs to 'get' a macaddr value to +# be used as basis for an uuid +def _ifconfig_getnode(): + """Get the hardware address on Unix by running ifconfig.""" + # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes. + keywords = (b'hwaddr', b'ether', b'address:', b'lladdr') + for args in ('', '-a', '-av'): + mac = _find_mac_near_keyword('ifconfig', args, keywords, lambda i: i+1) + if mac: + return mac + return None + +def _ip_getnode(): + """Get the hardware address on Unix by running ip.""" + # This works on Linux with iproute2. + mac = _find_mac_near_keyword('ip', 'link', [b'link/ether'], lambda i: i+1) + if mac: + return mac + return None + +def _arp_getnode(): + """Get the hardware address on Unix by running arp.""" + import os, socket + try: + ip_addr = socket.gethostbyname(socket.gethostname()) + except OSError: + return None + + # Try getting the MAC addr from arp based on our IP address (Solaris). + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1) + if mac: + return mac + + # This works on OpenBSD + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1) + if mac: + return mac + + # This works on Linux, FreeBSD and NetBSD + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode('(%s)' % ip_addr)], + lambda i: i+2) + # Return None instead of 0. + if mac: + return mac + return None + +def _lanscan_getnode(): + """Get the hardware address on Unix by running lanscan.""" + # This might work on HP-UX. + return _find_mac_near_keyword('lanscan', '-ai', [b'lan0'], lambda i: 0) + +def _netstat_getnode(): + """Get the hardware address on Unix by running netstat.""" + # This works on AIX and might work on Tru64 UNIX. 
+ return _find_mac_under_heading('netstat', '-ian', b'Address') + +def _ipconfig_getnode(): + """[DEPRECATED] Get the hardware address on Windows.""" + # bpo-40501: UuidCreateSequential() is now the only supported approach + return _windll_getnode() + +def _netbios_getnode(): + """[DEPRECATED] Get the hardware address on Windows.""" + # bpo-40501: UuidCreateSequential() is now the only supported approach + return _windll_getnode() + + +# Import optional C extension at toplevel, to help disabling it when testing +try: + import _uuid + _generate_time_safe = getattr(_uuid, "generate_time_safe", None) + _UuidCreate = getattr(_uuid, "UuidCreate", None) + _has_uuid_generate_time_safe = _uuid.has_uuid_generate_time_safe +except ImportError: + _uuid = None + _generate_time_safe = None + _UuidCreate = None + _has_uuid_generate_time_safe = None + + +def _load_system_functions(): + """[DEPRECATED] Platform-specific functions loaded at import time""" + + +def _unix_getnode(): + """Get the hardware address on Unix using the _uuid extension module.""" + if _generate_time_safe: + uuid_time, _ = _generate_time_safe() + return UUID(bytes=uuid_time).node + +def _windll_getnode(): + """Get the hardware address on Windows using the _uuid extension module.""" + if _UuidCreate: + uuid_bytes = _UuidCreate() + return UUID(bytes_le=uuid_bytes).node + +def _random_getnode(): + """Get a random node ID.""" + # RFC 4122, $4.1.6 says "For systems with no IEEE address, a randomly or + # pseudo-randomly generated value may be used; see Section 4.5. The + # multicast bit must be set in such addresses, in order that they will + # never conflict with addresses obtained from network cards." + # + # The "multicast bit" of a MAC address is defined to be "the least + # significant bit of the first octet". This works out to be the 41st bit + # counting from 1 being the least significant bit, or 1<<40. + # + # See https://en.wikipedia.org/wiki/MAC_address#Unicast_vs._multicast + import random + return random.getrandbits(48) | (1 << 40) + + +# _OS_GETTERS, when known, are targeted for a specific OS or platform. +# The order is by 'common practice' on the specified platform. +# Note: 'posix' and 'windows' _OS_GETTERS are prefixed by a dll/dlload() method +# which, when successful, means none of these "external" methods are called. +# _GETTERS is (also) used by test_uuid.py to SkipUnless(), e.g., +# @unittest.skipUnless(_uuid._ifconfig_getnode in _uuid._GETTERS, ...) +if _LINUX: + _OS_GETTERS = [_ip_getnode, _ifconfig_getnode] +elif sys.platform == 'darwin': + _OS_GETTERS = [_ifconfig_getnode, _arp_getnode, _netstat_getnode] +elif sys.platform == 'win32': + # bpo-40201: _windll_getnode will always succeed, so these are not needed + _OS_GETTERS = [] +elif _AIX: + _OS_GETTERS = [_netstat_getnode] +else: + _OS_GETTERS = [_ifconfig_getnode, _ip_getnode, _arp_getnode, + _netstat_getnode, _lanscan_getnode] +if os.name == 'posix': + _GETTERS = [_unix_getnode] + _OS_GETTERS +elif os.name == 'nt': + _GETTERS = [_windll_getnode] + _OS_GETTERS +else: + _GETTERS = _OS_GETTERS + +_node = None + +def getnode(): + """Get the hardware address as a 48-bit positive integer. + + The first time this runs, it may launch a separate program, which could + be quite slow. If all attempts to obtain the hardware address fail, we + choose a random 48-bit number with its eighth bit set to 1 as recommended + in RFC 4122. 
+ """ + global _node + if _node is not None: + return _node + + for getter in _GETTERS + [_random_getnode]: + try: + _node = getter() + except: + continue + if (_node is not None) and (0 <= _node < (1 << 48)): + return _node + assert False, '_random_getnode() returned invalid value: {}'.format(_node) + + +_last_timestamp = None + +def uuid1(node=None, clock_seq=None): + """Generate a UUID from a host ID, sequence number, and the current time. + If 'node' is not given, getnode() is used to obtain the hardware + address. If 'clock_seq' is given, it is used as the sequence number; + otherwise a random 14-bit sequence number is chosen.""" + + # When the system provides a version-1 UUID generator, use it (but don't + # use UuidCreate here because its UUIDs don't conform to RFC 4122). + if _generate_time_safe is not None and node is clock_seq is None: + uuid_time, safely_generated = _generate_time_safe() + try: + is_safe = SafeUUID(safely_generated) + except ValueError: + is_safe = SafeUUID.unknown + return UUID(bytes=uuid_time, is_safe=is_safe) + + global _last_timestamp + import time + nanoseconds = time.time_ns() + # 0x01b21dd213814000 is the number of 100-ns intervals between the + # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. + timestamp = nanoseconds // 100 + 0x01b21dd213814000 + if _last_timestamp is not None and timestamp <= _last_timestamp: + timestamp = _last_timestamp + 1 + _last_timestamp = timestamp + if clock_seq is None: + import random + clock_seq = random.getrandbits(14) # instead of stable storage + time_low = timestamp & 0xffffffff + time_mid = (timestamp >> 32) & 0xffff + time_hi_version = (timestamp >> 48) & 0x0fff + clock_seq_low = clock_seq & 0xff + clock_seq_hi_variant = (clock_seq >> 8) & 0x3f + if node is None: + node = getnode() + return UUID(fields=(time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node), version=1) + +def uuid3(namespace, name): + """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" + from hashlib import md5 + digest = md5( + namespace.bytes + bytes(name, "utf-8"), + usedforsecurity=False + ).digest() + return UUID(bytes=digest[:16], version=3) + +def uuid4(): + """Generate a random UUID.""" + return UUID(bytes=os.urandom(16), version=4) + +def uuid5(namespace, name): + """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" + from hashlib import sha1 + hash = sha1(namespace.bytes + bytes(name, "utf-8")).digest() + return UUID(bytes=hash[:16], version=5) + +# The following standard UUIDs are for use with uuid3() or uuid5(). + +NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8') diff --git a/evalkit_cambrian/lib/python3.10/zipfile.py b/evalkit_cambrian/lib/python3.10/zipfile.py new file mode 100644 index 0000000000000000000000000000000000000000..4cd44fb1e4a8f0a3774f4a18ad30d4a41ab08e3d --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/zipfile.py @@ -0,0 +1,2510 @@ +""" +Read and write ZIP files. + +XXX references to utf-8 need further investigation. 
+""" +import binascii +import importlib.util +import io +import itertools +import os +import posixpath +import re +import shutil +import stat +import struct +import sys +import threading +import time +import contextlib +import pathlib + +try: + import zlib # We may need its compression method + crc32 = zlib.crc32 +except ImportError: + zlib = None + crc32 = binascii.crc32 + +try: + import bz2 # We may need its compression method +except ImportError: + bz2 = None + +try: + import lzma # We may need its compression method +except ImportError: + lzma = None + +__all__ = ["BadZipFile", "BadZipfile", "error", + "ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA", + "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile", + "Path"] + +class BadZipFile(Exception): + pass + + +class LargeZipFile(Exception): + """ + Raised when writing a zipfile, the zipfile requires ZIP64 extensions + and those extensions are disabled. + """ + +error = BadZipfile = BadZipFile # Pre-3.2 compatibility names + + +ZIP64_LIMIT = (1 << 31) - 1 +ZIP_FILECOUNT_LIMIT = (1 << 16) - 1 +ZIP_MAX_COMMENT = (1 << 16) - 1 + +# constants for Zip file compression methods +ZIP_STORED = 0 +ZIP_DEFLATED = 8 +ZIP_BZIP2 = 12 +ZIP_LZMA = 14 +# Other ZIP compression methods not supported + +DEFAULT_VERSION = 20 +ZIP64_VERSION = 45 +BZIP2_VERSION = 46 +LZMA_VERSION = 63 +# we recognize (but not necessarily support) all features up to that version +MAX_EXTRACT_VERSION = 63 + +# Below are some formats and associated data for reading/writing headers using +# the struct module. The names and structures of headers/records are those used +# in the PKWARE description of the ZIP file format: +# http://www.pkware.com/documents/casestudies/APPNOTE.TXT +# (URL valid as of January 2008) + +# The "end of central directory" structure, magic number, size, and indices +# (section V.I in the format document) +structEndArchive = b"<4s4H2LH" +stringEndArchive = b"PK\005\006" +sizeEndCentDir = struct.calcsize(structEndArchive) + +_ECD_SIGNATURE = 0 +_ECD_DISK_NUMBER = 1 +_ECD_DISK_START = 2 +_ECD_ENTRIES_THIS_DISK = 3 +_ECD_ENTRIES_TOTAL = 4 +_ECD_SIZE = 5 +_ECD_OFFSET = 6 +_ECD_COMMENT_SIZE = 7 +# These last two indices are not part of the structure as defined in the +# spec, but they are used internally by this module as a convenience +_ECD_COMMENT = 8 +_ECD_LOCATION = 9 + +# The "central directory" structure, magic number, size, and indices +# of entries in the structure (section V.F in the format document) +structCentralDir = "<4s4B4HL2L5H2L" +stringCentralDir = b"PK\001\002" +sizeCentralDir = struct.calcsize(structCentralDir) + +# indexes of entries in the central directory structure +_CD_SIGNATURE = 0 +_CD_CREATE_VERSION = 1 +_CD_CREATE_SYSTEM = 2 +_CD_EXTRACT_VERSION = 3 +_CD_EXTRACT_SYSTEM = 4 +_CD_FLAG_BITS = 5 +_CD_COMPRESS_TYPE = 6 +_CD_TIME = 7 +_CD_DATE = 8 +_CD_CRC = 9 +_CD_COMPRESSED_SIZE = 10 +_CD_UNCOMPRESSED_SIZE = 11 +_CD_FILENAME_LENGTH = 12 +_CD_EXTRA_FIELD_LENGTH = 13 +_CD_COMMENT_LENGTH = 14 +_CD_DISK_NUMBER_START = 15 +_CD_INTERNAL_FILE_ATTRIBUTES = 16 +_CD_EXTERNAL_FILE_ATTRIBUTES = 17 +_CD_LOCAL_HEADER_OFFSET = 18 + +# The "local file header" structure, magic number, size, and indices +# (section V.A in the format document) +structFileHeader = "<4s2B4HL2L2H" +stringFileHeader = b"PK\003\004" +sizeFileHeader = struct.calcsize(structFileHeader) + +_FH_SIGNATURE = 0 +_FH_EXTRACT_VERSION = 1 +_FH_EXTRACT_SYSTEM = 2 +_FH_GENERAL_PURPOSE_FLAG_BITS = 3 +_FH_COMPRESSION_METHOD = 4 +_FH_LAST_MOD_TIME = 5 +_FH_LAST_MOD_DATE = 6 
+_FH_CRC = 7 +_FH_COMPRESSED_SIZE = 8 +_FH_UNCOMPRESSED_SIZE = 9 +_FH_FILENAME_LENGTH = 10 +_FH_EXTRA_FIELD_LENGTH = 11 + +# The "Zip64 end of central directory locator" structure, magic number, and size +structEndArchive64Locator = "<4sLQL" +stringEndArchive64Locator = b"PK\x06\x07" +sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator) + +# The "Zip64 end of central directory" record, magic number, size, and indices +# (section V.G in the format document) +structEndArchive64 = "<4sQ2H2L4Q" +stringEndArchive64 = b"PK\x06\x06" +sizeEndCentDir64 = struct.calcsize(structEndArchive64) + +_CD64_SIGNATURE = 0 +_CD64_DIRECTORY_RECSIZE = 1 +_CD64_CREATE_VERSION = 2 +_CD64_EXTRACT_VERSION = 3 +_CD64_DISK_NUMBER = 4 +_CD64_DISK_NUMBER_START = 5 +_CD64_NUMBER_ENTRIES_THIS_DISK = 6 +_CD64_NUMBER_ENTRIES_TOTAL = 7 +_CD64_DIRECTORY_SIZE = 8 +_CD64_OFFSET_START_CENTDIR = 9 + +_DD_SIGNATURE = 0x08074b50 + +_EXTRA_FIELD_STRUCT = struct.Struct(' 1: + raise BadZipFile("zipfiles that span multiple disks are not supported") + + # Assume no 'zip64 extensible data' + fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2) + data = fpin.read(sizeEndCentDir64) + if len(data) != sizeEndCentDir64: + return endrec + sig, sz, create_version, read_version, disk_num, disk_dir, \ + dircount, dircount2, dirsize, diroffset = \ + struct.unpack(structEndArchive64, data) + if sig != stringEndArchive64: + return endrec + + # Update the original endrec using data from the ZIP64 record + endrec[_ECD_SIGNATURE] = sig + endrec[_ECD_DISK_NUMBER] = disk_num + endrec[_ECD_DISK_START] = disk_dir + endrec[_ECD_ENTRIES_THIS_DISK] = dircount + endrec[_ECD_ENTRIES_TOTAL] = dircount2 + endrec[_ECD_SIZE] = dirsize + endrec[_ECD_OFFSET] = diroffset + return endrec + + +def _EndRecData(fpin): + """Return data from the "End of Central Directory" record, or None. + + The data is a list of the nine items in the ZIP "End of central dir" + record followed by a tenth item, the file seek offset of this record.""" + + # Determine file size + fpin.seek(0, 2) + filesize = fpin.tell() + + # Check to see if this is ZIP file with no archive comment (the + # "end of central directory" structure should be the last item in the + # file if this is the case). + try: + fpin.seek(-sizeEndCentDir, 2) + except OSError: + return None + data = fpin.read() + if (len(data) == sizeEndCentDir and + data[0:4] == stringEndArchive and + data[-2:] == b"\000\000"): + # the signature is correct and there's no comment, unpack structure + endrec = struct.unpack(structEndArchive, data) + endrec=list(endrec) + + # Append a blank comment and record start offset + endrec.append(b"") + endrec.append(filesize - sizeEndCentDir) + + # Try to read the "Zip64 end of central directory" structure + return _EndRecData64(fpin, -sizeEndCentDir, endrec) + + # Either this is not a ZIP file, or it is a ZIP file with an archive + # comment. Search the end of the file for the "end of central directory" + # record signature. The comment is the last item in the ZIP file and may be + # up to 64K long. It is assumed that the "end of central directory" magic + # number does not appear in the comment. + maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0) + fpin.seek(maxCommentStart, 0) + data = fpin.read() + start = data.rfind(stringEndArchive) + if start >= 0: + # found the magic number; attempt to unpack and interpret + recData = data[start:start+sizeEndCentDir] + if len(recData) != sizeEndCentDir: + # Zip file is corrupted. 
+ return None + endrec = list(struct.unpack(structEndArchive, recData)) + commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file + comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize] + endrec.append(comment) + endrec.append(maxCommentStart + start) + + # Try to read the "Zip64 end of central directory" structure + return _EndRecData64(fpin, maxCommentStart + start - filesize, + endrec) + + # Unable to find a valid end of central directory structure + return None + + +class ZipInfo (object): + """Class with attributes describing each file in the ZIP archive.""" + + __slots__ = ( + 'orig_filename', + 'filename', + 'date_time', + 'compress_type', + '_compresslevel', + 'comment', + 'extra', + 'create_system', + 'create_version', + 'extract_version', + 'reserved', + 'flag_bits', + 'volume', + 'internal_attr', + 'external_attr', + 'header_offset', + 'CRC', + 'compress_size', + 'file_size', + '_raw_time', + '_end_offset', + ) + + def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): + self.orig_filename = filename # Original file name in archive + + # Terminate the file name at the first null byte. Null bytes in file + # names are used as tricks by viruses in archives. + null_byte = filename.find(chr(0)) + if null_byte >= 0: + filename = filename[0:null_byte] + # This is used to ensure paths in generated ZIP files always use + # forward slashes as the directory separator, as required by the + # ZIP format specification. + if os.sep != "/" and os.sep in filename: + filename = filename.replace(os.sep, "/") + + self.filename = filename # Normalized file name + self.date_time = date_time # year, month, day, hour, min, sec + + if date_time[0] < 1980: + raise ValueError('ZIP does not support timestamps before 1980') + + # Standard values: + self.compress_type = ZIP_STORED # Type of compression for the file + self._compresslevel = None # Level for the compressor + self.comment = b"" # Comment for each file + self.extra = b"" # ZIP extra data + if sys.platform == 'win32': + self.create_system = 0 # System which created ZIP archive + else: + # Assume everything else is unix-y + self.create_system = 3 # System which created ZIP archive + self.create_version = DEFAULT_VERSION # Version which created ZIP archive + self.extract_version = DEFAULT_VERSION # Version needed to extract archive + self.reserved = 0 # Must be zero + self.flag_bits = 0 # ZIP flag bits + self.volume = 0 # Volume number of file header + self.internal_attr = 0 # Internal attributes + self.external_attr = 0 # External file attributes + self.compress_size = 0 # Size of the compressed file + self.file_size = 0 # Size of the uncompressed file + self._end_offset = None # Start of the next local header or central directory + # Other attributes are set by class ZipFile: + # header_offset Byte offset to the file header + # CRC CRC-32 of the uncompressed file + + def __repr__(self): + result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)] + if self.compress_type != ZIP_STORED: + result.append(' compress_type=%s' % + compressor_names.get(self.compress_type, + self.compress_type)) + hi = self.external_attr >> 16 + lo = self.external_attr & 0xFFFF + if hi: + result.append(' filemode=%r' % stat.filemode(hi)) + if lo: + result.append(' external_attr=%#x' % lo) + isdir = self.is_dir() + if not isdir or self.file_size: + result.append(' file_size=%r' % self.file_size) + if ((not isdir or self.compress_size) and + (self.compress_type != ZIP_STORED or + self.file_size != self.compress_size)): + 
result.append(' compress_size=%r' % self.compress_size) + result.append('>') + return ''.join(result) + + def FileHeader(self, zip64=None): + """Return the per-file header as a bytes object.""" + dt = self.date_time + dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] + dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) + if self.flag_bits & 0x08: + # Set these to zero because we write them after the file data + CRC = compress_size = file_size = 0 + else: + CRC = self.CRC + compress_size = self.compress_size + file_size = self.file_size + + extra = self.extra + + min_version = 0 + if zip64 is None: + zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT + if zip64: + fmt = ' ZIP64_LIMIT or compress_size > ZIP64_LIMIT: + if not zip64: + raise LargeZipFile("Filesize would require ZIP64 extensions") + # File is larger than what fits into a 4 byte integer, + # fall back to the ZIP64 extension + file_size = 0xffffffff + compress_size = 0xffffffff + min_version = ZIP64_VERSION + + if self.compress_type == ZIP_BZIP2: + min_version = max(BZIP2_VERSION, min_version) + elif self.compress_type == ZIP_LZMA: + min_version = max(LZMA_VERSION, min_version) + + self.extract_version = max(min_version, self.extract_version) + self.create_version = max(min_version, self.create_version) + filename, flag_bits = self._encodeFilenameFlags() + header = struct.pack(structFileHeader, stringFileHeader, + self.extract_version, self.reserved, flag_bits, + self.compress_type, dostime, dosdate, CRC, + compress_size, file_size, + len(filename), len(extra)) + return header + filename + extra + + def _encodeFilenameFlags(self): + try: + return self.filename.encode('ascii'), self.flag_bits + except UnicodeEncodeError: + return self.filename.encode('utf-8'), self.flag_bits | 0x800 + + def _decodeExtra(self): + # Try to decode the extra field. + extra = self.extra + unpack = struct.unpack + while len(extra) >= 4: + tp, ln = unpack(' len(extra): + raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln)) + if tp == 0x0001: + data = extra[4:ln+4] + # ZIP64 extension (large files and/or large archives) + try: + if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF): + field = "File size" + self.file_size, = unpack(' 2107: + date_time = (2107, 12, 31, 23, 59, 59) + # Create ZipInfo instance to store file information + if arcname is None: + arcname = filename + arcname = os.path.normpath(os.path.splitdrive(arcname)[1]) + while arcname[0] in (os.sep, os.altsep): + arcname = arcname[1:] + if isdir: + arcname += '/' + zinfo = cls(arcname, date_time) + zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes + if isdir: + zinfo.file_size = 0 + zinfo.external_attr |= 0x10 # MS-DOS directory flag + else: + zinfo.file_size = st.st_size + + return zinfo + + def is_dir(self): + """Return True if this archive member is a directory.""" + return self.filename[-1] == '/' + + +# ZIP encryption uses the CRC32 one-byte primitive for scrambling some +# internal keys. We noticed that a direct implementation is faster than +# relying on binascii.crc32(). + +_crctable = None +def _gen_crc(crc): + for j in range(8): + if crc & 1: + crc = (crc >> 1) ^ 0xEDB88320 + else: + crc >>= 1 + return crc + +# ZIP supports a password-based form of encryption. Even though known +# plaintext attacks have been found against it, it is still useful +# to be able to get data out of such a file. 
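+# From user code the decrypter below is never used directly; a hedged sketch
+# of the public API instead (archive name, member name and password are
+# hypothetical):
+#
+#     with ZipFile('archive.zip') as zf:
+#         zf.setpassword(b'secret')          # default password for reads
+#         data = zf.read('member.txt')       # or zf.read(name, pwd=b'secret')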
+# +# Usage: +# zd = _ZipDecrypter(mypwd) +# plain_bytes = zd(cypher_bytes) + +def _ZipDecrypter(pwd): + key0 = 305419896 + key1 = 591751049 + key2 = 878082192 + + global _crctable + if _crctable is None: + _crctable = list(map(_gen_crc, range(256))) + crctable = _crctable + + def crc32(ch, crc): + """Compute the CRC32 primitive on one byte.""" + return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF] + + def update_keys(c): + nonlocal key0, key1, key2 + key0 = crc32(c, key0) + key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF + key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF + key2 = crc32(key1 >> 24, key2) + + for p in pwd: + update_keys(p) + + def decrypter(data): + """Decrypt a bytes object.""" + result = bytearray() + append = result.append + for c in data: + k = key2 | 2 + c ^= ((k * (k^1)) >> 8) & 0xFF + update_keys(c) + append(c) + return bytes(result) + + return decrypter + + +class LZMACompressor: + + def __init__(self): + self._comp = None + + def _init(self): + props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1}) + self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[ + lzma._decode_filter_properties(lzma.FILTER_LZMA1, props) + ]) + return struct.pack('> 8) & 0xff + else: + # compare against the CRC otherwise + check_byte = (zipinfo.CRC >> 24) & 0xff + h = self._init_decrypter() + if h != check_byte: + raise RuntimeError("Bad password for file %r" % zipinfo.orig_filename) + + + def _init_decrypter(self): + self._decrypter = _ZipDecrypter(self._pwd) + # The first 12 bytes in the cypher stream is an encryption header + # used to strengthen the algorithm. The first 11 bytes are + # completely random, while the 12th contains the MSB of the CRC, + # or the MSB of the file time depending on the header type + # and is used to check the correctness of the password. + header = self._fileobj.read(12) + self._compress_left -= 12 + return self._decrypter(header)[11] + + def __repr__(self): + result = ['<%s.%s' % (self.__class__.__module__, + self.__class__.__qualname__)] + if not self.closed: + result.append(' name=%r mode=%r' % (self.name, self.mode)) + if self._compress_type != ZIP_STORED: + result.append(' compress_type=%s' % + compressor_names.get(self._compress_type, + self._compress_type)) + else: + result.append(' [closed]') + result.append('>') + return ''.join(result) + + def readline(self, limit=-1): + """Read and return a line from the stream. + + If limit is specified, at most limit bytes will be read. + """ + + if limit < 0: + # Shortcut common case - newline found in buffer. + i = self._readbuffer.find(b'\n', self._offset) + 1 + if i > 0: + line = self._readbuffer[self._offset: i] + self._offset = i + return line + + return io.BufferedIOBase.readline(self, limit) + + def peek(self, n=1): + """Returns buffered bytes without advancing the position.""" + if n > len(self._readbuffer) - self._offset: + chunk = self.read(n) + if len(chunk) > self._offset: + self._readbuffer = chunk + self._readbuffer[self._offset:] + self._offset = 0 + else: + self._offset -= len(chunk) + + # Return up to 512 bytes to reduce allocation overhead for tight loops. + return self._readbuffer[self._offset: self._offset + 512] + + def readable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return True + + def read(self, n=-1): + """Read and return up to n bytes. + If the argument is omitted, None, or negative, data is read and returned until EOF is reached. 
+ """ + if self.closed: + raise ValueError("read from closed file.") + if n is None or n < 0: + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while not self._eof: + buf += self._read1(self.MAX_N) + return buf + + end = n + self._offset + if end < len(self._readbuffer): + buf = self._readbuffer[self._offset:end] + self._offset = end + return buf + + n = end - len(self._readbuffer) + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while n > 0 and not self._eof: + data = self._read1(n) + if n < len(data): + self._readbuffer = data + self._offset = n + buf += data[:n] + break + buf += data + n -= len(data) + return buf + + def _update_crc(self, newdata): + # Update the CRC using the given data. + if self._expected_crc is None: + # No need to compute the CRC if we don't have a reference value + return + self._running_crc = crc32(newdata, self._running_crc) + # Check the CRC if we're at the end of the file + if self._eof and self._running_crc != self._expected_crc: + raise BadZipFile("Bad CRC-32 for file %r" % self.name) + + def read1(self, n): + """Read up to n bytes with at most one read() system call.""" + + if n is None or n < 0: + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while not self._eof: + data = self._read1(self.MAX_N) + if data: + buf += data + break + return buf + + end = n + self._offset + if end < len(self._readbuffer): + buf = self._readbuffer[self._offset:end] + self._offset = end + return buf + + n = end - len(self._readbuffer) + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + if n > 0: + while not self._eof: + data = self._read1(n) + if n < len(data): + self._readbuffer = data + self._offset = n + buf += data[:n] + break + if data: + buf += data + break + return buf + + def _read1(self, n): + # Read up to n compressed bytes with at most one read() system call, + # decrypt and decompress them. + if self._eof or n <= 0: + return b'' + + # Read from file. + if self._compress_type == ZIP_DEFLATED: + ## Handle unconsumed data. 
+ data = self._decompressor.unconsumed_tail + if n > len(data): + data += self._read2(n - len(data)) + else: + data = self._read2(n) + + if self._compress_type == ZIP_STORED: + self._eof = self._compress_left <= 0 + elif self._compress_type == ZIP_DEFLATED: + n = max(n, self.MIN_READ_SIZE) + data = self._decompressor.decompress(data, n) + self._eof = (self._decompressor.eof or + self._compress_left <= 0 and + not self._decompressor.unconsumed_tail) + if self._eof: + data += self._decompressor.flush() + else: + data = self._decompressor.decompress(data) + self._eof = self._decompressor.eof or self._compress_left <= 0 + + data = data[:self._left] + self._left -= len(data) + if self._left <= 0: + self._eof = True + self._update_crc(data) + return data + + def _read2(self, n): + if self._compress_left <= 0: + return b'' + + n = max(n, self.MIN_READ_SIZE) + n = min(n, self._compress_left) + + data = self._fileobj.read(n) + self._compress_left -= len(data) + if not data: + raise EOFError + + if self._decrypter is not None: + data = self._decrypter(data) + return data + + def close(self): + try: + if self._close_fileobj: + self._fileobj.close() + finally: + super().close() + + def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return self._seekable + + def seek(self, offset, whence=0): + if self.closed: + raise ValueError("seek on closed file.") + if not self._seekable: + raise io.UnsupportedOperation("underlying stream is not seekable") + curr_pos = self.tell() + if whence == 0: # Seek from start of file + new_pos = offset + elif whence == 1: # Seek from current position + new_pos = curr_pos + offset + elif whence == 2: # Seek from EOF + new_pos = self._orig_file_size + offset + else: + raise ValueError("whence must be os.SEEK_SET (0), " + "os.SEEK_CUR (1), or os.SEEK_END (2)") + + if new_pos > self._orig_file_size: + new_pos = self._orig_file_size + + if new_pos < 0: + new_pos = 0 + + read_offset = new_pos - curr_pos + buff_offset = read_offset + self._offset + + if buff_offset >= 0 and buff_offset < len(self._readbuffer): + # Just move the _offset index if the new position is in the _readbuffer + self._offset = buff_offset + read_offset = 0 + elif read_offset < 0: + # Position is before the current position. 
Reset the ZipExtFile + self._fileobj.seek(self._orig_compress_start) + self._running_crc = self._orig_start_crc + self._compress_left = self._orig_compress_size + self._left = self._orig_file_size + self._readbuffer = b'' + self._offset = 0 + self._decompressor = _get_decompressor(self._compress_type) + self._eof = False + read_offset = new_pos + if self._decrypter is not None: + self._init_decrypter() + + while read_offset > 0: + read_len = min(self.MAX_SEEK_READ, read_offset) + self.read(read_len) + read_offset -= read_len + + return self.tell() + + def tell(self): + if self.closed: + raise ValueError("tell on closed file.") + if not self._seekable: + raise io.UnsupportedOperation("underlying stream is not seekable") + filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset + return filepos + + +class _ZipWriteFile(io.BufferedIOBase): + def __init__(self, zf, zinfo, zip64): + self._zinfo = zinfo + self._zip64 = zip64 + self._zipfile = zf + self._compressor = _get_compressor(zinfo.compress_type, + zinfo._compresslevel) + self._file_size = 0 + self._compress_size = 0 + self._crc = 0 + + @property + def _fileobj(self): + return self._zipfile.fp + + def writable(self): + return True + + def write(self, data): + if self.closed: + raise ValueError('I/O operation on closed file.') + + # Accept any data that supports the buffer protocol + if isinstance(data, (bytes, bytearray)): + nbytes = len(data) + else: + data = memoryview(data) + nbytes = data.nbytes + self._file_size += nbytes + + self._crc = crc32(data, self._crc) + if self._compressor: + data = self._compressor.compress(data) + self._compress_size += len(data) + self._fileobj.write(data) + return nbytes + + def close(self): + if self.closed: + return + try: + super().close() + # Flush any data from the compressor, and update header info + if self._compressor: + buf = self._compressor.flush() + self._compress_size += len(buf) + self._fileobj.write(buf) + self._zinfo.compress_size = self._compress_size + else: + self._zinfo.compress_size = self._file_size + self._zinfo.CRC = self._crc + self._zinfo.file_size = self._file_size + + # Write updated header info + if self._zinfo.flag_bits & 0x08: + # Write CRC and file sizes after the file data + fmt = ' ZIP64_LIMIT: + raise RuntimeError( + 'File size unexpectedly exceeded ZIP64 limit') + if self._compress_size > ZIP64_LIMIT: + raise RuntimeError( + 'Compressed size unexpectedly exceeded ZIP64 limit') + # Seek backwards and write file header (which will now include + # correct CRC and file sizes) + + # Preserve current position in file + self._zipfile.start_dir = self._fileobj.tell() + self._fileobj.seek(self._zinfo.header_offset) + self._fileobj.write(self._zinfo.FileHeader(self._zip64)) + self._fileobj.seek(self._zipfile.start_dir) + + # Successfully written: Add file to our caches + self._zipfile.filelist.append(self._zinfo) + self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo + finally: + self._zipfile._writing = False + + + +class ZipFile: + """ Class with methods to open, read, write, close, list zip files. + + z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True, + compresslevel=None) + + file: Either the path to the file, or a file-like object. + If it is a path, the file will be opened and closed by ZipFile. + mode: The mode can be either read 'r', write 'w', exclusive create 'x', + or append 'a'. + compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib), + ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma). 
+ allowZip64: if True ZipFile will create files with ZIP64 extensions when + needed, otherwise it will raise an exception when this would + be necessary. + compresslevel: None (default for the given compression type) or an integer + specifying the level to pass to the compressor. + When using ZIP_STORED or ZIP_LZMA this keyword has no effect. + When using ZIP_DEFLATED integers 0 through 9 are accepted. + When using ZIP_BZIP2 integers 1 through 9 are accepted. + + """ + + fp = None # Set here since __del__ checks it + _windows_illegal_name_trans_table = None + + def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True, + compresslevel=None, *, strict_timestamps=True): + """Open the ZIP file with mode read 'r', write 'w', exclusive create 'x', + or append 'a'.""" + if mode not in ('r', 'w', 'x', 'a'): + raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'") + + _check_compression(compression) + + self._allowZip64 = allowZip64 + self._didModify = False + self.debug = 0 # Level of printing: 0 through 3 + self.NameToInfo = {} # Find file info given name + self.filelist = [] # List of ZipInfo instances for archive + self.compression = compression # Method of compression + self.compresslevel = compresslevel + self.mode = mode + self.pwd = None + self._comment = b'' + self._strict_timestamps = strict_timestamps + + # Check if we were passed a file-like object + if isinstance(file, os.PathLike): + file = os.fspath(file) + if isinstance(file, str): + # No, it's a filename + self._filePassed = 0 + self.filename = file + modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b', + 'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'} + filemode = modeDict[mode] + while True: + try: + self.fp = io.open(file, filemode) + except OSError: + if filemode in modeDict: + filemode = modeDict[filemode] + continue + raise + break + else: + self._filePassed = 1 + self.fp = file + self.filename = getattr(file, 'name', None) + self._fileRefCnt = 1 + self._lock = threading.RLock() + self._seekable = True + self._writing = False + + try: + if mode == 'r': + self._RealGetContents() + elif mode in ('w', 'x'): + # set the modified flag so central directory gets written + # even if no files are added to the archive + self._didModify = True + try: + self.start_dir = self.fp.tell() + except (AttributeError, OSError): + self.fp = _Tellable(self.fp) + self.start_dir = 0 + self._seekable = False + else: + # Some file-like objects can provide tell() but not seek() + try: + self.fp.seek(self.start_dir) + except (AttributeError, OSError): + self._seekable = False + elif mode == 'a': + try: + # See if file is a zip file + self._RealGetContents() + # seek to start of directory and overwrite + self.fp.seek(self.start_dir) + except BadZipFile: + # file is not a zip file, just append + self.fp.seek(0, 2) + + # set the modified flag so central directory gets written + # even if no files are added to the archive + self._didModify = True + self.start_dir = self.fp.tell() + else: + raise ValueError("Mode must be 'r', 'w', 'x', or 'a'") + except: + fp = self.fp + self.fp = None + self._fpclose(fp) + raise + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __repr__(self): + result = ['<%s.%s' % (self.__class__.__module__, + self.__class__.__qualname__)] + if self.fp is not None: + if self._filePassed: + result.append(' file=%r' % self.fp) + elif self.filename is not None: + result.append(' filename=%r' % self.filename) + result.append(' mode=%r' % self.mode) + else: 
+ result.append(' [closed]') + result.append('>') + return ''.join(result) + + def _RealGetContents(self): + """Read in the table of contents for the ZIP file.""" + fp = self.fp + try: + endrec = _EndRecData(fp) + except OSError: + raise BadZipFile("File is not a zip file") + if not endrec: + raise BadZipFile("File is not a zip file") + if self.debug > 1: + print(endrec) + size_cd = endrec[_ECD_SIZE] # bytes in central directory + offset_cd = endrec[_ECD_OFFSET] # offset of central directory + self._comment = endrec[_ECD_COMMENT] # archive comment + + # "concat" is zero, unless zip was concatenated to another file + concat = endrec[_ECD_LOCATION] - size_cd - offset_cd + if endrec[_ECD_SIGNATURE] == stringEndArchive64: + # If Zip64 extension structures are present, account for them + concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) + + if self.debug > 2: + inferred = concat + offset_cd + print("given, inferred, offset", offset_cd, inferred, concat) + # self.start_dir: Position of start of central directory + self.start_dir = offset_cd + concat + if self.start_dir < 0: + raise BadZipFile("Bad offset for central directory") + fp.seek(self.start_dir, 0) + data = fp.read(size_cd) + fp = io.BytesIO(data) + total = 0 + while total < size_cd: + centdir = fp.read(sizeCentralDir) + if len(centdir) != sizeCentralDir: + raise BadZipFile("Truncated central directory") + centdir = struct.unpack(structCentralDir, centdir) + if centdir[_CD_SIGNATURE] != stringCentralDir: + raise BadZipFile("Bad magic number for central directory") + if self.debug > 2: + print(centdir) + filename = fp.read(centdir[_CD_FILENAME_LENGTH]) + flags = centdir[5] + if flags & 0x800: + # UTF-8 file names extension + filename = filename.decode('utf-8') + else: + # Historical ZIP filename encoding + filename = filename.decode('cp437') + # Create ZipInfo instance to store file information + x = ZipInfo(filename) + x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) + x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) + x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] + (x.create_version, x.create_system, x.extract_version, x.reserved, + x.flag_bits, x.compress_type, t, d, + x.CRC, x.compress_size, x.file_size) = centdir[1:12] + if x.extract_version > MAX_EXTRACT_VERSION: + raise NotImplementedError("zip file version %.1f" % + (x.extract_version / 10)) + x.volume, x.internal_attr, x.external_attr = centdir[15:18] + # Convert date/time code to (year, month, day, hour, min, sec) + x._raw_time = t + x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, + t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) + + x._decodeExtra() + x.header_offset = x.header_offset + concat + self.filelist.append(x) + self.NameToInfo[x.filename] = x + + # update total bytes read from central directory + total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + + centdir[_CD_EXTRA_FIELD_LENGTH] + + centdir[_CD_COMMENT_LENGTH]) + + if self.debug > 2: + print("total", total) + + end_offset = self.start_dir + for zinfo in sorted(self.filelist, + key=lambda zinfo: zinfo.header_offset, + reverse=True): + zinfo._end_offset = end_offset + end_offset = zinfo.header_offset + + def namelist(self): + """Return a list of file names in the archive.""" + return [data.filename for data in self.filelist] + + def infolist(self): + """Return a list of class ZipInfo instances for files in the + archive.""" + return self.filelist + + def printdir(self, file=None): + """Print a table of contents for the zip file.""" + print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"), + 
file=file) + for zinfo in self.filelist: + date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] + print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size), + file=file) + + def testzip(self): + """Read all the files and check the CRC.""" + chunk_size = 2 ** 20 + for zinfo in self.filelist: + try: + # Read by chunks, to avoid an OverflowError or a + # MemoryError with very large embedded files. + with self.open(zinfo.filename, "r") as f: + while f.read(chunk_size): # Check CRC-32 + pass + except BadZipFile: + return zinfo.filename + + def getinfo(self, name): + """Return the instance of ZipInfo given 'name'.""" + info = self.NameToInfo.get(name) + if info is None: + raise KeyError( + 'There is no item named %r in the archive' % name) + + return info + + def setpassword(self, pwd): + """Set default password for encrypted files.""" + if pwd and not isinstance(pwd, bytes): + raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) + if pwd: + self.pwd = pwd + else: + self.pwd = None + + @property + def comment(self): + """The comment text associated with the ZIP file.""" + return self._comment + + @comment.setter + def comment(self, comment): + if not isinstance(comment, bytes): + raise TypeError("comment: expected bytes, got %s" % type(comment).__name__) + # check for valid comment length + if len(comment) > ZIP_MAX_COMMENT: + import warnings + warnings.warn('Archive comment is too long; truncating to %d bytes' + % ZIP_MAX_COMMENT, stacklevel=2) + comment = comment[:ZIP_MAX_COMMENT] + self._comment = comment + self._didModify = True + + def read(self, name, pwd=None): + """Return file bytes for name.""" + with self.open(name, "r", pwd) as fp: + return fp.read() + + def open(self, name, mode="r", pwd=None, *, force_zip64=False): + """Return file-like object for 'name'. + + name is a string for the file name within the ZIP file, or a ZipInfo + object. + + mode should be 'r' to read a file already in the ZIP file, or 'w' to + write to a file newly added to the archive. + + pwd is the password to decrypt files (only used for reading). + + When writing, if the file size is not known in advance but may exceed + 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large + files. If the size is known in advance, it is best to pass a ZipInfo + instance for name, with zinfo.file_size set. + """ + if mode not in {"r", "w"}: + raise ValueError('open() requires mode "r" or "w"') + if pwd and not isinstance(pwd, bytes): + raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) + if pwd and (mode == "w"): + raise ValueError("pwd is only supported for reading files") + if not self.fp: + raise ValueError( + "Attempt to use ZIP archive that was already closed") + + # Make sure we have an info object + if isinstance(name, ZipInfo): + # 'name' is already an info object + zinfo = name + elif mode == 'w': + zinfo = ZipInfo(name) + zinfo.compress_type = self.compression + zinfo._compresslevel = self.compresslevel + else: + # Get info object for name + zinfo = self.getinfo(name) + + if mode == 'w': + return self._open_to_write(zinfo, force_zip64=force_zip64) + + if self._writing: + raise ValueError("Can't read from the ZIP file while there " + "is an open writing handle on it. 
" + "Close the writing handle before trying to read.") + + # Open for reading: + self._fileRefCnt += 1 + zef_file = _SharedFile(self.fp, zinfo.header_offset, + self._fpclose, self._lock, lambda: self._writing) + try: + # Skip the file header: + fheader = zef_file.read(sizeFileHeader) + if len(fheader) != sizeFileHeader: + raise BadZipFile("Truncated file header") + fheader = struct.unpack(structFileHeader, fheader) + if fheader[_FH_SIGNATURE] != stringFileHeader: + raise BadZipFile("Bad magic number for file header") + + fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) + if fheader[_FH_EXTRA_FIELD_LENGTH]: + zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH]) + + if zinfo.flag_bits & 0x20: + # Zip 2.7: compressed patched data + raise NotImplementedError("compressed patched data (flag bit 5)") + + if zinfo.flag_bits & 0x40: + # strong encryption + raise NotImplementedError("strong encryption (flag bit 6)") + + if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & 0x800: + # UTF-8 filename + fname_str = fname.decode("utf-8") + else: + fname_str = fname.decode("cp437") + + if fname_str != zinfo.orig_filename: + raise BadZipFile( + 'File name in directory %r and header %r differ.' + % (zinfo.orig_filename, fname)) + + if (zinfo._end_offset is not None and + zef_file.tell() + zinfo.compress_size > zinfo._end_offset): + raise BadZipFile(f"Overlapped entries: {zinfo.orig_filename!r} (possible zip bomb)") + + # check for encrypted flag & handle password + is_encrypted = zinfo.flag_bits & 0x1 + if is_encrypted: + if not pwd: + pwd = self.pwd + if not pwd: + raise RuntimeError("File %r is encrypted, password " + "required for extraction" % name) + else: + pwd = None + + return ZipExtFile(zef_file, mode, zinfo, pwd, True) + except: + zef_file.close() + raise + + def _open_to_write(self, zinfo, force_zip64=False): + if force_zip64 and not self._allowZip64: + raise ValueError( + "force_zip64 is True, but allowZip64 was False when opening " + "the ZIP file." + ) + if self._writing: + raise ValueError("Can't write to the ZIP file while there is " + "another write handle open on it. " + "Close the first handle before opening another.") + + # Size and CRC are overwritten with correct data after processing the file + zinfo.compress_size = 0 + zinfo.CRC = 0 + + zinfo.flag_bits = 0x00 + if zinfo.compress_type == ZIP_LZMA: + # Compressed data includes an end-of-stream (EOS) marker + zinfo.flag_bits |= 0x02 + if not self._seekable: + zinfo.flag_bits |= 0x08 + + if not zinfo.external_attr: + zinfo.external_attr = 0o600 << 16 # permissions: ?rw------- + + # Compressed size can be larger than uncompressed size + zip64 = self._allowZip64 and \ + (force_zip64 or zinfo.file_size * 1.05 > ZIP64_LIMIT) + + if self._seekable: + self.fp.seek(self.start_dir) + zinfo.header_offset = self.fp.tell() + + self._writecheck(zinfo) + self._didModify = True + + self.fp.write(zinfo.FileHeader(zip64)) + + self._writing = True + return _ZipWriteFile(self, zinfo, zip64) + + def extract(self, member, path=None, pwd=None): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a ZipInfo object. You can + specify a different directory using `path'. + """ + if path is None: + path = os.getcwd() + else: + path = os.fspath(path) + + return self._extract_member(member, path, pwd) + + def extractall(self, path=None, members=None, pwd=None): + """Extract all members from the archive to the current working + directory. 
`path' specifies a different directory to extract to. + `members' is optional and must be a subset of the list returned + by namelist(). + """ + if members is None: + members = self.namelist() + + if path is None: + path = os.getcwd() + else: + path = os.fspath(path) + + for zipinfo in members: + self._extract_member(zipinfo, path, pwd) + + @classmethod + def _sanitize_windows_name(cls, arcname, pathsep): + """Replace bad characters and remove trailing dots from parts.""" + table = cls._windows_illegal_name_trans_table + if not table: + illegal = ':<>|"?*' + table = str.maketrans(illegal, '_' * len(illegal)) + cls._windows_illegal_name_trans_table = table + arcname = arcname.translate(table) + # remove trailing dots + arcname = (x.rstrip('.') for x in arcname.split(pathsep)) + # rejoin, removing empty parts. + arcname = pathsep.join(x for x in arcname if x) + return arcname + + def _extract_member(self, member, targetpath, pwd): + """Extract the ZipInfo object 'member' to a physical + file on the path targetpath. + """ + if not isinstance(member, ZipInfo): + member = self.getinfo(member) + + # build the destination pathname, replacing + # forward slashes to platform specific separators. + arcname = member.filename.replace('/', os.path.sep) + + if os.path.altsep: + arcname = arcname.replace(os.path.altsep, os.path.sep) + # interpret absolute pathname as relative, remove drive letter or + # UNC path, redundant separators, "." and ".." components. + arcname = os.path.splitdrive(arcname)[1] + invalid_path_parts = ('', os.path.curdir, os.path.pardir) + arcname = os.path.sep.join(x for x in arcname.split(os.path.sep) + if x not in invalid_path_parts) + if os.path.sep == '\\': + # filter illegal characters on Windows + arcname = self._sanitize_windows_name(arcname, os.path.sep) + + targetpath = os.path.join(targetpath, arcname) + targetpath = os.path.normpath(targetpath) + + # Create all upper directories if necessary. 
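+        # For illustration (hypothetical member names): the filtering above
+        # reduces a name such as '../secret' or '/etc/passwd' to 'secret' or
+        # 'etc/passwd', so the joined targetpath stays inside the requested
+        # extraction directory.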
+ upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + os.makedirs(upperdirs) + + if member.is_dir(): + if not os.path.isdir(targetpath): + os.mkdir(targetpath) + return targetpath + + with self.open(member, pwd=pwd) as source, \ + open(targetpath, "wb") as target: + shutil.copyfileobj(source, target) + + return targetpath + + def _writecheck(self, zinfo): + """Check for errors before writing a file to the archive.""" + if zinfo.filename in self.NameToInfo: + import warnings + warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3) + if self.mode not in ('w', 'x', 'a'): + raise ValueError("write() requires mode 'w', 'x', or 'a'") + if not self.fp: + raise ValueError( + "Attempt to write ZIP archive that was already closed") + _check_compression(zinfo.compress_type) + if not self._allowZip64: + requires_zip64 = None + if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif zinfo.file_size > ZIP64_LIMIT: + requires_zip64 = "Filesize" + elif zinfo.header_offset > ZIP64_LIMIT: + requires_zip64 = "Zipfile size" + if requires_zip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") + + def write(self, filename, arcname=None, + compress_type=None, compresslevel=None): + """Put the bytes from filename into the archive under the name + arcname.""" + if not self.fp: + raise ValueError( + "Attempt to write to ZIP archive that was already closed") + if self._writing: + raise ValueError( + "Can't write to ZIP archive while an open writing handle exists" + ) + + zinfo = ZipInfo.from_file(filename, arcname, + strict_timestamps=self._strict_timestamps) + + if zinfo.is_dir(): + zinfo.compress_size = 0 + zinfo.CRC = 0 + else: + if compress_type is not None: + zinfo.compress_type = compress_type + else: + zinfo.compress_type = self.compression + + if compresslevel is not None: + zinfo._compresslevel = compresslevel + else: + zinfo._compresslevel = self.compresslevel + + if zinfo.is_dir(): + with self._lock: + if self._seekable: + self.fp.seek(self.start_dir) + zinfo.header_offset = self.fp.tell() # Start of header bytes + if zinfo.compress_type == ZIP_LZMA: + # Compressed data includes an end-of-stream (EOS) marker + zinfo.flag_bits |= 0x02 + + self._writecheck(zinfo) + self._didModify = True + + self.filelist.append(zinfo) + self.NameToInfo[zinfo.filename] = zinfo + self.fp.write(zinfo.FileHeader(False)) + self.start_dir = self.fp.tell() + else: + with open(filename, "rb") as src, self.open(zinfo, 'w') as dest: + shutil.copyfileobj(src, dest, 1024*8) + + def writestr(self, zinfo_or_arcname, data, + compress_type=None, compresslevel=None): + """Write a file into the archive. The contents is 'data', which + may be either a 'str' or a 'bytes' instance; if it is a 'str', + it is encoded as UTF-8 first. 
+ 'zinfo_or_arcname' is either a ZipInfo instance or + the name of the file in the archive.""" + if isinstance(data, str): + data = data.encode("utf-8") + if not isinstance(zinfo_or_arcname, ZipInfo): + zinfo = ZipInfo(filename=zinfo_or_arcname, + date_time=time.localtime(time.time())[:6]) + zinfo.compress_type = self.compression + zinfo._compresslevel = self.compresslevel + if zinfo.filename[-1] == '/': + zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x + zinfo.external_attr |= 0x10 # MS-DOS directory flag + else: + zinfo.external_attr = 0o600 << 16 # ?rw------- + else: + zinfo = zinfo_or_arcname + + if not self.fp: + raise ValueError( + "Attempt to write to ZIP archive that was already closed") + if self._writing: + raise ValueError( + "Can't write to ZIP archive while an open writing handle exists." + ) + + if compress_type is not None: + zinfo.compress_type = compress_type + + if compresslevel is not None: + zinfo._compresslevel = compresslevel + + zinfo.file_size = len(data) # Uncompressed size + with self._lock: + with self.open(zinfo, mode='w') as dest: + dest.write(data) + + def __del__(self): + """Call the "close()" method in case the user forgot.""" + self.close() + + def close(self): + """Close the file, and for mode 'w', 'x' and 'a' write the ending + records.""" + if self.fp is None: + return + + if self._writing: + raise ValueError("Can't close the ZIP file while there is " + "an open writing handle on it. " + "Close the writing handle before closing the zip.") + + try: + if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records + with self._lock: + if self._seekable: + self.fp.seek(self.start_dir) + self._write_end_record() + finally: + fp = self.fp + self.fp = None + self._fpclose(fp) + + def _write_end_record(self): + for zinfo in self.filelist: # write central directory + dt = zinfo.date_time + dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] + dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) + extra = [] + if zinfo.file_size > ZIP64_LIMIT \ + or zinfo.compress_size > ZIP64_LIMIT: + extra.append(zinfo.file_size) + extra.append(zinfo.compress_size) + file_size = 0xffffffff + compress_size = 0xffffffff + else: + file_size = zinfo.file_size + compress_size = zinfo.compress_size + + if zinfo.header_offset > ZIP64_LIMIT: + extra.append(zinfo.header_offset) + header_offset = 0xffffffff + else: + header_offset = zinfo.header_offset + + extra_data = zinfo.extra + min_version = 0 + if extra: + # Append a ZIP64 field to the extra's + extra_data = _strip_extra(extra_data, (1,)) + extra_data = struct.pack( + ' ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif centDirOffset > ZIP64_LIMIT: + requires_zip64 = "Central directory offset" + elif centDirSize > ZIP64_LIMIT: + requires_zip64 = "Central directory size" + if requires_zip64: + # Need to write the ZIP64 end-of-archive records + if not self._allowZip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") + zip64endrec = struct.pack( + structEndArchive64, stringEndArchive64, + 44, 45, 45, 0, 0, centDirCount, centDirCount, + centDirSize, centDirOffset) + self.fp.write(zip64endrec) + + zip64locrec = struct.pack( + structEndArchive64Locator, + stringEndArchive64Locator, 0, pos2, 1) + self.fp.write(zip64locrec) + centDirCount = min(centDirCount, 0xFFFF) + centDirSize = min(centDirSize, 0xFFFFFFFF) + centDirOffset = min(centDirOffset, 0xFFFFFFFF) + + endrec = struct.pack(structEndArchive, stringEndArchive, + 0, 0, centDirCount, centDirCount, + centDirSize, centDirOffset, 
len(self._comment)) + self.fp.write(endrec) + self.fp.write(self._comment) + if self.mode == "a": + self.fp.truncate() + self.fp.flush() + + def _fpclose(self, fp): + assert self._fileRefCnt > 0 + self._fileRefCnt -= 1 + if not self._fileRefCnt and not self._filePassed: + fp.close() + + +class PyZipFile(ZipFile): + """Class to create ZIP archives with Python library files and packages.""" + + def __init__(self, file, mode="r", compression=ZIP_STORED, + allowZip64=True, optimize=-1): + ZipFile.__init__(self, file, mode=mode, compression=compression, + allowZip64=allowZip64) + self._optimize = optimize + + def writepy(self, pathname, basename="", filterfunc=None): + """Add all files from "pathname" to the ZIP archive. + + If pathname is a package directory, search the directory and + all package subdirectories recursively for all *.py and enter + the modules into the archive. If pathname is a plain + directory, listdir *.py and enter all modules. Else, pathname + must be a Python *.py file and the module will be put into the + archive. Added modules are always module.pyc. + This method will compile the module.py into module.pyc if + necessary. + If filterfunc(pathname) is given, it is called with every argument. + When it is False, the file or directory is skipped. + """ + pathname = os.fspath(pathname) + if filterfunc and not filterfunc(pathname): + if self.debug: + label = 'path' if os.path.isdir(pathname) else 'file' + print('%s %r skipped by filterfunc' % (label, pathname)) + return + dir, name = os.path.split(pathname) + if os.path.isdir(pathname): + initname = os.path.join(pathname, "__init__.py") + if os.path.isfile(initname): + # This is a package directory, add it + if basename: + basename = "%s/%s" % (basename, name) + else: + basename = name + if self.debug: + print("Adding package in", pathname, "as", basename) + fname, arcname = self._get_codename(initname[0:-3], basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + dirlist = sorted(os.listdir(pathname)) + dirlist.remove("__init__.py") + # Add all *.py files and package subdirectories + for filename in dirlist: + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if os.path.isdir(path): + if os.path.isfile(os.path.join(path, "__init__.py")): + # This is a package directory, add it + self.writepy(path, basename, + filterfunc=filterfunc) # Recursive call + elif ext == ".py": + if filterfunc and not filterfunc(path): + if self.debug: + print('file %r skipped by filterfunc' % path) + continue + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + else: + # This is NOT a package directory, add its files at top level + if self.debug: + print("Adding files from directory", pathname) + for filename in sorted(os.listdir(pathname)): + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if ext == ".py": + if filterfunc and not filterfunc(path): + if self.debug: + print('file %r skipped by filterfunc' % path) + continue + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + else: + if pathname[-3:] != ".py": + raise RuntimeError( + 'Files added with writepy() must end with ".py"') + fname, arcname = self._get_codename(pathname[0:-3], basename) + if self.debug: + print("Adding file", arcname) + self.write(fname, arcname) + + def _get_codename(self, pathname, basename): + """Return 
(filename, archivename) for the path. + + Given a module name path, return the correct file path and + archive name, compiling if necessary. For example, given + /python/lib/string, return (/python/lib/string.pyc, string). + """ + def _compile(file, optimize=-1): + import py_compile + if self.debug: + print("Compiling", file) + try: + py_compile.compile(file, doraise=True, optimize=optimize) + except py_compile.PyCompileError as err: + print(err.msg) + return False + return True + + file_py = pathname + ".py" + file_pyc = pathname + ".pyc" + pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='') + pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1) + pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2) + if self._optimize == -1: + # legacy mode: use whatever file is present + if (os.path.isfile(file_pyc) and + os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime): + # Use .pyc file. + arcname = fname = file_pyc + elif (os.path.isfile(pycache_opt0) and + os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime): + # Use the __pycache__/*.pyc file, but write it to the legacy pyc + # file name in the archive. + fname = pycache_opt0 + arcname = file_pyc + elif (os.path.isfile(pycache_opt1) and + os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime): + # Use the __pycache__/*.pyc file, but write it to the legacy pyc + # file name in the archive. + fname = pycache_opt1 + arcname = file_pyc + elif (os.path.isfile(pycache_opt2) and + os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime): + # Use the __pycache__/*.pyc file, but write it to the legacy pyc + # file name in the archive. + fname = pycache_opt2 + arcname = file_pyc + else: + # Compile py into PEP 3147 pyc file. + if _compile(file_py): + if sys.flags.optimize == 0: + fname = pycache_opt0 + elif sys.flags.optimize == 1: + fname = pycache_opt1 + else: + fname = pycache_opt2 + arcname = file_pyc + else: + fname = arcname = file_py + else: + # new mode: use given optimization level + if self._optimize == 0: + fname = pycache_opt0 + arcname = file_pyc + else: + arcname = file_pyc + if self._optimize == 1: + fname = pycache_opt1 + elif self._optimize == 2: + fname = pycache_opt2 + else: + msg = "invalid value for 'optimize': {!r}".format(self._optimize) + raise ValueError(msg) + if not (os.path.isfile(fname) and + os.stat(fname).st_mtime >= os.stat(file_py).st_mtime): + if not _compile(file_py, optimize=self._optimize): + fname = arcname = file_py + archivename = os.path.split(arcname)[1] + if basename: + archivename = "%s/%s" % (basename, archivename) + return (fname, archivename) + + +def _parents(path): + """ + Given a path with elements separated by + posixpath.sep, generate all parents of that path. + + >>> list(_parents('b/d')) + ['b'] + >>> list(_parents('/b/d/')) + ['/b'] + >>> list(_parents('b/d/f/')) + ['b/d', 'b'] + >>> list(_parents('b')) + [] + >>> list(_parents('')) + [] + """ + return itertools.islice(_ancestry(path), 1, None) + + +def _ancestry(path): + """ + Given a path with elements separated by + posixpath.sep, generate all elements of that path. + + >>> list(_ancestry('b/d')) + ['b/d', 'b'] + >>> list(_ancestry('/b/d/')) + ['/b/d', '/b'] + >>> list(_ancestry('b/d/f/')) + ['b/d/f', 'b/d', 'b'] + >>> list(_ancestry('b')) + ['b'] + >>> list(_ancestry('')) + [] + + Multiple separators are treated like a single. 
+ + >>> list(_ancestry('//b//d///f//')) + ['//b//d///f', '//b//d', '//b'] + """ + path = path.rstrip(posixpath.sep) + while path.rstrip(posixpath.sep): + yield path + path, tail = posixpath.split(path) + + +_dedupe = dict.fromkeys +"""Deduplicate an iterable in original order""" + + +def _difference(minuend, subtrahend): + """ + Return items in minuend not in subtrahend, retaining order + with O(1) lookup. + """ + return itertools.filterfalse(set(subtrahend).__contains__, minuend) + + +class CompleteDirs(ZipFile): + """ + A ZipFile subclass that ensures that implied directories + are always included in the namelist. + """ + + @staticmethod + def _implied_dirs(names): + parents = itertools.chain.from_iterable(map(_parents, names)) + as_dirs = (p + posixpath.sep for p in parents) + return _dedupe(_difference(as_dirs, names)) + + def namelist(self): + names = super(CompleteDirs, self).namelist() + return names + list(self._implied_dirs(names)) + + def _name_set(self): + return set(self.namelist()) + + def resolve_dir(self, name): + """ + If the name represents a directory, return that name + as a directory (with the trailing slash). + """ + names = self._name_set() + dirname = name + '/' + dir_match = name not in names and dirname in names + return dirname if dir_match else name + + def getinfo(self, name): + """ + Supplement getinfo for implied dirs. + """ + try: + return super().getinfo(name) + except KeyError: + if not name.endswith('/') or name not in self._name_set(): + raise + return ZipInfo(filename=name) + + @classmethod + def make(cls, source): + """ + Given a source (filename or zipfile), return an + appropriate CompleteDirs subclass. + """ + if isinstance(source, CompleteDirs): + return source + + if not isinstance(source, ZipFile): + return cls(source) + + # Only allow for FastLookup when supplied zipfile is read-only + if 'r' not in source.mode: + cls = CompleteDirs + + source.__class__ = cls + return source + + +class FastLookup(CompleteDirs): + """ + ZipFile subclass to ensure implicit + dirs exist and are resolved rapidly. + """ + + def namelist(self): + with contextlib.suppress(AttributeError): + return self.__names + self.__names = super(FastLookup, self).namelist() + return self.__names + + def _name_set(self): + with contextlib.suppress(AttributeError): + return self.__lookup + self.__lookup = super(FastLookup, self)._name_set() + return self.__lookup + + +def _extract_text_encoding(encoding=None, *args, **kwargs): + # stacklevel=3 so that the caller of the caller see any warning. + return io.text_encoding(encoding, 3), args, kwargs + + +class Path: + """ + A pathlib-compatible interface for zip files. + + Consider a zip file with this structure:: + + . + ├── a.txt + └── b + ├── c.txt + └── d + └── e.txt + + >>> data = io.BytesIO() + >>> zf = ZipFile(data, 'w') + >>> zf.writestr('a.txt', 'content of a') + >>> zf.writestr('b/c.txt', 'content of c') + >>> zf.writestr('b/d/e.txt', 'content of e') + >>> zf.filename = 'mem/abcde.zip' + + Path accepts the zipfile object itself or a filename + + >>> root = Path(zf) + + From there, several path operations are available. 
+ + Directory iteration (including the zip file itself): + + >>> a, b = root.iterdir() + >>> a + Path('mem/abcde.zip', 'a.txt') + >>> b + Path('mem/abcde.zip', 'b/') + + name property: + + >>> b.name + 'b' + + join with divide operator: + + >>> c = b / 'c.txt' + >>> c + Path('mem/abcde.zip', 'b/c.txt') + >>> c.name + 'c.txt' + + Read text: + + >>> c.read_text() + 'content of c' + + existence: + + >>> c.exists() + True + >>> (b / 'missing.txt').exists() + False + + Coercion to string: + + >>> import os + >>> str(c).replace(os.sep, posixpath.sep) + 'mem/abcde.zip/b/c.txt' + + At the root, ``name``, ``filename``, and ``parent`` + resolve to the zipfile. Note these attributes are not + valid and will raise a ``ValueError`` if the zipfile + has no filename. + + >>> root.name + 'abcde.zip' + >>> str(root.filename).replace(os.sep, posixpath.sep) + 'mem/abcde.zip' + >>> str(root.parent) + 'mem' + """ + + __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" + + def __init__(self, root, at=""): + """ + Construct a Path from a ZipFile or filename. + + Note: When the source is an existing ZipFile object, + its type (__class__) will be mutated to a + specialized type. If the caller wishes to retain the + original type, the caller should either create a + separate ZipFile object or pass a filename. + """ + self.root = FastLookup.make(root) + self.at = at + + def open(self, mode='r', *args, pwd=None, **kwargs): + """ + Open this entry as text or binary following the semantics + of ``pathlib.Path.open()`` by passing arguments through + to io.TextIOWrapper(). + """ + if self.is_dir(): + raise IsADirectoryError(self) + zip_mode = mode[0] + if not self.exists() and zip_mode == 'r': + raise FileNotFoundError(self) + stream = self.root.open(self.at, zip_mode, pwd=pwd) + if 'b' in mode: + if args or kwargs: + raise ValueError("encoding args invalid for binary operation") + return stream + # Text mode: + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + return io.TextIOWrapper(stream, encoding, *args, **kwargs) + + @property + def name(self): + return pathlib.PurePosixPath(self.at).name or self.filename.name + + @property + def filename(self): + return pathlib.Path(self.root.filename).joinpath(self.at) + + def read_text(self, *args, **kwargs): + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + with self.open('r', encoding, *args, **kwargs) as strm: + return strm.read() + + def read_bytes(self): + with self.open('rb') as strm: + return strm.read() + + def _is_child(self, path): + return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") + + def _next(self, at): + return self.__class__(self.root, at) + + def is_dir(self): + return not self.at or self.at.endswith("/") + + def is_file(self): + return self.exists() and not self.is_dir() + + def exists(self): + return self.at in self.root._name_set() + + def iterdir(self): + if not self.is_dir(): + raise ValueError("Can't listdir a file") + subs = map(self._next, self.root.namelist()) + return filter(self._is_child, subs) + + def __str__(self): + return posixpath.join(self.root.filename, self.at) + + def __repr__(self): + return self.__repr.format(self=self) + + def joinpath(self, *other): + next = posixpath.join(self.at, *other) + return self._next(self.root.resolve_dir(next)) + + __truediv__ = joinpath + + @property + def parent(self): + if not self.at: + return self.filename.parent + parent_at = posixpath.dirname(self.at.rstrip('/')) + if parent_at: + parent_at += '/' + return self._next(parent_at) 
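+
+# Command-line usage of this module (a sketch based on the argparse options
+# defined in main() below; 'archive.zip' and the other paths are hypothetical):
+#
+#     python -m zipfile -l archive.zip             # list archive contents
+#     python -m zipfile -t archive.zip             # test CRCs of all members
+#     python -m zipfile -e archive.zip target/     # extract into target/
+#     python -m zipfile -c archive.zip file1 dir2  # create archive from sources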
+ + +def main(args=None): + import argparse + + description = 'A simple command-line interface for zipfile module.' + parser = argparse.ArgumentParser(description=description) + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('-l', '--list', metavar='', + help='Show listing of a zipfile') + group.add_argument('-e', '--extract', nargs=2, + metavar=('', ''), + help='Extract zipfile into target dir') + group.add_argument('-c', '--create', nargs='+', + metavar=('', ''), + help='Create zipfile from sources') + group.add_argument('-t', '--test', metavar='', + help='Test if a zipfile is valid') + args = parser.parse_args(args) + + if args.test is not None: + src = args.test + with ZipFile(src, 'r') as zf: + badfile = zf.testzip() + if badfile: + print("The following enclosed file is corrupted: {!r}".format(badfile)) + print("Done testing") + + elif args.list is not None: + src = args.list + with ZipFile(src, 'r') as zf: + zf.printdir() + + elif args.extract is not None: + src, curdir = args.extract + with ZipFile(src, 'r') as zf: + zf.extractall(curdir) + + elif args.create is not None: + zip_name = args.create.pop(0) + files = args.create + + def addToZip(zf, path, zippath): + if os.path.isfile(path): + zf.write(path, zippath, ZIP_DEFLATED) + elif os.path.isdir(path): + if zippath: + zf.write(path, zippath) + for nm in sorted(os.listdir(path)): + addToZip(zf, + os.path.join(path, nm), os.path.join(zippath, nm)) + # else: ignore + + with ZipFile(zip_name, 'w') as zf: + for path in files: + zippath = os.path.basename(path) + if not zippath: + zippath = os.path.basename(os.path.dirname(path)) + if zippath in ('', os.curdir, os.pardir): + zippath = '' + addToZip(zf, path, zippath) + + +if __name__ == "__main__": + main() diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/ar_lb.msg b/evalkit_cambrian/lib/tcl8.6/msgs/ar_lb.msg new file mode 100644 index 0000000000000000000000000000000000000000..e62acd35092b7bc3f9ade6c5a34dae2baa12aa30 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/ar_lb.msg @@ -0,0 +1,39 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset ar_LB DAYS_OF_WEEK_ABBREV [list \ + "\u0627\u0644\u0623\u062d\u062f"\ + "\u0627\u0644\u0627\u062b\u0646\u064a\u0646"\ + "\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621"\ + "\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621"\ + "\u0627\u0644\u062e\u0645\u064a\u0633"\ + "\u0627\u0644\u062c\u0645\u0639\u0629"\ + "\u0627\u0644\u0633\u0628\u062a"] + ::msgcat::mcset ar_LB MONTHS_ABBREV [list \ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0634\u0628\u0627\u0637"\ + "\u0622\u0630\u0627\u0631"\ + "\u0646\u064a\u0633\u0627\u0646"\ + "\u0646\u0648\u0627\u0631"\ + "\u062d\u0632\u064a\u0631\u0627\u0646"\ + "\u062a\u0645\u0648\u0632"\ + "\u0622\u0628"\ + "\u0623\u064a\u0644\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\ + ""] + ::msgcat::mcset ar_LB MONTHS_FULL [list \ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0634\u0628\u0627\u0637"\ + "\u0622\u0630\u0627\u0631"\ + "\u0646\u064a\u0633\u0627\u0646"\ + "\u0646\u0648\u0627\u0631"\ + "\u062d\u0632\u064a\u0631\u0627\u0646"\ + "\u062a\u0645\u0648\u0632"\ + "\u0622\u0628"\ + "\u0623\u064a\u0644\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\ + 
"\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\ + ""] +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/ar_sy.msg b/evalkit_cambrian/lib/tcl8.6/msgs/ar_sy.msg new file mode 100644 index 0000000000000000000000000000000000000000..d5e1c873a1c2b7790d7d539cbb59a3d2e5ae02dd --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/ar_sy.msg @@ -0,0 +1,39 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset ar_SY DAYS_OF_WEEK_ABBREV [list \ + "\u0627\u0644\u0623\u062d\u062f"\ + "\u0627\u0644\u0627\u062b\u0646\u064a\u0646"\ + "\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621"\ + "\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621"\ + "\u0627\u0644\u062e\u0645\u064a\u0633"\ + "\u0627\u0644\u062c\u0645\u0639\u0629"\ + "\u0627\u0644\u0633\u0628\u062a"] + ::msgcat::mcset ar_SY MONTHS_ABBREV [list \ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0634\u0628\u0627\u0637"\ + "\u0622\u0630\u0627\u0631"\ + "\u0646\u064a\u0633\u0627\u0646"\ + "\u0646\u0648\u0627\u0631"\ + "\u062d\u0632\u064a\u0631\u0627\u0646"\ + "\u062a\u0645\u0648\u0632"\ + "\u0622\u0628"\ + "\u0623\u064a\u0644\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\ + ""] + ::msgcat::mcset ar_SY MONTHS_FULL [list \ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0634\u0628\u0627\u0637"\ + "\u0622\u0630\u0627\u0631"\ + "\u0646\u064a\u0633\u0627\u0646"\ + "\u0646\u0648\u0627\u0631\u0627\u0646"\ + "\u062d\u0632\u064a\u0631"\ + "\u062a\u0645\u0648\u0632"\ + "\u0622\u0628"\ + "\u0623\u064a\u0644\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\ + ""] +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/en_gb.msg b/evalkit_cambrian/lib/tcl8.6/msgs/en_gb.msg new file mode 100644 index 0000000000000000000000000000000000000000..5c61c43c6e180d379e44e3a10d45b64b782e8bfc --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/en_gb.msg @@ -0,0 +1,7 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset en_GB DATE_FORMAT "%d/%m/%y" + ::msgcat::mcset en_GB TIME_FORMAT "%T" + ::msgcat::mcset en_GB TIME_FORMAT_12 "%T" + ::msgcat::mcset en_GB DATE_TIME_FORMAT "%a %d %b %Y %T %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/en_in.msg b/evalkit_cambrian/lib/tcl8.6/msgs/en_in.msg new file mode 100644 index 0000000000000000000000000000000000000000..a1f155d2c58101bc3f4551e8c14cc7ab6931a62f --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/en_in.msg @@ -0,0 +1,8 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset en_IN AM "AM" + ::msgcat::mcset en_IN PM "PM" + ::msgcat::mcset en_IN DATE_FORMAT "%d %B %Y" + ::msgcat::mcset en_IN TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset en_IN DATE_TIME_FORMAT "%d %B %Y %H:%M:%S %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/en_nz.msg b/evalkit_cambrian/lib/tcl8.6/msgs/en_nz.msg new file mode 100644 index 0000000000000000000000000000000000000000..b419017a91d9e3db7435c34a68bd54a224454bf9 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/en_nz.msg @@ -0,0 +1,7 @@ +# created by tools/loadICU.tcl -- do not edit +namespace 
eval ::tcl::clock { + ::msgcat::mcset en_NZ DATE_FORMAT "%e/%m/%Y" + ::msgcat::mcset en_NZ TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset en_NZ TIME_FORMAT_12 "%I:%M:%S %P %z" + ::msgcat::mcset en_NZ DATE_TIME_FORMAT "%e/%m/%Y %H:%M:%S %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/en_za.msg b/evalkit_cambrian/lib/tcl8.6/msgs/en_za.msg new file mode 100644 index 0000000000000000000000000000000000000000..fe43797fda976a1328445a338fe7deab323d647f --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/en_za.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset en_ZA DATE_FORMAT "%Y/%m/%d" + ::msgcat::mcset en_ZA TIME_FORMAT_12 "%I:%M:%S" + ::msgcat::mcset en_ZA DATE_TIME_FORMAT "%Y/%m/%d %I:%M:%S %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/eo.msg b/evalkit_cambrian/lib/tcl8.6/msgs/eo.msg new file mode 100644 index 0000000000000000000000000000000000000000..1d2a24fece6b010f0020fb9b814ac9ab1ae07445 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/eo.msg @@ -0,0 +1,54 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset eo DAYS_OF_WEEK_ABBREV [list \ + "di"\ + "lu"\ + "ma"\ + "me"\ + "\u0135a"\ + "ve"\ + "sa"] + ::msgcat::mcset eo DAYS_OF_WEEK_FULL [list \ + "diman\u0109o"\ + "lundo"\ + "mardo"\ + "merkredo"\ + "\u0135a\u016ddo"\ + "vendredo"\ + "sabato"] + ::msgcat::mcset eo MONTHS_ABBREV [list \ + "jan"\ + "feb"\ + "mar"\ + "apr"\ + "maj"\ + "jun"\ + "jul"\ + "a\u016dg"\ + "sep"\ + "okt"\ + "nov"\ + "dec"\ + ""] + ::msgcat::mcset eo MONTHS_FULL [list \ + "januaro"\ + "februaro"\ + "marto"\ + "aprilo"\ + "majo"\ + "junio"\ + "julio"\ + "a\u016dgusto"\ + "septembro"\ + "oktobro"\ + "novembro"\ + "decembro"\ + ""] + ::msgcat::mcset eo BCE "aK" + ::msgcat::mcset eo CE "pK" + ::msgcat::mcset eo AM "atm" + ::msgcat::mcset eo PM "ptm" + ::msgcat::mcset eo DATE_FORMAT "%Y-%b-%d" + ::msgcat::mcset eo TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset eo DATE_TIME_FORMAT "%Y-%b-%d %H:%M:%S %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/es_ar.msg b/evalkit_cambrian/lib/tcl8.6/msgs/es_ar.msg new file mode 100644 index 0000000000000000000000000000000000000000..7d350274ec27838924cf024bd097361386ef4719 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/es_ar.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset es_AR DATE_FORMAT "%d/%m/%Y" + ::msgcat::mcset es_AR TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset es_AR DATE_TIME_FORMAT "%d/%m/%Y %H:%M:%S %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/es_hn.msg b/evalkit_cambrian/lib/tcl8.6/msgs/es_hn.msg new file mode 100644 index 0000000000000000000000000000000000000000..a758ca2b6118c92384333f7beb6513939672e81e --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/es_hn.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset es_HN DATE_FORMAT "%m-%d-%Y" + ::msgcat::mcset es_HN TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset es_HN DATE_TIME_FORMAT "%m-%d-%Y %I:%M:%S %P %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/es_ni.msg b/evalkit_cambrian/lib/tcl8.6/msgs/es_ni.msg new file mode 100644 index 0000000000000000000000000000000000000000..7c394953a3d069ae78355c632ae28a25fbf111d2 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/es_ni.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset es_NI DATE_FORMAT "%m-%d-%Y" + ::msgcat::mcset es_NI TIME_FORMAT_12 "%I:%M:%S 
%P" + ::msgcat::mcset es_NI DATE_TIME_FORMAT "%m-%d-%Y %I:%M:%S %P %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/fa.msg b/evalkit_cambrian/lib/tcl8.6/msgs/fa.msg new file mode 100644 index 0000000000000000000000000000000000000000..89b2f90894a4336f0f5cc15365b08e3a07539a20 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/fa.msg @@ -0,0 +1,47 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset fa DAYS_OF_WEEK_ABBREV [list \ + "\u06cc\u2214"\ + "\u062f\u2214"\ + "\u0633\u2214"\ + "\u0686\u2214"\ + "\u067e\u2214"\ + "\u062c\u2214"\ + "\u0634\u2214"] + ::msgcat::mcset fa DAYS_OF_WEEK_FULL [list \ + "\u06cc\u06cc\u200c\u0634\u0646\u0628\u0647"\ + "\u062f\u0648\u0634\u0646\u0628\u0647"\ + "\u0633\u0647\u200c\u0634\u0646\u0628\u0647"\ + "\u0686\u0647\u0627\u0631\u0634\u0646\u0628\u0647"\ + "\u067e\u0646\u062c\u200c\u0634\u0646\u0628\u0647"\ + "\u062c\u0645\u0639\u0647"\ + "\u0634\u0646\u0628\u0647"] + ::msgcat::mcset fa MONTHS_ABBREV [list \ + "\u0698\u0627\u0646"\ + "\u0641\u0648\u0631"\ + "\u0645\u0627\u0631"\ + "\u0622\u0648\u0631"\ + "\u0645\u0640\u0647"\ + "\u0698\u0648\u0646"\ + "\u0698\u0648\u06cc"\ + "\u0627\u0648\u062a"\ + "\u0633\u067e\u062a"\ + "\u0627\u0643\u062a"\ + "\u0646\u0648\u0627"\ + "\u062f\u0633\u0627"\ + ""] + ::msgcat::mcset fa MONTHS_FULL [list \ + "\u0698\u0627\u0646\u0648\u06cc\u0647"\ + "\u0641\u0648\u0631\u0648\u06cc\u0647"\ + "\u0645\u0627\u0631\u0633"\ + "\u0622\u0648\u0631\u06cc\u0644"\ + "\u0645\u0647"\ + "\u0698\u0648\u0626\u0646"\ + "\u0698\u0648\u0626\u06cc\u0647"\ + "\u0627\u0648\u062a"\ + "\u0633\u067e\u062a\u0627\u0645\u0628\u0631"\ + "\u0627\u0643\u062a\u0628\u0631"\ + "\u0646\u0648\u0627\u0645\u0628\u0631"\ + "\u062f\u0633\u0627\u0645\u0628\u0631"\ + ""] +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/fo_fo.msg b/evalkit_cambrian/lib/tcl8.6/msgs/fo_fo.msg new file mode 100644 index 0000000000000000000000000000000000000000..2392b8e6734d6f811cadb5c4ed6aa62147829309 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/fo_fo.msg @@ -0,0 +1,7 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset fo_FO DATE_FORMAT "%d/%m-%Y" + ::msgcat::mcset fo_FO TIME_FORMAT "%T" + ::msgcat::mcset fo_FO TIME_FORMAT_12 "%T" + ::msgcat::mcset fo_FO DATE_TIME_FORMAT "%a %d %b %Y %T %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/fr_ch.msg b/evalkit_cambrian/lib/tcl8.6/msgs/fr_ch.msg new file mode 100644 index 0000000000000000000000000000000000000000..7e2bac73d8023f0be9847d123901cbba0fe2ae53 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/fr_ch.msg @@ -0,0 +1,7 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset fr_CH DATE_FORMAT "%d. %m. 
%y" + ::msgcat::mcset fr_CH TIME_FORMAT "%T" + ::msgcat::mcset fr_CH TIME_FORMAT_12 "%T" + ::msgcat::mcset fr_CH DATE_TIME_FORMAT "%a %d %b %Y %T %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/gv_gb.msg b/evalkit_cambrian/lib/tcl8.6/msgs/gv_gb.msg new file mode 100644 index 0000000000000000000000000000000000000000..5e96e6f3cd699726995bf54773b5c938f2215912 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/gv_gb.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset gv_GB DATE_FORMAT "%d %B %Y" + ::msgcat::mcset gv_GB TIME_FORMAT_12 "%l:%M:%S %P" + ::msgcat::mcset gv_GB DATE_TIME_FORMAT "%d %B %Y %l:%M:%S %P %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/ms.msg b/evalkit_cambrian/lib/tcl8.6/msgs/ms.msg new file mode 100644 index 0000000000000000000000000000000000000000..e954431b7b7bfc876c3a1c53d22d3843546dee2d --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/ms.msg @@ -0,0 +1,47 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset ms DAYS_OF_WEEK_ABBREV [list \ + "Aha"\ + "Isn"\ + "Sei"\ + "Rab"\ + "Kha"\ + "Jum"\ + "Sab"] + ::msgcat::mcset ms DAYS_OF_WEEK_FULL [list \ + "Ahad"\ + "Isnin"\ + "Selasa"\ + "Rahu"\ + "Khamis"\ + "Jumaat"\ + "Sabtu"] + ::msgcat::mcset ms MONTHS_ABBREV [list \ + "Jan"\ + "Feb"\ + "Mac"\ + "Apr"\ + "Mei"\ + "Jun"\ + "Jul"\ + "Ogos"\ + "Sep"\ + "Okt"\ + "Nov"\ + "Dis"\ + ""] + ::msgcat::mcset ms MONTHS_FULL [list \ + "Januari"\ + "Februari"\ + "Mac"\ + "April"\ + "Mei"\ + "Jun"\ + "Julai"\ + "Ogos"\ + "September"\ + "Oktober"\ + "November"\ + "Disember"\ + ""] +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/sl.msg b/evalkit_cambrian/lib/tcl8.6/msgs/sl.msg new file mode 100644 index 0000000000000000000000000000000000000000..42bc5097a77ceb8028b1b766a8a2df35305bcf55 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/sl.msg @@ -0,0 +1,52 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset sl DAYS_OF_WEEK_ABBREV [list \ + "Ned"\ + "Pon"\ + "Tor"\ + "Sre"\ + "\u010cet"\ + "Pet"\ + "Sob"] + ::msgcat::mcset sl DAYS_OF_WEEK_FULL [list \ + "Nedelja"\ + "Ponedeljek"\ + "Torek"\ + "Sreda"\ + "\u010cetrtek"\ + "Petek"\ + "Sobota"] + ::msgcat::mcset sl MONTHS_ABBREV [list \ + "jan"\ + "feb"\ + "mar"\ + "apr"\ + "maj"\ + "jun"\ + "jul"\ + "avg"\ + "sep"\ + "okt"\ + "nov"\ + "dec"\ + ""] + ::msgcat::mcset sl MONTHS_FULL [list \ + "januar"\ + "februar"\ + "marec"\ + "april"\ + "maj"\ + "junij"\ + "julij"\ + "avgust"\ + "september"\ + "oktober"\ + "november"\ + "december"\ + ""] + ::msgcat::mcset sl BCE "pr.n.\u0161." + ::msgcat::mcset sl CE "po Kr." 
+ ::msgcat::mcset sl DATE_FORMAT "%Y.%m.%e" + ::msgcat::mcset sl TIME_FORMAT "%k:%M:%S" + ::msgcat::mcset sl DATE_TIME_FORMAT "%Y.%m.%e %k:%M:%S %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/sw.msg b/evalkit_cambrian/lib/tcl8.6/msgs/sw.msg new file mode 100644 index 0000000000000000000000000000000000000000..b888b43df7009891dd34872cdce6002b961287a9 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/sw.msg @@ -0,0 +1,49 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset sw DAYS_OF_WEEK_ABBREV [list \ + "Jpi"\ + "Jtt"\ + "Jnn"\ + "Jtn"\ + "Alh"\ + "Iju"\ + "Jmo"] + ::msgcat::mcset sw DAYS_OF_WEEK_FULL [list \ + "Jumapili"\ + "Jumatatu"\ + "Jumanne"\ + "Jumatano"\ + "Alhamisi"\ + "Ijumaa"\ + "Jumamosi"] + ::msgcat::mcset sw MONTHS_ABBREV [list \ + "Jan"\ + "Feb"\ + "Mar"\ + "Apr"\ + "Mei"\ + "Jun"\ + "Jul"\ + "Ago"\ + "Sep"\ + "Okt"\ + "Nov"\ + "Des"\ + ""] + ::msgcat::mcset sw MONTHS_FULL [list \ + "Januari"\ + "Februari"\ + "Machi"\ + "Aprili"\ + "Mei"\ + "Juni"\ + "Julai"\ + "Agosti"\ + "Septemba"\ + "Oktoba"\ + "Novemba"\ + "Desemba"\ + ""] + ::msgcat::mcset sw BCE "KK" + ::msgcat::mcset sw CE "BK" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/ta_in.msg b/evalkit_cambrian/lib/tcl8.6/msgs/ta_in.msg new file mode 100644 index 0000000000000000000000000000000000000000..24590ac410382ab4a7a3a93cbfb9d3704b518906 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/ta_in.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset ta_IN DATE_FORMAT "%d %M %Y" + ::msgcat::mcset ta_IN TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset ta_IN DATE_TIME_FORMAT "%d %M %Y %I:%M:%S %P %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/te_in.msg b/evalkit_cambrian/lib/tcl8.6/msgs/te_in.msg new file mode 100644 index 0000000000000000000000000000000000000000..61638b5a2513abfd6626bc9429fb1f183e689676 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/te_in.msg @@ -0,0 +1,8 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset te_IN AM "\u0c2a\u0c42\u0c30\u0c4d\u0c35\u0c3e\u0c39\u0c4d\u0c28" + ::msgcat::mcset te_IN PM "\u0c05\u0c2a\u0c30\u0c3e\u0c39\u0c4d\u0c28" + ::msgcat::mcset te_IN DATE_FORMAT "%d/%m/%Y" + ::msgcat::mcset te_IN TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset te_IN DATE_TIME_FORMAT "%d/%m/%Y %I:%M:%S %P %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/vi.msg b/evalkit_cambrian/lib/tcl8.6/msgs/vi.msg new file mode 100644 index 0000000000000000000000000000000000000000..c98b2a6a7d9ca24c3cb18b9f1e2eca4aa9ad82a1 --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/vi.msg @@ -0,0 +1,50 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset vi DAYS_OF_WEEK_ABBREV [list \ + "Th 2"\ + "Th 3"\ + "Th 4"\ + "Th 5"\ + "Th 6"\ + "Th 7"\ + "CN"] + ::msgcat::mcset vi DAYS_OF_WEEK_FULL [list \ + "Th\u01b0\u0301 hai"\ + "Th\u01b0\u0301 ba"\ + "Th\u01b0\u0301 t\u01b0"\ + "Th\u01b0\u0301 n\u0103m"\ + "Th\u01b0\u0301 s\u00e1u"\ + "Th\u01b0\u0301 ba\u0309y"\ + "Chu\u0309 nh\u00e2\u0323t"] + ::msgcat::mcset vi MONTHS_ABBREV [list \ + "Thg 1"\ + "Thg 2"\ + "Thg 3"\ + "Thg 4"\ + "Thg 5"\ + "Thg 6"\ + "Thg 7"\ + "Thg 8"\ + "Thg 9"\ + "Thg 10"\ + "Thg 11"\ + "Thg 12"\ + ""] + ::msgcat::mcset vi MONTHS_FULL [list \ + "Th\u00e1ng m\u00f4\u0323t"\ + "Th\u00e1ng hai"\ + "Th\u00e1ng ba"\ + "Th\u00e1ng t\u01b0"\ + "Th\u00e1ng n\u0103m"\ + "Th\u00e1ng s\u00e1u"\ + "Th\u00e1ng ba\u0309y"\ + "Th\u00e1ng t\u00e1m"\ + 
"Th\u00e1ng ch\u00edn"\ + "Th\u00e1ng m\u01b0\u01a1\u0300i"\ + "Th\u00e1ng m\u01b0\u01a1\u0300i m\u00f4\u0323t"\ + "Th\u00e1ng m\u01b0\u01a1\u0300i hai"\ + ""] + ::msgcat::mcset vi DATE_FORMAT "%d %b %Y" + ::msgcat::mcset vi TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset vi DATE_TIME_FORMAT "%d %b %Y %H:%M:%S %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/zh.msg b/evalkit_cambrian/lib/tcl8.6/msgs/zh.msg new file mode 100644 index 0000000000000000000000000000000000000000..b799a3261aa1b34120bc75b20b1441e07d41d79b --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/zh.msg @@ -0,0 +1,55 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset zh DAYS_OF_WEEK_ABBREV [list \ + "\u661f\u671f\u65e5"\ + "\u661f\u671f\u4e00"\ + "\u661f\u671f\u4e8c"\ + "\u661f\u671f\u4e09"\ + "\u661f\u671f\u56db"\ + "\u661f\u671f\u4e94"\ + "\u661f\u671f\u516d"] + ::msgcat::mcset zh DAYS_OF_WEEK_FULL [list \ + "\u661f\u671f\u65e5"\ + "\u661f\u671f\u4e00"\ + "\u661f\u671f\u4e8c"\ + "\u661f\u671f\u4e09"\ + "\u661f\u671f\u56db"\ + "\u661f\u671f\u4e94"\ + "\u661f\u671f\u516d"] + ::msgcat::mcset zh MONTHS_ABBREV [list \ + "\u4e00\u6708"\ + "\u4e8c\u6708"\ + "\u4e09\u6708"\ + "\u56db\u6708"\ + "\u4e94\u6708"\ + "\u516d\u6708"\ + "\u4e03\u6708"\ + "\u516b\u6708"\ + "\u4e5d\u6708"\ + "\u5341\u6708"\ + "\u5341\u4e00\u6708"\ + "\u5341\u4e8c\u6708"\ + ""] + ::msgcat::mcset zh MONTHS_FULL [list \ + "\u4e00\u6708"\ + "\u4e8c\u6708"\ + "\u4e09\u6708"\ + "\u56db\u6708"\ + "\u4e94\u6708"\ + "\u516d\u6708"\ + "\u4e03\u6708"\ + "\u516b\u6708"\ + "\u4e5d\u6708"\ + "\u5341\u6708"\ + "\u5341\u4e00\u6708"\ + "\u5341\u4e8c\u6708"\ + ""] + ::msgcat::mcset zh BCE "\u516c\u5143\u524d" + ::msgcat::mcset zh CE "\u516c\u5143" + ::msgcat::mcset zh AM "\u4e0a\u5348" + ::msgcat::mcset zh PM "\u4e0b\u5348" + ::msgcat::mcset zh LOCALE_NUMERALS "\u3007 \u4e00 \u4e8c \u4e09 \u56db \u4e94 \u516d \u4e03 \u516b \u4e5d \u5341 \u5341\u4e00 \u5341\u4e8c \u5341\u4e09 \u5341\u56db \u5341\u4e94 \u5341\u516d \u5341\u4e03 \u5341\u516b \u5341\u4e5d \u4e8c\u5341 \u5eff\u4e00 \u5eff\u4e8c \u5eff\u4e09 \u5eff\u56db \u5eff\u4e94 \u5eff\u516d \u5eff\u4e03 \u5eff\u516b \u5eff\u4e5d \u4e09\u5341 \u5345\u4e00 \u5345\u4e8c \u5345\u4e09 \u5345\u56db \u5345\u4e94 \u5345\u516d \u5345\u4e03 \u5345\u516b \u5345\u4e5d \u56db\u5341 \u56db\u5341\u4e00 \u56db\u5341\u4e8c \u56db\u5341\u4e09 \u56db\u5341\u56db \u56db\u5341\u4e94 \u56db\u5341\u516d \u56db\u5341\u4e03 \u56db\u5341\u516b \u56db\u5341\u4e5d \u4e94\u5341 \u4e94\u5341\u4e00 \u4e94\u5341\u4e8c \u4e94\u5341\u4e09 \u4e94\u5341\u56db \u4e94\u5341\u4e94 \u4e94\u5341\u516d \u4e94\u5341\u4e03 \u4e94\u5341\u516b \u4e94\u5341\u4e5d \u516d\u5341 \u516d\u5341\u4e00 \u516d\u5341\u4e8c \u516d\u5341\u4e09 \u516d\u5341\u56db \u516d\u5341\u4e94 \u516d\u5341\u516d \u516d\u5341\u4e03 \u516d\u5341\u516b \u516d\u5341\u4e5d \u4e03\u5341 \u4e03\u5341\u4e00 \u4e03\u5341\u4e8c \u4e03\u5341\u4e09 \u4e03\u5341\u56db \u4e03\u5341\u4e94 \u4e03\u5341\u516d \u4e03\u5341\u4e03 \u4e03\u5341\u516b \u4e03\u5341\u4e5d \u516b\u5341 \u516b\u5341\u4e00 \u516b\u5341\u4e8c \u516b\u5341\u4e09 \u516b\u5341\u56db \u516b\u5341\u4e94 \u516b\u5341\u516d \u516b\u5341\u4e03 \u516b\u5341\u516b \u516b\u5341\u4e5d \u4e5d\u5341 \u4e5d\u5341\u4e00 \u4e5d\u5341\u4e8c \u4e5d\u5341\u4e09 \u4e5d\u5341\u56db \u4e5d\u5341\u4e94 \u4e5d\u5341\u516d \u4e5d\u5341\u4e03 \u4e5d\u5341\u516b \u4e5d\u5341\u4e5d" + ::msgcat::mcset zh LOCALE_DATE_FORMAT "\u516c\u5143%Y\u5e74%B%Od\u65e5" + ::msgcat::mcset zh LOCALE_TIME_FORMAT 
"%OH\u65f6%OM\u5206%OS\u79d2" + ::msgcat::mcset zh LOCALE_DATE_TIME_FORMAT "%A %Y\u5e74%B%Od\u65e5%OH\u65f6%OM\u5206%OS\u79d2 %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/zh_sg.msg b/evalkit_cambrian/lib/tcl8.6/msgs/zh_sg.msg new file mode 100644 index 0000000000000000000000000000000000000000..a2f3e39d8d21ea13661da244af688157bd46773b --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/zh_sg.msg @@ -0,0 +1,8 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset zh_SG AM "\u4e0a\u5348" + ::msgcat::mcset zh_SG PM "\u4e2d\u5348" + ::msgcat::mcset zh_SG DATE_FORMAT "%d %B %Y" + ::msgcat::mcset zh_SG TIME_FORMAT_12 "%P %I:%M:%S" + ::msgcat::mcset zh_SG DATE_TIME_FORMAT "%d %B %Y %P %I:%M:%S %z" +} diff --git a/evalkit_cambrian/lib/tcl8.6/msgs/zh_tw.msg b/evalkit_cambrian/lib/tcl8.6/msgs/zh_tw.msg new file mode 100644 index 0000000000000000000000000000000000000000..e0796b11d5fc283a20e26ebff97a94269ff7e36f --- /dev/null +++ b/evalkit_cambrian/lib/tcl8.6/msgs/zh_tw.msg @@ -0,0 +1,8 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset zh_TW BCE "\u6c11\u570b\u524d" + ::msgcat::mcset zh_TW CE "\u6c11\u570b" + ::msgcat::mcset zh_TW DATE_FORMAT "%Y/%m/%e" + ::msgcat::mcset zh_TW TIME_FORMAT_12 "%P %I:%M:%S" + ::msgcat::mcset zh_TW DATE_TIME_FORMAT "%Y/%m/%e %P %I:%M:%S %z" +}