diff --git a/evalkit_cambrian/lib/python3.10/_aix_support.py b/evalkit_cambrian/lib/python3.10/_aix_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..45504934063df87f8e77e790bdd8ae40c4d4c2aa
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/_aix_support.py
@@ -0,0 +1,89 @@
+"""Shared AIX support functions."""
+
+import sys
+import sysconfig
+
+try:
+ import subprocess
+except ImportError: # pragma: no cover
+ # _aix_support is used in distutils by setup.py to build C extensions,
+ # before subprocess dependencies like _posixsubprocess are available.
+ import _bootsubprocess as subprocess
+
+
+def _aix_tag(vrtl, bd):
+ # type: (List[int], int) -> str
+ # Infer the ABI bitwidth from maxsize (assuming 64 bit as the default)
+ _sz = 32 if sys.maxsize == (2**31-1) else 64
+ # vrtl[version, release, technology_level]
+ return "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(vrtl[0], vrtl[1], vrtl[2], bd, _sz)
+
+
+# extract version, release and technology level from a VRMF string
+def _aix_vrtl(vrmf):
+ # type: (str) -> List[int]
+ v, r, tl = vrmf.split(".")[:3]
+ return [int(v[-1]), int(r), int(tl)]
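+
+# Example (illustrative): _aix_vrtl("7.1.4.34") returns [7, 1, 4].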
+
+
+def _aix_bosmp64():
+ # type: () -> Tuple[str, int]
+ """
+ Return a Tuple[str, int] e.g., ('7.1.4.34', 1806)
+ The fileset bos.mp64 is the AIX kernel. Its VRMF and builddate
+ reflect the current ABI levels of the runtime environment.
+ """
+ # We expect all AIX systems to have lslpp installed in this location
+ out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.mp64"])
+ out = out.decode("utf-8")
+ out = out.strip().split(":") # type: ignore
+ # Use str() and int() to help mypy see types
+ return (str(out[2]), int(out[-1]))
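+
+# Example (illustrative): 'lslpp -Lqc bos.mp64' prints one colon-separated
+# record whose third field is the VRMF level and whose last field is the
+# builddate, so _aix_bosmp64() returns e.g. ('7.1.4.34', 1806).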
+
+
+def aix_platform():
+ # type: () -> str
+ """
+ AIX filesets are identified by four decimal values: V.R.M.F.
+ V (version) and R (release) can be retrieved using ``uname``.
+ Since 2007, starting with AIX 5.3 TL7, the M value has been
+ included with the fileset bos.mp64 and represents the Technology
+ Level (TL) of AIX. The F (Fix) value also increases, but is not
+ relevant for comparing releases and binary compatibility.
+ For binary compatibility the so-called builddate is needed.
+ Again, the builddate of an AIX release is associated with bos.mp64.
+ AIX ABI compatibility is described as guaranteed at: https://www.ibm.com/\
+ support/knowledgecenter/en/ssw_aix_72/install/binary_compatability.html
+
+ For pep425 purposes the AIX platform tag becomes:
+ "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(v, r, tl, builddate, bitsize)
+ e.g., "aix-6107-1415-32" for AIX 6.1 TL7 bd 1415, 32-bit
+ and, "aix-6107-1415-64" for AIX 6.1 TL7 bd 1415, 64-bit
+ """
+ vrmf, bd = _aix_bosmp64()
+ return _aix_tag(_aix_vrtl(vrmf), bd)
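+
+# Example (illustrative): on AIX 7.1 TL4 with builddate 1806, a 64-bit
+# interpreter reports aix_platform() == 'aix-7104-1806-64'.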
+
+
+# extract vrtl from the BUILD_GNU_TYPE as an int
+def _aix_bgt():
+ # type: () -> List[int]
+ gnu_type = sysconfig.get_config_var("BUILD_GNU_TYPE")
+ if not gnu_type:
+ raise ValueError("BUILD_GNU_TYPE is not defined")
+ return _aix_vrtl(vrmf=gnu_type)
+
+
+def aix_buildtag():
+ # type: () -> str
+ """
+ Return the platform_tag of the system Python was built on.
+ """
+ # AIX_BUILDDATE is defined by configure with:
+ # lslpp -Lcq bos.mp64 | awk -F: '{ print $NF }'
+ build_date = sysconfig.get_config_var("AIX_BUILDDATE")
+ try:
+ build_date = int(build_date)
+ except (ValueError, TypeError):
+ raise ValueError(f"AIX_BUILDDATE is not defined or invalid: "
+ f"{build_date!r}")
+ return _aix_tag(_aix_bgt(), build_date)
diff --git a/evalkit_cambrian/lib/python3.10/_compat_pickle.py b/evalkit_cambrian/lib/python3.10/_compat_pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..f68496ae639f5f880ae5f4ca0a220a29d2e354be
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/_compat_pickle.py
@@ -0,0 +1,251 @@
+# This module is used to map the old Python 2 names to the new names used in
+# Python 3 for the pickle module. This is needed to make pickle streams
+# generated with Python 2 loadable by Python 3.
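+#
+# Example (illustrative): pickle.loads(data, fix_imports=True) consults these
+# tables when loading a Python 2 stream: IMPORT_MAPPING rewrites module names
+# (e.g. 'cStringIO' -> 'io') and NAME_MAPPING rewrites qualified globals
+# (e.g. ('__builtin__', 'unicode') -> ('builtins', 'str')).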
+
+# This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import
+# lib2to3 and use the mapping defined there, because lib2to3 uses pickle.
+# Thus, this could cause the module to be imported recursively.
+IMPORT_MAPPING = {
+ '__builtin__' : 'builtins',
+ 'copy_reg': 'copyreg',
+ 'Queue': 'queue',
+ 'SocketServer': 'socketserver',
+ 'ConfigParser': 'configparser',
+ 'repr': 'reprlib',
+ 'tkFileDialog': 'tkinter.filedialog',
+ 'tkSimpleDialog': 'tkinter.simpledialog',
+ 'tkColorChooser': 'tkinter.colorchooser',
+ 'tkCommonDialog': 'tkinter.commondialog',
+ 'Dialog': 'tkinter.dialog',
+ 'Tkdnd': 'tkinter.dnd',
+ 'tkFont': 'tkinter.font',
+ 'tkMessageBox': 'tkinter.messagebox',
+ 'ScrolledText': 'tkinter.scrolledtext',
+ 'Tkconstants': 'tkinter.constants',
+ 'Tix': 'tkinter.tix',
+ 'ttk': 'tkinter.ttk',
+ 'Tkinter': 'tkinter',
+ 'markupbase': '_markupbase',
+ '_winreg': 'winreg',
+ 'thread': '_thread',
+ 'dummy_thread': '_dummy_thread',
+ 'dbhash': 'dbm.bsd',
+ 'dumbdbm': 'dbm.dumb',
+ 'dbm': 'dbm.ndbm',
+ 'gdbm': 'dbm.gnu',
+ 'xmlrpclib': 'xmlrpc.client',
+ 'SimpleXMLRPCServer': 'xmlrpc.server',
+ 'httplib': 'http.client',
+ 'htmlentitydefs' : 'html.entities',
+ 'HTMLParser' : 'html.parser',
+ 'Cookie': 'http.cookies',
+ 'cookielib': 'http.cookiejar',
+ 'BaseHTTPServer': 'http.server',
+ 'test.test_support': 'test.support',
+ 'commands': 'subprocess',
+ 'urlparse' : 'urllib.parse',
+ 'robotparser' : 'urllib.robotparser',
+ 'urllib2': 'urllib.request',
+ 'anydbm': 'dbm',
+ '_abcoll' : 'collections.abc',
+}
+
+
+# This contains rename rules that are easy to handle. We ignore the more
+# complex stuff (e.g. mapping the names in the urllib and types modules).
+# These rules should be run before import names are fixed.
+NAME_MAPPING = {
+ ('__builtin__', 'xrange'): ('builtins', 'range'),
+ ('__builtin__', 'reduce'): ('functools', 'reduce'),
+ ('__builtin__', 'intern'): ('sys', 'intern'),
+ ('__builtin__', 'unichr'): ('builtins', 'chr'),
+ ('__builtin__', 'unicode'): ('builtins', 'str'),
+ ('__builtin__', 'long'): ('builtins', 'int'),
+ ('itertools', 'izip'): ('builtins', 'zip'),
+ ('itertools', 'imap'): ('builtins', 'map'),
+ ('itertools', 'ifilter'): ('builtins', 'filter'),
+ ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
+ ('itertools', 'izip_longest'): ('itertools', 'zip_longest'),
+ ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'),
+ ('UserList', 'UserList'): ('collections', 'UserList'),
+ ('UserString', 'UserString'): ('collections', 'UserString'),
+ ('whichdb', 'whichdb'): ('dbm', 'whichdb'),
+ ('_socket', 'fromfd'): ('socket', 'fromfd'),
+ ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'),
+ ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'),
+ ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'),
+ ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'),
+ ('urllib', 'getproxies'): ('urllib.request', 'getproxies'),
+ ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'),
+ ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'),
+ ('urllib', 'quote'): ('urllib.parse', 'quote'),
+ ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'),
+ ('urllib', 'unquote'): ('urllib.parse', 'unquote'),
+ ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'),
+ ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'),
+ ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'),
+ ('urllib', 'urlopen'): ('urllib.request', 'urlopen'),
+ ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'),
+ ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'),
+ ('urllib2', 'URLError'): ('urllib.error', 'URLError'),
+}
+
+PYTHON2_EXCEPTIONS = (
+ "ArithmeticError",
+ "AssertionError",
+ "AttributeError",
+ "BaseException",
+ "BufferError",
+ "BytesWarning",
+ "DeprecationWarning",
+ "EOFError",
+ "EnvironmentError",
+ "Exception",
+ "FloatingPointError",
+ "FutureWarning",
+ "GeneratorExit",
+ "IOError",
+ "ImportError",
+ "ImportWarning",
+ "IndentationError",
+ "IndexError",
+ "KeyError",
+ "KeyboardInterrupt",
+ "LookupError",
+ "MemoryError",
+ "NameError",
+ "NotImplementedError",
+ "OSError",
+ "OverflowError",
+ "PendingDeprecationWarning",
+ "ReferenceError",
+ "RuntimeError",
+ "RuntimeWarning",
+ # StandardError is gone in Python 3, so we map it to Exception
+ "StopIteration",
+ "SyntaxError",
+ "SyntaxWarning",
+ "SystemError",
+ "SystemExit",
+ "TabError",
+ "TypeError",
+ "UnboundLocalError",
+ "UnicodeDecodeError",
+ "UnicodeEncodeError",
+ "UnicodeError",
+ "UnicodeTranslateError",
+ "UnicodeWarning",
+ "UserWarning",
+ "ValueError",
+ "Warning",
+ "ZeroDivisionError",
+)
+
+try:
+ WindowsError
+except NameError:
+ pass
+else:
+ PYTHON2_EXCEPTIONS += ("WindowsError",)
+
+for excname in PYTHON2_EXCEPTIONS:
+ NAME_MAPPING[("exceptions", excname)] = ("builtins", excname)
+
+MULTIPROCESSING_EXCEPTIONS = (
+ 'AuthenticationError',
+ 'BufferTooShort',
+ 'ProcessError',
+ 'TimeoutError',
+)
+
+for excname in MULTIPROCESSING_EXCEPTIONS:
+ NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname)
+
+# Same, but for 3.x to 2.x
+REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items())
+assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING)
+REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items())
+assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING)
+
+# Non-mutual mappings.
+
+IMPORT_MAPPING.update({
+ 'cPickle': 'pickle',
+ '_elementtree': 'xml.etree.ElementTree',
+ 'FileDialog': 'tkinter.filedialog',
+ 'SimpleDialog': 'tkinter.simpledialog',
+ 'DocXMLRPCServer': 'xmlrpc.server',
+ 'SimpleHTTPServer': 'http.server',
+ 'CGIHTTPServer': 'http.server',
+ # For compatibility with broken pickles saved in old Python 3 versions
+ 'UserDict': 'collections',
+ 'UserList': 'collections',
+ 'UserString': 'collections',
+ 'whichdb': 'dbm',
+ 'StringIO': 'io',
+ 'cStringIO': 'io',
+})
+
+REVERSE_IMPORT_MAPPING.update({
+ '_bz2': 'bz2',
+ '_dbm': 'dbm',
+ '_functools': 'functools',
+ '_gdbm': 'gdbm',
+ '_pickle': 'pickle',
+})
+
+NAME_MAPPING.update({
+ ('__builtin__', 'basestring'): ('builtins', 'str'),
+ ('exceptions', 'StandardError'): ('builtins', 'Exception'),
+ ('UserDict', 'UserDict'): ('collections', 'UserDict'),
+ ('socket', '_socketobject'): ('socket', 'SocketType'),
+})
+
+REVERSE_NAME_MAPPING.update({
+ ('_functools', 'reduce'): ('__builtin__', 'reduce'),
+ ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'),
+ ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'),
+ ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'),
+ ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'),
+ ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'),
+ ('xmlrpc.server', 'XMLRPCDocGenerator'):
+ ('DocXMLRPCServer', 'XMLRPCDocGenerator'),
+ ('xmlrpc.server', 'DocXMLRPCRequestHandler'):
+ ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'),
+ ('xmlrpc.server', 'DocXMLRPCServer'):
+ ('DocXMLRPCServer', 'DocXMLRPCServer'),
+ ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'):
+ ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'),
+ ('http.server', 'SimpleHTTPRequestHandler'):
+ ('SimpleHTTPServer', 'SimpleHTTPRequestHandler'),
+ ('http.server', 'CGIHTTPRequestHandler'):
+ ('CGIHTTPServer', 'CGIHTTPRequestHandler'),
+ ('_socket', 'socket'): ('socket', '_socketobject'),
+})
+
+PYTHON3_OSERROR_EXCEPTIONS = (
+ 'BrokenPipeError',
+ 'ChildProcessError',
+ 'ConnectionAbortedError',
+ 'ConnectionError',
+ 'ConnectionRefusedError',
+ 'ConnectionResetError',
+ 'FileExistsError',
+ 'FileNotFoundError',
+ 'InterruptedError',
+ 'IsADirectoryError',
+ 'NotADirectoryError',
+ 'PermissionError',
+ 'ProcessLookupError',
+ 'TimeoutError',
+)
+
+for excname in PYTHON3_OSERROR_EXCEPTIONS:
+ REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError')
+
+PYTHON3_IMPORTERROR_EXCEPTIONS = (
+ 'ModuleNotFoundError',
+)
+
+for excname in PYTHON3_IMPORTERROR_EXCEPTIONS:
+ REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError')
diff --git a/evalkit_cambrian/lib/python3.10/_osx_support.py b/evalkit_cambrian/lib/python3.10/_osx_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa66c8b9f4189f8b688f26469def701b821e049c
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/_osx_support.py
@@ -0,0 +1,574 @@
+"""Shared OS X support functions."""
+
+import os
+import re
+import sys
+
+__all__ = [
+ 'compiler_fixup',
+ 'customize_config_vars',
+ 'customize_compiler',
+ 'get_platform_osx',
+]
+
+# configuration variables that may contain universal build flags,
+# like "-arch" or "-isdkroot", that may need customization for
+# the user environment
+_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
+ 'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
+ 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
+ 'PY_CORE_CFLAGS', 'PY_CORE_LDFLAGS')
+
+# configuration variables that may contain compiler calls
+_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
+
+# prefix added to original configuration variable names
+_INITPRE = '_OSX_SUPPORT_INITIAL_'
+
+
+def _find_executable(executable, path=None):
+ """Tries to find 'executable' in the directories listed in 'path'.
+
+ 'path' is a string of directories separated by 'os.pathsep'; it defaults
+ to os.environ['PATH']. Returns the complete filename or None if not found.
+ """
+ if path is None:
+ path = os.environ['PATH']
+
+ paths = path.split(os.pathsep)
+ base, ext = os.path.splitext(executable)
+
+ if (sys.platform == 'win32') and (ext != '.exe'):
+ executable = executable + '.exe'
+
+ if not os.path.isfile(executable):
+ for p in paths:
+ f = os.path.join(p, executable)
+ if os.path.isfile(f):
+ # the file exists, we have a shot at spawn working
+ return f
+ return None
+ else:
+ return executable
+
+
+def _read_output(commandstring, capture_stderr=False):
+ """Output from successful command execution or None"""
+ # Similar to os.popen(commandstring, "r").read(),
+ # but without actually using os.popen because that
+ # function is not usable during python bootstrap.
+ # tempfile is also not available then.
+ import contextlib
+ try:
+ import tempfile
+ fp = tempfile.NamedTemporaryFile()
+ except ImportError:
+ fp = open("/tmp/_osx_support.%s"%(
+ os.getpid(),), "w+b")
+
+ with contextlib.closing(fp) as fp:
+ if capture_stderr:
+ cmd = "%s >'%s' 2>&1" % (commandstring, fp.name)
+ else:
+ cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
+ return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
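+
+# Example (illustrative): _read_output("/usr/bin/sw_vers -productVersion")
+# returns a version string such as '10.15.7', or None if the command fails.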
+
+
+def _find_build_tool(toolname):
+ """Find a build tool on current path or using xcrun"""
+ return (_find_executable(toolname)
+ or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
+ or ''
+ )
+
+_SYSTEM_VERSION = None
+
+def _get_system_version():
+ """Return the OS X system version as a string"""
+ # Reading this plist is a documented way to get the system
+ # version (see the documentation for the Gestalt Manager)
+ # We avoid using platform.mac_ver to avoid possible bootstrap issues during
+ # the build of Python itself (distutils is used to build standard library
+ # extensions).
+
+ global _SYSTEM_VERSION
+
+ if _SYSTEM_VERSION is None:
+ _SYSTEM_VERSION = ''
+ try:
+ f = open('/System/Library/CoreServices/SystemVersion.plist', encoding="utf-8")
+ except OSError:
+ # We're on a plain darwin box, fall back to the default
+ # behaviour.
+ pass
+ else:
+ try:
+ m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
+ r'<string>(.*?)</string>', f.read())
+ finally:
+ f.close()
+ if m is not None:
+ _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
+ # else: fall back to the default behaviour
+
+ return _SYSTEM_VERSION
+
+_SYSTEM_VERSION_TUPLE = None
+def _get_system_version_tuple():
+ """
+ Return the macOS system version as a tuple
+
+ The return value is safe to use to compare
+ two version numbers.
+ """
+ global _SYSTEM_VERSION_TUPLE
+ if _SYSTEM_VERSION_TUPLE is None:
+ osx_version = _get_system_version()
+ if osx_version:
+ try:
+ _SYSTEM_VERSION_TUPLE = tuple(int(i) for i in osx_version.split('.'))
+ except ValueError:
+ _SYSTEM_VERSION_TUPLE = ()
+
+ return _SYSTEM_VERSION_TUPLE
+
+
+def _remove_original_values(_config_vars):
+ """Remove original unmodified values for testing"""
+ # This is needed for higher-level cross-platform tests of get_platform.
+ for k in list(_config_vars):
+ if k.startswith(_INITPRE):
+ del _config_vars[k]
+
+def _save_modified_value(_config_vars, cv, newvalue):
+ """Save modified and original unmodified value of configuration var"""
+
+ oldvalue = _config_vars.get(cv, '')
+ if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
+ _config_vars[_INITPRE + cv] = oldvalue
+ _config_vars[cv] = newvalue
+
+
+_cache_default_sysroot = None
+def _default_sysroot(cc):
+ """ Returns the root of the default SDK for this system, or '/' """
+ global _cache_default_sysroot
+
+ if _cache_default_sysroot is not None:
+ return _cache_default_sysroot
+
+ contents = _read_output('%s -c -E -v - </dev/null' % (cc,), True)
+ in_incdirs = False
+ for line in contents.splitlines():
+ if line.startswith("#include <...>"):
+ in_incdirs = True
+ elif line.startswith("End of search list"):
+ in_incdirs = False
+ elif in_incdirs:
+ line = line.strip()
+ if line == '/usr/include':
+ _cache_default_sysroot = '/'
+ elif line.endswith(".sdk/usr/include"):
+ _cache_default_sysroot = line[:-12]
+ if _cache_default_sysroot is None:
+ _cache_default_sysroot = '/'
+
+ return _cache_default_sysroot
+
+def _supports_universal_builds():
+ """Returns True if universal builds are supported on this system"""
+ # As an approximation, we assume that if we are running on 10.4 or above,
+ # then we are running with an Xcode environment that supports universal
+ # builds, in particular -isysroot and -arch arguments to the compiler. This
+ # is in support of allowing 10.4 universal builds to run on 10.3.x systems.
+
+ osx_version = _get_system_version_tuple()
+ return bool(osx_version >= (10, 4)) if osx_version else False
+
+def _supports_arm64_builds():
+ """Returns True if arm64 builds are supported on this system"""
+ # There are two sets of systems supporting macOS/arm64 builds:
+ # 1. macOS 11 and later, unconditionally
+ # 2. macOS 10.15 with Xcode 12.2 or later
+ # For now the second category is ignored.
+ osx_version = _get_system_version_tuple()
+ return osx_version >= (11, 0) if osx_version else False
+
+
+def _find_appropriate_compiler(_config_vars):
+ """Find appropriate C compiler for extension module builds"""
+
+ # Issue #13590:
+ # The OSX location for the compiler varies between OSX
+ # (or rather Xcode) releases. With older releases (up-to 10.5)
+ # the compiler is in /usr/bin, with newer releases the compiler
+ # can only be found inside Xcode.app if the "Command Line Tools"
+ # are not installed.
+ #
+ # Furthermore, the compiler that can be used varies between
+ # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
+ # as the compiler, after that 'clang' should be used because
+ # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
+ # miscompiles Python.
+
+ # skip checks if the compiler was overridden with a CC env variable
+ if 'CC' in os.environ:
+ return _config_vars
+
+ # The CC config var might contain additional arguments.
+ # Ignore them while searching.
+ cc = oldcc = _config_vars['CC'].split()[0]
+ if not _find_executable(cc):
+ # Compiler is not found on the shell search PATH.
+ # Now search for clang, first on PATH (if the Command Line
+ # Tools have been installed in / or if the user has provided
+ # another location via CC). If not found, try using xcrun
+ # to find an uninstalled clang (within a selected Xcode).
+
+ # NOTE: Cannot use subprocess here because of bootstrap
+ # issues when building Python itself (and os.popen is
+ # implemented on top of subprocess and is therefore not
+ # usable as well)
+
+ cc = _find_build_tool('clang')
+
+ elif os.path.basename(cc).startswith('gcc'):
+ # Compiler is GCC, check if it is LLVM-GCC
+ data = _read_output("'%s' --version"
+ % (cc.replace("'", "'\"'\"'"),))
+ if data and 'llvm-gcc' in data:
+ # Found LLVM-GCC, fall back to clang
+ cc = _find_build_tool('clang')
+
+ if not cc:
+ raise SystemError(
+ "Cannot locate working compiler")
+
+ if cc != oldcc:
+ # Found a replacement compiler.
+ # Modify config vars using new compiler, if not already explicitly
+ # overridden by an env variable, preserving additional arguments.
+ for cv in _COMPILER_CONFIG_VARS:
+ if cv in _config_vars and cv not in os.environ:
+ cv_split = _config_vars[cv].split()
+ cv_split[0] = cc if cv != 'CXX' else cc + '++'
+ _save_modified_value(_config_vars, cv, ' '.join(cv_split))
+
+ return _config_vars
+
+
+def _remove_universal_flags(_config_vars):
+ """Remove all universal build arguments from config vars"""
+
+ for cv in _UNIVERSAL_CONFIG_VARS:
+ # Do not alter a config var explicitly overridden by env var
+ if cv in _config_vars and cv not in os.environ:
+ flags = _config_vars[cv]
+ flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
+ flags = re.sub(r'-isysroot\s*\S+', ' ', flags)
+ _save_modified_value(_config_vars, cv, flags)
+
+ return _config_vars
+
+
+def _remove_unsupported_archs(_config_vars):
+ """Remove any unsupported archs from config vars"""
+ # Different Xcode releases support different sets for '-arch'
+ # flags. In particular, Xcode 4.x no longer supports the
+ # PPC architectures.
+ #
+ # This code automatically removes '-arch ppc' and '-arch ppc64'
+ # when these are not supported. That makes it possible to
+ # build extensions on OSX 10.7 and later with the prebuilt
+ # 32-bit installer on the python.org website.
+
+ # skip checks if the compiler was overridden with a CC env variable
+ if 'CC' in os.environ:
+ return _config_vars
+
+ if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
+ # NOTE: Cannot use subprocess here because of bootstrap
+ # issues when building Python itself
+ status = os.system(
+ """echo 'int main{};' | """
+ """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
+ %(_config_vars['CC'].replace("'", "'\"'\"'"),))
+ if status:
+ # The compile failed for some reason. Because of differences
+ # across Xcode and compiler versions, there is no reliable way
+ # to be sure why it failed. Assume here it was due to lack of
+ # PPC support and remove the related '-arch' flags from each
+ # config variables not explicitly overridden by an environment
+ # variable. If the error was for some other reason, we hope the
+ # failure will show up again when trying to compile an extension
+ # module.
+ for cv in _UNIVERSAL_CONFIG_VARS:
+ if cv in _config_vars and cv not in os.environ:
+ flags = _config_vars[cv]
+ flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
+ _save_modified_value(_config_vars, cv, flags)
+
+ return _config_vars
+
+
+def _override_all_archs(_config_vars):
+ """Allow override of all archs with ARCHFLAGS env var"""
+ # NOTE: This name was introduced by Apple in OSX 10.5 and
+ # is used by several scripting languages distributed with
+ # that OS release.
+ if 'ARCHFLAGS' in os.environ:
+ arch = os.environ['ARCHFLAGS']
+ for cv in _UNIVERSAL_CONFIG_VARS:
+ if cv in _config_vars and '-arch' in _config_vars[cv]:
+ flags = _config_vars[cv]
+ flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
+ flags = flags + ' ' + arch
+ _save_modified_value(_config_vars, cv, flags)
+
+ return _config_vars
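+
+# Example (illustrative): ARCHFLAGS='-arch arm64 -arch x86_64' replaces any
+# '-arch' flags already present in each config variable, yielding a
+# universal2 build.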
+
+
+def _check_for_unavailable_sdk(_config_vars):
+ """Remove references to any SDKs not available"""
+ # If we're on OSX 10.5 or later and the user tries to
+ # compile an extension using an SDK that is not present
+ # on the current machine it is better to not use an SDK
+ # than to fail. This is particularly important with
+ # the standalone Command Line Tools alternative to a
+ # full-blown Xcode install since the CLT packages do not
+ # provide SDKs. If the SDK is not present, it is assumed
+ # that the header files and dev libs have been installed
+ # to /usr and /System/Library by either a standalone CLT
+ # package or the CLT component within Xcode.
+ cflags = _config_vars.get('CFLAGS', '')
+ m = re.search(r'-isysroot\s*(\S+)', cflags)
+ if m is not None:
+ sdk = m.group(1)
+ if not os.path.exists(sdk):
+ for cv in _UNIVERSAL_CONFIG_VARS:
+ # Do not alter a config var explicitly overridden by env var
+ if cv in _config_vars and cv not in os.environ:
+ flags = _config_vars[cv]
+ flags = re.sub(r'-isysroot\s*\S+(?:\s|$)', ' ', flags)
+ _save_modified_value(_config_vars, cv, flags)
+
+ return _config_vars
+
+
+def compiler_fixup(compiler_so, cc_args):
+ """
+ This function will strip '-isysroot PATH' and '-arch ARCH' from the
+ compile flags if the user has specified one of them in extra_compile_flags.
+
+ This is needed because '-arch ARCH' adds another architecture to the
+ build, without a way to remove an architecture. Furthermore GCC will
+ barf if multiple '-isysroot' arguments are present.
+ """
+ stripArch = stripSysroot = False
+
+ compiler_so = list(compiler_so)
+
+ if not _supports_universal_builds():
+ # OSX before 10.4.0, these don't support -arch and -isysroot at
+ # all.
+ stripArch = stripSysroot = True
+ else:
+ stripArch = '-arch' in cc_args
+ stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot'))
+
+ if stripArch or 'ARCHFLAGS' in os.environ:
+ while True:
+ try:
+ index = compiler_so.index('-arch')
+ # Strip this argument and the next one:
+ del compiler_so[index:index+2]
+ except ValueError:
+ break
+
+ elif not _supports_arm64_builds():
+ # Look for "-arch arm64" and drop that
+ for idx in reversed(range(len(compiler_so))):
+ if compiler_so[idx] == '-arch' and compiler_so[idx+1] == "arm64":
+ del compiler_so[idx:idx+2]
+
+ if 'ARCHFLAGS' in os.environ and not stripArch:
+ # User specified different -arch flags in the environ,
+ # see also distutils.sysconfig
+ compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
+
+ if stripSysroot:
+ while True:
+ indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
+ if not indices:
+ break
+ index = indices[0]
+ if compiler_so[index] == '-isysroot':
+ # Strip this argument and the next one:
+ del compiler_so[index:index+2]
+ else:
+ # It's '-isysroot/some/path' in one arg
+ del compiler_so[index:index+1]
+
+ # Check if the SDK that is used during compilation actually exists,
+ # the universal build requires the usage of a universal SDK and not all
+ # users have that installed by default.
+ sysroot = None
+ argvar = cc_args
+ indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')]
+ if not indices:
+ argvar = compiler_so
+ indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
+
+ for idx in indices:
+ if argvar[idx] == '-isysroot':
+ sysroot = argvar[idx+1]
+ break
+ else:
+ sysroot = argvar[idx][len('-isysroot'):]
+ break
+
+ if sysroot and not os.path.isdir(sysroot):
+ sys.stderr.write(f"Compiling with an SDK that doesn't seem to exist: {sysroot}\n")
+ sys.stderr.write("Please check your Xcode installation\n")
+ sys.stderr.flush()
+
+ return compiler_so
+
+
+def customize_config_vars(_config_vars):
+ """Customize Python build configuration variables.
+
+ Called internally from sysconfig with a mutable mapping
+ containing name/value pairs parsed from the configured
+ makefile used to build this interpreter. Returns
+ the mapping updated as needed to reflect the environment
+ in which the interpreter is running; in the case of
+ a Python from a binary installer, the installed
+ environment may be very different from the build
+ environment, i.e. different OS levels, different
+ build tools, different available CPU architectures.
+
+ This customization is performed whenever
+ distutils.sysconfig.get_config_vars() is first
+ called. It may be used in environments where no
+ compilers are present, i.e. when installing pure
+ Python dists. Customization of compiler paths
+ and detection of unavailable archs is deferred
+ until the first extension module build is
+ requested (in distutils.sysconfig.customize_compiler).
+
+ Currently called from distutils.sysconfig
+ """
+
+ if not _supports_universal_builds():
+ # On Mac OS X before 10.4, check if -arch and -isysroot
+ # are in CFLAGS or LDFLAGS and remove them if they are.
+ # This is needed when building extensions on a 10.3 system
+ # using a universal build of python.
+ _remove_universal_flags(_config_vars)
+
+ # Allow user to override all archs with ARCHFLAGS env var
+ _override_all_archs(_config_vars)
+
+ # Remove references to sdks that are not found
+ _check_for_unavailable_sdk(_config_vars)
+
+ return _config_vars
+
+
+def customize_compiler(_config_vars):
+ """Customize compiler path and configuration variables.
+
+ This customization is performed when the first
+ extension module build is requested
+ in distutils.sysconfig.customize_compiler.
+ """
+
+ # Find a compiler to use for extension module builds
+ _find_appropriate_compiler(_config_vars)
+
+ # Remove ppc arch flags if not supported here
+ _remove_unsupported_archs(_config_vars)
+
+ # Allow user to override all archs with ARCHFLAGS env var
+ _override_all_archs(_config_vars)
+
+ return _config_vars
+
+
+def get_platform_osx(_config_vars, osname, release, machine):
+ """Filter values for get_platform()"""
+ # called from get_platform() in sysconfig and distutils.util
+ #
+ # For our purposes, we'll assume that the system version from
+ # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
+ # to. This makes the compatibility story a bit more sane because the
+ # machine is going to compile and link as if it were
+ # MACOSX_DEPLOYMENT_TARGET.
+
+ macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
+ macrelease = _get_system_version() or macver
+ macver = macver or macrelease
+
+ if macver:
+ release = macver
+ osname = "macosx"
+
+ # Use the original CFLAGS value, if available, so that we
+ # return the same machine type for the platform string.
+ # Otherwise, distutils may consider this a cross-compiling
+ # case and disallow installs.
+ cflags = _config_vars.get(_INITPRE+'CFLAGS',
+ _config_vars.get('CFLAGS', ''))
+ if macrelease:
+ try:
+ macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
+ except ValueError:
+ macrelease = (10, 3)
+ else:
+ # assume no universal support
+ macrelease = (10, 3)
+
+ if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
+ # The universal build will build fat binaries, but not on
+ # systems before 10.4
+
+ machine = 'fat'
+
+ archs = re.findall(r'-arch\s+(\S+)', cflags)
+ archs = tuple(sorted(set(archs)))
+
+ if len(archs) == 1:
+ machine = archs[0]
+ elif archs == ('arm64', 'x86_64'):
+ machine = 'universal2'
+ elif archs == ('i386', 'ppc'):
+ machine = 'fat'
+ elif archs == ('i386', 'x86_64'):
+ machine = 'intel'
+ elif archs == ('i386', 'ppc', 'x86_64'):
+ machine = 'fat3'
+ elif archs == ('ppc64', 'x86_64'):
+ machine = 'fat64'
+ elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
+ machine = 'universal'
+ else:
+ raise ValueError(
+ "Don't know machine value for archs=%r" % (archs,))
+
+ elif machine == 'i386':
+ # On OSX the machine type returned by uname is always the
+ # 32-bit variant, even if the executable architecture is
+ # the 64-bit variant
+ if sys.maxsize >= 2**32:
+ machine = 'x86_64'
+
+ elif machine in ('PowerPC', 'Power_Macintosh'):
+ # Pick a sane name for the PPC architecture.
+ # See 'i386' case
+ if sys.maxsize >= 2**32:
+ machine = 'ppc64'
+ else:
+ machine = 'ppc'
+
+ return (osname, release, machine)
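+
+# Example (illustrative): with MACOSX_DEPLOYMENT_TARGET='11.0' and CFLAGS
+# containing '-arch arm64 -arch x86_64', get_platform_osx() returns
+# ('macosx', '11.0', 'universal2').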
diff --git a/evalkit_cambrian/lib/python3.10/_strptime.py b/evalkit_cambrian/lib/python3.10/_strptime.py
new file mode 100644
index 0000000000000000000000000000000000000000..b97dfcce1e8e4d7dfe2eb8c3a22f2f9b7536c78b
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/_strptime.py
@@ -0,0 +1,579 @@
+"""Strptime-related classes and functions.
+
+CLASSES:
+ LocaleTime -- Discovers and stores locale-specific time information
+ TimeRE -- Creates regexes for pattern matching a string of text containing
+ time information
+
+FUNCTIONS:
+ _getlang -- Figure out what language is being used for the locale
+ strptime -- Calculates the time struct represented by the passed-in string
+
+"""
+import time
+import locale
+import calendar
+from re import compile as re_compile
+from re import IGNORECASE
+from re import escape as re_escape
+from datetime import (date as datetime_date,
+ timedelta as datetime_timedelta,
+ timezone as datetime_timezone)
+from _thread import allocate_lock as _thread_allocate_lock
+
+__all__ = []
+
+def _getlang():
+ # Figure out what the current language is set to.
+ return locale.getlocale(locale.LC_TIME)
+
+class LocaleTime(object):
+ """Stores and handles locale-specific information related to time.
+
+ ATTRIBUTES:
+ f_weekday -- full weekday names (7-item list)
+ a_weekday -- abbreviated weekday names (7-item list)
+ f_month -- full month names (13-item list; dummy value in [0], which
+ is added by code)
+ a_month -- abbreviated month names (13-item list, dummy value in
+ [0], which is added by code)
+ am_pm -- AM/PM representation (2-item list)
+ LC_date_time -- format string for date/time representation (string)
+ LC_date -- format string for date representation (string)
+ LC_time -- format string for time representation (string)
+ timezone -- daylight- and non-daylight-savings timezone representation
+ (2-item list of sets)
+ lang -- Language used by instance (2-item tuple)
+ """
+
+ def __init__(self):
+ """Set all attributes.
+
+ Order of methods called matters for dependency reasons.
+
+ The locale language is set at the start and then checked again before
+ exiting. This is to make sure that the attributes were not set with a
+ mix of information from more than one locale. This would most likely
+ happen when using threads where one thread calls a locale-dependent
+ function while another thread changes the locale while the function in
+ the other thread is still running. Proper coding would call for
+ locks to prevent changing the locale while locale-dependent code is
+ running. The check here is done in case someone does not think about
+ doing this.
+
+ The only other possible issue is if someone changed the timezone and did
+ not call time.tzset(). That is an issue for the programmer, though,
+ since changing the timezone is worthless without that call.
+
+ """
+ self.lang = _getlang()
+ self.__calc_weekday()
+ self.__calc_month()
+ self.__calc_am_pm()
+ self.__calc_timezone()
+ self.__calc_date_time()
+ if _getlang() != self.lang:
+ raise ValueError("locale changed during initialization")
+ if time.tzname != self.tzname or time.daylight != self.daylight:
+ raise ValueError("timezone changed during initialization")
+
+ def __calc_weekday(self):
+ # Set self.a_weekday and self.f_weekday using the calendar
+ # module.
+ a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
+ f_weekday = [calendar.day_name[i].lower() for i in range(7)]
+ self.a_weekday = a_weekday
+ self.f_weekday = f_weekday
+
+ def __calc_month(self):
+ # Set self.f_month and self.a_month using the calendar module.
+ a_month = [calendar.month_abbr[i].lower() for i in range(13)]
+ f_month = [calendar.month_name[i].lower() for i in range(13)]
+ self.a_month = a_month
+ self.f_month = f_month
+
+ def __calc_am_pm(self):
+ # Set self.am_pm by using time.strftime().
+
+ # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
+ # magical; it just happens to be used everywhere else that a
+ # static date is needed.
+ am_pm = []
+ for hour in (1, 22):
+ time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
+ am_pm.append(time.strftime("%p", time_tuple).lower())
+ self.am_pm = am_pm
+
+ def __calc_date_time(self):
+ # Set self.date_time, self.date, & self.time by using
+ # time.strftime().
+
+ # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
+ # overloaded numbers is minimized. The order in which we search for
+ # values within the format string is very important; it eliminates
+ # possible ambiguity for what something represents.
+ time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
+ date_time = [None, None, None]
+ date_time[0] = time.strftime("%c", time_tuple).lower()
+ date_time[1] = time.strftime("%x", time_tuple).lower()
+ date_time[2] = time.strftime("%X", time_tuple).lower()
+ replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
+ (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
+ (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
+ ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
+ ('44', '%M'), ('55', '%S'), ('76', '%j'),
+ ('17', '%d'), ('03', '%m'), ('3', '%m'),
+ # '3' needed for when no leading zero.
+ ('2', '%w'), ('10', '%I')]
+ replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
+ for tz in tz_values])
+ for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
+ current_format = date_time[offset]
+ for old, new in replacement_pairs:
+ # Must deal with possible lack of locale info
+ # manifesting itself as the empty string (e.g., Swedish's
+ # lack of AM/PM info) or a platform returning a tuple of empty
+ # strings (e.g., MacOS 9 having timezone as ('','')).
+ if old:
+ current_format = current_format.replace(old, new)
+ # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
+ # 2005-01-03 occurs before the first Monday of the year. Otherwise
+ # %U is used.
+ time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
+ if '00' in time.strftime(directive, time_tuple):
+ U_W = '%W'
+ else:
+ U_W = '%U'
+ date_time[offset] = current_format.replace('11', U_W)
+ self.LC_date_time = date_time[0]
+ self.LC_date = date_time[1]
+ self.LC_time = date_time[2]
+
+ def __calc_timezone(self):
+ # Set self.timezone by using time.tzname.
+ # Do not worry about possibility of time.tzname[0] == time.tzname[1]
+ # and time.daylight; handle that in strptime.
+ try:
+ time.tzset()
+ except AttributeError:
+ pass
+ self.tzname = time.tzname
+ self.daylight = time.daylight
+ no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()})
+ if self.daylight:
+ has_saving = frozenset({self.tzname[1].lower()})
+ else:
+ has_saving = frozenset()
+ self.timezone = (no_saving, has_saving)
+
+
+class TimeRE(dict):
+ """Handle conversion from format directives to regexes."""
+
+ def __init__(self, locale_time=None):
+ """Create keys/values.
+
+ Order of execution is important for dependency reasons.
+
+ """
+ if locale_time:
+ self.locale_time = locale_time
+ else:
+ self.locale_time = LocaleTime()
+ base = super()
+ base.__init__({
+ # The " [1-9]" part of the regex is to make %c from ANSI C work
+ 'd': r"(?P3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
+ 'f': r"(?P[0-9]{1,6})",
+ 'H': r"(?P2[0-3]|[0-1]\d|\d)",
+ 'I': r"(?P1[0-2]|0[1-9]|[1-9])",
+ 'G': r"(?P\d\d\d\d)",
+ 'j': r"(?P36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
+ 'm': r"(?P1[0-2]|0[1-9]|[1-9])",
+ 'M': r"(?P[0-5]\d|\d)",
+ 'S': r"(?P6[0-1]|[0-5]\d|\d)",
+ 'U': r"(?P5[0-3]|[0-4]\d|\d)",
+ 'w': r"(?P[0-6])",
+ 'u': r"(?P[1-7])",
+ 'V': r"(?P5[0-3]|0[1-9]|[1-4]\d|\d)",
+ # W is set below by using 'U'
+ 'y': r"(?P\d\d)",
+ #XXX: Does 'Y' need to worry about having less or more than
+ # 4 digits?
+ 'Y': r"(?P\d\d\d\d)",
+ 'z': r"(?P[+-]\d\d:?[0-5]\d(:?[0-5]\d(\.\d{1,6})?)?|(?-i:Z))",
+ 'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
+ 'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
+ 'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
+ 'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
+ 'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
+ 'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
+ for tz in tz_names),
+ 'Z'),
+ '%': '%'})
+ base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
+ base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
+ base.__setitem__('x', self.pattern(self.locale_time.LC_date))
+ base.__setitem__('X', self.pattern(self.locale_time.LC_time))
+
+ def __seqToRE(self, to_convert, directive):
+ """Convert a list to a regex string for matching a directive.
+
+ Want possible matching values to be from longest to shortest. This
+ prevents the possibility of a match occurring for a value that is also
+ a substring of a larger value that should have matched (e.g., 'abc'
+ matching when 'abcdef' should have been the match).
+
+ """
+ to_convert = sorted(to_convert, key=len, reverse=True)
+ for value in to_convert:
+ if value != '':
+ break
+ else:
+ return ''
+ regex = '|'.join(re_escape(stuff) for stuff in to_convert)
+ regex = '(?P<%s>%s' % (directive, regex)
+ return '%s)' % regex
+
+ def pattern(self, format):
+ """Return regex pattern for the format string.
+
+ Need to make sure that any characters that might be interpreted as
+ regex syntax are escaped.
+
+ """
+ processed_format = ''
+ # The sub() call escapes all characters that might be misconstrued
+ # as regex syntax. Cannot use re.escape since we have to deal with
+ # format directives (%m, etc.).
+ regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
+ format = regex_chars.sub(r"\\\1", format)
+ whitespace_replacement = re_compile(r'\s+')
+ format = whitespace_replacement.sub(r'\\s+', format)
+ while '%' in format:
+ directive_index = format.index('%')+1
+ processed_format = "%s%s%s" % (processed_format,
+ format[:directive_index-1],
+ self[format[directive_index]])
+ format = format[directive_index+1:]
+ return "%s%s" % (processed_format, format)
+
+ def compile(self, format):
+ """Return a compiled re object for the format string."""
+ return re_compile(self.pattern(format), IGNORECASE)
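+
+# Example (illustrative): TimeRE().pattern('%Y-%m') yields
+# r"(?P<Y>\d\d\d\d)-(?P<m>1[0-2]|0[1-9]|[1-9])".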
+
+_cache_lock = _thread_allocate_lock()
+# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
+# first!
+_TimeRE_cache = TimeRE()
+_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
+_regex_cache = {}
+
+def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
+ """Calculate the Julian day based on the year, week of the year, and day of
+ the week, with week_starts_Mon representing whether the week of the year
+ assumes the week starts on Sunday (False) or Monday (True)."""
+ first_weekday = datetime_date(year, 1, 1).weekday()
+ # If we are dealing with the %U directive (week starts on Sunday), it's
+ # easier to just shift the view to Sunday being the first day of the
+ # week.
+ if not week_starts_Mon:
+ first_weekday = (first_weekday + 1) % 7
+ day_of_week = (day_of_week + 1) % 7
+ # Need to watch out for a week 0 (when the first day of the year is not
+ # the same as that specified by %U or %W).
+ week_0_length = (7 - first_weekday) % 7
+ if week_of_year == 0:
+ return 1 + day_of_week - first_weekday
+ else:
+ days_to_week = week_0_length + (7 * (week_of_year - 1))
+ return 1 + days_to_week + day_of_week
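+
+# Worked example (illustrative): Jan 1, 2005 fell on a Saturday, so for %U
+# (weeks starting on Sunday) week 0 is one day long, and
+# _calc_julian_from_U_or_W(2005, 1, 6, False) returns 2, i.e. Sunday,
+# Jan 2, 2005, the first day of week 1.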
+
+
+def _calc_julian_from_V(iso_year, iso_week, iso_weekday):
+ """Calculate the Julian day based on the ISO 8601 year, week, and weekday.
+ ISO weeks start on Mondays, with week 01 being the week containing 4 Jan.
+ ISO week days range from 1 (Monday) to 7 (Sunday).
+ """
+ correction = datetime_date(iso_year, 1, 4).isoweekday() + 3
+ ordinal = (iso_week * 7) + iso_weekday - correction
+ # ordinal may be negative or 0 now, which means the date is in the previous
+ # calendar year
+ if ordinal < 1:
+ ordinal += datetime_date(iso_year, 1, 1).toordinal()
+ iso_year -= 1
+ ordinal -= datetime_date(iso_year, 1, 1).toordinal()
+ return iso_year, ordinal
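+
+# Worked example (illustrative): ISO week 2004-W01 starts on Monday
+# 2003-12-29, so _calc_julian_from_V(2004, 1, 1) returns (2003, 363),
+# day 363 of 2003.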
+
+
+def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
+ """Return a 2-tuple consisting of a time struct and an int containing
+ the number of microseconds based on the input string and the
+ format string."""
+
+ for index, arg in enumerate([data_string, format]):
+ if not isinstance(arg, str):
+ msg = "strptime() argument {} must be str, not {}"
+ raise TypeError(msg.format(index, type(arg)))
+
+ global _TimeRE_cache, _regex_cache
+ with _cache_lock:
+ locale_time = _TimeRE_cache.locale_time
+ if (_getlang() != locale_time.lang or
+ time.tzname != locale_time.tzname or
+ time.daylight != locale_time.daylight):
+ _TimeRE_cache = TimeRE()
+ _regex_cache.clear()
+ locale_time = _TimeRE_cache.locale_time
+ if len(_regex_cache) > _CACHE_MAX_SIZE:
+ _regex_cache.clear()
+ format_regex = _regex_cache.get(format)
+ if not format_regex:
+ try:
+ format_regex = _TimeRE_cache.compile(format)
+ # KeyError raised when a bad format is found; can be specified as
+ # \\, in which case it was a stray % but with a space after it
+ except KeyError as err:
+ bad_directive = err.args[0]
+ if bad_directive == "\\":
+ bad_directive = "%"
+ del err
+ raise ValueError("'%s' is a bad directive in format '%s'" %
+ (bad_directive, format)) from None
+ # IndexError only occurs when the format string is "%"
+ except IndexError:
+ raise ValueError("stray %% in format '%s'" % format) from None
+ _regex_cache[format] = format_regex
+ found = format_regex.match(data_string)
+ if not found:
+ raise ValueError("time data %r does not match format %r" %
+ (data_string, format))
+ if len(data_string) != found.end():
+ raise ValueError("unconverted data remains: %s" %
+ data_string[found.end():])
+
+ iso_year = year = None
+ month = day = 1
+ hour = minute = second = fraction = 0
+ tz = -1
+ gmtoff = None
+ gmtoff_fraction = 0
+ # Default to -1 to signify that values are not known; not critical to
+ # have, though
+ iso_week = week_of_year = None
+ week_of_year_start = None
+ # weekday and julian defaulted to None so as to signal need to calculate
+ # values
+ weekday = julian = None
+ found_dict = found.groupdict()
+ for group_key in found_dict.keys():
+ # Directives not explicitly handled below:
+ # c, x, X
+ # handled by making out of other directives
+ # U, W
+ # worthless without day of the week
+ if group_key == 'y':
+ year = int(found_dict['y'])
+ # The Open Group specification for strptime() states that a %y
+ # value in the range of [00, 68] is in the century 2000, while
+ # [69, 99] is in the century 1900
+ if year <= 68:
+ year += 2000
+ else:
+ year += 1900
+ elif group_key == 'Y':
+ year = int(found_dict['Y'])
+ elif group_key == 'G':
+ iso_year = int(found_dict['G'])
+ elif group_key == 'm':
+ month = int(found_dict['m'])
+ elif group_key == 'B':
+ month = locale_time.f_month.index(found_dict['B'].lower())
+ elif group_key == 'b':
+ month = locale_time.a_month.index(found_dict['b'].lower())
+ elif group_key == 'd':
+ day = int(found_dict['d'])
+ elif group_key == 'H':
+ hour = int(found_dict['H'])
+ elif group_key == 'I':
+ hour = int(found_dict['I'])
+ ampm = found_dict.get('p', '').lower()
+ # If there was no AM/PM indicator, we'll treat this like AM
+ if ampm in ('', locale_time.am_pm[0]):
+ # We're in AM so the hour is correct unless we're
+ # looking at 12 midnight.
+ # 12 midnight == 12 AM == hour 0
+ if hour == 12:
+ hour = 0
+ elif ampm == locale_time.am_pm[1]:
+ # We're in PM so we need to add 12 to the hour unless
+ # we're looking at 12 noon.
+ # 12 noon == 12 PM == hour 12
+ if hour != 12:
+ hour += 12
+ elif group_key == 'M':
+ minute = int(found_dict['M'])
+ elif group_key == 'S':
+ second = int(found_dict['S'])
+ elif group_key == 'f':
+ s = found_dict['f']
+ # Pad to always return microseconds.
+ s += "0" * (6 - len(s))
+ fraction = int(s)
+ elif group_key == 'A':
+ weekday = locale_time.f_weekday.index(found_dict['A'].lower())
+ elif group_key == 'a':
+ weekday = locale_time.a_weekday.index(found_dict['a'].lower())
+ elif group_key == 'w':
+ weekday = int(found_dict['w'])
+ if weekday == 0:
+ weekday = 6
+ else:
+ weekday -= 1
+ elif group_key == 'u':
+ weekday = int(found_dict['u'])
+ weekday -= 1
+ elif group_key == 'j':
+ julian = int(found_dict['j'])
+ elif group_key in ('U', 'W'):
+ week_of_year = int(found_dict[group_key])
+ if group_key == 'U':
+ # U starts week on Sunday.
+ week_of_year_start = 6
+ else:
+ # W starts week on Monday.
+ week_of_year_start = 0
+ elif group_key == 'V':
+ iso_week = int(found_dict['V'])
+ elif group_key == 'z':
+ z = found_dict['z']
+ if z == 'Z':
+ gmtoff = 0
+ else:
+ if z[3] == ':':
+ z = z[:3] + z[4:]
+ if len(z) > 5:
+ if z[5] != ':':
+ msg = f"Inconsistent use of : in {found_dict['z']}"
+ raise ValueError(msg)
+ z = z[:5] + z[6:]
+ hours = int(z[1:3])
+ minutes = int(z[3:5])
+ seconds = int(z[5:7] or 0)
+ gmtoff = (hours * 60 * 60) + (minutes * 60) + seconds
+ gmtoff_remainder = z[8:]
+ # Pad to always return microseconds.
+ gmtoff_remainder_padding = "0" * (6 - len(gmtoff_remainder))
+ gmtoff_fraction = int(gmtoff_remainder + gmtoff_remainder_padding)
+ if z.startswith("-"):
+ gmtoff = -gmtoff
+ gmtoff_fraction = -gmtoff_fraction
+ elif group_key == 'Z':
+ # Since -1 is default value only need to worry about setting tz if
+ # it can be something other than -1.
+ found_zone = found_dict['Z'].lower()
+ for value, tz_values in enumerate(locale_time.timezone):
+ if found_zone in tz_values:
+ # Deal with bad locale setup where timezone names are the
+ # same and yet time.daylight is true; too ambiguous to
+ # be able to tell what timezone has daylight savings
+ if (time.tzname[0] == time.tzname[1] and
+ time.daylight and found_zone not in ("utc", "gmt")):
+ break
+ else:
+ tz = value
+ break
+ # Deal with the cases where ambiguities arise:
+ # don't assume default values for the ISO week/year
+ if year is None and iso_year is not None:
+ if iso_week is None or weekday is None:
+ raise ValueError("ISO year directive '%G' must be used with "
+ "the ISO week directive '%V' and a weekday "
+ "directive ('%A', '%a', '%w', or '%u').")
+ if julian is not None:
+ raise ValueError("Day of the year directive '%j' is not "
+ "compatible with ISO year directive '%G'. "
+ "Use '%Y' instead.")
+ elif week_of_year is None and iso_week is not None:
+ if weekday is None:
+ raise ValueError("ISO week directive '%V' must be used with "
+ "the ISO year directive '%G' and a weekday "
+ "directive ('%A', '%a', '%w', or '%u').")
+ else:
+ raise ValueError("ISO week directive '%V' is incompatible with "
+ "the year directive '%Y'. Use the ISO year '%G' "
+ "instead.")
+
+ leap_year_fix = False
+ if year is None and month == 2 and day == 29:
+ year = 1904 # 1904 is first leap year of 20th century
+ leap_year_fix = True
+ elif year is None:
+ year = 1900
+
+
+ # If we know the week of the year and what day of that week, we can figure
+ # out the Julian day of the year.
+ if julian is None and weekday is not None:
+ if week_of_year is not None:
+ week_starts_Mon = True if week_of_year_start == 0 else False
+ julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
+ week_starts_Mon)
+ elif iso_year is not None and iso_week is not None:
+ year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1)
+ if julian is not None and julian <= 0:
+ year -= 1
+ yday = 366 if calendar.isleap(year) else 365
+ julian += yday
+
+ if julian is None:
+ # Cannot pre-calculate datetime_date() since can change in Julian
+ # calculation and thus could have different value for the day of
+ # the week calculation.
+ # Need to add 1 to result since first day of the year is 1, not 0.
+ julian = datetime_date(year, month, day).toordinal() - \
+ datetime_date(year, 1, 1).toordinal() + 1
+ else: # Assume that if they bothered to include Julian day (or if it was
+ # calculated above with year/week/weekday) it will be accurate.
+ datetime_result = datetime_date.fromordinal(
+ (julian - 1) +
+ datetime_date(year, 1, 1).toordinal())
+ year = datetime_result.year
+ month = datetime_result.month
+ day = datetime_result.day
+ if weekday is None:
+ weekday = datetime_date(year, month, day).weekday()
+ # Add timezone info
+ tzname = found_dict.get("Z")
+
+ if leap_year_fix:
+ # the caller didn't supply a year but asked for Feb 29th. We couldn't
+ # use the default of 1900 for computations. We set it back to ensure
+ # that February 29th is smaller than March 1st.
+ year = 1900
+
+ return (year, month, day,
+ hour, minute, second,
+ weekday, julian, tz, tzname, gmtoff), fraction, gmtoff_fraction
+
+def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
+ """Return a time struct based on the input string and the
+ format string."""
+ tt = _strptime(data_string, format)[0]
+ return time.struct_time(tt[:time._STRUCT_TM_ITEMS])
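+
+# Example (illustrative): _strptime_time("Wed Mar 17 22:44:55 1999") returns
+# a time.struct_time for 1999-03-17 22:44:55.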
+
+def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
+ """Return a class cls instance based on the input string and the
+ format string."""
+ tt, fraction, gmtoff_fraction = _strptime(data_string, format)
+ tzname, gmtoff = tt[-2:]
+ args = tt[:6] + (fraction,)
+ if gmtoff is not None:
+ tzdelta = datetime_timedelta(seconds=gmtoff, microseconds=gmtoff_fraction)
+ if tzname:
+ tz = datetime_timezone(tzdelta, tzname)
+ else:
+ tz = datetime_timezone(tzdelta)
+ args += (tz,)
+
+ return cls(*args)
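+
+# Example (illustrative): datetime.datetime.strptime delegates here, so
+# parsing "1999-03-17 22:44:55+0000" with "%Y-%m-%d %H:%M:%S%z" yields a
+# timezone-aware datetime.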
diff --git a/evalkit_cambrian/lib/python3.10/_sysconfigdata__linux_x86_64-linux-gnu.py.orig b/evalkit_cambrian/lib/python3.10/_sysconfigdata__linux_x86_64-linux-gnu.py.orig
new file mode 100644
index 0000000000000000000000000000000000000000..47e3ea63b887cc6acd12ae41bd38a1b48c4d7c1e
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/_sysconfigdata__linux_x86_64-linux-gnu.py.orig
@@ -0,0 +1,986 @@
+# system configuration generated and used by the sysconfig module
+build_time_vars = {'ABIFLAGS': '',
+ 'AC_APPLE_UNIVERSAL_BUILD': 0,
+ 'AIX_BUILDDATE': 0,
+ 'AIX_GENUINE_CPLUSPLUS': 0,
+ 'ALIGNOF_LONG': 8,
+ 'ALIGNOF_SIZE_T': 8,
+ 'ALT_SOABI': 0,
+ 'ANDROID_API_LEVEL': 0,
+ 'AR': 'x86_64-conda-linux-gnu-ar',
+ 'ARFLAGS': 'rcs',
+ 'BASECFLAGS': '-Wno-unused-result -Wsign-compare',
+ 'BASECPPFLAGS': '-IObjects -IInclude -IPython',
+ 'BASEMODLIBS': '',
+ 'BINDIR': '/root/envs/evalkit_cambrian/bin',
+ 'BINLIBDEST': '/root/envs/evalkit_cambrian/lib/python3.10',
+ 'BLDLIBRARY': 'libpython3.10.a',
+ 'BLDSHARED': 'x86_64-conda-linux-gnu-gcc -pthread -shared -Wl,-O2 '
+ '-Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now '
+ '-Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib '
+ '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro '
+ '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib',
+ 'BUILDEXE': '',
+ 'BUILDPYTHON': 'python',
+ 'BUILD_GNU_TYPE': 'x86_64-conda-linux-gnu',
+ 'BYTESTR_DEPS': '\\',
+ 'CC': 'x86_64-conda-linux-gnu-gcc -pthread',
+ 'CCSHARED': '-fPIC',
+ 'CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 -Wall '
+ '-march=nocona -mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe '
+ '-isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ '-march=nocona -mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe '
+ '-isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' ',
+ 'CFLAGSFORSHARED': '',
+ 'CFLAGS_ALIASING': '',
+ 'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in '
+ 'Makefile.pre.in',
+ 'CONFIGURE_CFLAGS': '-march=nocona -mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 '
+ '-ffunction-sections -pipe -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ ' ',
+ 'CONFIGURE_CFLAGS_NODIST': '-fno-semantic-interposition '
+ ' '
+ ' -g -std=c99 -Wextra '
+ '-Wno-unused-result -Wno-unused-parameter '
+ '-Wno-missing-field-initializers '
+ '-Werror=implicit-function-declaration '
+ '-fvisibility=hidden',
+ 'CONFIGURE_CPPFLAGS': '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include',
+ 'CONFIGURE_LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro '
+ '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib',
+ 'CONFIGURE_LDFLAGS_NODIST': '-fno-semantic-interposition '
+ ' '
+ ' -g',
+ 'CONFIG_ARGS': "'--prefix=/root/envs/evalkit_cambrian' "
+ "'--build=x86_64-conda-linux-gnu' "
+ "'--host=x86_64-conda-linux-gnu' '--enable-ipv6' "
+ "'--with-ensurepip=no' "
+ "'--with-tzpath=/root/envs/evalkit_cambrian/share/zoneinfo' "
+ "'--with-computed-gotos' '--with-system-ffi' "
+ "'--enable-loadable-sqlite-extensions' "
+ "'--with-tcltk-includes=-I/root/envs/evalkit_cambrian/include' "
+ "'--with-tcltk-libs=-L/root/envs/evalkit_cambrian/lib "
+ "-ltcl8.6 -ltk8.6' '--with-platlibdir=lib' '--with-lto' "
+ "'--enable-optimizations' "
+ "'-oldincludedir=/croot/python-split_1733933809325/_build_env/x86_64-conda-linux-gnu/sysroot/usr/include' "
+ "'--disable-shared' 'PROFILE_TASK=-m test --pgo' "
+ "'build_alias=x86_64-conda-linux-gnu' "
+ "'host_alias=x86_64-conda-linux-gnu' 'MACHDEP=linux' "
+ "'CC=x86_64-conda-linux-gnu-gcc' 'CFLAGS=-march=nocona "
+ '-mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 -ffunction-sections '
+ '-pipe -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ "' 'LDFLAGS=-Wl,-O2 -Wl,--sort-common -Wl,--as-needed "
+ '-Wl,-z,relro -Wl,-z,now -Wl,--disable-new-dtags '
+ '-Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ "-L/root/envs/evalkit_cambrian/lib' "
+ "'CPPFLAGS=-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem "
+ '/root/envs/evalkit_cambrian/include '
+ "-I/root/envs/evalkit_cambrian/include' "
+ "'CPP=/croot/python-split_1733933809325/_build_env/bin/x86_64-conda-linux-gnu-cpp' "
+ "'PKG_CONFIG_PATH=/root/envs/evalkit_cambrian/lib/pkgconfig'",
+ 'CONFINCLUDEDIR': '/root/envs/evalkit_cambrian/include',
+ 'CONFINCLUDEPY': '/root/envs/evalkit_cambrian/include/python3.10',
+ 'COREPYTHONPATH': '',
+ 'COVERAGE_INFO': '/croot/python-split_1733933809325/work/build-static/coverage.info',
+ 'COVERAGE_REPORT': '/croot/python-split_1733933809325/work/build-static/lcov-report',
+ 'COVERAGE_REPORT_OPTIONS': '--no-branch-coverage --title "CPython lcov '
+ 'report"',
+ 'CPPFLAGS': '-IObjects -IInclude -IPython -I. '
+ '-I/croot/python-split_1733933809325/work/Include -DNDEBUG '
+ '-D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include '
+ '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include',
+ 'CXX': 'x86_64-conda-linux-gnu-c++ -pthread',
+ 'DESTDIRS': '/root/envs/evalkit_cambrian '
+ '/root/envs/evalkit_cambrian/lib '
+ '/root/envs/evalkit_cambrian/lib/python3.10 '
+ '/root/envs/evalkit_cambrian/lib/python3.10/lib-dynload',
+ 'DESTLIB': '/root/envs/evalkit_cambrian/lib/python3.10',
+ 'DESTPATH': '',
+ 'DESTSHARED': '/root/envs/evalkit_cambrian/lib/python3.10/lib-dynload',
+ 'DFLAGS': '',
+ 'DIRMODE': 755,
+ 'DIST': 'README.rst ChangeLog configure configure.ac acconfig.h pyconfig.h.in '
+ 'Makefile.pre.in Include Lib Misc Ext-dummy',
+ 'DISTDIRS': 'Include Lib Misc Ext-dummy',
+ 'DISTFILES': 'README.rst ChangeLog configure configure.ac acconfig.h '
+ 'pyconfig.h.in Makefile.pre.in',
+ 'DLINCLDIR': '.',
+ 'DLLLIBRARY': '',
+ 'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0,
+ 'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0,
+ 'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1,
+ 'DTRACE': '',
+ 'DTRACE_DEPS': '\\',
+ 'DTRACE_HEADERS': '',
+ 'DTRACE_OBJS': '',
+ 'DYNLOADFILE': 'dynload_shlib.o',
+ 'ENABLE_IPV6': 1,
+ 'ENSUREPIP': 'no',
+ 'EXE': '',
+ 'EXEMODE': 755,
+ 'EXPERIMENTAL_ISOLATED_SUBINTERPRETERS': 0,
+ 'EXPORTSFROM': '',
+ 'EXPORTSYMS': '',
+ 'EXTRATESTOPTS': '',
+ 'EXT_SUFFIX': '.cpython-310-x86_64-linux-gnu.so',
+ 'FILEMODE': 644,
+ 'FLOAT_WORDS_BIGENDIAN': 0,
+ 'FLOCK_NEEDS_LIBBSD': 0,
+ 'GETPGRP_HAVE_ARG': 0,
+ 'GITBRANCH': '',
+ 'GITTAG': '',
+ 'GITVERSION': '',
+ 'GNULD': 'yes',
+ 'HAVE_ACCEPT4': 1,
+ 'HAVE_ACOSH': 1,
+ 'HAVE_ADDRINFO': 1,
+ 'HAVE_ALARM': 1,
+ 'HAVE_ALIGNED_REQUIRED': 0,
+ 'HAVE_ALLOCA_H': 1,
+ 'HAVE_ALTZONE': 0,
+ 'HAVE_ASINH': 1,
+ 'HAVE_ASM_TYPES_H': 1,
+ 'HAVE_ATANH': 1,
+ 'HAVE_BIND_TEXTDOMAIN_CODESET': 1,
+ 'HAVE_BLUETOOTH_BLUETOOTH_H': 0,
+ 'HAVE_BLUETOOTH_H': 0,
+ 'HAVE_BROKEN_MBSTOWCS': 0,
+ 'HAVE_BROKEN_NICE': 0,
+ 'HAVE_BROKEN_PIPE_BUF': 0,
+ 'HAVE_BROKEN_POLL': 0,
+ 'HAVE_BROKEN_POSIX_SEMAPHORES': 0,
+ 'HAVE_BROKEN_PTHREAD_SIGMASK': 0,
+ 'HAVE_BROKEN_SEM_GETVALUE': 0,
+ 'HAVE_BROKEN_UNSETENV': 0,
+ 'HAVE_BUILTIN_ATOMIC': 1,
+ 'HAVE_CHFLAGS': 0,
+ 'HAVE_CHOWN': 1,
+ 'HAVE_CHROOT': 1,
+ 'HAVE_CLOCK': 1,
+ 'HAVE_CLOCK_GETRES': 1,
+ 'HAVE_CLOCK_GETTIME': 1,
+ 'HAVE_CLOCK_SETTIME': 1,
+ 'HAVE_CLOSE_RANGE': 0,
+ 'HAVE_COMPUTED_GOTOS': 1,
+ 'HAVE_CONFSTR': 1,
+ 'HAVE_CONIO_H': 0,
+ 'HAVE_COPYSIGN': 1,
+ 'HAVE_COPY_FILE_RANGE': 0,
+ 'HAVE_CRYPT_H': 1,
+ 'HAVE_CRYPT_R': 1,
+ 'HAVE_CTERMID': 1,
+ 'HAVE_CTERMID_R': 0,
+ 'HAVE_CURSES_FILTER': 1,
+ 'HAVE_CURSES_H': 1,
+ 'HAVE_CURSES_HAS_KEY': 1,
+ 'HAVE_CURSES_IMMEDOK': 1,
+ 'HAVE_CURSES_IS_PAD': 1,
+ 'HAVE_CURSES_IS_TERM_RESIZED': 1,
+ 'HAVE_CURSES_RESIZETERM': 1,
+ 'HAVE_CURSES_RESIZE_TERM': 1,
+ 'HAVE_CURSES_SYNCOK': 1,
+ 'HAVE_CURSES_TYPEAHEAD': 1,
+ 'HAVE_CURSES_USE_ENV': 1,
+ 'HAVE_CURSES_WCHGAT': 1,
+ 'HAVE_DECL_ISFINITE': 1,
+ 'HAVE_DECL_ISINF': 1,
+ 'HAVE_DECL_ISNAN': 1,
+ 'HAVE_DECL_RTLD_DEEPBIND': 1,
+ 'HAVE_DECL_RTLD_GLOBAL': 1,
+ 'HAVE_DECL_RTLD_LAZY': 1,
+ 'HAVE_DECL_RTLD_LOCAL': 1,
+ 'HAVE_DECL_RTLD_MEMBER': 0,
+ 'HAVE_DECL_RTLD_NODELETE': 1,
+ 'HAVE_DECL_RTLD_NOLOAD': 1,
+ 'HAVE_DECL_RTLD_NOW': 1,
+ 'HAVE_DECL_TZNAME': 0,
+ 'HAVE_DEVICE_MACROS': 1,
+ 'HAVE_DEV_PTC': 0,
+ 'HAVE_DEV_PTMX': 1,
+ 'HAVE_DIRECT_H': 0,
+ 'HAVE_DIRENT_D_TYPE': 1,
+ 'HAVE_DIRENT_H': 1,
+ 'HAVE_DIRFD': 1,
+ 'HAVE_DLFCN_H': 1,
+ 'HAVE_DLOPEN': 1,
+ 'HAVE_DUP2': 1,
+ 'HAVE_DUP3': 1,
+ 'HAVE_DYLD_SHARED_CACHE_CONTAINS_PATH': 0,
+ 'HAVE_DYNAMIC_LOADING': 1,
+ 'HAVE_ENDIAN_H': 1,
+ 'HAVE_EPOLL': 1,
+ 'HAVE_EPOLL_CREATE1': 1,
+ 'HAVE_ERF': 1,
+ 'HAVE_ERFC': 1,
+ 'HAVE_ERRNO_H': 1,
+ 'HAVE_EVENTFD': 1,
+ 'HAVE_EXECV': 1,
+ 'HAVE_EXPLICIT_BZERO': 0,
+ 'HAVE_EXPLICIT_MEMSET': 0,
+ 'HAVE_EXPM1': 1,
+ 'HAVE_FACCESSAT': 1,
+ 'HAVE_FCHDIR': 1,
+ 'HAVE_FCHMOD': 1,
+ 'HAVE_FCHMODAT': 1,
+ 'HAVE_FCHOWN': 1,
+ 'HAVE_FCHOWNAT': 1,
+ 'HAVE_FCNTL_H': 1,
+ 'HAVE_FDATASYNC': 1,
+ 'HAVE_FDOPENDIR': 1,
+ 'HAVE_FDWALK': 0,
+ 'HAVE_FEXECVE': 1,
+ 'HAVE_FINITE': 1,
+ 'HAVE_FLOCK': 1,
+ 'HAVE_FORK': 1,
+ 'HAVE_FORKPTY': 1,
+ 'HAVE_FPATHCONF': 1,
+ 'HAVE_FSEEK64': 0,
+ 'HAVE_FSEEKO': 1,
+ 'HAVE_FSTATAT': 1,
+ 'HAVE_FSTATVFS': 1,
+ 'HAVE_FSYNC': 1,
+ 'HAVE_FTELL64': 0,
+ 'HAVE_FTELLO': 1,
+ 'HAVE_FTIME': 1,
+ 'HAVE_FTRUNCATE': 1,
+ 'HAVE_FUTIMENS': 1,
+ 'HAVE_FUTIMES': 1,
+ 'HAVE_FUTIMESAT': 1,
+ 'HAVE_GAI_STRERROR': 1,
+ 'HAVE_GAMMA': 1,
+ 'HAVE_GCC_ASM_FOR_MC68881': 0,
+ 'HAVE_GCC_ASM_FOR_X64': 1,
+ 'HAVE_GCC_ASM_FOR_X87': 1,
+ 'HAVE_GCC_UINT128_T': 1,
+ 'HAVE_GETADDRINFO': 1,
+ 'HAVE_GETC_UNLOCKED': 1,
+ 'HAVE_GETENTROPY': 0,
+ 'HAVE_GETGRGID_R': 1,
+ 'HAVE_GETGRNAM_R': 1,
+ 'HAVE_GETGROUPLIST': 1,
+ 'HAVE_GETGROUPS': 1,
+ 'HAVE_GETHOSTBYNAME': 0,
+ 'HAVE_GETHOSTBYNAME_R': 1,
+ 'HAVE_GETHOSTBYNAME_R_3_ARG': 0,
+ 'HAVE_GETHOSTBYNAME_R_5_ARG': 0,
+ 'HAVE_GETHOSTBYNAME_R_6_ARG': 1,
+ 'HAVE_GETITIMER': 1,
+ 'HAVE_GETLOADAVG': 1,
+ 'HAVE_GETLOGIN': 1,
+ 'HAVE_GETNAMEINFO': 1,
+ 'HAVE_GETPAGESIZE': 1,
+ 'HAVE_GETPEERNAME': 1,
+ 'HAVE_GETPGID': 1,
+ 'HAVE_GETPGRP': 1,
+ 'HAVE_GETPID': 1,
+ 'HAVE_GETPRIORITY': 1,
+ 'HAVE_GETPWENT': 1,
+ 'HAVE_GETPWNAM_R': 1,
+ 'HAVE_GETPWUID_R': 1,
+ 'HAVE_GETRANDOM': 0,
+ 'HAVE_GETRANDOM_SYSCALL': 1,
+ 'HAVE_GETRESGID': 1,
+ 'HAVE_GETRESUID': 1,
+ 'HAVE_GETSID': 1,
+ 'HAVE_GETSPENT': 1,
+ 'HAVE_GETSPNAM': 1,
+ 'HAVE_GETWD': 1,
+ 'HAVE_GLIBC_MEMMOVE_BUG': 0,
+ 'HAVE_GRP_H': 1,
+ 'HAVE_HSTRERROR': 1,
+ 'HAVE_HTOLE64': 1,
+ 'HAVE_HYPOT': 1,
+ 'HAVE_IEEEFP_H': 0,
+ 'HAVE_IF_NAMEINDEX': 1,
+ 'HAVE_INET_ATON': 1,
+ 'HAVE_INET_PTON': 1,
+ 'HAVE_INITGROUPS': 1,
+ 'HAVE_INTTYPES_H': 1,
+ 'HAVE_IO_H': 0,
+ 'HAVE_IPA_PURE_CONST_BUG': 0,
+ 'HAVE_KILL': 1,
+ 'HAVE_KILLPG': 1,
+ 'HAVE_KQUEUE': 0,
+ 'HAVE_LANGINFO_H': 1,
+ 'HAVE_LARGEFILE_SUPPORT': 0,
+ 'HAVE_LCHFLAGS': 0,
+ 'HAVE_LCHMOD': 0,
+ 'HAVE_LCHOWN': 1,
+ 'HAVE_LGAMMA': 1,
+ 'HAVE_LIBDL': 1,
+ 'HAVE_LIBDLD': 0,
+ 'HAVE_LIBIEEE': 0,
+ 'HAVE_LIBINTL_H': 1,
+ 'HAVE_LIBREADLINE': 1,
+ 'HAVE_LIBRESOLV': 0,
+ 'HAVE_LIBSENDFILE': 0,
+ 'HAVE_LIBUTIL_H': 0,
+ 'HAVE_LIBUUID': 1,
+ 'HAVE_LINK': 1,
+ 'HAVE_LINKAT': 1,
+ 'HAVE_LINUX_AUXVEC_H': 1,
+ 'HAVE_LINUX_CAN_BCM_H': 1,
+ 'HAVE_LINUX_CAN_H': 1,
+ 'HAVE_LINUX_CAN_J1939_H': 0,
+ 'HAVE_LINUX_CAN_RAW_FD_FRAMES': 1,
+ 'HAVE_LINUX_CAN_RAW_H': 1,
+ 'HAVE_LINUX_CAN_RAW_JOIN_FILTERS': 1,
+ 'HAVE_LINUX_MEMFD_H': 1,
+ 'HAVE_LINUX_NETLINK_H': 1,
+ 'HAVE_LINUX_QRTR_H': 0,
+ 'HAVE_LINUX_RANDOM_H': 1,
+ 'HAVE_LINUX_TIPC_H': 1,
+ 'HAVE_LINUX_VM_SOCKETS_H': 1,
+ 'HAVE_LINUX_WAIT_H': 1,
+ 'HAVE_LOCKF': 1,
+ 'HAVE_LOG1P': 1,
+ 'HAVE_LOG2': 1,
+ 'HAVE_LONG_DOUBLE': 1,
+ 'HAVE_LSTAT': 1,
+ 'HAVE_LUTIMES': 1,
+ 'HAVE_MADVISE': 1,
+ 'HAVE_MAKEDEV': 1,
+ 'HAVE_MBRTOWC': 1,
+ 'HAVE_MEMFD_CREATE': 0,
+ 'HAVE_MEMORY_H': 1,
+ 'HAVE_MEMRCHR': 1,
+ 'HAVE_MKDIRAT': 1,
+ 'HAVE_MKFIFO': 1,
+ 'HAVE_MKFIFOAT': 1,
+ 'HAVE_MKNOD': 1,
+ 'HAVE_MKNODAT': 1,
+ 'HAVE_MKTIME': 1,
+ 'HAVE_MMAP': 1,
+ 'HAVE_MREMAP': 1,
+ 'HAVE_NCURSES_H': 1,
+ 'HAVE_NDIR_H': 0,
+ 'HAVE_NETPACKET_PACKET_H': 1,
+ 'HAVE_NET_IF_H': 1,
+ 'HAVE_NICE': 1,
+ 'HAVE_NON_UNICODE_WCHAR_T_REPRESENTATION': 0,
+ 'HAVE_OPENAT': 1,
+ 'HAVE_OPENPTY': 1,
+ 'HAVE_PATHCONF': 1,
+ 'HAVE_PAUSE': 1,
+ 'HAVE_PIPE2': 1,
+ 'HAVE_PLOCK': 0,
+ 'HAVE_POLL': 1,
+ 'HAVE_POLL_H': 1,
+ 'HAVE_POSIX_FADVISE': 1,
+ 'HAVE_POSIX_FALLOCATE': 1,
+ 'HAVE_POSIX_SPAWN': 1,
+ 'HAVE_POSIX_SPAWNP': 1,
+ 'HAVE_PREAD': 1,
+ 'HAVE_PREADV': 1,
+ 'HAVE_PREADV2': 0,
+ 'HAVE_PRLIMIT': 1,
+ 'HAVE_PROCESS_H': 0,
+ 'HAVE_PROTOTYPES': 1,
+ 'HAVE_PTHREAD_CONDATTR_SETCLOCK': 1,
+ 'HAVE_PTHREAD_DESTRUCTOR': 0,
+ 'HAVE_PTHREAD_GETCPUCLOCKID': 1,
+ 'HAVE_PTHREAD_H': 1,
+ 'HAVE_PTHREAD_INIT': 0,
+ 'HAVE_PTHREAD_KILL': 1,
+ 'HAVE_PTHREAD_SIGMASK': 1,
+ 'HAVE_PTY_H': 1,
+ 'HAVE_PWRITE': 1,
+ 'HAVE_PWRITEV': 1,
+ 'HAVE_PWRITEV2': 0,
+ 'HAVE_READLINK': 1,
+ 'HAVE_READLINKAT': 1,
+ 'HAVE_READV': 1,
+ 'HAVE_REALPATH': 1,
+ 'HAVE_RENAMEAT': 1,
+ 'HAVE_RL_APPEND_HISTORY': 1,
+ 'HAVE_RL_CATCH_SIGNAL': 1,
+ 'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1,
+ 'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1,
+ 'HAVE_RL_COMPLETION_MATCHES': 1,
+ 'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1,
+ 'HAVE_RL_PRE_INPUT_HOOK': 1,
+ 'HAVE_RL_RESIZE_TERMINAL': 1,
+ 'HAVE_ROUND': 1,
+ 'HAVE_RTPSPAWN': 0,
+ 'HAVE_SCHED_GET_PRIORITY_MAX': 1,
+ 'HAVE_SCHED_H': 1,
+ 'HAVE_SCHED_RR_GET_INTERVAL': 1,
+ 'HAVE_SCHED_SETAFFINITY': 1,
+ 'HAVE_SCHED_SETPARAM': 1,
+ 'HAVE_SCHED_SETSCHEDULER': 1,
+ 'HAVE_SEM_CLOCKWAIT': 0,
+ 'HAVE_SEM_GETVALUE': 1,
+ 'HAVE_SEM_OPEN': 1,
+ 'HAVE_SEM_TIMEDWAIT': 1,
+ 'HAVE_SEM_UNLINK': 1,
+ 'HAVE_SENDFILE': 1,
+ 'HAVE_SETEGID': 1,
+ 'HAVE_SETEUID': 1,
+ 'HAVE_SETGID': 1,
+ 'HAVE_SETGROUPS': 1,
+ 'HAVE_SETHOSTNAME': 1,
+ 'HAVE_SETITIMER': 1,
+ 'HAVE_SETLOCALE': 1,
+ 'HAVE_SETPGID': 1,
+ 'HAVE_SETPGRP': 1,
+ 'HAVE_SETPRIORITY': 1,
+ 'HAVE_SETREGID': 1,
+ 'HAVE_SETRESGID': 1,
+ 'HAVE_SETRESUID': 1,
+ 'HAVE_SETREUID': 1,
+ 'HAVE_SETSID': 1,
+ 'HAVE_SETUID': 1,
+ 'HAVE_SETVBUF': 1,
+ 'HAVE_SHADOW_H': 1,
+ 'HAVE_SHM_OPEN': 1,
+ 'HAVE_SHM_UNLINK': 1,
+ 'HAVE_SIGACTION': 1,
+ 'HAVE_SIGALTSTACK': 1,
+ 'HAVE_SIGFILLSET': 1,
+ 'HAVE_SIGINFO_T_SI_BAND': 1,
+ 'HAVE_SIGINTERRUPT': 1,
+ 'HAVE_SIGNAL_H': 1,
+ 'HAVE_SIGPENDING': 1,
+ 'HAVE_SIGRELSE': 1,
+ 'HAVE_SIGTIMEDWAIT': 1,
+ 'HAVE_SIGWAIT': 1,
+ 'HAVE_SIGWAITINFO': 1,
+ 'HAVE_SNPRINTF': 1,
+ 'HAVE_SOCKADDR_ALG': 1,
+ 'HAVE_SOCKADDR_SA_LEN': 0,
+ 'HAVE_SOCKADDR_STORAGE': 1,
+ 'HAVE_SOCKETPAIR': 1,
+ 'HAVE_SPAWN_H': 1,
+ 'HAVE_SPLICE': 1,
+ 'HAVE_SSIZE_T': 1,
+ 'HAVE_STATVFS': 1,
+ 'HAVE_STAT_TV_NSEC': 1,
+ 'HAVE_STAT_TV_NSEC2': 0,
+ 'HAVE_STDARG_PROTOTYPES': 1,
+ 'HAVE_STDINT_H': 1,
+ 'HAVE_STDLIB_H': 1,
+ 'HAVE_STD_ATOMIC': 1,
+ 'HAVE_STRFTIME': 1,
+ 'HAVE_STRINGS_H': 1,
+ 'HAVE_STRING_H': 1,
+ 'HAVE_STRLCPY': 0,
+ 'HAVE_STROPTS_H': 0,
+ 'HAVE_STRSIGNAL': 1,
+ 'HAVE_STRUCT_PASSWD_PW_GECOS': 1,
+ 'HAVE_STRUCT_PASSWD_PW_PASSWD': 1,
+ 'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0,
+ 'HAVE_STRUCT_STAT_ST_BLKSIZE': 1,
+ 'HAVE_STRUCT_STAT_ST_BLOCKS': 1,
+ 'HAVE_STRUCT_STAT_ST_FLAGS': 0,
+ 'HAVE_STRUCT_STAT_ST_GEN': 0,
+ 'HAVE_STRUCT_STAT_ST_RDEV': 1,
+ 'HAVE_STRUCT_TM_TM_ZONE': 1,
+ 'HAVE_SYMLINK': 1,
+ 'HAVE_SYMLINKAT': 1,
+ 'HAVE_SYNC': 1,
+ 'HAVE_SYSCONF': 1,
+ 'HAVE_SYSEXITS_H': 1,
+ 'HAVE_SYS_AUDIOIO_H': 0,
+ 'HAVE_SYS_AUXV_H': 1,
+ 'HAVE_SYS_BSDTTY_H': 0,
+ 'HAVE_SYS_DEVPOLL_H': 0,
+ 'HAVE_SYS_DIR_H': 0,
+ 'HAVE_SYS_ENDIAN_H': 0,
+ 'HAVE_SYS_EPOLL_H': 1,
+ 'HAVE_SYS_EVENTFD_H': 1,
+ 'HAVE_SYS_EVENT_H': 0,
+ 'HAVE_SYS_FILE_H': 1,
+ 'HAVE_SYS_IOCTL_H': 1,
+ 'HAVE_SYS_KERN_CONTROL_H': 0,
+ 'HAVE_SYS_LOADAVG_H': 0,
+ 'HAVE_SYS_LOCK_H': 0,
+ 'HAVE_SYS_MEMFD_H': 0,
+ 'HAVE_SYS_MKDEV_H': 0,
+ 'HAVE_SYS_MMAN_H': 1,
+ 'HAVE_SYS_MODEM_H': 0,
+ 'HAVE_SYS_NDIR_H': 0,
+ 'HAVE_SYS_PARAM_H': 1,
+ 'HAVE_SYS_POLL_H': 1,
+ 'HAVE_SYS_RANDOM_H': 0,
+ 'HAVE_SYS_RESOURCE_H': 1,
+ 'HAVE_SYS_SELECT_H': 1,
+ 'HAVE_SYS_SENDFILE_H': 1,
+ 'HAVE_SYS_SOCKET_H': 1,
+ 'HAVE_SYS_STATVFS_H': 1,
+ 'HAVE_SYS_STAT_H': 1,
+ 'HAVE_SYS_SYSCALL_H': 1,
+ 'HAVE_SYS_SYSMACROS_H': 1,
+ 'HAVE_SYS_SYS_DOMAIN_H': 0,
+ 'HAVE_SYS_TERMIO_H': 0,
+ 'HAVE_SYS_TIMES_H': 1,
+ 'HAVE_SYS_TIME_H': 1,
+ 'HAVE_SYS_TYPES_H': 1,
+ 'HAVE_SYS_UIO_H': 1,
+ 'HAVE_SYS_UN_H': 1,
+ 'HAVE_SYS_UTSNAME_H': 1,
+ 'HAVE_SYS_WAIT_H': 1,
+ 'HAVE_SYS_XATTR_H': 1,
+ 'HAVE_TCGETPGRP': 1,
+ 'HAVE_TCSETPGRP': 1,
+ 'HAVE_TEMPNAM': 1,
+ 'HAVE_TERMIOS_H': 1,
+ 'HAVE_TERM_H': 1,
+ 'HAVE_TGAMMA': 1,
+ 'HAVE_TIMEGM': 1,
+ 'HAVE_TIMES': 1,
+ 'HAVE_TMPFILE': 1,
+ 'HAVE_TMPNAM': 1,
+ 'HAVE_TMPNAM_R': 1,
+ 'HAVE_TM_ZONE': 1,
+ 'HAVE_TRUNCATE': 1,
+ 'HAVE_TZNAME': 0,
+ 'HAVE_UCS4_TCL': 0,
+ 'HAVE_UNAME': 1,
+ 'HAVE_UNISTD_H': 1,
+ 'HAVE_UNLINKAT': 1,
+ 'HAVE_USABLE_WCHAR_T': 0,
+ 'HAVE_UTIL_H': 0,
+ 'HAVE_UTIMENSAT': 1,
+ 'HAVE_UTIMES': 1,
+ 'HAVE_UTIME_H': 1,
+ 'HAVE_UUID_CREATE': 0,
+ 'HAVE_UUID_ENC_BE': 0,
+ 'HAVE_UUID_GENERATE_TIME_SAFE': 1,
+ 'HAVE_UUID_H': 1,
+ 'HAVE_UUID_UUID_H': 1,
+ 'HAVE_VFORK': 1,
+ 'HAVE_WAIT3': 1,
+ 'HAVE_WAIT4': 1,
+ 'HAVE_WAITID': 1,
+ 'HAVE_WAITPID': 1,
+ 'HAVE_WCHAR_H': 1,
+ 'HAVE_WCSCOLL': 1,
+ 'HAVE_WCSFTIME': 1,
+ 'HAVE_WCSXFRM': 1,
+ 'HAVE_WMEMCMP': 1,
+ 'HAVE_WORKING_TZSET': 1,
+ 'HAVE_WRITEV': 1,
+ 'HAVE_ZLIB_COPY': 1,
+ 'HAVE__GETPTY': 0,
+ 'HOST_GNU_TYPE': 'x86_64-conda-linux-gnu',
+ 'INCLDIRSTOMAKE': '/root/envs/evalkit_cambrian/include '
+ '/root/envs/evalkit_cambrian/include '
+ '/root/envs/evalkit_cambrian/include/python3.10 '
+ '/root/envs/evalkit_cambrian/include/python3.10',
+ 'INCLUDEDIR': '/root/envs/evalkit_cambrian/include',
+ 'INCLUDEPY': '/root/envs/evalkit_cambrian/include/python3.10',
+ 'INSTALL': '/usr/bin/install -c',
+ 'INSTALL_DATA': '/usr/bin/install -c -m 644',
+ 'INSTALL_PROGRAM': '/usr/bin/install -c',
+ 'INSTALL_SCRIPT': '/usr/bin/install -c',
+ 'INSTALL_SHARED': '/usr/bin/install -c -m 755',
+ 'INSTSONAME': 'libpython3.10.a',
+ 'IO_H': 'Modules/_io/_iomodule.h',
+ 'IO_OBJS': '\\',
+ 'LDCXXSHARED': 'x86_64-conda-linux-gnu-c++ -pthread -shared',
+ 'LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now '
+ '-Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib '
+ '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now '
+ '-Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib',
+ 'LDLIBRARY': 'libpython3.10.a',
+ 'LDLIBRARYDIR': '',
+ 'LDSHARED': 'x86_64-conda-linux-gnu-gcc -pthread -shared -Wl,-O2 '
+ '-Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now '
+ '-Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib '
+ '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro '
+ '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib',
+ 'LDVERSION': '3.10',
+ 'LIBC': '',
+ 'LIBDEST': '/root/envs/evalkit_cambrian/lib/python3.10',
+ 'LIBDIR': '/root/envs/evalkit_cambrian/lib',
+ 'LIBFFI_INCLUDEDIR': '/root/envs/evalkit_cambrian/include',
+ 'LIBM': '-lm',
+ 'LIBOBJDIR': 'Python/',
+ 'LIBOBJS': '',
+ 'LIBPC': '/root/envs/evalkit_cambrian/lib/pkgconfig',
+ 'LIBPL': '/root/envs/evalkit_cambrian/lib/python3.10/config-3.10-x86_64-linux-gnu',
+ 'LIBPYTHON': '',
+ 'LIBRARY': 'libpython3.10.a',
+ 'LIBRARY_DEPS': 'libpython3.10.a',
+ 'LIBRARY_OBJS': '\\',
+ 'LIBRARY_OBJS_OMIT_FROZEN': '\\',
+ 'LIBS': '-lcrypt -lpthread -ldl -lutil -lm',
+ 'LIBSUBDIRS': 'asyncio \\',
+ 'LINKCC': 'x86_64-conda-linux-gnu-gcc -pthread',
+ 'LINKFORSHARED': '-Xlinker -export-dynamic',
+ 'LIPO_32BIT_FLAGS': '',
+ 'LIPO_INTEL64_FLAGS': '',
+ 'LLVM_PROF_ERR': 'no',
+ 'LLVM_PROF_FILE': '',
+ 'LLVM_PROF_MERGER': 'true',
+ 'LN': 'ln',
+ 'LOCALMODLIBS': '',
+ 'MACHDEP': 'linux',
+ 'MACHDEP_OBJS': '',
+ 'MACHDESTLIB': '/root/envs/evalkit_cambrian/lib/python3.10',
+ 'MACOSX_DEPLOYMENT_TARGET': '',
+ 'MAINCC': 'x86_64-conda-linux-gnu-gcc -pthread',
+ 'MAJOR_IN_MKDEV': 0,
+ 'MAJOR_IN_SYSMACROS': 0,
+ 'MAKESETUP': '/croot/python-split_1733933809325/work/Modules/makesetup',
+ 'MANDIR': '/root/envs/evalkit_cambrian/share/man',
+ 'MKDIR_P': '/usr/bin/mkdir -p',
+ 'MODBUILT_NAMES': 'posix errno pwd _sre _codecs _weakref _functools '
+ '_operator _collections _abc itertools atexit _signal '
+ '_stat time _thread _locale _io faulthandler '
+ '_tracemalloc _symtable xxsubtype',
+ 'MODDISABLED_NAMES': '',
+ 'MODLIBS': '',
+ 'MODOBJS': 'Modules/posixmodule.o Modules/errnomodule.o '
+ 'Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o '
+ 'Modules/_weakref.o Modules/_functoolsmodule.o '
+ 'Modules/_operator.o Modules/_collectionsmodule.o '
+ 'Modules/_abc.o Modules/itertoolsmodule.o '
+ 'Modules/atexitmodule.o Modules/signalmodule.o Modules/_stat.o '
+ 'Modules/timemodule.o Modules/_threadmodule.o '
+ 'Modules/_localemodule.o Modules/_iomodule.o Modules/iobase.o '
+ 'Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o '
+ 'Modules/textio.o Modules/stringio.o Modules/faulthandler.o '
+ 'Modules/_tracemalloc.o Modules/symtablemodule.o '
+ 'Modules/xxsubtype.o',
+ 'MODULE_OBJS': '\\',
+ 'MULTIARCH': 'x86_64-linux-gnu',
+ 'MULTIARCH_CPPFLAGS': '-DMULTIARCH=\\"x86_64-linux-gnu\\"',
+ 'MVWDELCH_IS_EXPRESSION': 1,
+ 'NO_AS_NEEDED': '-Wl,--no-as-needed',
+ 'OBJECT_OBJS': '\\',
+ 'OPENSSL_INCLUDES': '-I/root/envs/evalkit_cambrian/include',
+ 'OPENSSL_LDFLAGS': '-L/root/envs/evalkit_cambrian/lib',
+ 'OPENSSL_LIBS': '-lssl -lcrypto',
+ 'OPENSSL_RPATH': '',
+ 'OPT': '-DNDEBUG -fwrapv -O2 -Wall',
+ 'OTHER_LIBTOOL_OPT': '',
+ 'PACKAGE_BUGREPORT': 0,
+ 'PACKAGE_NAME': 0,
+ 'PACKAGE_STRING': 0,
+ 'PACKAGE_TARNAME': 0,
+ 'PACKAGE_URL': 0,
+ 'PACKAGE_VERSION': 0,
+ 'PARSER_HEADERS': '\\',
+ 'PARSER_OBJS': '\\ \\ Parser/myreadline.o Parser/tokenizer.o',
+ 'PEGEN_HEADERS': '\\',
+ 'PEGEN_OBJS': '\\',
+ 'PGO_PROF_GEN_FLAG': '-fprofile-generate',
+ 'PGO_PROF_USE_FLAG': ' ',
+ 'PLATLIBDIR': 'lib',
+ 'POBJS': '\\',
+ 'POSIX_SEMAPHORES_NOT_ENABLED': 0,
+ 'PROFILE_TASK': '-m test --pgo',
+ 'PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT': 1,
+ 'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1,
+ 'PURIFY': '',
+ 'PY3LIBRARY': '',
+ 'PYLONG_BITS_IN_DIGIT': 0,
+ 'PYTHON': 'python',
+ 'PYTHONFRAMEWORK': '',
+ 'PYTHONFRAMEWORKDIR': 'no-framework',
+ 'PYTHONFRAMEWORKINSTALLDIR': '',
+ 'PYTHONFRAMEWORKPREFIX': '',
+ 'PYTHONPATH': '',
+ 'PYTHON_FOR_BUILD': './python -E',
+ 'PYTHON_FOR_REGEN': '',
+ 'PYTHON_HEADERS': '\\',
+ 'PYTHON_OBJS': '\\',
+ 'PY_BUILD_ENVIRON': '',
+ 'PY_BUILTIN_HASHLIB_HASHES': '"md5,sha1,sha256,sha512,sha3,blake2"',
+ 'PY_BUILTIN_MODULE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG '
+ '-fwrapv -O2 -Wall -march=nocona -mtune=haswell '
+ '-ftree-vectorize -fPIC -fstack-protector-strong '
+ '-fno-plt -O2 -ffunction-sections -pipe -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ ' -march=nocona '
+ '-mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 '
+ '-ffunction-sections -pipe -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ ' '
+ '-fno-semantic-interposition '
+ ' '
+ ' -g -std=c99 -Wextra '
+ '-Wno-unused-result -Wno-unused-parameter '
+ '-Wno-missing-field-initializers '
+ '-Werror=implicit-function-declaration '
+ '-fvisibility=hidden '
+ ' '
+ '-I/croot/python-split_1733933809325/work/Include/internal '
+ '-IObjects -IInclude -IPython -I. '
+ '-I/croot/python-split_1733933809325/work/Include '
+ '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include '
+ '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include '
+ '-DPy_BUILD_CORE_BUILTIN',
+ 'PY_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 -Wall '
+ '-march=nocona -mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe '
+ '-isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ ' -march=nocona -mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe '
+ '-isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ '',
+ 'PY_CFLAGS_NODIST': '-fno-semantic-interposition '
+ ' -g -std=c99 '
+ '-Wextra -Wno-unused-result -Wno-unused-parameter '
+ '-Wno-missing-field-initializers '
+ '-Werror=implicit-function-declaration '
+ '-fvisibility=hidden '
+ '-I/croot/python-split_1733933809325/work/Include/internal',
+ 'PY_COERCE_C_LOCALE': 1,
+ 'PY_CORE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 '
+ '-Wall -march=nocona -mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 -ffunction-sections '
+ '-pipe -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ ' -march=nocona -mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 -ffunction-sections '
+ '-pipe -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ ' -fno-semantic-interposition '
+ ' '
+ '-g -std=c99 -Wextra -Wno-unused-result '
+ '-Wno-unused-parameter -Wno-missing-field-initializers '
+ '-Werror=implicit-function-declaration -fvisibility=hidden '
+ ' '
+ '-I/croot/python-split_1733933809325/work/Include/internal '
+ '-IObjects -IInclude -IPython -I. '
+ '-I/croot/python-split_1733933809325/work/Include -DNDEBUG '
+ '-D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include '
+ '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include '
+ '-DPy_BUILD_CORE',
+ 'PY_CORE_LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro '
+ '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib '
+ '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro '
+ '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib '
+ '-fno-semantic-interposition '
+ ' -g',
+ 'PY_CPPFLAGS': '-IObjects -IInclude -IPython -I. '
+ '-I/croot/python-split_1733933809325/work/Include -DNDEBUG '
+ '-D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include '
+ '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include',
+ 'PY_ENABLE_SHARED': 0,
+ 'PY_FORMAT_SIZE_T': '"z"',
+ 'PY_LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro '
+ '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib '
+ '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro '
+ '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections '
+ '-Wl,-rpath,/root/envs/evalkit_cambrian/lib '
+ '-Wl,-rpath-link,/root/envs/evalkit_cambrian/lib '
+ '-L/root/envs/evalkit_cambrian/lib',
+ 'PY_LDFLAGS_NODIST': '-fno-semantic-interposition '
+ ' -g',
+ 'PY_SSL_DEFAULT_CIPHERS': 1,
+ 'PY_SSL_DEFAULT_CIPHER_STRING': 0,
+ 'PY_STDMODULE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv '
+ '-O2 -Wall -march=nocona -mtune=haswell '
+ '-ftree-vectorize -fPIC -fstack-protector-strong '
+ '-fno-plt -O2 -ffunction-sections -pipe -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ ' -march=nocona '
+ '-mtune=haswell -ftree-vectorize -fPIC '
+ '-fstack-protector-strong -fno-plt -O2 '
+ '-ffunction-sections -pipe -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 '
+ '-fdebug-prefix-map=/root/envs/evalkit_cambrian=/usr/local/src/conda-prefix '
+ ' '
+ ' '
+ '-fno-semantic-interposition '
+ ' -g -std=c99 '
+ '-Wextra -Wno-unused-result -Wno-unused-parameter '
+ '-Wno-missing-field-initializers '
+ '-Werror=implicit-function-declaration '
+ '-fvisibility=hidden '
+ ' '
+ '-I/croot/python-split_1733933809325/work/Include/internal '
+ '-IObjects -IInclude -IPython -I. '
+ '-I/croot/python-split_1733933809325/work/Include '
+ '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include '
+ '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem '
+ '/root/envs/evalkit_cambrian/include '
+ '-I/root/envs/evalkit_cambrian/include',
+ 'Py_DEBUG': 0,
+ 'Py_ENABLE_SHARED': 0,
+ 'Py_HASH_ALGORITHM': 0,
+ 'Py_TRACE_REFS': 0,
+ 'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\',
+ 'READELF': 'x86_64-conda-linux-gnu-readelf',
+ 'RESSRCDIR': 'Mac/Resources/framework',
+ 'RETSIGTYPE': 'void',
+ 'RUNSHARED': '',
+ 'SCRIPTDIR': '/root/envs/evalkit_cambrian/lib',
+ 'SETPGRP_HAVE_ARG': 0,
+ 'SHELL': '/bin/sh',
+ 'SHLIBS': '-lcrypt -lpthread -ldl -lutil -lm',
+ 'SHLIB_SUFFIX': '.so',
+ 'SHM_NEEDS_LIBRT': 1,
+ 'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0,
+ 'SITEPATH': '',
+ 'SIZEOF_DOUBLE': 8,
+ 'SIZEOF_FLOAT': 4,
+ 'SIZEOF_FPOS_T': 16,
+ 'SIZEOF_INT': 4,
+ 'SIZEOF_LONG': 8,
+ 'SIZEOF_LONG_DOUBLE': 16,
+ 'SIZEOF_LONG_LONG': 8,
+ 'SIZEOF_OFF_T': 8,
+ 'SIZEOF_PID_T': 4,
+ 'SIZEOF_PTHREAD_KEY_T': 4,
+ 'SIZEOF_PTHREAD_T': 8,
+ 'SIZEOF_SHORT': 2,
+ 'SIZEOF_SIZE_T': 8,
+ 'SIZEOF_TIME_T': 8,
+ 'SIZEOF_UINTPTR_T': 8,
+ 'SIZEOF_VOID_P': 8,
+ 'SIZEOF_WCHAR_T': 4,
+ 'SIZEOF__BOOL': 1,
+ 'SOABI': 'cpython-310-x86_64-linux-gnu',
+ 'SRCDIRS': 'Parser Objects Python Modules Modules/_io Programs',
+ 'SRC_GDB_HOOKS': '/croot/python-split_1733933809325/work/Tools/gdb/libpython.py',
+ 'STATIC_LIBPYTHON': 1,
+ 'STDC_HEADERS': 1,
+ 'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */",
+ 'STRIPFLAG': '-s',
+ 'SUBDIRS': '',
+ 'SUBDIRSTOO': 'Include Lib Misc',
+ 'SYSLIBS': '-lm',
+ 'SYS_SELECT_WITH_SYS_TIME': 1,
+ 'TCLTK_INCLUDES': '-I/root/envs/evalkit_cambrian/include',
+ 'TCLTK_LIBS': '-L/root/envs/evalkit_cambrian/lib '
+ '-ltcl8.6 -ltk8.6',
+ 'TESTOPTS': '',
+ 'TESTPATH': '',
+ 'TESTPYTHON': './python',
+ 'TESTPYTHONOPTS': '',
+ 'TESTRUNNER': './python '
+ '/croot/python-split_1733933809325/work/Tools/scripts/run_tests.py',
+ 'TESTSUBDIRS': 'ctypes/test \\',
+ 'TESTTIMEOUT': 1200,
+ 'TEST_MODULES': 'yes',
+ 'THREAD_STACK_SIZE': 0,
+ 'TIMEMODULE_LIB': 0,
+ 'TIME_WITH_SYS_TIME': 1,
+ 'TM_IN_SYS_TIME': 0,
+ 'TZPATH': '/root/envs/evalkit_cambrian/share/zoneinfo',
+ 'UNICODE_DEPS': '\\',
+ 'UNIVERSALSDK': '',
+ 'UPDATE_FILE': '/croot/python-split_1733933809325/work/Tools/scripts/update_file.py',
+ 'USE_COMPUTED_GOTOS': 1,
+ 'VERSION': '3.10',
+ 'VPATH': '/croot/python-split_1733933809325/work',
+ 'WHEEL_PKG_DIR': '',
+ 'WINDOW_HAS_FLAGS': 1,
+ 'WITH_DECIMAL_CONTEXTVAR': 1,
+ 'WITH_DOC_STRINGS': 1,
+ 'WITH_DTRACE': 0,
+ 'WITH_DYLD': 0,
+ 'WITH_EDITLINE': 0,
+ 'WITH_LIBINTL': 0,
+ 'WITH_NEXT_FRAMEWORK': 0,
+ 'WITH_PYMALLOC': 1,
+ 'WITH_VALGRIND': 0,
+ 'X87_DOUBLE_ROUNDING': 0,
+ 'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax',
+ 'abs_builddir': '/croot/python-split_1733933809325/work/build-static',
+ 'abs_srcdir': '/croot/python-split_1733933809325/work',
+ 'datarootdir': '/root/envs/evalkit_cambrian/share',
+ 'exec_prefix': '/root/envs/evalkit_cambrian',
+ 'prefix': '/root/envs/evalkit_cambrian',
+ 'srcdir': '/croot/python-split_1733933809325/work'}
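+
+# Illustration (a quick sketch, not part of the generated data above): at
+# runtime these build-time values are what the sysconfig module reports, e.g.
+#
+#     >>> import sysconfig
+#     >>> sysconfig.get_config_var('CC')
+#     'x86_64-conda-linux-gnu-gcc -pthread'
+#     >>> sysconfig.get_config_var('EXT_SUFFIX')
+#     '.cpython-310-x86_64-linux-gnu.so'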
diff --git a/evalkit_cambrian/lib/python3.10/_threading_local.py b/evalkit_cambrian/lib/python3.10/_threading_local.py
new file mode 100644
index 0000000000000000000000000000000000000000..b006d76c4e23df7dbf09bc7e668b9eb87e4044af
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/_threading_local.py
@@ -0,0 +1,242 @@
+"""Thread-local objects.
+
+(Note that this module provides a Python version of the threading.local
+ class. Depending on the version of Python you're using, there may be a
+ faster one available. You should always import the `local` class from
+ `threading`.)
+
+Thread-local objects support the management of thread-local data.
+If you have data that you want to be local to a thread, simply create
+a thread-local object and use its attributes:
+
+ >>> mydata = local()
+ >>> mydata.number = 42
+ >>> mydata.number
+ 42
+
+You can also access the local-object's dictionary:
+
+ >>> mydata.__dict__
+ {'number': 42}
+ >>> mydata.__dict__.setdefault('widgets', [])
+ []
+ >>> mydata.widgets
+ []
+
+What's important about thread-local objects is that their data are
+local to a thread. If we access the data in a different thread:
+
+ >>> log = []
+ >>> def f():
+ ... items = sorted(mydata.__dict__.items())
+ ... log.append(items)
+ ... mydata.number = 11
+ ... log.append(mydata.number)
+
+ >>> import threading
+ >>> thread = threading.Thread(target=f)
+ >>> thread.start()
+ >>> thread.join()
+ >>> log
+ [[], 11]
+
+we get different data. Furthermore, changes made in the other thread
+don't affect data seen in this thread:
+
+ >>> mydata.number
+ 42
+
+Of course, values you get from a local object, including a __dict__
+attribute, are for whatever thread was current at the time the
+attribute was read. For that reason, you generally don't want to save
+these values across threads, as they apply only to the thread they
+came from.
+
+You can create custom local objects by subclassing the local class:
+
+ >>> class MyLocal(local):
+ ... number = 2
+ ... def __init__(self, /, **kw):
+ ... self.__dict__.update(kw)
+ ... def squared(self):
+ ... return self.number ** 2
+
+This can be useful to support default values, methods and
+initialization. Note that if you define an __init__ method, it will be
+called each time the local object is used in a separate thread. This
+is necessary to initialize each thread's dictionary.
+
+Now if we create a local object:
+
+ >>> mydata = MyLocal(color='red')
+
+Now we have a default number:
+
+ >>> mydata.number
+ 2
+
+an initial color:
+
+ >>> mydata.color
+ 'red'
+ >>> del mydata.color
+
+And a method that operates on the data:
+
+ >>> mydata.squared()
+ 4
+
+As before, we can access the data in a separate thread:
+
+ >>> log = []
+ >>> thread = threading.Thread(target=f)
+ >>> thread.start()
+ >>> thread.join()
+ >>> log
+ [[('color', 'red')], 11]
+
+without affecting this thread's data:
+
+ >>> mydata.number
+ 2
+ >>> mydata.color
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'MyLocal' object has no attribute 'color'
+
+Note that subclasses can define slots, but they are not thread
+local. They are shared across threads:
+
+ >>> class MyLocal(local):
+ ... __slots__ = 'number'
+
+ >>> mydata = MyLocal()
+ >>> mydata.number = 42
+ >>> mydata.color = 'red'
+
+So, the separate thread:
+
+ >>> thread = threading.Thread(target=f)
+ >>> thread.start()
+ >>> thread.join()
+
+affects what we see:
+
+ >>> mydata.number
+ 11
+
+>>> del mydata
+"""
+
+from weakref import ref
+from contextlib import contextmanager
+
+__all__ = ["local"]
+
+# We need to use objects from the threading module, but the threading
+# module may also want to use our `local` class, if support for locals
+# isn't compiled into the `thread` module. This creates potential problems
+# with circular imports. For that reason, we don't import `threading`
+# until the bottom of this file (a hack sufficient to worm around the
+# potential problems). Note that all platforms on CPython do have support
+# for locals in the `thread` module, and there is no circular import problem
+# then, so problems introduced by fiddling the order of imports here won't
+# manifest.
+
+class _localimpl:
+ """A class managing thread-local dicts"""
+ __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'
+
+ def __init__(self):
+ # The key used in the Thread objects' attribute dicts.
+ # We keep it a string for speed but make it unlikely to clash with
+ # a "real" attribute.
+ self.key = '_threading_local._localimpl.' + str(id(self))
+ # { id(Thread) -> (ref(Thread), thread-local dict) }
+ self.dicts = {}
+
+ def get_dict(self):
+ """Return the dict for the current thread. Raises KeyError if none
+ defined."""
+ thread = current_thread()
+ return self.dicts[id(thread)][1]
+
+ def create_dict(self):
+ """Create a new dict for the current thread, and return it."""
+ localdict = {}
+ key = self.key
+ thread = current_thread()
+ idt = id(thread)
+ def local_deleted(_, key=key):
+ # When the localimpl is deleted, remove the thread attribute.
+ thread = wrthread()
+ if thread is not None:
+ del thread.__dict__[key]
+ def thread_deleted(_, idt=idt):
+ # When the thread is deleted, remove the local dict.
+ # Note that this is suboptimal if the thread object gets
+ # caught in a reference loop. We would like to be called
+ # as soon as the OS-level thread ends instead.
+ local = wrlocal()
+ if local is not None:
+ dct = local.dicts.pop(idt)
+ wrlocal = ref(self, local_deleted)
+ wrthread = ref(thread, thread_deleted)
+ thread.__dict__[key] = wrlocal
+ self.dicts[idt] = wrthread, localdict
+ return localdict
+
+
+@contextmanager
+def _patch(self):
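+    # Swap the calling thread's dict into the instance for the duration of
+    # the with-block.  On a thread's first access the dict is created and
+    # __init__ is re-run with the arguments saved by local.__new__.  The
+    # instance's __dict__ slot is shared state that every thread rebinds to
+    # its own dict, so the swap happens under impl.locallock.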
+ impl = object.__getattribute__(self, '_local__impl')
+ try:
+ dct = impl.get_dict()
+ except KeyError:
+ dct = impl.create_dict()
+ args, kw = impl.localargs
+ self.__init__(*args, **kw)
+ with impl.locallock:
+ object.__setattr__(self, '__dict__', dct)
+ yield
+
+
+class local:
+ __slots__ = '_local__impl', '__dict__'
+
+ def __new__(cls, /, *args, **kw):
+ if (args or kw) and (cls.__init__ is object.__init__):
+ raise TypeError("Initialization arguments are not supported")
+ self = object.__new__(cls)
+ impl = _localimpl()
+ impl.localargs = (args, kw)
+ impl.locallock = RLock()
+ object.__setattr__(self, '_local__impl', impl)
+ # We need to create the thread dict in anticipation of
+ # __init__ being called, to make sure we don't call it
+ # again ourselves.
+ impl.create_dict()
+ return self
+
+ def __getattribute__(self, name):
+ with _patch(self):
+ return object.__getattribute__(self, name)
+
+ def __setattr__(self, name, value):
+ if name == '__dict__':
+ raise AttributeError(
+ "%r object attribute '__dict__' is read-only"
+ % self.__class__.__name__)
+ with _patch(self):
+ return object.__setattr__(self, name, value)
+
+ def __delattr__(self, name):
+ if name == '__dict__':
+ raise AttributeError(
+ "%r object attribute '__dict__' is read-only"
+ % self.__class__.__name__)
+ with _patch(self):
+ return object.__delattr__(self, name)
+
+
+from threading import current_thread, RLock
diff --git a/evalkit_cambrian/lib/python3.10/abc.py b/evalkit_cambrian/lib/python3.10/abc.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c552cebb4226c02bd6f741c5446623a0cd6a7ed
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/abc.py
@@ -0,0 +1,188 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Abstract Base Classes (ABCs) according to PEP 3119."""
+
+
+def abstractmethod(funcobj):
+ """A decorator indicating abstract methods.
+
+ Requires that the metaclass is ABCMeta or derived from it. A
+ class that has a metaclass derived from ABCMeta cannot be
+ instantiated unless all of its abstract methods are overridden.
+ The abstract methods can be called using any of the normal
+ 'super' call mechanisms. abstractmethod() may be used to declare
+ abstract methods for properties and descriptors.
+
+ Usage:
+
+ class C(metaclass=ABCMeta):
+ @abstractmethod
+ def my_abstract_method(self, ...):
+ ...
+ """
+ funcobj.__isabstractmethod__ = True
+ return funcobj
+
+
+class abstractclassmethod(classmethod):
+ """A decorator indicating abstract classmethods.
+
+ Deprecated, use 'classmethod' with 'abstractmethod' instead:
+
+ class C(ABC):
+ @classmethod
+ @abstractmethod
+ def my_abstract_classmethod(cls, ...):
+ ...
+
+ """
+
+ __isabstractmethod__ = True
+
+ def __init__(self, callable):
+ callable.__isabstractmethod__ = True
+ super().__init__(callable)
+
+
+class abstractstaticmethod(staticmethod):
+ """A decorator indicating abstract staticmethods.
+
+ Deprecated, use 'staticmethod' with 'abstractmethod' instead:
+
+ class C(ABC):
+ @staticmethod
+ @abstractmethod
+ def my_abstract_staticmethod(...):
+ ...
+
+ """
+
+ __isabstractmethod__ = True
+
+ def __init__(self, callable):
+ callable.__isabstractmethod__ = True
+ super().__init__(callable)
+
+
+class abstractproperty(property):
+ """A decorator indicating abstract properties.
+
+ Deprecated, use 'property' with 'abstractmethod' instead:
+
+ class C(ABC):
+ @property
+ @abstractmethod
+ def my_abstract_property(self):
+ ...
+
+ """
+
+ __isabstractmethod__ = True
+
+
+try:
+ from _abc import (get_cache_token, _abc_init, _abc_register,
+ _abc_instancecheck, _abc_subclasscheck, _get_dump,
+ _reset_registry, _reset_caches)
+except ImportError:
+ from _py_abc import ABCMeta, get_cache_token
+ ABCMeta.__module__ = 'abc'
+else:
+ class ABCMeta(type):
+ """Metaclass for defining Abstract Base Classes (ABCs).
+
+ Use this metaclass to create an ABC. An ABC can be subclassed
+ directly, and then acts as a mix-in class. You can also register
+ unrelated concrete classes (even built-in classes) and unrelated
+ ABCs as 'virtual subclasses' -- these and their descendants will
+ be considered subclasses of the registering ABC by the built-in
+ issubclass() function, but the registering ABC won't show up in
+ their MRO (Method Resolution Order) nor will method
+ implementations defined by the registering ABC be callable (not
+ even via super()).
+ """
+ def __new__(mcls, name, bases, namespace, **kwargs):
+ cls = super().__new__(mcls, name, bases, namespace, **kwargs)
+ _abc_init(cls)
+ return cls
+
+ def register(cls, subclass):
+ """Register a virtual subclass of an ABC.
+
+ Returns the subclass, to allow usage as a class decorator.
+ """
+ return _abc_register(cls, subclass)
+
+ def __instancecheck__(cls, instance):
+ """Override for isinstance(instance, cls)."""
+ return _abc_instancecheck(cls, instance)
+
+ def __subclasscheck__(cls, subclass):
+ """Override for issubclass(subclass, cls)."""
+ return _abc_subclasscheck(cls, subclass)
+
+ def _dump_registry(cls, file=None):
+ """Debug helper to print the ABC registry."""
+ print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file)
+ print(f"Inv. counter: {get_cache_token()}", file=file)
+ (_abc_registry, _abc_cache, _abc_negative_cache,
+ _abc_negative_cache_version) = _get_dump(cls)
+ print(f"_abc_registry: {_abc_registry!r}", file=file)
+ print(f"_abc_cache: {_abc_cache!r}", file=file)
+ print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file)
+ print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}",
+ file=file)
+
+ def _abc_registry_clear(cls):
+ """Clear the registry (for debugging or testing)."""
+ _reset_registry(cls)
+
+ def _abc_caches_clear(cls):
+ """Clear the caches (for debugging or testing)."""
+ _reset_caches(cls)
+
+
+def update_abstractmethods(cls):
+ """Recalculate the set of abstract methods of an abstract class.
+
+ If a class has had one of its abstract methods implemented after the
+ class was created, the method will not be considered implemented until
+ this function is called. Alternatively, if a new abstract method has been
+ added to the class, it will only be considered an abstract method of the
+ class after this function is called.
+
+ This function should be called before any use is made of the class,
+ usually in class decorators that add methods to the subject class.
+
+ Returns cls, to allow usage as a class decorator.
+
+ If cls is not an instance of ABCMeta, does nothing.
+ """
+ if not hasattr(cls, '__abstractmethods__'):
+        # We check for __abstractmethods__ here because cls might be a C
+        # implementation or a Python implementation (especially during
+ # testing), and we want to handle both cases.
+ return cls
+
+ abstracts = set()
+ # Check the existing abstract methods of the parents, keep only the ones
+ # that are not implemented.
+ for scls in cls.__bases__:
+ for name in getattr(scls, '__abstractmethods__', ()):
+ value = getattr(cls, name, None)
+ if getattr(value, "__isabstractmethod__", False):
+ abstracts.add(name)
+ # Also add any other newly added abstract methods.
+ for name, value in cls.__dict__.items():
+ if getattr(value, "__isabstractmethod__", False):
+ abstracts.add(name)
+ cls.__abstractmethods__ = frozenset(abstracts)
+ return cls
+
+
+class ABC(metaclass=ABCMeta):
+ """Helper class that provides a standard way to create an ABC using
+ inheritance.
+ """
+ __slots__ = ()
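+
+
+# Illustration: an ABC refuses instantiation while abstract methods remain,
+# and update_abstractmethods() lets a class decorator that patches in an
+# implementation afterwards (the hypothetical add_run below) recompute the
+# abstract set.
+#
+#     >>> class Base(ABC):
+#     ...     @abstractmethod
+#     ...     def run(self): ...
+#     >>> Base()
+#     Traceback (most recent call last):
+#       ...
+#     TypeError: Can't instantiate abstract class Base with abstract method run
+#     >>> def add_run(cls):
+#     ...     cls.run = lambda self: 'ran'
+#     ...     return update_abstractmethods(cls)
+#     >>> @add_run
+#     ... class Impl(Base):
+#     ...     pass
+#     >>> Impl().run()
+#     'ran'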
diff --git a/evalkit_cambrian/lib/python3.10/ast.py b/evalkit_cambrian/lib/python3.10/ast.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f5f9827146dfd8731196121970b21724102c372
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/ast.py
@@ -0,0 +1,1709 @@
+"""
+ ast
+ ~~~
+
+ The `ast` module helps Python applications to process trees of the Python
+ abstract syntax grammar. The abstract syntax itself might change with
+ each Python release; this module helps to find out programmatically what
+ the current grammar looks like and allows modifications of it.
+
+ An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
+ a flag to the `compile()` builtin function or by using the `parse()`
+ function from this module. The result will be a tree of objects whose
+ classes all inherit from `ast.AST`.
+
+ A modified abstract syntax tree can be compiled into a Python code object
+ using the built-in `compile()` function.
+
+ Additionally, various helper functions are provided that make working with
+ the trees simpler. The main intention of the helper functions and this
+ module in general is to provide an easy-to-use interface for libraries
+ that work tightly with the Python syntax (template engines, for example).
+
+
+ :copyright: Copyright 2008 by Armin Ronacher.
+ :license: Python License.
+"""
+import sys
+from _ast import *
+from contextlib import contextmanager, nullcontext
+from enum import IntEnum, auto
+
+
+def parse(source, filename='<unknown>', mode='exec', *,
+ type_comments=False, feature_version=None):
+ """
+ Parse the source into an AST node.
+ Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
+ Pass type_comments=True to get back type comments where the syntax allows.
+ """
+ flags = PyCF_ONLY_AST
+ if type_comments:
+ flags |= PyCF_TYPE_COMMENTS
+ if isinstance(feature_version, tuple):
+ major, minor = feature_version # Should be a 2-tuple.
+ assert major == 3
+ feature_version = minor
+ elif feature_version is None:
+ feature_version = -1
+ # Else it should be an int giving the minor version for 3.x.
+ return compile(source, filename, mode, flags,
+ _feature_version=feature_version)
+
+
+def literal_eval(node_or_string):
+ """
+ Evaluate an expression node or a string containing only a Python
+ expression. The string or node provided may only consist of the following
+ Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
+ sets, booleans, and None.
+
+ Caution: A complex expression can overflow the C stack and cause a crash.
+ """
+ if isinstance(node_or_string, str):
+ node_or_string = parse(node_or_string.lstrip(" \t"), mode='eval')
+ if isinstance(node_or_string, Expression):
+ node_or_string = node_or_string.body
+ def _raise_malformed_node(node):
+ msg = "malformed node or string"
+ if lno := getattr(node, 'lineno', None):
+ msg += f' on line {lno}'
+ raise ValueError(msg + f': {node!r}')
+ def _convert_num(node):
+ if not isinstance(node, Constant) or type(node.value) not in (int, float, complex):
+ _raise_malformed_node(node)
+ return node.value
+ def _convert_signed_num(node):
+ if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)):
+ operand = _convert_num(node.operand)
+ if isinstance(node.op, UAdd):
+ return + operand
+ else:
+ return - operand
+ return _convert_num(node)
+ def _convert(node):
+ if isinstance(node, Constant):
+ return node.value
+ elif isinstance(node, Tuple):
+ return tuple(map(_convert, node.elts))
+ elif isinstance(node, List):
+ return list(map(_convert, node.elts))
+ elif isinstance(node, Set):
+ return set(map(_convert, node.elts))
+ elif (isinstance(node, Call) and isinstance(node.func, Name) and
+ node.func.id == 'set' and node.args == node.keywords == []):
+ return set()
+ elif isinstance(node, Dict):
+ if len(node.keys) != len(node.values):
+ _raise_malformed_node(node)
+ return dict(zip(map(_convert, node.keys),
+ map(_convert, node.values)))
+ elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)):
+ left = _convert_signed_num(node.left)
+ right = _convert_num(node.right)
+ if isinstance(left, (int, float)) and isinstance(right, complex):
+ if isinstance(node.op, Add):
+ return left + right
+ else:
+ return left - right
+ return _convert_signed_num(node)
+ return _convert(node_or_string)
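+
+
+# Illustration: only literal syntax is accepted, which is what makes
+# literal_eval() usable on untrusted input where eval() would not be.
+#
+#     >>> literal_eval("{'a': [1, 2], 'b': 3+4j}")
+#     {'a': [1, 2], 'b': (3+4j)}
+#     >>> literal_eval("__import__('os')")
+#     Traceback (most recent call last):
+#       ...
+#     ValueError: malformed node or string on line 1: <ast.Call object at ...>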
+
+
+def dump(node, annotate_fields=True, include_attributes=False, *, indent=None):
+ """
+ Return a formatted dump of the tree in node. This is mainly useful for
+ debugging purposes. If annotate_fields is true (by default),
+ the returned string will show the names and the values for fields.
+ If annotate_fields is false, the result string will be more compact by
+ omitting unambiguous field names. Attributes such as line
+ numbers and column offsets are not dumped by default. If this is wanted,
+ include_attributes can be set to true. If indent is a non-negative
+ integer or string, then the tree will be pretty-printed with that indent
+ level. None (the default) selects the single line representation.
+ """
+ def _format(node, level=0):
+ if indent is not None:
+ level += 1
+ prefix = '\n' + indent * level
+ sep = ',\n' + indent * level
+ else:
+ prefix = ''
+ sep = ', '
+ if isinstance(node, AST):
+ cls = type(node)
+ args = []
+ allsimple = True
+ keywords = annotate_fields
+ for name in node._fields:
+ try:
+ value = getattr(node, name)
+ except AttributeError:
+ keywords = True
+ continue
+ if value is None and getattr(cls, name, ...) is None:
+ keywords = True
+ continue
+ value, simple = _format(value, level)
+ allsimple = allsimple and simple
+ if keywords:
+ args.append('%s=%s' % (name, value))
+ else:
+ args.append(value)
+ if include_attributes and node._attributes:
+ for name in node._attributes:
+ try:
+ value = getattr(node, name)
+ except AttributeError:
+ continue
+ if value is None and getattr(cls, name, ...) is None:
+ continue
+ value, simple = _format(value, level)
+ allsimple = allsimple and simple
+ args.append('%s=%s' % (name, value))
+ if allsimple and len(args) <= 3:
+ return '%s(%s)' % (node.__class__.__name__, ', '.join(args)), not args
+ return '%s(%s%s)' % (node.__class__.__name__, prefix, sep.join(args)), False
+ elif isinstance(node, list):
+ if not node:
+ return '[]', True
+ return '[%s%s]' % (prefix, sep.join(_format(x, level)[0] for x in node)), False
+ return repr(node), True
+
+ if not isinstance(node, AST):
+ raise TypeError('expected AST, got %r' % node.__class__.__name__)
+ if indent is not None and not isinstance(indent, str):
+ indent = ' ' * indent
+ return _format(node)[0]
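+
+
+# Illustration of the indent argument:
+#
+#     >>> print(dump(parse('x = 1'), indent=4))
+#     Module(
+#         body=[
+#             Assign(
+#                 targets=[
+#                     Name(id='x', ctx=Store())],
+#                 value=Constant(value=1))],
+#         type_ignores=[])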
+
+
+def copy_location(new_node, old_node):
+ """
+ Copy source location (`lineno`, `col_offset`, `end_lineno`, and `end_col_offset`
+ attributes) from *old_node* to *new_node* if possible, and return *new_node*.
+ """
+ for attr in 'lineno', 'col_offset', 'end_lineno', 'end_col_offset':
+ if attr in old_node._attributes and attr in new_node._attributes:
+ value = getattr(old_node, attr, None)
+ # end_lineno and end_col_offset are optional attributes, and they
+ # should be copied whether the value is None or not.
+ if value is not None or (
+ hasattr(old_node, attr) and attr.startswith("end_")
+ ):
+ setattr(new_node, attr, value)
+ return new_node
+
+
+def fix_missing_locations(node):
+ """
+ When you compile a node tree with compile(), the compiler expects lineno and
+ col_offset attributes for every node that supports them. This is rather
+ tedious to fill in for generated nodes, so this helper adds these attributes
+ recursively where not already set, by setting them to the values of the
+ parent node. It works recursively starting at *node*.
+ """
+ def _fix(node, lineno, col_offset, end_lineno, end_col_offset):
+ if 'lineno' in node._attributes:
+ if not hasattr(node, 'lineno'):
+ node.lineno = lineno
+ else:
+ lineno = node.lineno
+ if 'end_lineno' in node._attributes:
+ if getattr(node, 'end_lineno', None) is None:
+ node.end_lineno = end_lineno
+ else:
+ end_lineno = node.end_lineno
+ if 'col_offset' in node._attributes:
+ if not hasattr(node, 'col_offset'):
+ node.col_offset = col_offset
+ else:
+ col_offset = node.col_offset
+ if 'end_col_offset' in node._attributes:
+ if getattr(node, 'end_col_offset', None) is None:
+ node.end_col_offset = end_col_offset
+ else:
+ end_col_offset = node.end_col_offset
+ for child in iter_child_nodes(node):
+ _fix(child, lineno, col_offset, end_lineno, end_col_offset)
+ _fix(node, 1, 0, 1, 0)
+ return node
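+
+
+# Illustration: nodes built by hand carry no location info, so fill it in
+# before handing the tree to compile().
+#
+#     >>> expr = Expression(body=BinOp(left=Constant(1), op=Add(), right=Constant(2)))
+#     >>> eval(compile(fix_missing_locations(expr), '<ast>', 'eval'))
+#     3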
+
+
+def increment_lineno(node, n=1):
+ """
+ Increment the line number and end line number of each node in the tree
+ starting at *node* by *n*. This is useful to "move code" to a different
+ location in a file.
+ """
+ for child in walk(node):
+ # TypeIgnore is a special case where lineno is not an attribute
+ # but rather a field of the node itself.
+ if isinstance(child, TypeIgnore):
+ child.lineno = getattr(child, 'lineno', 0) + n
+ continue
+
+ if 'lineno' in child._attributes:
+ child.lineno = getattr(child, 'lineno', 0) + n
+ if (
+ "end_lineno" in child._attributes
+ and (end_lineno := getattr(child, "end_lineno", 0)) is not None
+ ):
+ child.end_lineno = end_lineno + n
+ return node
+
+
+def iter_fields(node):
+ """
+ Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
+ that is present on *node*.
+ """
+ for field in node._fields:
+ try:
+ yield field, getattr(node, field)
+ except AttributeError:
+ pass
+
+
+def iter_child_nodes(node):
+ """
+ Yield all direct child nodes of *node*, that is, all fields that are nodes
+ and all items of fields that are lists of nodes.
+ """
+ for name, field in iter_fields(node):
+ if isinstance(field, AST):
+ yield field
+ elif isinstance(field, list):
+ for item in field:
+ if isinstance(item, AST):
+ yield item
+
+
+def get_docstring(node, clean=True):
+ """
+ Return the docstring for the given node or None if no docstring can
+    be found.  If the node provided cannot have docstrings, a TypeError
+ will be raised.
+
+ If *clean* is `True`, all tabs are expanded to spaces and any whitespace
+ that can be uniformly removed from the second line onwards is removed.
+ """
+ if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)):
+ raise TypeError("%r can't have docstrings" % node.__class__.__name__)
+    if not (node.body and isinstance(node.body[0], Expr)):
+ return None
+ node = node.body[0].value
+ if isinstance(node, Str):
+ text = node.s
+ elif isinstance(node, Constant) and isinstance(node.value, str):
+ text = node.value
+ else:
+ return None
+ if clean:
+ import inspect
+ text = inspect.cleandoc(text)
+ return text
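+
+
+# Illustration:
+#
+#     >>> get_docstring(parse('"""One line."""'))
+#     'One line.'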
+
+
+def _splitlines_no_ff(source):
+ """Split a string into lines ignoring form feed and other chars.
+
+ This mimics how the Python parser splits source code.
+ """
+ idx = 0
+ lines = []
+ next_line = ''
+ while idx < len(source):
+ c = source[idx]
+ next_line += c
+ idx += 1
+ # Keep \r\n together
+ if c == '\r' and idx < len(source) and source[idx] == '\n':
+ next_line += '\n'
+ idx += 1
+ if c in '\r\n':
+ lines.append(next_line)
+ next_line = ''
+
+ if next_line:
+ lines.append(next_line)
+ return lines
+
+
+def _pad_whitespace(source):
+ r"""Replace all chars except '\f\t' in a line with spaces."""
+ result = ''
+ for c in source:
+ if c in '\f\t':
+ result += c
+ else:
+ result += ' '
+ return result
+
+
+def get_source_segment(source, node, *, padded=False):
+ """Get source code segment of the *source* that generated *node*.
+
+ If some location information (`lineno`, `end_lineno`, `col_offset`,
+ or `end_col_offset`) is missing, return None.
+
+ If *padded* is `True`, the first line of a multi-line statement will
+ be padded with spaces to match its original position.
+ """
+ try:
+ if node.end_lineno is None or node.end_col_offset is None:
+ return None
+ lineno = node.lineno - 1
+ end_lineno = node.end_lineno - 1
+ col_offset = node.col_offset
+ end_col_offset = node.end_col_offset
+ except AttributeError:
+ return None
+
+ lines = _splitlines_no_ff(source)
+ if end_lineno == lineno:
+ return lines[lineno].encode()[col_offset:end_col_offset].decode()
+
+ if padded:
+ padding = _pad_whitespace(lines[lineno].encode()[:col_offset].decode())
+ else:
+ padding = ''
+
+ first = padding + lines[lineno].encode()[col_offset:].decode()
+ last = lines[end_lineno].encode()[:end_col_offset].decode()
+ lines = lines[lineno+1:end_lineno]
+
+ lines.insert(0, first)
+ lines.append(last)
+ return ''.join(lines)
+
+
+def walk(node):
+ """
+ Recursively yield all descendant nodes in the tree starting at *node*
+ (including *node* itself), in no specified order. This is useful if you
+ only want to modify nodes in place and don't care about the context.
+ """
+ from collections import deque
+ todo = deque([node])
+ while todo:
+ node = todo.popleft()
+ todo.extend(iter_child_nodes(node))
+ yield node
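+
+
+# Illustration: counting nodes of one type without caring about structure.
+#
+#     >>> sum(isinstance(n, Name) for n in walk(parse('x + y')))
+#     2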
+
+
+class NodeVisitor(object):
+ """
+ A node visitor base class that walks the abstract syntax tree and calls a
+ visitor function for every node found. This function may return a value
+ which is forwarded by the `visit` method.
+
+ This class is meant to be subclassed, with the subclass adding visitor
+ methods.
+
+    By default the visitor functions for the nodes are ``'visit_'`` +
+    class name of the node.  So a `TryFinally` node visit function would
+    be `visit_TryFinally`.  This behavior can be changed by overriding
+    the `visit` method.  If no visitor function exists for a node, the
+    `generic_visit` visitor is used instead.
+
+ Don't use the `NodeVisitor` if you want to apply changes to nodes during
+    traversal. For this, a special visitor exists (`NodeTransformer`) that
+ allows modifications.
+ """
+
+ def visit(self, node):
+ """Visit a node."""
+ method = 'visit_' + node.__class__.__name__
+ visitor = getattr(self, method, self.generic_visit)
+ return visitor(node)
+
+ def generic_visit(self, node):
+ """Called if no explicit visitor function exists for a node."""
+ for field, value in iter_fields(node):
+ if isinstance(value, list):
+ for item in value:
+ if isinstance(item, AST):
+ self.visit(item)
+ elif isinstance(value, AST):
+ self.visit(value)
+
+ def visit_Constant(self, node):
+ value = node.value
+ type_name = _const_node_type_names.get(type(value))
+ if type_name is None:
+ for cls, name in _const_node_type_names.items():
+ if isinstance(value, cls):
+ type_name = name
+ break
+ if type_name is not None:
+ method = 'visit_' + type_name
+ try:
+ visitor = getattr(self, method)
+ except AttributeError:
+ pass
+ else:
+ import warnings
+ warnings.warn(f"{method} is deprecated; add visit_Constant",
+ DeprecationWarning, 2)
+ return visitor(node)
+ return self.generic_visit(node)
+
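+# A minimal subclass sketch (hypothetical visitor; *tree* is a parsed module):
+#
+#     class FuncCounter(NodeVisitor):
+#         def __init__(self):
+#             self.count = 0
+#
+#         def visit_FunctionDef(self, node):
+#             self.count += 1
+#             self.generic_visit(node)   # keep walking nested definitions
+#
+#     counter = FuncCounter()
+#     counter.visit(tree)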
+
+class NodeTransformer(NodeVisitor):
+ """
+ A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
+ allows modification of nodes.
+
+ The `NodeTransformer` will walk the AST and use the return value of the
+ visitor methods to replace or remove the old node. If the return value of
+ the visitor method is ``None``, the node will be removed from its location,
+ otherwise it is replaced with the return value. The return value may be the
+ original node in which case no replacement takes place.
+
+ Here is an example transformer that rewrites all occurrences of name lookups
+ (``foo``) to ``data['foo']``::
+
+ class RewriteName(NodeTransformer):
+
+ def visit_Name(self, node):
+ return Subscript(
+ value=Name(id='data', ctx=Load()),
+ slice=Constant(value=node.id),
+ ctx=node.ctx
+ )
+
+ Keep in mind that if the node you're operating on has child nodes you must
+ either transform the child nodes yourself or call the :meth:`generic_visit`
+ method for the node first.
+
+ For nodes that were part of a collection of statements (that applies to all
+ statement nodes), the visitor may also return a list of nodes rather than
+ just a single node.
+
+ Usually you use the transformer like this::
+
+ node = YourTransformer().visit(node)
+ """
+
+ def generic_visit(self, node):
+ for field, old_value in iter_fields(node):
+ if isinstance(old_value, list):
+ new_values = []
+ for value in old_value:
+ if isinstance(value, AST):
+ value = self.visit(value)
+ if value is None:
+ continue
+ elif not isinstance(value, AST):
+ new_values.extend(value)
+ continue
+ new_values.append(value)
+ old_value[:] = new_values
+ elif isinstance(old_value, AST):
+ new_node = self.visit(old_value)
+ if new_node is None:
+ delattr(node, field)
+ else:
+ setattr(node, field, new_node)
+ return node
+
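+# Applying the RewriteName example from the docstring might look like this
+# (sketch; fix_missing_locations() fills in locations for the new nodes):
+#
+#     tree = parse("foo + bar")
+#     tree = fix_missing_locations(RewriteName().visit(tree))
+#     unparse(tree)   # "data['foo'] + data['bar']"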
+
+# If the ast module is loaded more than once, only add deprecated methods once
+if not hasattr(Constant, 'n'):
+ # The following code is for backward compatibility.
+ # It will be removed in the future.
+
+ def _getter(self):
+ """Deprecated. Use value instead."""
+ return self.value
+
+ def _setter(self, value):
+ self.value = value
+
+ Constant.n = property(_getter, _setter)
+ Constant.s = property(_getter, _setter)
+
+class _ABC(type):
+
+ def __init__(cls, *args):
+ cls.__doc__ = """Deprecated AST node class. Use ast.Constant instead"""
+
+ def __instancecheck__(cls, inst):
+ if not isinstance(inst, Constant):
+ return False
+ if cls in _const_types:
+ try:
+ value = inst.value
+ except AttributeError:
+ return False
+ else:
+ return (
+ isinstance(value, _const_types[cls]) and
+ not isinstance(value, _const_types_not.get(cls, ()))
+ )
+ return type.__instancecheck__(cls, inst)
+
+def _new(cls, *args, **kwargs):
+ for key in kwargs:
+ if key not in cls._fields:
+ # arbitrary keyword arguments are accepted
+ continue
+ pos = cls._fields.index(key)
+ if pos < len(args):
+ raise TypeError(f"{cls.__name__} got multiple values for argument {key!r}")
+ if cls in _const_types:
+ return Constant(*args, **kwargs)
+ return Constant.__new__(cls, *args, **kwargs)
+
+class Num(Constant, metaclass=_ABC):
+ _fields = ('n',)
+ __new__ = _new
+
+class Str(Constant, metaclass=_ABC):
+ _fields = ('s',)
+ __new__ = _new
+
+class Bytes(Constant, metaclass=_ABC):
+ _fields = ('s',)
+ __new__ = _new
+
+class NameConstant(Constant, metaclass=_ABC):
+ __new__ = _new
+
+class Ellipsis(Constant, metaclass=_ABC):
+ _fields = ()
+
+ def __new__(cls, *args, **kwargs):
+ if cls is Ellipsis:
+ return Constant(..., *args, **kwargs)
+ return Constant.__new__(cls, *args, **kwargs)
+
+_const_types = {
+ Num: (int, float, complex),
+ Str: (str,),
+ Bytes: (bytes,),
+ NameConstant: (type(None), bool),
+ Ellipsis: (type(...),),
+}
+_const_types_not = {
+ Num: (bool,),
+}
+
+_const_node_type_names = {
+ bool: 'NameConstant', # should be before int
+ type(None): 'NameConstant',
+ int: 'Num',
+ float: 'Num',
+ complex: 'Num',
+ str: 'Str',
+ bytes: 'Bytes',
+ type(...): 'Ellipsis',
+}
+
+class slice(AST):
+ """Deprecated AST node class."""
+
+class Index(slice):
+ """Deprecated AST node class. Use the index value directly instead."""
+ def __new__(cls, value, **kwargs):
+ return value
+
+class ExtSlice(slice):
+ """Deprecated AST node class. Use ast.Tuple instead."""
+ def __new__(cls, dims=(), **kwargs):
+ return Tuple(list(dims), Load(), **kwargs)
+
+# If the ast module is loaded more than once, only add deprecated methods once
+if not hasattr(Tuple, 'dims'):
+ # The following code is for backward compatibility.
+ # It will be removed in the future.
+
+ def _dims_getter(self):
+ """Deprecated. Use elts instead."""
+ return self.elts
+
+ def _dims_setter(self, value):
+ self.elts = value
+
+ Tuple.dims = property(_dims_getter, _dims_setter)
+
+class Suite(mod):
+ """Deprecated AST node class. Unused in Python 3."""
+
+class AugLoad(expr_context):
+ """Deprecated AST node class. Unused in Python 3."""
+
+class AugStore(expr_context):
+ """Deprecated AST node class. Unused in Python 3."""
+
+class Param(expr_context):
+ """Deprecated AST node class. Unused in Python 3."""
+
+
+# Large float and imaginary literals get turned into infinities in the AST.
+# We unparse those infinities to INFSTR.
+_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
+
+class _Precedence(IntEnum):
+ """Precedence table that originated from python grammar."""
+
+ TUPLE = auto()
+ YIELD = auto() # 'yield', 'yield from'
+ TEST = auto() # 'if'-'else', 'lambda'
+ OR = auto() # 'or'
+ AND = auto() # 'and'
+ NOT = auto() # 'not'
+ CMP = auto() # '<', '>', '==', '>=', '<=', '!=',
+ # 'in', 'not in', 'is', 'is not'
+ EXPR = auto()
+ BOR = EXPR # '|'
+ BXOR = auto() # '^'
+ BAND = auto() # '&'
+ SHIFT = auto() # '<<', '>>'
+ ARITH = auto() # '+', '-'
+ TERM = auto() # '*', '@', '/', '%', '//'
+ FACTOR = auto() # unary '+', '-', '~'
+ POWER = auto() # '**'
+ AWAIT = auto() # 'await'
+ ATOM = auto()
+
+ def next(self):
+ try:
+ return self.__class__(self + 1)
+ except ValueError:
+ return self
+
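+# next() asks for the immediately tighter-binding level, e.g. (sketch):
+#
+#     _Precedence.ARITH.next() is _Precedence.TERM   # True
+#     _Precedence.ATOM.next() is _Precedence.ATOM    # True; saturates at the top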
+
+_SINGLE_QUOTES = ("'", '"')
+_MULTI_QUOTES = ('"""', "'''")
+_ALL_QUOTES = (*_SINGLE_QUOTES, *_MULTI_QUOTES)
+
+class _Unparser(NodeVisitor):
+ """Methods in this class recursively traverse an AST and
+ output source code for the abstract syntax; original formatting
+ is disregarded."""
+
+ def __init__(self, *, _avoid_backslashes=False):
+ self._source = []
+ self._buffer = []
+ self._precedences = {}
+ self._type_ignores = {}
+ self._indent = 0
+ self._avoid_backslashes = _avoid_backslashes
+
+ def interleave(self, inter, f, seq):
+ """Call f on each item in seq, calling inter() in between."""
+ seq = iter(seq)
+ try:
+ f(next(seq))
+ except StopIteration:
+ pass
+ else:
+ for x in seq:
+ inter()
+ f(x)
+
+ def items_view(self, traverser, items):
+ """Traverse and separate the given *items* with a comma and append it to
+ the buffer. If *items* is a single item sequence, a trailing comma
+ will be added."""
+ if len(items) == 1:
+ traverser(items[0])
+ self.write(",")
+ else:
+ self.interleave(lambda: self.write(", "), traverser, items)
+
+ def maybe_newline(self):
+ """Adds a newline if it isn't the start of generated source"""
+ if self._source:
+ self.write("\n")
+
+ def fill(self, text=""):
+ """Indent a piece of text and append it, according to the current
+ indentation level"""
+ self.maybe_newline()
+ self.write(" " * self._indent + text)
+
+ def write(self, text):
+ """Append a piece of text"""
+ self._source.append(text)
+
+ def buffer_writer(self, text):
+ self._buffer.append(text)
+
+ @property
+ def buffer(self):
+ value = "".join(self._buffer)
+ self._buffer.clear()
+ return value
+
+ @contextmanager
+ def block(self, *, extra = None):
+ """A context manager for preparing the source for blocks. It adds
+ the character ':', increases the indentation on enter and decreases
+ the indentation on exit. If *extra* is given, it will be directly
+ appended after the colon character.
+ """
+ self.write(":")
+ if extra:
+ self.write(extra)
+ self._indent += 1
+ yield
+ self._indent -= 1
+
+ @contextmanager
+ def delimit(self, start, end):
+ """A context manager for preparing the source for expressions. It adds
+ *start* to the buffer and enters, after exit it adds *end*."""
+
+ self.write(start)
+ yield
+ self.write(end)
+
+ def delimit_if(self, start, end, condition):
+ if condition:
+ return self.delimit(start, end)
+ else:
+ return nullcontext()
+
+ def require_parens(self, precedence, node):
+ """Shortcut to adding precedence related parens"""
+ return self.delimit_if("(", ")", self.get_precedence(node) > precedence)
+
+ def get_precedence(self, node):
+ return self._precedences.get(node, _Precedence.TEST)
+
+ def set_precedence(self, precedence, *nodes):
+ for node in nodes:
+ self._precedences[node] = precedence
+
+ def get_raw_docstring(self, node):
+ """If a docstring node is found in the body of the *node* parameter,
+ return that docstring node, None otherwise.
+
+ Logic mirrored from ``_PyAST_GetDocString``."""
+ if not isinstance(
+ node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)
+ ) or len(node.body) < 1:
+ return None
+ node = node.body[0]
+ if not isinstance(node, Expr):
+ return None
+ node = node.value
+ if isinstance(node, Constant) and isinstance(node.value, str):
+ return node
+
+ def get_type_comment(self, node):
+ comment = self._type_ignores.get(node.lineno) or node.type_comment
+ if comment is not None:
+ return f" # type: {comment}"
+
+ def traverse(self, node):
+ if isinstance(node, list):
+ for item in node:
+ self.traverse(item)
+ else:
+ super().visit(node)
+
+ # Note: as visit() resets the output text, do NOT rely on
+ # NodeVisitor.generic_visit to handle any nodes (as it calls back into
+ # the subclass visit() method, which resets self._source to an empty list)
+ def visit(self, node):
+ """Outputs a source code string that, if converted back to an ast
+ (using ast.parse) will generate an AST equivalent to *node*"""
+ self._source = []
+ self.traverse(node)
+ return "".join(self._source)
+
+ def _write_docstring_and_traverse_body(self, node):
+ if (docstring := self.get_raw_docstring(node)):
+ self._write_docstring(docstring)
+ self.traverse(node.body[1:])
+ else:
+ self.traverse(node.body)
+
+ def visit_Module(self, node):
+ self._type_ignores = {
+ ignore.lineno: f"ignore{ignore.tag}"
+ for ignore in node.type_ignores
+ }
+ self._write_docstring_and_traverse_body(node)
+ self._type_ignores.clear()
+
+ def visit_FunctionType(self, node):
+ with self.delimit("(", ")"):
+ self.interleave(
+ lambda: self.write(", "), self.traverse, node.argtypes
+ )
+
+ self.write(" -> ")
+ self.traverse(node.returns)
+
+ def visit_Expr(self, node):
+ self.fill()
+ self.set_precedence(_Precedence.YIELD, node.value)
+ self.traverse(node.value)
+
+ def visit_NamedExpr(self, node):
+ with self.require_parens(_Precedence.TUPLE, node):
+ self.set_precedence(_Precedence.ATOM, node.target, node.value)
+ self.traverse(node.target)
+ self.write(" := ")
+ self.traverse(node.value)
+
+ def visit_Import(self, node):
+ self.fill("import ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.names)
+
+ def visit_ImportFrom(self, node):
+ self.fill("from ")
+ self.write("." * (node.level or 0))
+ if node.module:
+ self.write(node.module)
+ self.write(" import ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.names)
+
+ def visit_Assign(self, node):
+ self.fill()
+ for target in node.targets:
+ self.traverse(target)
+ self.write(" = ")
+ self.traverse(node.value)
+ if type_comment := self.get_type_comment(node):
+ self.write(type_comment)
+
+ def visit_AugAssign(self, node):
+ self.fill()
+ self.traverse(node.target)
+ self.write(" " + self.binop[node.op.__class__.__name__] + "= ")
+ self.traverse(node.value)
+
+ def visit_AnnAssign(self, node):
+ self.fill()
+ with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)):
+ self.traverse(node.target)
+ self.write(": ")
+ self.traverse(node.annotation)
+ if node.value:
+ self.write(" = ")
+ self.traverse(node.value)
+
+ def visit_Return(self, node):
+ self.fill("return")
+ if node.value:
+ self.write(" ")
+ self.traverse(node.value)
+
+ def visit_Pass(self, node):
+ self.fill("pass")
+
+ def visit_Break(self, node):
+ self.fill("break")
+
+ def visit_Continue(self, node):
+ self.fill("continue")
+
+ def visit_Delete(self, node):
+ self.fill("del ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.targets)
+
+ def visit_Assert(self, node):
+ self.fill("assert ")
+ self.traverse(node.test)
+ if node.msg:
+ self.write(", ")
+ self.traverse(node.msg)
+
+ def visit_Global(self, node):
+ self.fill("global ")
+ self.interleave(lambda: self.write(", "), self.write, node.names)
+
+ def visit_Nonlocal(self, node):
+ self.fill("nonlocal ")
+ self.interleave(lambda: self.write(", "), self.write, node.names)
+
+ def visit_Await(self, node):
+ with self.require_parens(_Precedence.AWAIT, node):
+ self.write("await")
+ if node.value:
+ self.write(" ")
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+
+ def visit_Yield(self, node):
+ with self.require_parens(_Precedence.YIELD, node):
+ self.write("yield")
+ if node.value:
+ self.write(" ")
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+
+ def visit_YieldFrom(self, node):
+ with self.require_parens(_Precedence.YIELD, node):
+ self.write("yield from ")
+ if not node.value:
+ raise ValueError("Node can't be used without a value attribute.")
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+
+ def visit_Raise(self, node):
+ self.fill("raise")
+ if not node.exc:
+ if node.cause:
+ raise ValueError(f"Node can't use cause without an exception.")
+ return
+ self.write(" ")
+ self.traverse(node.exc)
+ if node.cause:
+ self.write(" from ")
+ self.traverse(node.cause)
+
+ def visit_Try(self, node):
+ self.fill("try")
+ with self.block():
+ self.traverse(node.body)
+ for ex in node.handlers:
+ self.traverse(ex)
+ if node.orelse:
+ self.fill("else")
+ with self.block():
+ self.traverse(node.orelse)
+ if node.finalbody:
+ self.fill("finally")
+ with self.block():
+ self.traverse(node.finalbody)
+
+ def visit_ExceptHandler(self, node):
+ self.fill("except")
+ if node.type:
+ self.write(" ")
+ self.traverse(node.type)
+ if node.name:
+ self.write(" as ")
+ self.write(node.name)
+ with self.block():
+ self.traverse(node.body)
+
+ def visit_ClassDef(self, node):
+ self.maybe_newline()
+ for deco in node.decorator_list:
+ self.fill("@")
+ self.traverse(deco)
+ self.fill("class " + node.name)
+ with self.delimit_if("(", ")", condition = node.bases or node.keywords):
+ comma = False
+ for e in node.bases:
+ if comma:
+ self.write(", ")
+ else:
+ comma = True
+ self.traverse(e)
+ for e in node.keywords:
+ if comma:
+ self.write(", ")
+ else:
+ comma = True
+ self.traverse(e)
+
+ with self.block():
+ self._write_docstring_and_traverse_body(node)
+
+ def visit_FunctionDef(self, node):
+ self._function_helper(node, "def")
+
+ def visit_AsyncFunctionDef(self, node):
+ self._function_helper(node, "async def")
+
+ def _function_helper(self, node, fill_suffix):
+ self.maybe_newline()
+ for deco in node.decorator_list:
+ self.fill("@")
+ self.traverse(deco)
+ def_str = fill_suffix + " " + node.name
+ self.fill(def_str)
+ with self.delimit("(", ")"):
+ self.traverse(node.args)
+ if node.returns:
+ self.write(" -> ")
+ self.traverse(node.returns)
+ with self.block(extra=self.get_type_comment(node)):
+ self._write_docstring_and_traverse_body(node)
+
+ def visit_For(self, node):
+ self._for_helper("for ", node)
+
+ def visit_AsyncFor(self, node):
+ self._for_helper("async for ", node)
+
+ def _for_helper(self, fill, node):
+ self.fill(fill)
+ self.traverse(node.target)
+ self.write(" in ")
+ self.traverse(node.iter)
+ with self.block(extra=self.get_type_comment(node)):
+ self.traverse(node.body)
+ if node.orelse:
+ self.fill("else")
+ with self.block():
+ self.traverse(node.orelse)
+
+ def visit_If(self, node):
+ self.fill("if ")
+ self.traverse(node.test)
+ with self.block():
+ self.traverse(node.body)
+ # collapse nested ifs into equivalent elifs.
+ while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If):
+ node = node.orelse[0]
+ self.fill("elif ")
+ self.traverse(node.test)
+ with self.block():
+ self.traverse(node.body)
+ # final else
+ if node.orelse:
+ self.fill("else")
+ with self.block():
+ self.traverse(node.orelse)
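+
+ # Sketch: with the loop above, "if a:\n    x\nelif b:\n    y" round-trips
+ # through unparse() as the same elif chain rather than as a nested
+ # "else:\n    if b:" block.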
+
+ def visit_While(self, node):
+ self.fill("while ")
+ self.traverse(node.test)
+ with self.block():
+ self.traverse(node.body)
+ if node.orelse:
+ self.fill("else")
+ with self.block():
+ self.traverse(node.orelse)
+
+ def visit_With(self, node):
+ self.fill("with ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.items)
+ with self.block(extra=self.get_type_comment(node)):
+ self.traverse(node.body)
+
+ def visit_AsyncWith(self, node):
+ self.fill("async with ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.items)
+ with self.block(extra=self.get_type_comment(node)):
+ self.traverse(node.body)
+
+ def _str_literal_helper(
+ self, string, *, quote_types=_ALL_QUOTES, escape_special_whitespace=False
+ ):
+ """Helper for writing string literals, minimizing escapes.
+ Returns the tuple (string literal to write, possible quote types).
+ """
+ def escape_char(c):
+ # \n and \t are non-printable, but we only escape them if
+ # escape_special_whitespace is True
+ if not escape_special_whitespace and c in "\n\t":
+ return c
+ # Always escape backslashes and other non-printable characters
+ if c == "\\" or not c.isprintable():
+ return c.encode("unicode_escape").decode("ascii")
+ return c
+
+ escaped_string = "".join(map(escape_char, string))
+ possible_quotes = quote_types
+ if "\n" in escaped_string:
+ possible_quotes = [q for q in possible_quotes if q in _MULTI_QUOTES]
+ possible_quotes = [q for q in possible_quotes if q not in escaped_string]
+ if not possible_quotes:
+ # If there aren't any possible_quotes, fall back to using repr
+ # on the original string. Try to use a quote from quote_types,
+ # e.g., so that we use triple quotes for docstrings.
+ string = repr(string)
+ quote = next((q for q in quote_types if string[0] in q), string[0])
+ return string[1:-1], [quote]
+ if escaped_string:
+ # Sort so that we prefer '''"''' over """\""""
+ possible_quotes.sort(key=lambda q: q[0] == escaped_string[-1])
+ # If we're using triple quotes and we'd need to escape a final
+ # quote, escape it
+ if possible_quotes[0][0] == escaped_string[-1]:
+ assert len(possible_quotes[0]) == 3
+ escaped_string = escaped_string[:-1] + "\\" + escaped_string[-1]
+ return escaped_string, possible_quotes
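+
+ # E.g. (sketch): for the value 'say "hi"' the double quote stays unescaped,
+ # single quotes are selected, and the literal is written as 'say "hi"'
+ # rather than "say \"hi\"".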
+
+ def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES):
+ """Write string literal value with a best effort attempt to avoid backslashes."""
+ string, quote_types = self._str_literal_helper(string, quote_types=quote_types)
+ quote_type = quote_types[0]
+ self.write(f"{quote_type}{string}{quote_type}")
+
+ def visit_JoinedStr(self, node):
+ self.write("f")
+ if self._avoid_backslashes:
+ self._fstring_JoinedStr(node, self.buffer_writer)
+ self._write_str_avoiding_backslashes(self.buffer)
+ return
+
+ # If we don't need to avoid backslashes globally (i.e., we only need
+ # to avoid them inside FormattedValues), it's cosmetically preferred
+ # to use escaped whitespace. That is, it's preferred to use backslashes
+ # for cases like: f"{x}\n". To accomplish this, we keep track of what
+ # in our buffer corresponds to FormattedValues and what corresponds to
+ # Constant parts of the f-string, and allow escapes accordingly.
+ buffer = []
+ for value in node.values:
+ meth = getattr(self, "_fstring_" + type(value).__name__)
+ meth(value, self.buffer_writer)
+ buffer.append((self.buffer, isinstance(value, Constant)))
+ new_buffer = []
+ quote_types = _ALL_QUOTES
+ for value, is_constant in buffer:
+ # Repeatedly narrow down the list of possible quote_types
+ value, quote_types = self._str_literal_helper(
+ value, quote_types=quote_types,
+ escape_special_whitespace=is_constant
+ )
+ new_buffer.append(value)
+ value = "".join(new_buffer)
+ quote_type = quote_types[0]
+ self.write(f"{quote_type}{value}{quote_type}")
+
+ def visit_FormattedValue(self, node):
+ self.write("f")
+ self._fstring_FormattedValue(node, self.buffer_writer)
+ self._write_str_avoiding_backslashes(self.buffer)
+
+ def _fstring_JoinedStr(self, node, write):
+ for value in node.values:
+ meth = getattr(self, "_fstring_" + type(value).__name__)
+ meth(value, write)
+
+ def _fstring_Constant(self, node, write):
+ if not isinstance(node.value, str):
+ raise ValueError("Constants inside JoinedStr should be a string.")
+ value = node.value.replace("{", "{{").replace("}", "}}")
+ write(value)
+
+ def _fstring_FormattedValue(self, node, write):
+ write("{")
+ unparser = type(self)(_avoid_backslashes=True)
+ unparser.set_precedence(_Precedence.TEST.next(), node.value)
+ expr = unparser.visit(node.value)
+ if expr.startswith("{"):
+ write(" ") # Separate pair of opening brackets as "{ {"
+ if "\\" in expr:
+ raise ValueError("Unable to avoid backslash in f-string expression part")
+ write(expr)
+ if node.conversion != -1:
+ conversion = chr(node.conversion)
+ if conversion not in "sra":
+ raise ValueError("Unknown f-string conversion.")
+ write(f"!{conversion}")
+ if node.format_spec:
+ write(":")
+ meth = getattr(self, "_fstring_" + type(node.format_spec).__name__)
+ meth(node.format_spec, write)
+ write("}")
+
+ def visit_Name(self, node):
+ self.write(node.id)
+
+ def _write_docstring(self, node):
+ self.fill()
+ if node.kind == "u":
+ self.write("u")
+ self._write_str_avoiding_backslashes(node.value, quote_types=_MULTI_QUOTES)
+
+ def _write_constant(self, value):
+ if isinstance(value, (float, complex)):
+ # Substitute overflowing decimal literal for AST infinities,
+ # and inf - inf for NaNs.
+ self.write(
+ repr(value)
+ .replace("inf", _INFSTR)
+ .replace("nan", f"({_INFSTR}-{_INFSTR})")
+ )
+ elif self._avoid_backslashes and isinstance(value, str):
+ self._write_str_avoiding_backslashes(value)
+ else:
+ self.write(repr(value))
+
+ def visit_Constant(self, node):
+ value = node.value
+ if isinstance(value, tuple):
+ with self.delimit("(", ")"):
+ self.items_view(self._write_constant, value)
+ elif value is ...:
+ self.write("...")
+ else:
+ if node.kind == "u":
+ self.write("u")
+ self._write_constant(node.value)
+
+ def visit_List(self, node):
+ with self.delimit("[", "]"):
+ self.interleave(lambda: self.write(", "), self.traverse, node.elts)
+
+ def visit_ListComp(self, node):
+ with self.delimit("[", "]"):
+ self.traverse(node.elt)
+ for gen in node.generators:
+ self.traverse(gen)
+
+ def visit_GeneratorExp(self, node):
+ with self.delimit("(", ")"):
+ self.traverse(node.elt)
+ for gen in node.generators:
+ self.traverse(gen)
+
+ def visit_SetComp(self, node):
+ with self.delimit("{", "}"):
+ self.traverse(node.elt)
+ for gen in node.generators:
+ self.traverse(gen)
+
+ def visit_DictComp(self, node):
+ with self.delimit("{", "}"):
+ self.traverse(node.key)
+ self.write(": ")
+ self.traverse(node.value)
+ for gen in node.generators:
+ self.traverse(gen)
+
+ def visit_comprehension(self, node):
+ if node.is_async:
+ self.write(" async for ")
+ else:
+ self.write(" for ")
+ self.set_precedence(_Precedence.TUPLE, node.target)
+ self.traverse(node.target)
+ self.write(" in ")
+ self.set_precedence(_Precedence.TEST.next(), node.iter, *node.ifs)
+ self.traverse(node.iter)
+ for if_clause in node.ifs:
+ self.write(" if ")
+ self.traverse(if_clause)
+
+ def visit_IfExp(self, node):
+ with self.require_parens(_Precedence.TEST, node):
+ self.set_precedence(_Precedence.TEST.next(), node.body, node.test)
+ self.traverse(node.body)
+ self.write(" if ")
+ self.traverse(node.test)
+ self.write(" else ")
+ self.set_precedence(_Precedence.TEST, node.orelse)
+ self.traverse(node.orelse)
+
+ def visit_Set(self, node):
+ if node.elts:
+ with self.delimit("{", "}"):
+ self.interleave(lambda: self.write(", "), self.traverse, node.elts)
+ else:
+ # `{}` would be interpreted as a dictionary literal, and
+ # `set` might be shadowed. Thus:
+ self.write('{*()}')
+
+ def visit_Dict(self, node):
+ def write_key_value_pair(k, v):
+ self.traverse(k)
+ self.write(": ")
+ self.traverse(v)
+
+ def write_item(item):
+ k, v = item
+ if k is None:
+ # for dictionary unpacking operator in dicts {**{'y': 2}}
+ # see PEP 448 for details
+ self.write("**")
+ self.set_precedence(_Precedence.EXPR, v)
+ self.traverse(v)
+ else:
+ write_key_value_pair(k, v)
+
+ with self.delimit("{", "}"):
+ self.interleave(
+ lambda: self.write(", "), write_item, zip(node.keys, node.values)
+ )
+
+ def visit_Tuple(self, node):
+ with self.delimit("(", ")"):
+ self.items_view(self.traverse, node.elts)
+
+ unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"}
+ unop_precedence = {
+ "not": _Precedence.NOT,
+ "~": _Precedence.FACTOR,
+ "+": _Precedence.FACTOR,
+ "-": _Precedence.FACTOR,
+ }
+
+ def visit_UnaryOp(self, node):
+ operator = self.unop[node.op.__class__.__name__]
+ operator_precedence = self.unop_precedence[operator]
+ with self.require_parens(operator_precedence, node):
+ self.write(operator)
+ # factor prefixes (+, -, ~) shouldn't be separated
+ # from the value they belong to (e.g., +1 instead of + 1)
+ if operator_precedence is not _Precedence.FACTOR:
+ self.write(" ")
+ self.set_precedence(operator_precedence, node.operand)
+ self.traverse(node.operand)
+
+ binop = {
+ "Add": "+",
+ "Sub": "-",
+ "Mult": "*",
+ "MatMult": "@",
+ "Div": "/",
+ "Mod": "%",
+ "LShift": "<<",
+ "RShift": ">>",
+ "BitOr": "|",
+ "BitXor": "^",
+ "BitAnd": "&",
+ "FloorDiv": "//",
+ "Pow": "**",
+ }
+
+ binop_precedence = {
+ "+": _Precedence.ARITH,
+ "-": _Precedence.ARITH,
+ "*": _Precedence.TERM,
+ "@": _Precedence.TERM,
+ "/": _Precedence.TERM,
+ "%": _Precedence.TERM,
+ "<<": _Precedence.SHIFT,
+ ">>": _Precedence.SHIFT,
+ "|": _Precedence.BOR,
+ "^": _Precedence.BXOR,
+ "&": _Precedence.BAND,
+ "//": _Precedence.TERM,
+ "**": _Precedence.POWER,
+ }
+
+ binop_rassoc = frozenset(("**",))
+ def visit_BinOp(self, node):
+ operator = self.binop[node.op.__class__.__name__]
+ operator_precedence = self.binop_precedence[operator]
+ with self.require_parens(operator_precedence, node):
+ if operator in self.binop_rassoc:
+ left_precedence = operator_precedence.next()
+ right_precedence = operator_precedence
+ else:
+ left_precedence = operator_precedence
+ right_precedence = operator_precedence.next()
+
+ self.set_precedence(left_precedence, node.left)
+ self.traverse(node.left)
+ self.write(f" {operator} ")
+ self.set_precedence(right_precedence, node.right)
+ self.traverse(node.right)
+
+ cmpops = {
+ "Eq": "==",
+ "NotEq": "!=",
+ "Lt": "<",
+ "LtE": "<=",
+ "Gt": ">",
+ "GtE": ">=",
+ "Is": "is",
+ "IsNot": "is not",
+ "In": "in",
+ "NotIn": "not in",
+ }
+
+ def visit_Compare(self, node):
+ with self.require_parens(_Precedence.CMP, node):
+ self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators)
+ self.traverse(node.left)
+ for o, e in zip(node.ops, node.comparators):
+ self.write(" " + self.cmpops[o.__class__.__name__] + " ")
+ self.traverse(e)
+
+ boolops = {"And": "and", "Or": "or"}
+ boolop_precedence = {"and": _Precedence.AND, "or": _Precedence.OR}
+
+ def visit_BoolOp(self, node):
+ operator = self.boolops[node.op.__class__.__name__]
+ operator_precedence = self.boolop_precedence[operator]
+
+ def increasing_level_traverse(node):
+ nonlocal operator_precedence
+ operator_precedence = operator_precedence.next()
+ self.set_precedence(operator_precedence, node)
+ self.traverse(node)
+
+ with self.require_parens(operator_precedence, node):
+ s = f" {operator} "
+ self.interleave(lambda: self.write(s), increasing_level_traverse, node.values)
+
+ def visit_Attribute(self, node):
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+ # Special case: 3.__abs__() is a syntax error, so if node.value
+ # is an integer literal then we need to either parenthesize
+ # it or add an extra space to get 3 .__abs__().
+ if isinstance(node.value, Constant) and isinstance(node.value.value, int):
+ self.write(" ")
+ self.write(".")
+ self.write(node.attr)
+
+ def visit_Call(self, node):
+ self.set_precedence(_Precedence.ATOM, node.func)
+ self.traverse(node.func)
+ with self.delimit("(", ")"):
+ comma = False
+ for e in node.args:
+ if comma:
+ self.write(", ")
+ else:
+ comma = True
+ self.traverse(e)
+ for e in node.keywords:
+ if comma:
+ self.write(", ")
+ else:
+ comma = True
+ self.traverse(e)
+
+ def visit_Subscript(self, node):
+ def is_simple_tuple(slice_value):
+ # when unparsing a non-empty tuple, the parentheses can be safely
+ # omitted if there aren't any elements that explicitly require
+ # parentheses (such as starred expressions).
+ return (
+ isinstance(slice_value, Tuple)
+ and slice_value.elts
+ and not any(isinstance(elt, Starred) for elt in slice_value.elts)
+ )
+
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+ with self.delimit("[", "]"):
+ if is_simple_tuple(node.slice):
+ self.items_view(self.traverse, node.slice.elts)
+ else:
+ self.traverse(node.slice)
+
+ def visit_Starred(self, node):
+ self.write("*")
+ self.set_precedence(_Precedence.EXPR, node.value)
+ self.traverse(node.value)
+
+ def visit_Ellipsis(self, node):
+ self.write("...")
+
+ def visit_Slice(self, node):
+ if node.lower:
+ self.traverse(node.lower)
+ self.write(":")
+ if node.upper:
+ self.traverse(node.upper)
+ if node.step:
+ self.write(":")
+ self.traverse(node.step)
+
+ def visit_Match(self, node):
+ self.fill("match ")
+ self.traverse(node.subject)
+ with self.block():
+ for case in node.cases:
+ self.traverse(case)
+
+ def visit_arg(self, node):
+ self.write(node.arg)
+ if node.annotation:
+ self.write(": ")
+ self.traverse(node.annotation)
+
+ def visit_arguments(self, node):
+ first = True
+ # normal arguments
+ all_args = node.posonlyargs + node.args
+ defaults = [None] * (len(all_args) - len(node.defaults)) + node.defaults
+ for index, elements in enumerate(zip(all_args, defaults), 1):
+ a, d = elements
+ if first:
+ first = False
+ else:
+ self.write(", ")
+ self.traverse(a)
+ if d:
+ self.write("=")
+ self.traverse(d)
+ if index == len(node.posonlyargs):
+ self.write(", /")
+
+ # varargs, or bare '*' if no varargs but keyword-only arguments present
+ if node.vararg or node.kwonlyargs:
+ if first:
+ first = False
+ else:
+ self.write(", ")
+ self.write("*")
+ if node.vararg:
+ self.write(node.vararg.arg)
+ if node.vararg.annotation:
+ self.write(": ")
+ self.traverse(node.vararg.annotation)
+
+ # keyword-only arguments
+ if node.kwonlyargs:
+ for a, d in zip(node.kwonlyargs, node.kw_defaults):
+ self.write(", ")
+ self.traverse(a)
+ if d:
+ self.write("=")
+ self.traverse(d)
+
+ # kwargs
+ if node.kwarg:
+ if first:
+ first = False
+ else:
+ self.write(", ")
+ self.write("**" + node.kwarg.arg)
+ if node.kwarg.annotation:
+ self.write(": ")
+ self.traverse(node.kwarg.annotation)
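+
+ # E.g. (sketch): the arguments of "def f(a, /, b, *args, c=1, **kw)" are
+ # written back as "a, /, b, *args, c=1, **kw", with the "/" and "*" markers
+ # reconstructed from posonlyargs, vararg and kwonlyargs.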
+
+ def visit_keyword(self, node):
+ if node.arg is None:
+ self.write("**")
+ else:
+ self.write(node.arg)
+ self.write("=")
+ self.traverse(node.value)
+
+ def visit_Lambda(self, node):
+ with self.require_parens(_Precedence.TEST, node):
+ self.write("lambda ")
+ self.traverse(node.args)
+ self.write(": ")
+ self.set_precedence(_Precedence.TEST, node.body)
+ self.traverse(node.body)
+
+ def visit_alias(self, node):
+ self.write(node.name)
+ if node.asname:
+ self.write(" as " + node.asname)
+
+ def visit_withitem(self, node):
+ self.traverse(node.context_expr)
+ if node.optional_vars:
+ self.write(" as ")
+ self.traverse(node.optional_vars)
+
+ def visit_match_case(self, node):
+ self.fill("case ")
+ self.traverse(node.pattern)
+ if node.guard:
+ self.write(" if ")
+ self.traverse(node.guard)
+ with self.block():
+ self.traverse(node.body)
+
+ def visit_MatchValue(self, node):
+ self.traverse(node.value)
+
+ def visit_MatchSingleton(self, node):
+ self._write_constant(node.value)
+
+ def visit_MatchSequence(self, node):
+ with self.delimit("[", "]"):
+ self.interleave(
+ lambda: self.write(", "), self.traverse, node.patterns
+ )
+
+ def visit_MatchStar(self, node):
+ name = node.name
+ if name is None:
+ name = "_"
+ self.write(f"*{name}")
+
+ def visit_MatchMapping(self, node):
+ def write_key_pattern_pair(pair):
+ k, p = pair
+ self.traverse(k)
+ self.write(": ")
+ self.traverse(p)
+
+ with self.delimit("{", "}"):
+ keys = node.keys
+ self.interleave(
+ lambda: self.write(", "),
+ write_key_pattern_pair,
+ zip(keys, node.patterns, strict=True),
+ )
+ rest = node.rest
+ if rest is not None:
+ if keys:
+ self.write(", ")
+ self.write(f"**{rest}")
+
+ def visit_MatchClass(self, node):
+ self.set_precedence(_Precedence.ATOM, node.cls)
+ self.traverse(node.cls)
+ with self.delimit("(", ")"):
+ patterns = node.patterns
+ self.interleave(
+ lambda: self.write(", "), self.traverse, patterns
+ )
+ attrs = node.kwd_attrs
+ if attrs:
+ def write_attr_pattern(pair):
+ attr, pattern = pair
+ self.write(f"{attr}=")
+ self.traverse(pattern)
+
+ if patterns:
+ self.write(", ")
+ self.interleave(
+ lambda: self.write(", "),
+ write_attr_pattern,
+ zip(attrs, node.kwd_patterns, strict=True),
+ )
+
+ def visit_MatchAs(self, node):
+ name = node.name
+ pattern = node.pattern
+ if name is None:
+ self.write("_")
+ elif pattern is None:
+ self.write(node.name)
+ else:
+ with self.require_parens(_Precedence.TEST, node):
+ self.set_precedence(_Precedence.BOR, node.pattern)
+ self.traverse(node.pattern)
+ self.write(f" as {node.name}")
+
+ def visit_MatchOr(self, node):
+ with self.require_parens(_Precedence.BOR, node):
+ self.set_precedence(_Precedence.BOR.next(), *node.patterns)
+ self.interleave(lambda: self.write(" | "), self.traverse, node.patterns)
+
+def unparse(ast_obj):
+ unparser = _Unparser()
+ return unparser.visit(ast_obj)
+
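+# Round-trip sketch: only the AST survives, not comments or layout.
+#
+#     tree = parse("x   =   1 + 2   # note")
+#     unparse(tree)   # 'x = 1 + 2'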
+
+def main():
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='python -m ast')
+ parser.add_argument('infile', type=argparse.FileType(mode='rb'), nargs='?',
+ default='-',
+ help='the file to parse; defaults to stdin')
+ parser.add_argument('-m', '--mode', default='exec',
+ choices=('exec', 'single', 'eval', 'func_type'),
+ help='specify what kind of code must be parsed')
+ parser.add_argument('--no-type-comments', default=True, action='store_false',
+ help="don't add information about type comments")
+ parser.add_argument('-a', '--include-attributes', action='store_true',
+ help='include attributes such as line numbers and '
+ 'column offsets')
+ parser.add_argument('-i', '--indent', type=int, default=3,
+ help='indentation of nodes (number of spaces)')
+ args = parser.parse_args()
+
+ with args.infile as infile:
+ source = infile.read()
+ tree = parse(source, args.infile.name, args.mode, type_comments=args.no_type_comments)
+ print(dump(tree, include_attributes=args.include_attributes, indent=args.indent))
+
+if __name__ == '__main__':
+ main()
diff --git a/evalkit_cambrian/lib/python3.10/binhex.py b/evalkit_cambrian/lib/python3.10/binhex.py
new file mode 100644
index 0000000000000000000000000000000000000000..ace5217d2713921d2b03c1a956a0f23ed0bdbccb
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/binhex.py
@@ -0,0 +1,502 @@
+"""Macintosh binhex compression/decompression.
+
+easy interface:
+binhex(inputfilename, outputfilename)
+hexbin(inputfilename, outputfilename)
+"""
+
+#
+# Jack Jansen, CWI, August 1995.
+#
+# The module is supposed to be as compatible as possible. Especially the
+# easy interface should work "as expected" on any platform.
+# XXXX Note: currently, textfiles appear in mac-form on all platforms.
+# We seem to lack a simple character-translate in python.
+# (we should probably use ISO-Latin-1 on all but the mac platform).
+# XXXX The simple routines are too simple: they expect to hold the complete
+# files in-core. Should be fixed.
+# XXXX It would be nice to handle AppleDouble format on unix
+# (for servers serving macs).
+# XXXX I don't understand what happens when you get 0x90 times the same byte on
+# input. The resulting code (xx 90 90) would appear to be interpreted as an
+# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
+#
+import binascii
+import contextlib
+import io
+import os
+import struct
+import warnings
+
+warnings.warn('the binhex module is deprecated', DeprecationWarning,
+ stacklevel=2)
+
+
+__all__ = ["binhex","hexbin","Error"]
+
+class Error(Exception):
+ pass
+
+# States (what have we written)
+_DID_HEADER = 0
+_DID_DATA = 1
+
+# Various constants
+REASONABLY_LARGE = 32768 # Minimal amount we pass the rle-coder
+LINELEN = 64
+RUNCHAR = b"\x90"
+
+#
+# This code is no longer byte-order dependent
+
+
+class FInfo:
+ def __init__(self):
+ self.Type = '????'
+ self.Creator = '????'
+ self.Flags = 0
+
+def getfileinfo(name):
+ finfo = FInfo()
+ with io.open(name, 'rb') as fp:
+ # Quick check for textfile
+ data = fp.read(512)
+ if 0 not in data:
+ finfo.Type = 'TEXT'
+ fp.seek(0, 2)
+ dsize = fp.tell()
+ dir, file = os.path.split(name)
+ file = file.replace(':', '-', 1)
+ return file, finfo, dsize, 0
+
+class openrsrc:
+ def __init__(self, *args):
+ pass
+
+ def read(self, *args):
+ return b''
+
+ def write(self, *args):
+ pass
+
+ def close(self):
+ pass
+
+
+# DeprecationWarning is already emitted on "import binhex". There is no need
+# to repeat the warning at each call to deprecated binascii functions.
+@contextlib.contextmanager
+def _ignore_deprecation_warning():
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', '', DeprecationWarning)
+ yield
+
+
+class _Hqxcoderengine:
+ """Write data to the coder in 3-byte chunks"""
+
+ def __init__(self, ofp):
+ self.ofp = ofp
+ self.data = b''
+ self.hqxdata = b''
+ self.linelen = LINELEN - 1
+
+ def write(self, data):
+ self.data = self.data + data
+ datalen = len(self.data)
+ todo = (datalen // 3) * 3
+ data = self.data[:todo]
+ self.data = self.data[todo:]
+ if not data:
+ return
+ with _ignore_deprecation_warning():
+ self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
+ self._flush(0)
+
+ def _flush(self, force):
+ first = 0
+ while first <= len(self.hqxdata) - self.linelen:
+ last = first + self.linelen
+ self.ofp.write(self.hqxdata[first:last] + b'\r')
+ self.linelen = LINELEN
+ first = last
+ self.hqxdata = self.hqxdata[first:]
+ if force:
+ self.ofp.write(self.hqxdata + b':\r')
+
+ def close(self):
+ if self.data:
+ with _ignore_deprecation_warning():
+ self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data)
+ self._flush(1)
+ self.ofp.close()
+ del self.ofp
+
+class _Rlecoderengine:
+ """Write data to the RLE-coder in suitably large chunks"""
+
+ def __init__(self, ofp):
+ self.ofp = ofp
+ self.data = b''
+
+ def write(self, data):
+ self.data = self.data + data
+ if len(self.data) < REASONABLY_LARGE:
+ return
+ with _ignore_deprecation_warning():
+ rledata = binascii.rlecode_hqx(self.data)
+ self.ofp.write(rledata)
+ self.data = b''
+
+ def close(self):
+ if self.data:
+ with _ignore_deprecation_warning():
+ rledata = binascii.rlecode_hqx(self.data)
+ self.ofp.write(rledata)
+ self.ofp.close()
+ del self.ofp
+
+class BinHex:
+ def __init__(self, name_finfo_dlen_rlen, ofp):
+ name, finfo, dlen, rlen = name_finfo_dlen_rlen
+ close_on_error = False
+ if isinstance(ofp, str):
+ ofname = ofp
+ ofp = io.open(ofname, 'wb')
+ close_on_error = True
+ try:
+ ofp.write(b'(This file must be converted with BinHex 4.0)\r\r:')
+ hqxer = _Hqxcoderengine(ofp)
+ self.ofp = _Rlecoderengine(hqxer)
+ self.crc = 0
+ if finfo is None:
+ finfo = FInfo()
+ self.dlen = dlen
+ self.rlen = rlen
+ self._writeinfo(name, finfo)
+ self.state = _DID_HEADER
+ except:
+ if close_on_error:
+ ofp.close()
+ raise
+
+ def _writeinfo(self, name, finfo):
+ nl = len(name)
+ if nl > 63:
+ raise Error('Filename too long')
+ d = bytes([nl]) + name.encode("latin-1") + b'\0'
+ tp, cr = finfo.Type, finfo.Creator
+ if isinstance(tp, str):
+ tp = tp.encode("latin-1")
+ if isinstance(cr, str):
+ cr = cr.encode("latin-1")
+ d2 = tp + cr
+
+ # Force all structs to be packed with big-endian
+ d3 = struct.pack('>h', finfo.Flags)
+ d4 = struct.pack('>ii', self.dlen, self.rlen)
+ info = d + d2 + d3 + d4
+ self._write(info)
+ self._writecrc()
+
+ def _write(self, data):
+ self.crc = binascii.crc_hqx(data, self.crc)
+ self.ofp.write(data)
+
+ def _writecrc(self):
+ # XXXX Should this be here??
+ # self.crc = binascii.crc_hqx('\0\0', self.crc)
+ if self.crc < 0:
+ fmt = '>h'
+ else:
+ fmt = '>H'
+ self.ofp.write(struct.pack(fmt, self.crc))
+ self.crc = 0
+
+ def write(self, data):
+ if self.state != _DID_HEADER:
+ raise Error('Writing data at the wrong time')
+ self.dlen = self.dlen - len(data)
+ self._write(data)
+
+ def close_data(self):
+ if self.dlen != 0:
+ raise Error('Incorrect data size, diff=%r' % (self.dlen,))
+ self._writecrc()
+ self.state = _DID_DATA
+
+ def write_rsrc(self, data):
+ if self.state < _DID_DATA:
+ self.close_data()
+ if self.state != _DID_DATA:
+ raise Error('Writing resource data at the wrong time')
+ self.rlen = self.rlen - len(data)
+ self._write(data)
+
+ def close(self):
+ if self.state is None:
+ return
+ try:
+ if self.state < _DID_DATA:
+ self.close_data()
+ if self.state != _DID_DATA:
+ raise Error('Close at the wrong time')
+ if self.rlen != 0:
+ raise Error("Incorrect resource-datasize, diff=%r" % (self.rlen,))
+ self._writecrc()
+ finally:
+ self.state = None
+ ofp = self.ofp
+ del self.ofp
+ ofp.close()
+
+def binhex(inp, out):
+ """binhex(infilename, outfilename): create binhex-encoded copy of a file"""
+ finfo = getfileinfo(inp)
+ ofp = BinHex(finfo, out)
+
+ with io.open(inp, 'rb') as ifp:
+ # XXXX Do textfile translation on non-mac systems
+ while True:
+ d = ifp.read(128000)
+ if not d: break
+ ofp.write(d)
+ ofp.close_data()
+
+ ifp = openrsrc(inp, 'rb')
+ while True:
+ d = ifp.read(128000)
+ if not d: break
+ ofp.write_rsrc(d)
+ ofp.close()
+ ifp.close()
+
+class _Hqxdecoderengine:
+ """Read data via the decoder in 4-byte chunks"""
+
+ def __init__(self, ifp):
+ self.ifp = ifp
+ self.eof = 0
+
+ def read(self, totalwtd):
+ """Read at least wtd bytes (or until EOF)"""
+ decdata = b''
+ wtd = totalwtd
+ #
+ # The loop here is convoluted, since we don't really know how
+ # much to decode: there may be newlines in the incoming data.
+ while wtd > 0:
+ if self.eof: return decdata
+ wtd = ((wtd + 2) // 3) * 4
+ data = self.ifp.read(wtd)
+ #
+ # Next problem: there may not be a complete number of
+ # bytes in what we pass to a2b. Solve by yet another
+ # loop.
+ #
+ while True:
+ try:
+ with _ignore_deprecation_warning():
+ decdatacur, self.eof = binascii.a2b_hqx(data)
+ break
+ except binascii.Incomplete:
+ pass
+ newdata = self.ifp.read(1)
+ if not newdata:
+ raise Error('Premature EOF on binhex file')
+ data = data + newdata
+ decdata = decdata + decdatacur
+ wtd = totalwtd - len(decdata)
+ if not decdata and not self.eof:
+ raise Error('Premature EOF on binhex file')
+ return decdata
+
+ def close(self):
+ self.ifp.close()
+
+class _Rledecoderengine:
+ """Read data via the RLE-coder"""
+
+ def __init__(self, ifp):
+ self.ifp = ifp
+ self.pre_buffer = b''
+ self.post_buffer = b''
+ self.eof = 0
+
+ def read(self, wtd):
+ if wtd > len(self.post_buffer):
+ self._fill(wtd - len(self.post_buffer))
+ rv = self.post_buffer[:wtd]
+ self.post_buffer = self.post_buffer[wtd:]
+ return rv
+
+ def _fill(self, wtd):
+ self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4)
+ if self.ifp.eof:
+ with _ignore_deprecation_warning():
+ self.post_buffer = self.post_buffer + \
+ binascii.rledecode_hqx(self.pre_buffer)
+ self.pre_buffer = b''
+ return
+
+ #
+ # Obfuscated code ahead. We have to take care that we don't
+ # end up with an orphaned RUNCHAR later on. So, we keep a couple
+ # of bytes in the buffer, depending on what the end of
+ # the buffer looks like:
+ # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
+ # '?\220' - Keep 2 bytes: repeated something-else
+ # '\220\0' - Escaped \220: Keep 2 bytes.
+ # '?\220?' - Complete repeat sequence: decode all
+ # otherwise: keep 1 byte.
+ #
+ mark = len(self.pre_buffer)
+ if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR:
+ mark = mark - 3
+ elif self.pre_buffer[-1:] == RUNCHAR:
+ mark = mark - 2
+ elif self.pre_buffer[-2:] == RUNCHAR + b'\0':
+ mark = mark - 2
+ elif self.pre_buffer[-2:-1] == RUNCHAR:
+ pass # Decode all
+ else:
+ mark = mark - 1
+
+ with _ignore_deprecation_warning():
+ self.post_buffer = self.post_buffer + \
+ binascii.rledecode_hqx(self.pre_buffer[:mark])
+ self.pre_buffer = self.pre_buffer[mark:]
+
+ def close(self):
+ self.ifp.close()
+
+class HexBin:
+ def __init__(self, ifp):
+ if isinstance(ifp, str):
+ ifp = io.open(ifp, 'rb')
+ #
+ # Find initial colon.
+ #
+ while True:
+ ch = ifp.read(1)
+ if not ch:
+ raise Error("No binhex data found")
+ # Cater for \r\n terminated lines (which show up as \n\r, hence
+ # all lines start with \r)
+ if ch == b'\r':
+ continue
+ if ch == b':':
+ break
+
+ hqxifp = _Hqxdecoderengine(ifp)
+ self.ifp = _Rledecoderengine(hqxifp)
+ self.crc = 0
+ self._readheader()
+
+ def _read(self, len):
+ data = self.ifp.read(len)
+ self.crc = binascii.crc_hqx(data, self.crc)
+ return data
+
+ def _checkcrc(self):
+ filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
+ #self.crc = binascii.crc_hqx('\0\0', self.crc)
+ # XXXX Is this needed??
+ self.crc = self.crc & 0xffff
+ if filecrc != self.crc:
+ raise Error('CRC error, computed %x, read %x'
+ % (self.crc, filecrc))
+ self.crc = 0
+
+ def _readheader(self):
+ len = self._read(1)
+ fname = self._read(ord(len))
+ rest = self._read(1 + 4 + 4 + 2 + 4 + 4)
+ self._checkcrc()
+
+ type = rest[1:5]
+ creator = rest[5:9]
+ flags = struct.unpack('>h', rest[9:11])[0]
+ self.dlen = struct.unpack('>l', rest[11:15])[0]
+ self.rlen = struct.unpack('>l', rest[15:19])[0]
+
+ self.FName = fname
+ self.FInfo = FInfo()
+ self.FInfo.Creator = creator
+ self.FInfo.Type = type
+ self.FInfo.Flags = flags
+
+ self.state = _DID_HEADER
+
+ def read(self, *n):
+ if self.state != _DID_HEADER:
+ raise Error('Read data at wrong time')
+ if n:
+ n = n[0]
+ n = min(n, self.dlen)
+ else:
+ n = self.dlen
+ rv = b''
+ while len(rv) < n:
+ rv = rv + self._read(n-len(rv))
+ self.dlen = self.dlen - n
+ return rv
+
+ def close_data(self):
+ if self.state != _DID_HEADER:
+ raise Error('close_data at wrong time')
+ if self.dlen:
+ dummy = self._read(self.dlen)
+ self._checkcrc()
+ self.state = _DID_DATA
+
+ def read_rsrc(self, *n):
+ if self.state == _DID_HEADER:
+ self.close_data()
+ if self.state != _DID_DATA:
+ raise Error('Read resource data at wrong time')
+ if n:
+ n = n[0]
+ n = min(n, self.rlen)
+ else:
+ n = self.rlen
+ self.rlen = self.rlen - n
+ return self._read(n)
+
+ def close(self):
+ if self.state is None:
+ return
+ try:
+ if self.rlen:
+ dummy = self.read_rsrc(self.rlen)
+ self._checkcrc()
+ finally:
+ self.state = None
+ self.ifp.close()
+
+def hexbin(inp, out):
+ """hexbin(infilename, outfilename) - Decode binhexed file"""
+ ifp = HexBin(inp)
+ finfo = ifp.FInfo
+ if not out:
+ out = ifp.FName
+
+ with io.open(out, 'wb') as ofp:
+ # XXXX Do translation on non-mac systems
+ while True:
+ d = ifp.read(128000)
+ if not d: break
+ ofp.write(d)
+ ifp.close_data()
+
+ d = ifp.read_rsrc(128000)
+ if d:
+ ofp = openrsrc(out, 'wb')
+ ofp.write(d)
+ while True:
+ d = ifp.read_rsrc(128000)
+ if not d: break
+ ofp.write(d)
+ ofp.close()
+
+ ifp.close()
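+
+# Round-trip sketch (hypothetical file names; the module is deprecated):
+#
+#     binhex("notes.txt", "notes.hqx")
+#     hexbin("notes.hqx", "notes.out")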
diff --git a/evalkit_cambrian/lib/python3.10/bz2.py b/evalkit_cambrian/lib/python3.10/bz2.py
new file mode 100644
index 0000000000000000000000000000000000000000..fabe4f73c8d8085cffc2792dee4c1629a87b8b35
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/bz2.py
@@ -0,0 +1,344 @@
+"""Interface to the libbzip2 compression library.
+
+This module provides a file interface, classes for incremental
+(de)compression, and functions for one-shot (de)compression.
+"""
+
+__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor",
+ "open", "compress", "decompress"]
+
+__author__ = "Nadeem Vawda <nadeem.vawda@gmail.com>"
+
+from builtins import open as _builtin_open
+import io
+import os
+import _compression
+
+from _bz2 import BZ2Compressor, BZ2Decompressor
+
+
+_MODE_CLOSED = 0
+_MODE_READ = 1
+# Value 2 no longer used
+_MODE_WRITE = 3
+
+
+class BZ2File(_compression.BaseStream):
+
+ """A file object providing transparent bzip2 (de)compression.
+
+ A BZ2File can act as a wrapper for an existing file object, or refer
+ directly to a named file on disk.
+
+ Note that BZ2File provides a *binary* file interface - data read is
+ returned as bytes, and data to be written should be given as bytes.
+ """
+
+ def __init__(self, filename, mode="r", *, compresslevel=9):
+ """Open a bzip2-compressed file.
+
+ If filename is a str, bytes, or PathLike object, it gives the
+ name of the file to be opened. Otherwise, it should be a file
+ object, which will be used to read or write the compressed data.
+
+ mode can be 'r' for reading (default), 'w' for (over)writing,
+ 'x' for creating exclusively, or 'a' for appending. These can
+ equivalently be given as 'rb', 'wb', 'xb', and 'ab'.
+
+ If mode is 'w', 'x' or 'a', compresslevel can be a number between 1
+ and 9 specifying the level of compression: 1 produces the least
+ compression, and 9 (default) produces the most compression.
+
+ If mode is 'r', the input file may be the concatenation of
+ multiple compressed streams.
+ """
+ self._fp = None
+ self._closefp = False
+ self._mode = _MODE_CLOSED
+
+ if not (1 <= compresslevel <= 9):
+ raise ValueError("compresslevel must be between 1 and 9")
+
+ if mode in ("", "r", "rb"):
+ mode = "rb"
+ mode_code = _MODE_READ
+ elif mode in ("w", "wb"):
+ mode = "wb"
+ mode_code = _MODE_WRITE
+ self._compressor = BZ2Compressor(compresslevel)
+ elif mode in ("x", "xb"):
+ mode = "xb"
+ mode_code = _MODE_WRITE
+ self._compressor = BZ2Compressor(compresslevel)
+ elif mode in ("a", "ab"):
+ mode = "ab"
+ mode_code = _MODE_WRITE
+ self._compressor = BZ2Compressor(compresslevel)
+ else:
+ raise ValueError("Invalid mode: %r" % (mode,))
+
+ if isinstance(filename, (str, bytes, os.PathLike)):
+ self._fp = _builtin_open(filename, mode)
+ self._closefp = True
+ self._mode = mode_code
+ elif hasattr(filename, "read") or hasattr(filename, "write"):
+ self._fp = filename
+ self._mode = mode_code
+ else:
+ raise TypeError("filename must be a str, bytes, file or PathLike object")
+
+ if self._mode == _MODE_READ:
+ raw = _compression.DecompressReader(self._fp,
+ BZ2Decompressor, trailing_error=OSError)
+ self._buffer = io.BufferedReader(raw)
+ else:
+ self._pos = 0
+
+ def close(self):
+ """Flush and close the file.
+
+ May be called more than once without error. Once the file is
+ closed, any other operation on it will raise a ValueError.
+ """
+ if self._mode == _MODE_CLOSED:
+ return
+ try:
+ if self._mode == _MODE_READ:
+ self._buffer.close()
+ elif self._mode == _MODE_WRITE:
+ self._fp.write(self._compressor.flush())
+ self._compressor = None
+ finally:
+ try:
+ if self._closefp:
+ self._fp.close()
+ finally:
+ self._fp = None
+ self._closefp = False
+ self._mode = _MODE_CLOSED
+ self._buffer = None
+
+ @property
+ def closed(self):
+ """True if this file is closed."""
+ return self._mode == _MODE_CLOSED
+
+ def fileno(self):
+ """Return the file descriptor for the underlying file."""
+ self._check_not_closed()
+ return self._fp.fileno()
+
+ def seekable(self):
+ """Return whether the file supports seeking."""
+ return self.readable() and self._buffer.seekable()
+
+ def readable(self):
+ """Return whether the file was opened for reading."""
+ self._check_not_closed()
+ return self._mode == _MODE_READ
+
+ def writable(self):
+ """Return whether the file was opened for writing."""
+ self._check_not_closed()
+ return self._mode == _MODE_WRITE
+
+ def peek(self, n=0):
+ """Return buffered data without advancing the file position.
+
+ Always returns at least one byte of data, unless at EOF.
+ The exact number of bytes returned is unspecified.
+ """
+ self._check_can_read()
+ # Relies on the undocumented fact that BufferedReader.peek()
+ # always returns at least one byte (except at EOF), independent
+ # of the value of n
+ return self._buffer.peek(n)
+
+ def read(self, size=-1):
+ """Read up to size uncompressed bytes from the file.
+
+ If size is negative or omitted, read until EOF is reached.
+ Returns b'' if the file is already at EOF.
+ """
+ self._check_can_read()
+ return self._buffer.read(size)
+
+ def read1(self, size=-1):
+ """Read up to size uncompressed bytes, while trying to avoid
+ making multiple reads from the underlying stream. Reads up to a
+ buffer's worth of data if size is negative.
+
+ Returns b'' if the file is at EOF.
+ """
+ self._check_can_read()
+ if size < 0:
+ size = io.DEFAULT_BUFFER_SIZE
+ return self._buffer.read1(size)
+
+ def readinto(self, b):
+ """Read bytes into b.
+
+ Returns the number of bytes read (0 for EOF).
+ """
+ self._check_can_read()
+ return self._buffer.readinto(b)
+
+ def readline(self, size=-1):
+ """Read a line of uncompressed bytes from the file.
+
+ The terminating newline (if present) is retained. If size is
+ non-negative, no more than size bytes will be read (in which
+ case the line may be incomplete). Returns b'' if already at EOF.
+ """
+ if not isinstance(size, int):
+ if not hasattr(size, "__index__"):
+ raise TypeError("Integer argument expected")
+ size = size.__index__()
+ self._check_can_read()
+ return self._buffer.readline(size)
+
+ def readlines(self, size=-1):
+ """Read a list of lines of uncompressed bytes from the file.
+
+ size can be specified to control the number of lines read: no
+ further lines will be read once the total size of the lines read
+ so far equals or exceeds size.
+ """
+ if not isinstance(size, int):
+ if not hasattr(size, "__index__"):
+ raise TypeError("Integer argument expected")
+ size = size.__index__()
+ self._check_can_read()
+ return self._buffer.readlines(size)
+
+ def write(self, data):
+ """Write a byte string to the file.
+
+ Returns the number of uncompressed bytes written, which is
+ always the length of data in bytes. Note that due to buffering,
+ the file on disk may not reflect the data written until close()
+ is called.
+ """
+ self._check_can_write()
+ if isinstance(data, (bytes, bytearray)):
+ length = len(data)
+ else:
+ # accept any data that supports the buffer protocol
+ data = memoryview(data)
+ length = data.nbytes
+
+ compressed = self._compressor.compress(data)
+ self._fp.write(compressed)
+ self._pos += length
+ return length
+
+ def writelines(self, seq):
+ """Write a sequence of byte strings to the file.
+
+ Returns the number of uncompressed bytes written.
+ seq can be any iterable yielding byte strings.
+
+ Line separators are not added between the written byte strings.
+ """
+ return _compression.BaseStream.writelines(self, seq)
+
+ def seek(self, offset, whence=io.SEEK_SET):
+ """Change the file position.
+
+ The new position is specified by offset, relative to the
+ position indicated by whence. Values for whence are:
+
+ 0: start of stream (default); offset must not be negative
+ 1: current stream position
+ 2: end of stream; offset must not be positive
+
+ Returns the new file position.
+
+ Note that seeking is emulated, so depending on the parameters,
+ this operation may be extremely slow.
+ """
+ self._check_can_seek()
+ return self._buffer.seek(offset, whence)
+
+ def tell(self):
+ """Return the current file position."""
+ self._check_not_closed()
+ if self._mode == _MODE_READ:
+ return self._buffer.tell()
+ return self._pos
+
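+# Reading sketch (hypothetical file name); BZ2File supports the context
+# manager protocol and line iteration inherited from the io machinery:
+#
+#     with BZ2File("log.bz2") as f:
+#         for line in f:    # lines are bytes
+#             ...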
+
+def open(filename, mode="rb", compresslevel=9,
+ encoding=None, errors=None, newline=None):
+ """Open a bzip2-compressed file in binary or text mode.
+
+ The filename argument can be an actual filename (a str, bytes, or
+ PathLike object), or an existing file object to read from or write
+ to.
+
+ The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or
+ "ab" for binary mode, or "rt", "wt", "xt" or "at" for text mode.
+ The default mode is "rb", and the default compresslevel is 9.
+
+ For binary mode, this function is equivalent to the BZ2File
+ constructor: BZ2File(filename, mode, compresslevel). In this case,
+ the encoding, errors and newline arguments must not be provided.
+
+ For text mode, a BZ2File object is created, and wrapped in an
+ io.TextIOWrapper instance with the specified encoding, error
+ handling behavior, and line ending(s).
+
+ """
+ if "t" in mode:
+ if "b" in mode:
+ raise ValueError("Invalid mode: %r" % (mode,))
+ else:
+ if encoding is not None:
+ raise ValueError("Argument 'encoding' not supported in binary mode")
+ if errors is not None:
+ raise ValueError("Argument 'errors' not supported in binary mode")
+ if newline is not None:
+ raise ValueError("Argument 'newline' not supported in binary mode")
+
+ bz_mode = mode.replace("t", "")
+ binary_file = BZ2File(filename, bz_mode, compresslevel=compresslevel)
+
+ if "t" in mode:
+ encoding = io.text_encoding(encoding)
+ return io.TextIOWrapper(binary_file, encoding, errors, newline)
+ else:
+ return binary_file
+
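+# A minimal usage sketch for open() (illustrative only; the file name
+# "example.bz2" is hypothetical and not part of this module):
+#
+#     with open("example.bz2", "wt", encoding="utf-8") as f:
+#         f.write("hello world\n")
+#     with open("example.bz2", "rt", encoding="utf-8") as f:
+#         assert f.read() == "hello world\n"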
+
+def compress(data, compresslevel=9):
+ """Compress a block of data.
+
+ compresslevel, if given, must be a number between 1 and 9.
+
+ For incremental compression, use a BZ2Compressor object instead.
+ """
+ comp = BZ2Compressor(compresslevel)
+ return comp.compress(data) + comp.flush()
+
+
+def decompress(data):
+ """Decompress a block of data.
+
+ For incremental decompression, use a BZ2Decompressor object instead.
+ """
+ results = []
+ while data:
+ decomp = BZ2Decompressor()
+ try:
+ res = decomp.decompress(data)
+ except OSError:
+ if results:
+ break # Leftover data is not a valid bzip2 stream; ignore it.
+ else:
+ raise # Error on the first iteration; bail out.
+ results.append(res)
+ if not decomp.eof:
+ raise ValueError("Compressed data ended before the "
+ "end-of-stream marker was reached")
+ data = decomp.unused_data
+ return b"".join(results)
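+# Round-trip sketch (illustrative only) showing how the loop above lets
+# decompress() handle multiple concatenated bzip2 streams:
+#
+#     blob = compress(b"spam") + compress(b"eggs")
+#     assert decompress(blob) == b"spameggs"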
diff --git a/evalkit_cambrian/lib/python3.10/cProfile.py b/evalkit_cambrian/lib/python3.10/cProfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ae1fb8859e51fefb5193127a8d8a24aa77fc92b
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/cProfile.py
@@ -0,0 +1,191 @@
+#! /usr/bin/env python3
+
+"""Python interface for the 'lsprof' profiler.
+ Compatible with the 'profile' module.
+"""
+
+__all__ = ["run", "runctx", "Profile"]
+
+import _lsprof
+import io
+import profile as _pyprofile
+
+# ____________________________________________________________
+# Simple interface
+
+def run(statement, filename=None, sort=-1):
+ return _pyprofile._Utils(Profile).run(statement, filename, sort)
+
+def runctx(statement, globals, locals, filename=None, sort=-1):
+ return _pyprofile._Utils(Profile).runctx(statement, globals, locals,
+ filename, sort)
+
+run.__doc__ = _pyprofile.run.__doc__
+runctx.__doc__ = _pyprofile.runctx.__doc__
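+
+# Quick sketch of the simple interface (illustrative only; the statement
+# and sort key are arbitrary examples):
+#
+#     import cProfile
+#     cProfile.run("sum(range(1000))", sort="cumulative")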
+
+# ____________________________________________________________
+
+class Profile(_lsprof.Profiler):
+ """Profile(timer=None, timeunit=None, subcalls=True, builtins=True)
+
+ Builds a profiler object using the specified timer function.
+ The default timer is a fast built-in one based on real time.
+ For custom timer functions returning integers, timeunit can
+ be a float specifying a scale (i.e. how long each integer unit
+ is, in seconds).
+ """
+
+ # Most of the functionality is in the base class.
+ # This subclass only adds convenient and backward-compatible methods.
+
+ def print_stats(self, sort=-1):
+ import pstats
+ pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()
+
+ def dump_stats(self, file):
+ import marshal
+ with open(file, 'wb') as f:
+ self.create_stats()
+ marshal.dump(self.stats, f)
+
+ def create_stats(self):
+ self.disable()
+ self.snapshot_stats()
+
+ def snapshot_stats(self):
+ entries = self.getstats()
+ self.stats = {}
+ callersdicts = {}
+ # call information
+ for entry in entries:
+ func = label(entry.code)
+ nc = entry.callcount # ncalls column of pstats (before '/')
+ cc = nc - entry.reccallcount # ncalls column of pstats (after '/')
+ tt = entry.inlinetime # tottime column of pstats
+ ct = entry.totaltime # cumtime column of pstats
+ callers = {}
+ callersdicts[id(entry.code)] = callers
+ self.stats[func] = cc, nc, tt, ct, callers
+ # subcall information
+ for entry in entries:
+ if entry.calls:
+ func = label(entry.code)
+ for subentry in entry.calls:
+ try:
+ callers = callersdicts[id(subentry.code)]
+ except KeyError:
+ continue
+ nc = subentry.callcount
+ cc = nc - subentry.reccallcount
+ tt = subentry.inlinetime
+ ct = subentry.totaltime
+ if func in callers:
+ prev = callers[func]
+ nc += prev[0]
+ cc += prev[1]
+ tt += prev[2]
+ ct += prev[3]
+ callers[func] = nc, cc, tt, ct
+
+ # The following two methods can be called by clients to use
+ # a profiler to profile a statement, given as a string.
+
+ def run(self, cmd):
+ import __main__
+ dict = __main__.__dict__
+ return self.runctx(cmd, dict, dict)
+
+ def runctx(self, cmd, globals, locals):
+ self.enable()
+ try:
+ exec(cmd, globals, locals)
+ finally:
+ self.disable()
+ return self
+
+ # This method is more useful to profile a single function call.
+ def runcall(self, func, /, *args, **kw):
+ self.enable()
+ try:
+ return func(*args, **kw)
+ finally:
+ self.disable()
+
+ def __enter__(self):
+ self.enable()
+ return self
+
+ def __exit__(self, *exc_info):
+ self.disable()
+
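+# Context-manager usage sketch for the class above (illustrative only):
+#
+#     with Profile() as pr:
+#         sum(range(1000))
+#     pr.print_stats(sort="tottime")
+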
+# ____________________________________________________________
+
+def label(code):
+ if isinstance(code, str):
+ return ('~', 0, code) # built-in functions ('~' sorts at the end)
+ else:
+ return (code.co_filename, code.co_firstlineno, code.co_name)
+
+# ____________________________________________________________
+
+def main():
+ import os
+ import sys
+ import runpy
+ import pstats
+ from optparse import OptionParser
+ usage = "cProfile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..."
+ parser = OptionParser(usage=usage)
+ parser.allow_interspersed_args = False
+ parser.add_option('-o', '--outfile', dest="outfile",
+        help="Save stats to <outfile>", default=None)
+ parser.add_option('-s', '--sort', dest="sort",
+ help="Sort order when printing to stdout, based on pstats.Stats class",
+ default=-1,
+ choices=sorted(pstats.Stats.sort_arg_dict_default))
+ parser.add_option('-m', dest="module", action="store_true",
+ help="Profile a library module", default=False)
+
+ if not sys.argv[1:]:
+ parser.print_usage()
+ sys.exit(2)
+
+ (options, args) = parser.parse_args()
+ sys.argv[:] = args
+
+ # The script that we're profiling may chdir, so capture the absolute path
+ # to the output file at startup.
+ if options.outfile is not None:
+ options.outfile = os.path.abspath(options.outfile)
+
+ if len(args) > 0:
+ if options.module:
+ code = "run_module(modname, run_name='__main__')"
+ globs = {
+ 'run_module': runpy.run_module,
+ 'modname': args[0]
+ }
+ else:
+ progname = args[0]
+ sys.path.insert(0, os.path.dirname(progname))
+ with io.open_code(progname) as fp:
+ code = compile(fp.read(), progname, 'exec')
+ globs = {
+ '__file__': progname,
+ '__name__': '__main__',
+ '__package__': None,
+ '__cached__': None,
+ }
+ try:
+ runctx(code, globs, None, options.outfile, options.sort)
+ except BrokenPipeError as exc:
+ # Prevent "Exception ignored" during interpreter shutdown.
+ sys.stdout = None
+ sys.exit(exc.errno)
+ else:
+ parser.print_usage()
+ return parser
+
+# When invoked as main program, invoke the profiler on a script
+if __name__ == '__main__':
+ main()
diff --git a/evalkit_cambrian/lib/python3.10/cgi.py b/evalkit_cambrian/lib/python3.10/cgi.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cb8cf28bd66457ef05a8cc19bb53b8f2afbb780
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/cgi.py
@@ -0,0 +1,1004 @@
+#! /usr/local/bin/python
+
+# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
+# intentionally NOT "/usr/bin/env python". On many systems
+# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
+# scripts, and /usr/local/bin is the default directory where Python is
+# installed, so /usr/bin/env would be unable to find python. Granted,
+# binary installations by Linux vendors often install Python in
+# /usr/bin. So let those vendors patch cgi.py to match their choice
+# of installation.
+
+"""Support module for CGI (Common Gateway Interface) scripts.
+
+This module defines a number of utilities for use by CGI scripts
+written in Python.
+"""
+
+# History
+# -------
+#
+# Michael McLay started this module. Steve Majewski changed the
+# interface to SvFormContentDict and FormContentDict. The multipart
+# parsing was inspired by code submitted by Andreas Paepcke. Guido van
+# Rossum rewrote, reformatted and documented the module and is currently
+# responsible for its maintenance.
+#
+
+__version__ = "2.6"
+
+
+# Imports
+# =======
+
+from io import StringIO, BytesIO, TextIOWrapper
+from collections.abc import Mapping
+import sys
+import os
+import urllib.parse
+from email.parser import FeedParser
+from email.message import Message
+import html
+import locale
+import tempfile
+import warnings
+
+__all__ = ["MiniFieldStorage", "FieldStorage", "parse", "parse_multipart",
+ "parse_header", "test", "print_exception", "print_environ",
+ "print_form", "print_directory", "print_arguments",
+ "print_environ_usage"]
+
+# Logging support
+# ===============
+
+logfile = "" # Filename to log to, if not empty
+logfp = None # File object to log to, if not None
+
+def initlog(*allargs):
+ """Write a log message, if there is a log file.
+
+ Even though this function is called initlog(), you should always
+ use log(); log is a variable that is set either to initlog
+ (initially), to dolog (once the log file has been opened), or to
+ nolog (when logging is disabled).
+
+ The first argument is a format string; the remaining arguments (if
+ any) are arguments to the % operator, so e.g.
+ log("%s: %s", "a", "b")
+ will write "a: b" to the log file, followed by a newline.
+
+ If the global logfp is not None, it should be a file object to
+ which log data is written.
+
+ If the global logfp is None, the global logfile may be a string
+ giving a filename to open, in append mode. This file should be
+ world writable!!! If the file can't be opened, logging is
+ silently disabled (since there is no safe place where we could
+ send an error message).
+
+ """
+ global log, logfile, logfp
+ warnings.warn("cgi.log() is deprecated as of 3.10. Use logging instead",
+ DeprecationWarning, stacklevel=2)
+ if logfile and not logfp:
+ try:
+ logfp = open(logfile, "a", encoding="locale")
+ except OSError:
+ pass
+ if not logfp:
+ log = nolog
+ else:
+ log = dolog
+ log(*allargs)
+
+def dolog(fmt, *args):
+ """Write a log message to the log file. See initlog() for docs."""
+ logfp.write(fmt%args + "\n")
+
+def nolog(*allargs):
+ """Dummy function, assigned to log when logging is disabled."""
+ pass
+
+def closelog():
+ """Close the log file."""
+ global log, logfile, logfp
+ logfile = ''
+ if logfp:
+ logfp.close()
+ logfp = None
+ log = initlog
+
+log = initlog # The current logging function
+
+
+# Parsing functions
+# =================
+
+# Maximum input we will accept when REQUEST_METHOD is POST
+# 0 ==> unlimited input
+maxlen = 0
+
+def parse(fp=None, environ=os.environ, keep_blank_values=0,
+ strict_parsing=0, separator='&'):
+ """Parse a query in the environment or from a file (default stdin)
+
+ Arguments, all optional:
+
+ fp : file pointer; default: sys.stdin.buffer
+
+ environ : environment dictionary; default: os.environ
+
+ keep_blank_values: flag indicating whether blank values in
+ percent-encoded forms should be treated as blank strings.
+ A true value indicates that blanks should be retained as
+ blank strings. The default false value indicates that
+ blank values are to be ignored and treated as if they were
+ not included.
+
+ strict_parsing: flag indicating what to do with parsing errors.
+ If false (the default), errors are silently ignored.
+ If true, errors raise a ValueError exception.
+
+ separator: str. The symbol to use for separating the query arguments.
+ Defaults to &.
+ """
+ if fp is None:
+ fp = sys.stdin
+
+ # field keys and values (except for files) are returned as strings
+ # an encoding is required to decode the bytes read from self.fp
+ if hasattr(fp,'encoding'):
+ encoding = fp.encoding
+ else:
+ encoding = 'latin-1'
+
+ # fp.read() must return bytes
+ if isinstance(fp, TextIOWrapper):
+ fp = fp.buffer
+
+    if 'REQUEST_METHOD' not in environ:
+ environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
+ if environ['REQUEST_METHOD'] == 'POST':
+ ctype, pdict = parse_header(environ['CONTENT_TYPE'])
+ if ctype == 'multipart/form-data':
+ return parse_multipart(fp, pdict, separator=separator)
+ elif ctype == 'application/x-www-form-urlencoded':
+ clength = int(environ['CONTENT_LENGTH'])
+ if maxlen and clength > maxlen:
+ raise ValueError('Maximum content length exceeded')
+ qs = fp.read(clength).decode(encoding)
+ else:
+ qs = '' # Unknown content-type
+ if 'QUERY_STRING' in environ:
+ if qs: qs = qs + '&'
+ qs = qs + environ['QUERY_STRING']
+ elif sys.argv[1:]:
+ if qs: qs = qs + '&'
+ qs = qs + sys.argv[1]
+ environ['QUERY_STRING'] = qs # XXX Shouldn't, really
+ elif 'QUERY_STRING' in environ:
+ qs = environ['QUERY_STRING']
+ else:
+ if sys.argv[1:]:
+ qs = sys.argv[1]
+ else:
+ qs = ""
+ environ['QUERY_STRING'] = qs # XXX Shouldn't, really
+ return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
+ encoding=encoding, separator=separator)
+
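+# Illustrative sketch of parse() on a GET request (the environ dict here
+# is hypothetical):
+#
+#     env = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'a=1&a=2&b=3'}
+#     parse(environ=env)  # -> {'a': ['1', '2'], 'b': ['3']}
+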
+
+def parse_multipart(fp, pdict, encoding="utf-8", errors="replace", separator='&'):
+ """Parse multipart input.
+
+ Arguments:
+ fp : input file
+ pdict: dictionary containing other parameters of content-type header
+ encoding, errors: request encoding and error handler, passed to
+ FieldStorage
+
+ Returns a dictionary just like parse_qs(): keys are the field names, each
+ value is a list of values for that field. For non-file fields, the value
+ is a list of strings.
+ """
+ # RFC 2046, Section 5.1 : The "multipart" boundary delimiters are always
+ # represented as 7bit US-ASCII.
+ boundary = pdict['boundary'].decode('ascii')
+ ctype = "multipart/form-data; boundary={}".format(boundary)
+ headers = Message()
+ headers.set_type(ctype)
+ try:
+ headers['Content-Length'] = pdict['CONTENT-LENGTH']
+ except KeyError:
+ pass
+ fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors,
+ environ={'REQUEST_METHOD': 'POST'}, separator=separator)
+ return {k: fs.getlist(k) for k in fs}
+
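+# Illustrative sketch (the boundary and body are hypothetical, shown only
+# to make the expected pdict shape concrete):
+#
+#     from io import BytesIO
+#     body = (b'--BOUNDARY\r\n'
+#             b'Content-Disposition: form-data; name="field1"\r\n\r\n'
+#             b'value1\r\n'
+#             b'--BOUNDARY--\r\n')
+#     parse_multipart(BytesIO(body), {'boundary': b'BOUNDARY',
+#                                     'CONTENT-LENGTH': str(len(body))})
+#     # -> roughly {'field1': ['value1']}
+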
+def _parseparam(s):
+ while s[:1] == ';':
+ s = s[1:]
+ end = s.find(';')
+ while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
+ end = s.find(';', end + 1)
+ if end < 0:
+ end = len(s)
+ f = s[:end]
+ yield f.strip()
+ s = s[end:]
+
+def parse_header(line):
+ """Parse a Content-type like header.
+
+ Return the main content-type and a dictionary of options.
+
+ """
+ parts = _parseparam(';' + line)
+ key = parts.__next__()
+ pdict = {}
+ for p in parts:
+ i = p.find('=')
+ if i >= 0:
+ name = p[:i].strip().lower()
+ value = p[i+1:].strip()
+ if len(value) >= 2 and value[0] == value[-1] == '"':
+ value = value[1:-1]
+ value = value.replace('\\\\', '\\').replace('\\"', '"')
+ pdict[name] = value
+ return key, pdict
+
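+# Example of what parse_header() returns for a typical Content-Type value:
+#
+#     parse_header('text/html; charset=utf-8')
+#     # -> ('text/html', {'charset': 'utf-8'})
+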
+
+# Classes for field storage
+# =========================
+
+class MiniFieldStorage:
+
+ """Like FieldStorage, for use when no file uploads are possible."""
+
+ # Dummy attributes
+ filename = None
+ list = None
+ type = None
+ file = None
+ type_options = {}
+ disposition = None
+ disposition_options = {}
+ headers = {}
+
+ def __init__(self, name, value):
+ """Constructor from field name and value."""
+ self.name = name
+ self.value = value
+ # self.file = StringIO(value)
+
+ def __repr__(self):
+ """Return printable representation."""
+ return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
+
+
+class FieldStorage:
+
+ """Store a sequence of fields, reading multipart/form-data.
+
+ This class provides naming, typing, files stored on disk, and
+ more. At the top level, it is accessible like a dictionary, whose
+ keys are the field names. (Note: None can occur as a field name.)
+    The items are either a Python list (if there are multiple values) or
+ another FieldStorage or MiniFieldStorage object. If it's a single
+ object, it has the following attributes:
+
+ name: the field name, if specified; otherwise None
+
+ filename: the filename, if specified; otherwise None; this is the
+ client side filename, *not* the file name on which it is
+ stored (that's a temporary file you don't deal with)
+
+ value: the value as a *string*; for file uploads, this
+ transparently reads the file every time you request the value
+ and returns *bytes*
+
+ file: the file(-like) object from which you can read the data *as
+        bytes*; None if the data is stored as a simple string
+
+ type: the content-type, or None if not specified
+
+ type_options: dictionary of options specified on the content-type
+ line
+
+ disposition: content-disposition, or None if not specified
+
+ disposition_options: dictionary of corresponding options
+
+ headers: a dictionary(-like) object (sometimes email.message.Message or a
+ subclass thereof) containing *all* headers
+
+ The class is subclassable, mostly for the purpose of overriding
+ the make_file() method, which is called internally to come up with
+ a file open for reading and writing. This makes it possible to
+ override the default choice of storing all files in a temporary
+ directory and unlinking them as soon as they have been opened.
+
+ """
+ def __init__(self, fp=None, headers=None, outerboundary=b'',
+ environ=os.environ, keep_blank_values=0, strict_parsing=0,
+ limit=None, encoding='utf-8', errors='replace',
+ max_num_fields=None, separator='&'):
+ """Constructor. Read multipart/* until last part.
+
+ Arguments, all optional:
+
+ fp : file pointer; default: sys.stdin.buffer
+ (not used when the request method is GET)
+ Can be :
+ 1. a TextIOWrapper object
+ 2. an object whose read() and readline() methods return bytes
+
+ headers : header dictionary-like object; default:
+ taken from environ as per CGI spec
+
+ outerboundary : terminating multipart boundary
+ (for internal use only)
+
+ environ : environment dictionary; default: os.environ
+
+ keep_blank_values: flag indicating whether blank values in
+ percent-encoded forms should be treated as blank strings.
+ A true value indicates that blanks should be retained as
+ blank strings. The default false value indicates that
+ blank values are to be ignored and treated as if they were
+ not included.
+
+ strict_parsing: flag indicating what to do with parsing errors.
+ If false (the default), errors are silently ignored.
+ If true, errors raise a ValueError exception.
+
+ limit : used internally to read parts of multipart/form-data forms,
+ to exit from the reading loop when reached. It is the difference
+ between the form content-length and the number of bytes already
+ read
+
+ encoding, errors : the encoding and error handler used to decode the
+ binary stream to strings. Must be the same as the charset defined
+ for the page sending the form (content-type : meta http-equiv or
+ header)
+
+ max_num_fields: int. If set, then __init__ throws a ValueError
+ if there are more than n fields read by parse_qsl().
+
+ """
+ method = 'GET'
+ self.keep_blank_values = keep_blank_values
+ self.strict_parsing = strict_parsing
+ self.max_num_fields = max_num_fields
+ self.separator = separator
+ if 'REQUEST_METHOD' in environ:
+ method = environ['REQUEST_METHOD'].upper()
+ self.qs_on_post = None
+ if method == 'GET' or method == 'HEAD':
+ if 'QUERY_STRING' in environ:
+ qs = environ['QUERY_STRING']
+ elif sys.argv[1:]:
+ qs = sys.argv[1]
+ else:
+ qs = ""
+ qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
+ fp = BytesIO(qs)
+ if headers is None:
+ headers = {'content-type':
+ "application/x-www-form-urlencoded"}
+ if headers is None:
+ headers = {}
+ if method == 'POST':
+ # Set default content-type for POST to what's traditional
+ headers['content-type'] = "application/x-www-form-urlencoded"
+ if 'CONTENT_TYPE' in environ:
+ headers['content-type'] = environ['CONTENT_TYPE']
+ if 'QUERY_STRING' in environ:
+ self.qs_on_post = environ['QUERY_STRING']
+ if 'CONTENT_LENGTH' in environ:
+ headers['content-length'] = environ['CONTENT_LENGTH']
+ else:
+ if not (isinstance(headers, (Mapping, Message))):
+ raise TypeError("headers must be mapping or an instance of "
+ "email.message.Message")
+ self.headers = headers
+ if fp is None:
+ self.fp = sys.stdin.buffer
+ # self.fp.read() must return bytes
+ elif isinstance(fp, TextIOWrapper):
+ self.fp = fp.buffer
+ else:
+ if not (hasattr(fp, 'read') and hasattr(fp, 'readline')):
+ raise TypeError("fp must be file pointer")
+ self.fp = fp
+
+ self.encoding = encoding
+ self.errors = errors
+
+ if not isinstance(outerboundary, bytes):
+ raise TypeError('outerboundary must be bytes, not %s'
+ % type(outerboundary).__name__)
+ self.outerboundary = outerboundary
+
+ self.bytes_read = 0
+ self.limit = limit
+
+ # Process content-disposition header
+ cdisp, pdict = "", {}
+ if 'content-disposition' in self.headers:
+ cdisp, pdict = parse_header(self.headers['content-disposition'])
+ self.disposition = cdisp
+ self.disposition_options = pdict
+ self.name = None
+ if 'name' in pdict:
+ self.name = pdict['name']
+ self.filename = None
+ if 'filename' in pdict:
+ self.filename = pdict['filename']
+ self._binary_file = self.filename is not None
+
+ # Process content-type header
+ #
+ # Honor any existing content-type header. But if there is no
+ # content-type header, use some sensible defaults. Assume
+ # outerboundary is "" at the outer level, but something non-false
+ # inside a multi-part. The default for an inner part is text/plain,
+ # but for an outer part it should be urlencoded. This should catch
+ # bogus clients which erroneously forget to include a content-type
+ # header.
+ #
+ # See below for what we do if there does exist a content-type header,
+ # but it happens to be something we don't understand.
+ if 'content-type' in self.headers:
+ ctype, pdict = parse_header(self.headers['content-type'])
+ elif self.outerboundary or method != 'POST':
+ ctype, pdict = "text/plain", {}
+ else:
+ ctype, pdict = 'application/x-www-form-urlencoded', {}
+ self.type = ctype
+ self.type_options = pdict
+ if 'boundary' in pdict:
+ self.innerboundary = pdict['boundary'].encode(self.encoding,
+ self.errors)
+ else:
+ self.innerboundary = b""
+
+ clen = -1
+ if 'content-length' in self.headers:
+ try:
+ clen = int(self.headers['content-length'])
+ except ValueError:
+ pass
+ if maxlen and clen > maxlen:
+ raise ValueError('Maximum content length exceeded')
+ self.length = clen
+ if self.limit is None and clen >= 0:
+ self.limit = clen
+
+ self.list = self.file = None
+ self.done = 0
+ if ctype == 'application/x-www-form-urlencoded':
+ self.read_urlencoded()
+ elif ctype[:10] == 'multipart/':
+ self.read_multi(environ, keep_blank_values, strict_parsing)
+ else:
+ self.read_single()
+
+ def __del__(self):
+ try:
+ self.file.close()
+ except AttributeError:
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.file.close()
+
+ def __repr__(self):
+ """Return a printable representation."""
+ return "FieldStorage(%r, %r, %r)" % (
+ self.name, self.filename, self.value)
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def __getattr__(self, name):
+ if name != 'value':
+ raise AttributeError(name)
+ if self.file:
+ self.file.seek(0)
+ value = self.file.read()
+ self.file.seek(0)
+ elif self.list is not None:
+ value = self.list
+ else:
+ value = None
+ return value
+
+ def __getitem__(self, key):
+ """Dictionary style indexing."""
+ if self.list is None:
+ raise TypeError("not indexable")
+ found = []
+ for item in self.list:
+ if item.name == key: found.append(item)
+ if not found:
+ raise KeyError(key)
+ if len(found) == 1:
+ return found[0]
+ else:
+ return found
+
+ def getvalue(self, key, default=None):
+ """Dictionary style get() method, including 'value' lookup."""
+ if key in self:
+ value = self[key]
+ if isinstance(value, list):
+ return [x.value for x in value]
+ else:
+ return value.value
+ else:
+ return default
+
+ def getfirst(self, key, default=None):
+ """ Return the first value received."""
+ if key in self:
+ value = self[key]
+ if isinstance(value, list):
+ return value[0].value
+ else:
+ return value.value
+ else:
+ return default
+
+ def getlist(self, key):
+ """ Return list of received values."""
+ if key in self:
+ value = self[key]
+ if isinstance(value, list):
+ return [x.value for x in value]
+ else:
+ return [value.value]
+ else:
+ return []
+
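+    # Sketch of the three accessors on a hypothetical form whose query
+    # string was 'a=1&a=2' (illustrative only):
+    #
+    #     form.getvalue('a')   # -> ['1', '2']  (list, because 'a' repeats)
+    #     form.getfirst('a')   # -> '1'
+    #     form.getlist('a')    # -> ['1', '2']
+    #     form.getlist('b')    # -> []
+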
+ def keys(self):
+ """Dictionary style keys() method."""
+ if self.list is None:
+ raise TypeError("not indexable")
+ return list(set(item.name for item in self.list))
+
+ def __contains__(self, key):
+ """Dictionary style __contains__ method."""
+ if self.list is None:
+ raise TypeError("not indexable")
+ return any(item.name == key for item in self.list)
+
+ def __len__(self):
+ """Dictionary style len(x) support."""
+ return len(self.keys())
+
+ def __bool__(self):
+ if self.list is None:
+ raise TypeError("Cannot be converted to bool.")
+ return bool(self.list)
+
+ def read_urlencoded(self):
+ """Internal: read data in query string format."""
+ qs = self.fp.read(self.length)
+ if not isinstance(qs, bytes):
+ raise ValueError("%s should return bytes, got %s" \
+ % (self.fp, type(qs).__name__))
+ qs = qs.decode(self.encoding, self.errors)
+ if self.qs_on_post:
+ qs += '&' + self.qs_on_post
+ query = urllib.parse.parse_qsl(
+ qs, self.keep_blank_values, self.strict_parsing,
+ encoding=self.encoding, errors=self.errors,
+ max_num_fields=self.max_num_fields, separator=self.separator)
+ self.list = [MiniFieldStorage(key, value) for key, value in query]
+ self.skip_lines()
+
+ FieldStorageClass = None
+
+ def read_multi(self, environ, keep_blank_values, strict_parsing):
+ """Internal: read a part that is itself multipart."""
+ ib = self.innerboundary
+ if not valid_boundary(ib):
+ raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
+ self.list = []
+ if self.qs_on_post:
+ query = urllib.parse.parse_qsl(
+ self.qs_on_post, self.keep_blank_values, self.strict_parsing,
+ encoding=self.encoding, errors=self.errors,
+ max_num_fields=self.max_num_fields, separator=self.separator)
+ self.list.extend(MiniFieldStorage(key, value) for key, value in query)
+
+ klass = self.FieldStorageClass or self.__class__
+ first_line = self.fp.readline() # bytes
+ if not isinstance(first_line, bytes):
+ raise ValueError("%s should return bytes, got %s" \
+ % (self.fp, type(first_line).__name__))
+ self.bytes_read += len(first_line)
+
+ # Ensure that we consume the file until we've hit our inner boundary
+ while (first_line.strip() != (b"--" + self.innerboundary) and
+ first_line):
+ first_line = self.fp.readline()
+ self.bytes_read += len(first_line)
+
+ # Propagate max_num_fields into the sub class appropriately
+ max_num_fields = self.max_num_fields
+ if max_num_fields is not None:
+ max_num_fields -= len(self.list)
+
+ while True:
+ parser = FeedParser()
+ hdr_text = b""
+ while True:
+ data = self.fp.readline()
+ hdr_text += data
+ if not data.strip():
+ break
+ if not hdr_text:
+ break
+ # parser takes strings, not bytes
+ self.bytes_read += len(hdr_text)
+ parser.feed(hdr_text.decode(self.encoding, self.errors))
+ headers = parser.close()
+
+ # Some clients add Content-Length for part headers, ignore them
+ if 'content-length' in headers:
+ del headers['content-length']
+
+ limit = None if self.limit is None \
+ else self.limit - self.bytes_read
+ part = klass(self.fp, headers, ib, environ, keep_blank_values,
+ strict_parsing, limit,
+ self.encoding, self.errors, max_num_fields, self.separator)
+
+ if max_num_fields is not None:
+ max_num_fields -= 1
+ if part.list:
+ max_num_fields -= len(part.list)
+ if max_num_fields < 0:
+ raise ValueError('Max number of fields exceeded')
+
+ self.bytes_read += part.bytes_read
+ self.list.append(part)
+ if part.done or self.bytes_read >= self.length > 0:
+ break
+ self.skip_lines()
+
+ def read_single(self):
+ """Internal: read an atomic part."""
+ if self.length >= 0:
+ self.read_binary()
+ self.skip_lines()
+ else:
+ self.read_lines()
+ self.file.seek(0)
+
+ bufsize = 8*1024 # I/O buffering size for copy to file
+
+ def read_binary(self):
+ """Internal: read binary data."""
+ self.file = self.make_file()
+ todo = self.length
+ if todo >= 0:
+ while todo > 0:
+ data = self.fp.read(min(todo, self.bufsize)) # bytes
+ if not isinstance(data, bytes):
+ raise ValueError("%s should return bytes, got %s"
+ % (self.fp, type(data).__name__))
+ self.bytes_read += len(data)
+ if not data:
+ self.done = -1
+ break
+ self.file.write(data)
+ todo = todo - len(data)
+
+ def read_lines(self):
+ """Internal: read lines until EOF or outerboundary."""
+ if self._binary_file:
+ self.file = self.__file = BytesIO() # store data as bytes for files
+ else:
+ self.file = self.__file = StringIO() # as strings for other fields
+ if self.outerboundary:
+ self.read_lines_to_outerboundary()
+ else:
+ self.read_lines_to_eof()
+
+ def __write(self, line):
+ """line is always bytes, not string"""
+ if self.__file is not None:
+ if self.__file.tell() + len(line) > 1000:
+ self.file = self.make_file()
+ data = self.__file.getvalue()
+ self.file.write(data)
+ self.__file = None
+ if self._binary_file:
+ # keep bytes
+ self.file.write(line)
+ else:
+ # decode to string
+ self.file.write(line.decode(self.encoding, self.errors))
+
+ def read_lines_to_eof(self):
+ """Internal: read lines until EOF."""
+ while 1:
+ line = self.fp.readline(1<<16) # bytes
+ self.bytes_read += len(line)
+ if not line:
+ self.done = -1
+ break
+ self.__write(line)
+
+ def read_lines_to_outerboundary(self):
+ """Internal: read lines until outerboundary.
+ Data is read as bytes: boundaries and line ends must be converted
+ to bytes for comparisons.
+ """
+ next_boundary = b"--" + self.outerboundary
+ last_boundary = next_boundary + b"--"
+ delim = b""
+ last_line_lfend = True
+ _read = 0
+ while 1:
+
+ if self.limit is not None and 0 <= self.limit <= _read:
+ break
+ line = self.fp.readline(1<<16) # bytes
+ self.bytes_read += len(line)
+ _read += len(line)
+ if not line:
+ self.done = -1
+ break
+ if delim == b"\r":
+ line = delim + line
+ delim = b""
+ if line.startswith(b"--") and last_line_lfend:
+ strippedline = line.rstrip()
+ if strippedline == next_boundary:
+ break
+ if strippedline == last_boundary:
+ self.done = 1
+ break
+ odelim = delim
+ if line.endswith(b"\r\n"):
+ delim = b"\r\n"
+ line = line[:-2]
+ last_line_lfend = True
+ elif line.endswith(b"\n"):
+ delim = b"\n"
+ line = line[:-1]
+ last_line_lfend = True
+ elif line.endswith(b"\r"):
+ # We may interrupt \r\n sequences if they span the 2**16
+ # byte boundary
+ delim = b"\r"
+ line = line[:-1]
+ last_line_lfend = False
+ else:
+ delim = b""
+ last_line_lfend = False
+ self.__write(odelim + line)
+
+ def skip_lines(self):
+ """Internal: skip lines until outer boundary if defined."""
+ if not self.outerboundary or self.done:
+ return
+ next_boundary = b"--" + self.outerboundary
+ last_boundary = next_boundary + b"--"
+ last_line_lfend = True
+ while True:
+ line = self.fp.readline(1<<16)
+ self.bytes_read += len(line)
+ if not line:
+ self.done = -1
+ break
+ if line.endswith(b"--") and last_line_lfend:
+ strippedline = line.strip()
+ if strippedline == next_boundary:
+ break
+ if strippedline == last_boundary:
+ self.done = 1
+ break
+ last_line_lfend = line.endswith(b'\n')
+
+ def make_file(self):
+ """Overridable: return a readable & writable file.
+
+ The file will be used as follows:
+ - data is written to it
+ - seek(0)
+ - data is read from it
+
+ The file is opened in binary mode for files, in text mode
+ for other fields
+
+ This version opens a temporary file for reading and writing,
+ and immediately deletes (unlinks) it. The trick (on Unix!) is
+ that the file can still be used, but it can't be opened by
+ another process, and it will automatically be deleted when it
+ is closed or when the current process terminates.
+
+ If you want a more permanent file, you derive a class which
+ overrides this method. If you want a visible temporary file
+ that is nevertheless automatically deleted when the script
+ terminates, try defining a __del__ method in a derived class
+ which unlinks the temporary files you have created.
+
+ """
+ if self._binary_file:
+ return tempfile.TemporaryFile("wb+")
+ else:
+ return tempfile.TemporaryFile("w+",
+ encoding=self.encoding, newline = '\n')
+
+
+# Test/debug code
+# ===============
+
+def test(environ=os.environ):
+ """Robust test CGI script, usable as main program.
+
+ Write minimal HTTP headers and dump all information provided to
+ the script in HTML form.
+
+ """
+ print("Content-type: text/html")
+ print()
+ sys.stderr = sys.stdout
+ try:
+ form = FieldStorage() # Replace with other classes to test those
+ print_directory()
+ print_arguments()
+ print_form(form)
+ print_environ(environ)
+ print_environ_usage()
+ def f():
+            exec("testing print_exception() -- <i>italics?</i>")
+ def g(f=f):
+ f()
+        print("<h3>What follows is a test, not an actual exception:</h3>")
+        g()
+    except:
+        print_exception()
+
+def print_arguments():
+    print()
+    print("<h3>Command-line arguments:</h3>")
+    print()
+    print(sys.argv)
+
+def print_environ_usage():
+ """Dump a list of environment variables used by CGI as HTML."""
+    print("""
+<h3>These environment variables could have been set:</h3>
+<ul>
+<li>AUTH_TYPE
+<li>CONTENT_LENGTH
+<li>CONTENT_TYPE
+<li>DATE_GMT
+<li>DATE_LOCAL
+<li>DOCUMENT_NAME
+<li>DOCUMENT_ROOT
+<li>DOCUMENT_URI
+<li>GATEWAY_INTERFACE
+<li>LAST_MODIFIED
+<li>PATH
+<li>PATH_INFO
+<li>PATH_TRANSLATED
+<li>QUERY_STRING
+<li>REMOTE_ADDR
+<li>REMOTE_HOST
+<li>REMOTE_IDENT
+<li>REMOTE_USER
+<li>REQUEST_METHOD
+<li>SCRIPT_NAME
+<li>SERVER_NAME
+<li>SERVER_PORT
+<li>SERVER_PROTOCOL
+<li>SERVER_ROOT
+<li>SERVER_SOFTWARE
+</ul>
+In addition, HTTP headers sent by the server may be passed in the
+environment as well.  Here are some common variable names:
+<ul>
+<li>HTTP_ACCEPT
+<li>HTTP_CONNECTION
+<li>HTTP_HOST
+<li>HTTP_PRAGMA
+<li>HTTP_REFERER
+<li>HTTP_USER_AGENT
+</ul>
+""")
+
+
+# Utilities
+# =========
+
+def valid_boundary(s):
+ import re
+ if isinstance(s, bytes):
+ _vb_pattern = b"^[ -~]{0,200}[!-~]$"
+ else:
+ _vb_pattern = "^[ -~]{0,200}[!-~]$"
+ return re.match(_vb_pattern, s)
+
+# Invoke mainline
+# ===============
+
+# Call test() when this file is run as a script (not imported as a module)
+if __name__ == '__main__':
+ test()
diff --git a/evalkit_cambrian/lib/python3.10/cgitb.py b/evalkit_cambrian/lib/python3.10/cgitb.py
new file mode 100644
index 0000000000000000000000000000000000000000..17ddda376884dfb6ba2a1e7a315e333463644c25
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/cgitb.py
@@ -0,0 +1,321 @@
+"""More comprehensive traceback formatting for Python scripts.
+
+To enable this module, do:
+
+ import cgitb; cgitb.enable()
+
+at the top of your script. The optional arguments to enable() are:
+
+ display - if true, tracebacks are displayed in the web browser
+ logdir - if set, tracebacks are written to files in this directory
+ context - number of lines of source code to show for each stack frame
+ format - 'text' or 'html' controls the output format
+
+By default, tracebacks are displayed but not saved, the context is 5 lines
+and the output format is 'html' (for backwards compatibility with the
+original use of this module)
+
+Alternatively, if you have caught an exception and want cgitb to display it
+for you, call cgitb.handler(). The optional argument to handler() is a
+3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
+The default handler displays output as HTML.
+
+"""
+import inspect
+import keyword
+import linecache
+import os
+import pydoc
+import sys
+import tempfile
+import time
+import tokenize
+import traceback
+
+def reset():
+ """Return a string that resets the CGI and browser to a known state."""
+    return '''<!--: spam
+Content-Type: text/html
+
+<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
+<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
+</font> </font> </font> </script> </object> </blockquote> </pre>
+</table> </table> </table> </table> </table> </font> </font> </font>'''
+
+__UNDEF__ = [] # a special sentinel object
+def small(text):
+ if text:
+        return '<small>' + text + '</small>'
+ else:
+ return ''
+
+def strong(text):
+ if text:
+        return '<strong>' + text + '</strong>'
+ else:
+ return ''
+
+def grey(text):
+ if text:
+        return '<font color="#909090">' + text + '</font>'
+ else:
+ return ''
+
+def lookup(name, frame, locals):
+ """Find the value for a given name in the given environment."""
+ if name in locals:
+ return 'local', locals[name]
+ if name in frame.f_globals:
+ return 'global', frame.f_globals[name]
+ if '__builtins__' in frame.f_globals:
+ builtins = frame.f_globals['__builtins__']
+ if type(builtins) is type({}):
+ if name in builtins:
+ return 'builtin', builtins[name]
+ else:
+ if hasattr(builtins, name):
+ return 'builtin', getattr(builtins, name)
+ return None, __UNDEF__
+
+def scanvars(reader, frame, locals):
+ """Scan one logical line of Python and look up values of variables used."""
+ vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
+ for ttype, token, start, end, line in tokenize.generate_tokens(reader):
+ if ttype == tokenize.NEWLINE: break
+ if ttype == tokenize.NAME and token not in keyword.kwlist:
+ if lasttoken == '.':
+ if parent is not __UNDEF__:
+ value = getattr(parent, token, __UNDEF__)
+ vars.append((prefix + token, prefix, value))
+ else:
+ where, value = lookup(token, frame, locals)
+ vars.append((token, where, value))
+ elif token == '.':
+ prefix += lasttoken + '.'
+ parent = value
+ else:
+ parent, prefix = None, ''
+ lasttoken = token
+ return vars
+
+def html(einfo, context=5):
+ """Return a nice HTML document describing a given traceback."""
+ etype, evalue, etb = einfo
+ if isinstance(etype, type):
+ etype = etype.__name__
+ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+ date = time.ctime(time.time())
+    head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
+        '<big><big>%s</big></big>' %
+        strong(pydoc.html.escape(str(etype))),
+        '#ffffff', '#6622aa', pyver + '<br>' + date) + '''
+<p>A problem occurred in a Python script.  Here is the sequence of
+function calls leading up to the error, in the order they occurred.</p>'''
+        rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
+                ('<big>&nbsp;</big>', link, call)]
+        if index is not None:
+            i = lnum - index
+            for line in lines:
+                num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;'
+                if i in highlight:
+                    line = '<tt>=&gt;%s%s</tt>' % (num, pydoc.html.preformat(line))
+                    rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
+                else:
+                    line = '<tt>&nbsp;&nbsp;%s%s</tt>' % (num, pydoc.html.preformat(line))
+                    rows.append('<tr><td>%s</td></tr>' % grey(line))
+                i += 1
+
+ done, dump = {}, []
+ for name, where, value in vars:
+ if name in done: continue
+ done[name] = 1
+ if value is not __UNDEF__:
+ if where in ('global', 'builtin'):
+                    name = ('<em>%s</em> ' % where) + strong(name)
+ elif where == 'local':
+ name = strong(name)
+ else:
+ name = where + strong(name.split('.')[-1])
+                dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value)))
+ else:
+                dump.append(name + ' <em>undefined</em>')
+
+        rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
+            self.file.write('<p>A problem occurred in a Python script.\n')
+
+ if self.logdir is not None:
+ suffix = ['.txt', '.html'][self.format=="html"]
+ (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
+
+ try:
+ with os.fdopen(fd, 'w') as file:
+ file.write(doc)
+ msg = '%s contains the description of this error.' % path
+ except:
+ msg = 'Tried to save traceback to %s, but failed.' % path
+
+ if self.format == 'html':
+            self.file.write('<p>%s</p>\n' % msg)
+        return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
+
+    def grey(self, text): return '<font color="#909090">%s</font>' % text
+
+ def namelink(self, name, *dicts):
+ """Make a link for an identifier, given name-to-URL mappings."""
+ for dict in dicts:
+ if name in dict:
+                return '<a href="%s">%s</a>' % (dict[name], name)
+ return name
+
+ def classlink(self, object, modname):
+ """Make a link for a class."""
+ name, module = object.__name__, sys.modules.get(object.__module__)
+ if hasattr(module, name) and getattr(module, name) is object:
+            return '<a href="%s.html#%s">%s</a>' % (
+ module.__name__, name, classname(object, modname))
+ return classname(object, modname)
+
+ def modulelink(self, object):
+ """Make a link for a module."""
+        return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
+
+ def modpkglink(self, modpkginfo):
+ """Make a link for a module or package to display in an index."""
+ name, path, ispackage, shadowed = modpkginfo
+ if shadowed:
+ return self.grey(name)
+ if path:
+ url = '%s.%s.html' % (path, name)
+ else:
+ url = '%s.html' % name
+ if ispackage:
+            text = '<strong>%s</strong> (package)' % name
+ else:
+ text = name
+        return '<a href="%s">%s</a>' % (url, text)
+
+ def filelink(self, url, path):
+ """Make a link to source file."""
+        return '<a href="file:%s">%s</a>' % (url, path)
+
+ def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
+ """Mark up some plain text, given a context of symbols to look for.
+ Each context dictionary maps object names to anchor names."""
+ escape = escape or self.escape
+ results = []
+ here = 0
+ pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
+ r'RFC[- ]?(\d+)|'
+ r'PEP[- ]?(\d+)|'
+ r'(self\.)?(\w+))')
+ while True:
+ match = pattern.search(text, here)
+ if not match: break
+ start, end = match.span()
+ results.append(escape(text[here:start]))
+
+ all, scheme, rfc, pep, selfdot, name = match.groups()
+ if scheme:
+                url = escape(all).replace('"', '&quot;')
+                results.append('<a href="%s">%s</a>' % (url, url))
+ elif rfc:
+ url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+ elif pep:
+ url = 'https://www.python.org/dev/peps/pep-%04d/' % int(pep)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+ elif selfdot:
+ # Create a link for methods like 'self.method(...)'
+ # and use for attributes like 'self.attr'
+ if text[end:end+1] == '(':
+ results.append('self.' + self.namelink(name, methods))
+ else:
+                    results.append('self.<strong>%s</strong>' % name)
+ elif text[end:end+1] == '(':
+ results.append(self.namelink(name, methods, funcs, classes))
+ else:
+ results.append(self.namelink(name, classes))
+ here = end
+ results.append(escape(text[here:]))
+ return ''.join(results)
+
+ # ---------------------------------------------- type-specific routines
+
+ def formattree(self, tree, modname, parent=None):
+ """Produce HTML for a class tree as given by inspect.getclasstree()."""
+ result = ''
+ for entry in tree:
+ if type(entry) is type(()):
+ c, bases = entry
+                result = result + '<dt><font face="helvetica, arial">'
+ result = result + self.classlink(c, modname)
+ if bases and bases != (parent,):
+ parents = []
+ for base in bases:
+ parents.append(self.classlink(base, modname))
+ result = result + '(' + ', '.join(parents) + ')'
+                result = result + '\n</font></dt>'
+ elif type(entry) is type([]):
+                result = result + '<dd>\n%s</dd>\n' % self.formattree(
+                    entry, modname, c)
+        return '<dl>\n%s</dl>\n' % result
+
+ def docmodule(self, object, name=None, mod=None, *ignored):
+ """Produce HTML documentation for a module object."""
+ name = object.__name__ # ignore the passed-in name
+ try:
+ all = object.__all__
+ except AttributeError:
+ all = None
+ parts = name.split('.')
+ links = []
+ for i in range(len(parts)-1):
+ links.append(
+                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
+ ('.'.join(parts[:i+1]), parts[i]))
+ linkedname = '.'.join(links + parts[-1:])
+        head = '<big><big><strong>%s</strong></big></big>' % linkedname
+ try:
+ path = inspect.getabsfile(object)
+ url = urllib.parse.quote(path)
+ filelink = self.filelink(url, path)
+ except TypeError:
+ filelink = '(built-in)'
+ info = []
+ if hasattr(object, '__version__'):
+ version = str(object.__version__)
+ if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
+ version = version[11:-1].strip()
+ info.append('version %s' % self.escape(version))
+ if hasattr(object, '__date__'):
+ info.append(self.escape(str(object.__date__)))
+ if info:
+ head = head + ' (%s)' % ', '.join(info)
+ docloc = self.getdocloc(object)
+ if docloc is not None:
+            docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
+ else:
+ docloc = ''
+ result = self.heading(
+ head, '#ffffff', '#7799ee',
+ 'index ' + filelink + docloc)
+
+ modules = inspect.getmembers(object, inspect.ismodule)
+
+ classes, cdict = [], {}
+ for key, value in inspect.getmembers(object, _isclass):
+ # if __all__ exists, believe it. Otherwise use old heuristic.
+ if (all is not None or
+ (inspect.getmodule(value) or object) is object):
+ if visiblename(key, all, object):
+ classes.append((key, value))
+ cdict[key] = cdict[value] = '#' + key
+ for key, value in classes:
+ for base in value.__bases__:
+ key, modname = base.__name__, base.__module__
+ module = sys.modules.get(modname)
+ if modname != name and module and hasattr(module, key):
+ if getattr(module, key) is base:
+ if not key in cdict:
+ cdict[key] = cdict[base] = modname + '.html#' + key
+ funcs, fdict = [], {}
+ for key, value in inspect.getmembers(object, inspect.isroutine):
+ # if __all__ exists, believe it. Otherwise use old heuristic.
+ if (all is not None or
+ inspect.isbuiltin(value) or inspect.getmodule(value) is object):
+ if visiblename(key, all, object):
+ funcs.append((key, value))
+ fdict[key] = '#-' + key
+ if inspect.isfunction(value): fdict[value] = fdict[key]
+ data = []
+ for key, value in inspect.getmembers(object, isdata):
+ if visiblename(key, all, object):
+ data.append((key, value))
+
+ doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
+        doc = doc and '<tt>%s</tt>' % doc
+        result = result + '<p>%s</p>\n' % doc
+
+ if hasattr(object, '__path__'):
+ modpkgs = []
+ for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
+ modpkgs.append((modname, name, ispkg, 0))
+ modpkgs.sort()
+ contents = self.multicolumn(modpkgs, self.modpkglink)
+ result = result + self.bigsection(
+ 'Package Contents', '#ffffff', '#aa55cc', contents)
+ elif modules:
+ contents = self.multicolumn(
+ modules, lambda t: self.modulelink(t[1]))
+ result = result + self.bigsection(
+ 'Modules', '#ffffff', '#aa55cc', contents)
+
+ if classes:
+ classlist = [value for (key, value) in classes]
+ contents = [
+ self.formattree(inspect.getclasstree(classlist, 1), name)]
+ for key, value in classes:
+ contents.append(self.document(value, key, name, fdict, cdict))
+ result = result + self.bigsection(
+ 'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
+ if funcs:
+ contents = []
+ for key, value in funcs:
+ contents.append(self.document(value, key, name, fdict, cdict))
+ result = result + self.bigsection(
+ 'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
+ if data:
+ contents = []
+ for key, value in data:
+ contents.append(self.document(value, key))
+ result = result + self.bigsection(
+ 'Data', '#ffffff', '#55aa55', ' \n'.join(contents))
+ if hasattr(object, '__author__'):
+ contents = self.markup(str(object.__author__), self.preformat)
+ result = result + self.bigsection(
+ 'Author', '#ffffff', '#7799ee', contents)
+ if hasattr(object, '__credits__'):
+ contents = self.markup(str(object.__credits__), self.preformat)
+ result = result + self.bigsection(
+ 'Credits', '#ffffff', '#7799ee', contents)
+
+ return result
+
+ def docclass(self, object, name=None, mod=None, funcs={}, classes={},
+ *ignored):
+ """Produce HTML documentation for a class object."""
+ realname = object.__name__
+ name = name or realname
+ bases = object.__bases__
+
+ contents = []
+ push = contents.append
+
+ # Cute little class to pump out a horizontal rule between sections.
+ class HorizontalRule:
+ def __init__(self):
+ self.needone = 0
+ def maybe(self):
+ if self.needone:
+                    push('<hr>\n')
+ self.needone = 1
+ hr = HorizontalRule()
+
+ # List the mro, if non-trivial.
+ mro = deque(inspect.getmro(object))
+ if len(mro) > 2:
+ hr.maybe()
+            push('<dl><dt>Method resolution order:</dt>\n')
+        contents = '<pre>%s</pre>' % html.markup(contents)
+ contents = html.bigsection(topic , '#ffffff','#ee77aa', contents)
+ if xrefs:
+ xrefs = sorted(xrefs.split())
+
+ def bltinlink(name):
+                return '<a href="topic?key=%s">%s</a>' % (name, name)
+
+ xrefs = html.multicolumn(xrefs, bltinlink)
+ xrefs = html.section('Related help topics: ',
+ '#ffffff', '#ee77aa', xrefs)
+ return ('%s %s' % (title, topic),
+ ''.join((heading, contents, xrefs)))
+
+ def html_getobj(url):
+ obj = locate(url, forceload=1)
+ if obj is None and url != 'None':
+ raise ValueError('could not find object')
+ title = describe(obj)
+ content = html.document(obj, url)
+ return title, content
+
+ def html_error(url, exc):
+ heading = html.heading(
+            '<big><big><strong>Error</strong></big></big>',
+ '#ffffff', '#7799ee')
+        contents = '<br>'.join(html.escape(line) for line in
+                               format_exception_only(type(exc), exc))
+ contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
+ contents)
+ return "Error - %s" % url, contents
+
+ def get_html_page(url):
+ """Generate an HTML page for url."""
+ complete_url = url
+ if url.endswith('.html'):
+ url = url[:-5]
+ try:
+ if url in ("", "index"):
+ title, content = html_index()
+ elif url == "topics":
+ title, content = html_topics()
+ elif url == "keywords":
+ title, content = html_keywords()
+ elif '=' in url:
+ op, _, url = url.partition('=')
+ if op == "search?key":
+ title, content = html_search(url)
+ elif op == "topic?key":
+ # try topics first, then objects.
+ try:
+ title, content = html_topicpage(url)
+ except ValueError:
+ title, content = html_getobj(url)
+ elif op == "get?key":
+ # try objects first, then topics.
+ if url in ("", "index"):
+ title, content = html_index()
+ else:
+ try:
+ title, content = html_getobj(url)
+ except ValueError:
+ title, content = html_topicpage(url)
+ else:
+ raise ValueError('bad pydoc url')
+ else:
+ title, content = html_getobj(url)
+ except Exception as exc:
+ # Catch any errors and display them in an error page.
+ title, content = html_error(complete_url, exc)
+ return html.page(title, content)
+
+ if url.startswith('/'):
+ url = url[1:]
+ if content_type == 'text/css':
+ path_here = os.path.dirname(os.path.realpath(__file__))
+ css_path = os.path.join(path_here, url)
+ with open(css_path) as fp:
+ return ''.join(fp.readlines())
+ elif content_type == 'text/html':
+ return get_html_page(url)
+ # Errors outside the url handler are caught by the server.
+ raise TypeError('unknown content type %r for url %s' % (content_type, url))
+
+
+def browse(port=0, *, open_browser=True, hostname='localhost'):
+ """Start the enhanced pydoc web server and open a web browser.
+
+ Use port '0' to start the server on an arbitrary port.
+ Set open_browser to False to suppress opening a browser.
+ """
+ import webbrowser
+ serverthread = _start_server(_url_handler, hostname, port)
+ if serverthread.error:
+ print(serverthread.error)
+ return
+ if serverthread.serving:
+ server_help_msg = 'Server commands: [b]rowser, [q]uit'
+ if open_browser:
+ webbrowser.open(serverthread.url)
+ try:
+ print('Server ready at', serverthread.url)
+ print(server_help_msg)
+ while serverthread.serving:
+ cmd = input('server> ')
+ cmd = cmd.lower()
+ if cmd == 'q':
+ break
+ elif cmd == 'b':
+ webbrowser.open(serverthread.url)
+ else:
+ print(server_help_msg)
+ except (KeyboardInterrupt, EOFError):
+ print()
+ finally:
+ if serverthread.serving:
+ serverthread.stop()
+ print('Server stopped')
+
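+# browse() backs the -b/-n/-p flags handled in cli() below. A direct call
+# such as the following is roughly equivalent to 'python -m pydoc -p 0'
+# (illustrative only):
+#
+#     browse(port=0, open_browser=False)   # then type 'q' at the prompt
+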
+
+# -------------------------------------------------- command-line interface
+
+def ispath(x):
+ return isinstance(x, str) and x.find(os.sep) >= 0
+
+def _get_revised_path(given_path, argv0):
+ """Ensures current directory is on returned path, and argv0 directory is not
+
+ Exception: argv0 dir is left alone if it's also pydoc's directory.
+
+ Returns a new path entry list, or None if no adjustment is needed.
+ """
+ # Scripts may get the current directory in their path by default if they're
+ # run with the -m switch, or directly from the current directory.
+ # The interactive prompt also allows imports from the current directory.
+
+ # Accordingly, if the current directory is already present, don't make
+ # any changes to the given_path
+ if '' in given_path or os.curdir in given_path or os.getcwd() in given_path:
+ return None
+
+ # Otherwise, add the current directory to the given path, and remove the
+    # script directory (as long as the latter isn't also pydoc's directory).
+ stdlib_dir = os.path.dirname(__file__)
+ script_dir = os.path.dirname(argv0)
+ revised_path = given_path.copy()
+ if script_dir in given_path and not os.path.samefile(script_dir, stdlib_dir):
+ revised_path.remove(script_dir)
+ revised_path.insert(0, os.getcwd())
+ return revised_path
+
+
+# Note: the tests only cover _get_revised_path, not _adjust_cli_path itself
+def _adjust_cli_sys_path():
+ """Ensures current directory is on sys.path, and __main__ directory is not.
+
+ Exception: __main__ dir is left alone if it's also pydoc's directory.
+ """
+ revised_path = _get_revised_path(sys.path, sys.argv[0])
+ if revised_path is not None:
+ sys.path[:] = revised_path
+
+
+def cli():
+ """Command-line interface (looks at sys.argv to decide what to do)."""
+ import getopt
+ class BadUsage(Exception): pass
+
+ _adjust_cli_sys_path()
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'bk:n:p:w')
+ writing = False
+ start_server = False
+ open_browser = False
+ port = 0
+ hostname = 'localhost'
+ for opt, val in opts:
+ if opt == '-b':
+ start_server = True
+ open_browser = True
+ if opt == '-k':
+ apropos(val)
+ return
+ if opt == '-p':
+ start_server = True
+ port = val
+ if opt == '-w':
+ writing = True
+ if opt == '-n':
+ start_server = True
+ hostname = val
+
+ if start_server:
+ browse(port, hostname=hostname, open_browser=open_browser)
+ return
+
+ if not args: raise BadUsage
+ for arg in args:
+ if ispath(arg) and not os.path.exists(arg):
+ print('file %r does not exist' % arg)
+ break
+ try:
+ if ispath(arg) and os.path.isfile(arg):
+ arg = importfile(arg)
+ if writing:
+ if ispath(arg) and os.path.isdir(arg):
+ writedocs(arg)
+ else:
+ writedoc(arg)
+ else:
+ help.help(arg)
+ except ErrorDuringImport as value:
+ print(value)
+
+ except (getopt.error, BadUsage):
+ cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
+ print("""pydoc - the Python documentation tool
+
+{cmd} <name> ...
+    Show text documentation on something.  <name> may be the name of a
+    Python keyword, topic, function, module, or package, or a dotted
+    reference to a class or function within a module or module in a
+    package.  If <name> contains a '{sep}', it is used as the path to a
+    Python source file to document. If name is 'keywords', 'topics',
+    or 'modules', a listing of these things is displayed.
+
+{cmd} -k <keyword>
+    Search for a keyword in the synopsis lines of all available modules.
+
+{cmd} -n <hostname>
+    Start an HTTP server with the given hostname (default: localhost).
+
+{cmd} -p <port>
+    Start an HTTP server on the given port on the local machine.  Port
+    number 0 can be used to get an arbitrary unused port.
+
+{cmd} -b
+    Start an HTTP server on an arbitrary unused port and open a web browser
+    to interactively browse documentation.  This option can be used in
+    combination with -n and/or -p.
+
+{cmd} -w <name> ...
+    Write out the HTML documentation for a module to a file in the current
+    directory.  If <name> contains a '{sep}', it is treated as a filename; if
+    it names a directory, documentation is written for all the contents.
+""".format(cmd=cmd, sep=os.sep))
+
+if __name__ == '__main__':
+ cli()
diff --git a/evalkit_cambrian/lib/python3.10/random.py b/evalkit_cambrian/lib/python3.10/random.py
new file mode 100644
index 0000000000000000000000000000000000000000..1310a2d9d0e07104ee8b67a0efc2e004bfe62277
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/random.py
@@ -0,0 +1,930 @@
+"""Random variable generators.
+
+ bytes
+ -----
+ uniform bytes (values between 0 and 255)
+
+ integers
+ --------
+ uniform within range
+
+ sequences
+ ---------
+ pick random element
+ pick random sample
+ pick weighted random sample
+ generate random permutation
+
+ distributions on the real line:
+ ------------------------------
+ uniform
+ triangular
+ normal (Gaussian)
+ lognormal
+ negative exponential
+ gamma
+ beta
+ pareto
+ Weibull
+
+ distributions on the circle (angles 0 to 2pi)
+ ---------------------------------------------
+ circular uniform
+ von Mises
+
+General notes on the underlying Mersenne Twister core generator:
+
+* The period is 2**19937-1.
+* It is one of the most extensively tested generators in existence.
+* The random() method is implemented in C, executes in a single Python step,
+ and is, therefore, threadsafe.
+
+"""
+
+# Translated by Guido van Rossum from C source provided by
+# Adrian Baddeley. Adapted by Raymond Hettinger for use with
+# the Mersenne Twister and os.urandom() core generators.
+
+from warnings import warn as _warn
+from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
+from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
+from math import tau as TWOPI, floor as _floor, isfinite as _isfinite
+from os import urandom as _urandom
+from _collections_abc import Set as _Set, Sequence as _Sequence
+from operator import index as _index
+from itertools import accumulate as _accumulate, repeat as _repeat
+from bisect import bisect as _bisect
+import os as _os
+import _random
+
+try:
+ # hashlib is pretty heavy to load, try lean internal module first
+ from _sha512 import sha512 as _sha512
+except ImportError:
+ # fallback to official implementation
+ from hashlib import sha512 as _sha512
+
+__all__ = [
+ "Random",
+ "SystemRandom",
+ "betavariate",
+ "choice",
+ "choices",
+ "expovariate",
+ "gammavariate",
+ "gauss",
+ "getrandbits",
+ "getstate",
+ "lognormvariate",
+ "normalvariate",
+ "paretovariate",
+ "randbytes",
+ "randint",
+ "random",
+ "randrange",
+ "sample",
+ "seed",
+ "setstate",
+ "shuffle",
+ "triangular",
+ "uniform",
+ "vonmisesvariate",
+ "weibullvariate",
+]
+
+NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0)
+LOG4 = _log(4.0)
+SG_MAGICCONST = 1.0 + _log(4.5)
+BPF = 53 # Number of bits in a float
+RECIP_BPF = 2 ** -BPF
+_ONE = 1
+
+
+class Random(_random.Random):
+ """Random number generator base class used by bound module functions.
+
+ Used to instantiate instances of Random to get generators that don't
+ share state.
+
+ Class Random can also be subclassed if you want to use a different basic
+ generator of your own devising: in that case, override the following
+ methods: random(), seed(), getstate(), and setstate().
+ Optionally, implement a getrandbits() method so that randrange()
+ can cover arbitrarily large ranges.
+
+ """
+
+ VERSION = 3 # used by getstate/setstate
+
+ def __init__(self, x=None):
+ """Initialize an instance.
+
+ Optional argument x controls seeding, as for Random.seed().
+ """
+
+ self.seed(x)
+ self.gauss_next = None
+
+ def seed(self, a=None, version=2):
+ """Initialize internal state from a seed.
+
+ The only supported seed types are None, int, float,
+ str, bytes, and bytearray.
+
+ None or no argument seeds from current time or from an operating
+ system specific randomness source if available.
+
+ If *a* is an int, all bits are used.
+
+ For version 2 (the default), all of the bits are used if *a* is a str,
+ bytes, or bytearray. For version 1 (provided for reproducing random
+ sequences from older versions of Python), the algorithm for str and
+ bytes generates a narrower range of seeds.
+
+ """
+
+ if version == 1 and isinstance(a, (str, bytes)):
+ a = a.decode('latin-1') if isinstance(a, bytes) else a
+ x = ord(a[0]) << 7 if a else 0
+ for c in map(ord, a):
+ x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF
+ x ^= len(a)
+ a = -2 if x == -1 else x
+
+ elif version == 2 and isinstance(a, (str, bytes, bytearray)):
+ if isinstance(a, str):
+ a = a.encode()
+ a = int.from_bytes(a + _sha512(a).digest(), 'big')
+
+ elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)):
+ _warn('Seeding based on hashing is deprecated\n'
+ 'since Python 3.9 and will be removed in a subsequent '
+ 'version. The only \n'
+ 'supported seed types are: None, '
+ 'int, float, str, bytes, and bytearray.',
+ DeprecationWarning, 2)
+
+ super().seed(a)
+ self.gauss_next = None
+
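+    # Editorial sketch (not part of the module): equal seeds give identical
+    # streams, which is the basis for reproducible simulations, e.g.
+    #
+    #   >>> Random(42).random() == Random(42).random()
+    #   True
+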
+ def getstate(self):
+ """Return internal state; can be passed to setstate() later."""
+ return self.VERSION, super().getstate(), self.gauss_next
+
+ def setstate(self, state):
+ """Restore internal state from object returned by getstate()."""
+ version = state[0]
+ if version == 3:
+ version, internalstate, self.gauss_next = state
+ super().setstate(internalstate)
+ elif version == 2:
+ version, internalstate, self.gauss_next = state
+ # In version 2, the state was saved as signed ints, which causes
+ # inconsistencies between 32/64-bit systems. The state is
+ # really unsigned 32-bit ints, so we convert negative ints from
+ # version 2 to positive longs for version 3.
+ try:
+ internalstate = tuple(x % (2 ** 32) for x in internalstate)
+ except ValueError as e:
+ raise TypeError from e
+ super().setstate(internalstate)
+ else:
+ raise ValueError("state with version %s passed to "
+ "Random.setstate() of version %s" %
+ (version, self.VERSION))
+
+
+ ## -------------------------------------------------------
+ ## ---- Methods below this point do not need to be overridden or extended
+ ## ---- when subclassing for the purpose of using a different core generator.
+
+
+ ## -------------------- pickle support -------------------
+
+ # Issue 17489: Since __reduce__ was defined to fix #759889 this is no
+ # longer called; we leave it here because it has been here since random was
+ # rewritten back in 2001 and why risk breaking something.
+ def __getstate__(self): # for pickle
+ return self.getstate()
+
+ def __setstate__(self, state): # for pickle
+ self.setstate(state)
+
+ def __reduce__(self):
+ return self.__class__, (), self.getstate()
+
+
+ ## ---- internal support method for evenly distributed integers ----
+
+ def __init_subclass__(cls, /, **kwargs):
+ """Control how subclasses generate random integers.
+
+ The algorithm a subclass can use depends on the random() and/or
+ getrandbits() implementation available to it and determines
+ whether it can generate random integers from arbitrarily large
+ ranges.
+ """
+
+ for c in cls.__mro__:
+ if '_randbelow' in c.__dict__:
+ # just inherit it
+ break
+ if 'getrandbits' in c.__dict__:
+ cls._randbelow = cls._randbelow_with_getrandbits
+ break
+ if 'random' in c.__dict__:
+ cls._randbelow = cls._randbelow_without_getrandbits
+ break
+
+ def _randbelow_with_getrandbits(self, n):
+ "Return a random int in the range [0,n). Returns 0 if n==0."
+
+ if not n:
+ return 0
+ getrandbits = self.getrandbits
+ k = n.bit_length() # don't use (n-1) here because n can be 1
+ r = getrandbits(k) # 0 <= r < 2**k
+ while r >= n:
+ r = getrandbits(k)
+ return r
+
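+    # Editorial sketch of the getrandbits-based method above (not part of
+    # the module): for n == 6 we have k == n.bit_length() == 3, so
+    # getrandbits(3) yields 0..7 and the values 6 and 7 are rejected and
+    # redrawn.  Because n > 2**(k-1) always holds, the acceptance
+    # probability exceeds 1/2 and the expected number of draws is below 2.
+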
+    def _randbelow_without_getrandbits(self, n, maxsize=1<<BPF):
+        """Return a random int in the range [0,n). Returns 0 if n==0.
+
+        The implementation does not use getrandbits, but only random.
+        """
+
+        random = self.random
+        if n >= maxsize:
+ _warn("Underlying random() generator does not supply \n"
+ "enough bits to choose from a population range this large.\n"
+ "To remove the range limitation, add a getrandbits() method.")
+ return _floor(random() * n)
+ if n == 0:
+ return 0
+ rem = maxsize % n
+ limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
+ r = random()
+ while r >= limit:
+ r = random()
+ return _floor(r * maxsize) % n
+
+ _randbelow = _randbelow_with_getrandbits
+
+
+ ## --------------------------------------------------------
+ ## ---- Methods below this point generate custom distributions
+ ## ---- based on the methods defined above. They do not
+ ## ---- directly touch the underlying generator and only
+ ## ---- access randomness through the methods: random(),
+ ## ---- getrandbits(), or _randbelow().
+
+
+ ## -------------------- bytes methods ---------------------
+
+ def randbytes(self, n):
+ """Generate n random bytes."""
+ return self.getrandbits(n * 8).to_bytes(n, 'little')
+
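+    # Editorial sketch: randbytes(3) draws 24 bits and packs them
+    # little-endian, e.g. (0x123456).to_bytes(3, 'little') == b'\x56\x34\x12'.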
+
+ ## -------------------- integer methods -------------------
+
+ def randrange(self, start, stop=None, step=_ONE):
+ """Choose a random item from range(start, stop[, step]).
+
+ This fixes the problem with randint() which includes the
+ endpoint; in Python this is usually not what you want.
+
+ """
+
+ # This code is a bit messy to make it fast for the
+ # common case while still doing adequate error checking.
+ try:
+ istart = _index(start)
+ except TypeError:
+ istart = int(start)
+ if istart != start:
+ _warn('randrange() will raise TypeError in the future',
+ DeprecationWarning, 2)
+ raise ValueError("non-integer arg 1 for randrange()")
+ _warn('non-integer arguments to randrange() have been deprecated '
+ 'since Python 3.10 and will be removed in a subsequent '
+ 'version',
+ DeprecationWarning, 2)
+ if stop is None:
+ # We don't check for "step != 1" because it hasn't been
+ # type checked and converted to an integer yet.
+ if step is not _ONE:
+ raise TypeError('Missing a non-None stop argument')
+ if istart > 0:
+ return self._randbelow(istart)
+ raise ValueError("empty range for randrange()")
+
+ # stop argument supplied.
+ try:
+ istop = _index(stop)
+ except TypeError:
+ istop = int(stop)
+ if istop != stop:
+ _warn('randrange() will raise TypeError in the future',
+ DeprecationWarning, 2)
+ raise ValueError("non-integer stop for randrange()")
+ _warn('non-integer arguments to randrange() have been deprecated '
+ 'since Python 3.10 and will be removed in a subsequent '
+ 'version',
+ DeprecationWarning, 2)
+ width = istop - istart
+ try:
+ istep = _index(step)
+ except TypeError:
+ istep = int(step)
+ if istep != step:
+ _warn('randrange() will raise TypeError in the future',
+ DeprecationWarning, 2)
+ raise ValueError("non-integer step for randrange()")
+ _warn('non-integer arguments to randrange() have been deprecated '
+ 'since Python 3.10 and will be removed in a subsequent '
+ 'version',
+ DeprecationWarning, 2)
+ # Fast path.
+ if istep == 1:
+ if width > 0:
+ return istart + self._randbelow(width)
+ raise ValueError("empty range for randrange() (%d, %d, %d)" % (istart, istop, width))
+
+ # Non-unit step argument supplied.
+ if istep > 0:
+ n = (width + istep - 1) // istep
+ elif istep < 0:
+ n = (width + istep + 1) // istep
+ else:
+ raise ValueError("zero step for randrange()")
+ if n <= 0:
+ raise ValueError("empty range for randrange()")
+ return istart + istep * self._randbelow(n)
+
+ def randint(self, a, b):
+ """Return random integer in range [a, b], including both end points.
+ """
+
+ return self.randrange(a, b+1)
+
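+    # Editorial sketch of the integer methods: randrange(10) -> 0..9;
+    # randrange(0, 101, 2) -> an even int in 0..100; randint(1, 6) -> 1..6,
+    # with both endpoints included.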
+
+ ## -------------------- sequence methods -------------------
+
+ def choice(self, seq):
+ """Choose a random element from a non-empty sequence."""
+ # raises IndexError if seq is empty
+ return seq[self._randbelow(len(seq))]
+
+ def shuffle(self, x, random=None):
+ """Shuffle list x in place, and return None.
+
+ Optional argument random is a 0-argument function returning a
+ random float in [0.0, 1.0); if it is the default None, the
+ standard random.random will be used.
+
+ """
+
+ if random is None:
+ randbelow = self._randbelow
+ for i in reversed(range(1, len(x))):
+ # pick an element in x[:i+1] with which to exchange x[i]
+ j = randbelow(i + 1)
+ x[i], x[j] = x[j], x[i]
+ else:
+ _warn('The *random* parameter to shuffle() has been deprecated\n'
+ 'since Python 3.9 and will be removed in a subsequent '
+ 'version.',
+ DeprecationWarning, 2)
+ floor = _floor
+ for i in reversed(range(1, len(x))):
+ # pick an element in x[:i+1] with which to exchange x[i]
+ j = floor(random() * (i + 1))
+ x[i], x[j] = x[j], x[i]
+
+ def sample(self, population, k, *, counts=None):
+ """Chooses k unique random elements from a population sequence or set.
+
+ Returns a new list containing elements from the population while
+ leaving the original population unchanged. The resulting list is
+ in selection order so that all sub-slices will also be valid random
+ samples. This allows raffle winners (the sample) to be partitioned
+ into grand prize and second place winners (the subslices).
+
+ Members of the population need not be hashable or unique. If the
+ population contains repeats, then each occurrence is a possible
+ selection in the sample.
+
+ Repeated elements can be specified one at a time or with the optional
+ counts parameter. For example:
+
+ sample(['red', 'blue'], counts=[4, 2], k=5)
+
+ is equivalent to:
+
+ sample(['red', 'red', 'red', 'red', 'blue', 'blue'], k=5)
+
+ To choose a sample from a range of integers, use range() for the
+ population argument. This is especially fast and space efficient
+ for sampling from a large population:
+
+ sample(range(10000000), 60)
+
+ """
+
+ # Sampling without replacement entails tracking either potential
+ # selections (the pool) in a list or previous selections in a set.
+
+ # When the number of selections is small compared to the
+ # population, then tracking selections is efficient, requiring
+ # only a small set and an occasional reselection. For
+ # a larger number of selections, the pool tracking method is
+ # preferred since the list takes less space than the
+ # set and it doesn't suffer from frequent reselections.
+
+ # The number of calls to _randbelow() is kept at or near k, the
+ # theoretical minimum. This is important because running time
+ # is dominated by _randbelow() and because it extracts the
+ # least entropy from the underlying random number generators.
+
+ # Memory requirements are kept to the smaller of a k-length
+ # set or an n-length list.
+
+ # There are other sampling algorithms that do not require
+ # auxiliary memory, but they were rejected because they made
+ # too many calls to _randbelow(), making them slower and
+ # causing them to eat more entropy than necessary.
+
+ if not isinstance(population, _Sequence):
+ if isinstance(population, _Set):
+ _warn('Sampling from a set deprecated\n'
+ 'since Python 3.9 and will be removed in a subsequent version.',
+ DeprecationWarning, 2)
+ population = tuple(population)
+ else:
+ raise TypeError("Population must be a sequence. For dicts or sets, use sorted(d).")
+ n = len(population)
+ if counts is not None:
+ cum_counts = list(_accumulate(counts))
+ if len(cum_counts) != n:
+ raise ValueError('The number of counts does not match the population')
+ total = cum_counts.pop()
+ if not isinstance(total, int):
+ raise TypeError('Counts must be integers')
+ if total <= 0:
+ raise ValueError('Total of counts must be greater than zero')
+ selections = self.sample(range(total), k=k)
+ bisect = _bisect
+ return [population[bisect(cum_counts, s)] for s in selections]
+ randbelow = self._randbelow
+ if not 0 <= k <= n:
+ raise ValueError("Sample larger than population or is negative")
+ result = [None] * k
+ setsize = 21 # size of a small set minus size of an empty list
+ if k > 5:
+ setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
+ if n <= setsize:
+ # An n-length list is smaller than a k-length set.
+ # Invariant: non-selected at pool[0 : n-i]
+ pool = list(population)
+ for i in range(k):
+ j = randbelow(n - i)
+ result[i] = pool[j]
+ pool[j] = pool[n - i - 1] # move non-selected item into vacancy
+ else:
+ selected = set()
+ selected_add = selected.add
+ for i in range(k):
+ j = randbelow(n)
+ while j in selected:
+ j = randbelow(n)
+ selected_add(j)
+ result[i] = population[j]
+ return result
+
+ def choices(self, population, weights=None, *, cum_weights=None, k=1):
+ """Return a k sized list of population elements chosen with replacement.
+
+ If the relative weights or cumulative weights are not specified,
+ the selections are made with equal probability.
+
+ """
+ random = self.random
+ n = len(population)
+ if cum_weights is None:
+ if weights is None:
+ floor = _floor
+ n += 0.0 # convert to float for a small speed improvement
+ return [population[floor(random() * n)] for i in _repeat(None, k)]
+ try:
+ cum_weights = list(_accumulate(weights))
+ except TypeError:
+ if not isinstance(weights, int):
+ raise
+ k = weights
+ raise TypeError(
+ f'The number of choices must be a keyword argument: {k=}'
+ ) from None
+ elif weights is not None:
+ raise TypeError('Cannot specify both weights and cumulative weights')
+ if len(cum_weights) != n:
+ raise ValueError('The number of weights does not match the population')
+ total = cum_weights[-1] + 0.0 # convert to float
+ if total <= 0.0:
+ raise ValueError('Total of weights must be greater than zero')
+ if not _isfinite(total):
+ raise ValueError('Total of weights must be finite')
+ bisect = _bisect
+ hi = n - 1
+ return [population[bisect(cum_weights, random() * total, 0, hi)]
+ for i in _repeat(None, k)]
+
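+    # Editorial sketch: weights are relative, not normalized, so e.g.
+    #
+    #   >>> choices(['red', 'black', 'green'], [18, 18, 2], k=6)
+    #
+    # draws six values where each draw picks 'green' with probability 2/38.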
+
+ ## -------------------- real-valued distributions -------------------
+
+ def uniform(self, a, b):
+ "Get a random number in the range [a, b) or [a, b] depending on rounding."
+ return a + (b - a) * self.random()
+
+ def triangular(self, low=0.0, high=1.0, mode=None):
+ """Triangular distribution.
+
+ Continuous distribution bounded by given lower and upper limits,
+ and having a given mode value in-between.
+
+ http://en.wikipedia.org/wiki/Triangular_distribution
+
+ """
+ u = self.random()
+ try:
+ c = 0.5 if mode is None else (mode - low) / (high - low)
+ except ZeroDivisionError:
+ return low
+ if u > c:
+ u = 1.0 - u
+ c = 1.0 - c
+ low, high = high, low
+ return low + (high - low) * _sqrt(u * c)
+
+ def normalvariate(self, mu, sigma):
+ """Normal distribution.
+
+ mu is the mean, and sigma is the standard deviation.
+
+ """
+ # Uses Kinderman and Monahan method. Reference: Kinderman,
+ # A.J. and Monahan, J.F., "Computer generation of random
+ # variables using the ratio of uniform deviates", ACM Trans
+ # Math Software, 3, (1977), pp257-260.
+
+ random = self.random
+ while True:
+ u1 = random()
+ u2 = 1.0 - random()
+ z = NV_MAGICCONST * (u1 - 0.5) / u2
+ zz = z * z / 4.0
+ if zz <= -_log(u2):
+ break
+ return mu + z * sigma
+
+ def gauss(self, mu, sigma):
+ """Gaussian distribution.
+
+ mu is the mean, and sigma is the standard deviation. This is
+ slightly faster than the normalvariate() function.
+
+ Not thread-safe without a lock around calls.
+
+ """
+ # When x and y are two variables from [0, 1), uniformly
+ # distributed, then
+ #
+ # cos(2*pi*x)*sqrt(-2*log(1-y))
+ # sin(2*pi*x)*sqrt(-2*log(1-y))
+ #
+ # are two *independent* variables with normal distribution
+ # (mu = 0, sigma = 1).
+ # (Lambert Meertens)
+ # (corrected version; bug discovered by Mike Miller, fixed by LM)
+
+ # Multithreading note: When two threads call this function
+ # simultaneously, it is possible that they will receive the
+ # same return value. The window is very small though. To
+ # avoid this, you have to use a lock around all calls. (I
+ # didn't want to slow this down in the serial case by using a
+ # lock here.)
+
+ random = self.random
+ z = self.gauss_next
+ self.gauss_next = None
+ if z is None:
+ x2pi = random() * TWOPI
+ g2rad = _sqrt(-2.0 * _log(1.0 - random()))
+ z = _cos(x2pi) * g2rad
+ self.gauss_next = _sin(x2pi) * g2rad
+
+ return mu + z * sigma
+
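+    # Editorial note on gauss() above: one Box-Muller step yields two
+    # independent deviates; the method returns cos(x2pi)*g2rad immediately
+    # and caches sin(x2pi)*g2rad in self.gauss_next, so every second call
+    # skips the log/sqrt work entirely.
+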
+ def lognormvariate(self, mu, sigma):
+ """Log normal distribution.
+
+ If you take the natural logarithm of this distribution, you'll get a
+ normal distribution with mean mu and standard deviation sigma.
+ mu can have any value, and sigma must be greater than zero.
+
+ """
+ return _exp(self.normalvariate(mu, sigma))
+
+ def expovariate(self, lambd):
+ """Exponential distribution.
+
+ lambd is 1.0 divided by the desired mean. It should be
+ nonzero. (The parameter would be called "lambda", but that is
+ a reserved word in Python.) Returned values range from 0 to
+ positive infinity if lambd is positive, and from negative
+ infinity to 0 if lambd is negative.
+
+ """
+ # lambd: rate lambd = 1/mean
+ # ('lambda' is a Python reserved word)
+
+ # we use 1-random() instead of random() to preclude the
+ # possibility of taking the log of zero.
+ return -_log(1.0 - self.random()) / lambd
+
+ def vonmisesvariate(self, mu, kappa):
+ """Circular data distribution.
+
+ mu is the mean angle, expressed in radians between 0 and 2*pi, and
+ kappa is the concentration parameter, which must be greater than or
+ equal to zero. If kappa is equal to zero, this distribution reduces
+ to a uniform random angle over the range 0 to 2*pi.
+
+ """
+ # Based upon an algorithm published in: Fisher, N.I.,
+ # "Statistical Analysis of Circular Data", Cambridge
+ # University Press, 1993.
+
+ # Thanks to Magnus Kessler for a correction to the
+ # implementation of step 4.
+
+ random = self.random
+ if kappa <= 1e-6:
+ return TWOPI * random()
+
+ s = 0.5 / kappa
+ r = s + _sqrt(1.0 + s * s)
+
+ while True:
+ u1 = random()
+ z = _cos(_pi * u1)
+
+ d = z / (r + z)
+ u2 = random()
+ if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
+ break
+
+ q = 1.0 / r
+ f = (q + z) / (1.0 + q * z)
+ u3 = random()
+ if u3 > 0.5:
+ theta = (mu + _acos(f)) % TWOPI
+ else:
+ theta = (mu - _acos(f)) % TWOPI
+
+ return theta
+
+ def gammavariate(self, alpha, beta):
+ """Gamma distribution. Not the gamma function!
+
+ Conditions on the parameters are alpha > 0 and beta > 0.
+
+ The probability distribution function is:
+
+ x ** (alpha - 1) * math.exp(-x / beta)
+ pdf(x) = --------------------------------------
+ math.gamma(alpha) * beta ** alpha
+
+ """
+ # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
+
+ # Warning: a few older sources define the gamma distribution in terms
+ # of alpha > -1.0
+ if alpha <= 0.0 or beta <= 0.0:
+ raise ValueError('gammavariate: alpha and beta must be > 0.0')
+
+ random = self.random
+ if alpha > 1.0:
+
+ # Uses R.C.H. Cheng, "The generation of Gamma
+ # variables with non-integral shape parameters",
+ # Applied Statistics, (1977), 26, No. 1, p71-74
+
+ ainv = _sqrt(2.0 * alpha - 1.0)
+ bbb = alpha - LOG4
+ ccc = alpha + ainv
+
+ while True:
+ u1 = random()
+ if not 1e-7 < u1 < 0.9999999:
+ continue
+ u2 = 1.0 - random()
+ v = _log(u1 / (1.0 - u1)) / ainv
+ x = alpha * _exp(v)
+ z = u1 * u1 * u2
+ r = bbb + ccc * v - x
+ if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z):
+ return x * beta
+
+ elif alpha == 1.0:
+ # expovariate(1/beta)
+ return -_log(1.0 - random()) * beta
+
+ else:
+ # alpha is between 0 and 1 (exclusive)
+ # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
+ while True:
+ u = random()
+ b = (_e + alpha) / _e
+ p = b * u
+ if p <= 1.0:
+ x = p ** (1.0 / alpha)
+ else:
+ x = -_log((b - p) / alpha)
+ u1 = random()
+ if p > 1.0:
+ if u1 <= x ** (alpha - 1.0):
+ break
+ elif u1 <= _exp(-x):
+ break
+ return x * beta
+
+ def betavariate(self, alpha, beta):
+ """Beta distribution.
+
+ Conditions on the parameters are alpha > 0 and beta > 0.
+ Returned values range between 0 and 1.
+
+ """
+ ## See
+ ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
+ ## for Ivan Frohne's insightful analysis of why the original implementation:
+ ##
+ ## def betavariate(self, alpha, beta):
+ ## # Discrete Event Simulation in C, pp 87-88.
+ ##
+ ## y = self.expovariate(alpha)
+ ## z = self.expovariate(1.0/beta)
+ ## return z/(y+z)
+ ##
+ ## was dead wrong, and how it probably got that way.
+
+ # This version due to Janne Sinkkonen, and matches all the std
+ # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
+ y = self.gammavariate(alpha, 1.0)
+ if y:
+ return y / (y + self.gammavariate(beta, 1.0))
+ return 0.0
+
+ def paretovariate(self, alpha):
+ """Pareto distribution. alpha is the shape parameter."""
+ # Jain, pg. 495
+
+ u = 1.0 - self.random()
+ return u ** (-1.0 / alpha)
+
+ def weibullvariate(self, alpha, beta):
+ """Weibull distribution.
+
+ alpha is the scale parameter and beta is the shape parameter.
+
+ """
+ # Jain, pg. 499; bug fix courtesy Bill Arms
+
+ u = 1.0 - self.random()
+ return alpha * (-_log(u)) ** (1.0 / beta)
+
+
+## ------------------------------------------------------------------
+## --------------- Operating System Random Source ------------------
+
+
+class SystemRandom(Random):
+ """Alternate random number generator using sources provided
+ by the operating system (such as /dev/urandom on Unix or
+ CryptGenRandom on Windows).
+
+ Not available on all systems (see os.urandom() for details).
+
+ """
+
+ def random(self):
+ """Get the next random number in the range [0.0, 1.0)."""
+ return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF
+
+ def getrandbits(self, k):
+ """getrandbits(k) -> x. Generates an int with k random bits."""
+ if k < 0:
+ raise ValueError('number of bits must be non-negative')
+ numbytes = (k + 7) // 8 # bits / 8 and rounded up
+ x = int.from_bytes(_urandom(numbytes), 'big')
+ return x >> (numbytes * 8 - k) # trim excess bits
+
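+    # Editorial sketch: getrandbits(10) reads numbytes == 2 (16 bits) from
+    # os.urandom() and shifts right by 16 - 10 == 6, leaving exactly 10
+    # random bits.
+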
+ def randbytes(self, n):
+ """Generate n random bytes."""
+ # os.urandom(n) fails with ValueError for n < 0
+ # and returns an empty bytes string for n == 0.
+ return _urandom(n)
+
+ def seed(self, *args, **kwds):
+ "Stub method. Not used for a system random number generator."
+ return None
+
+ def _notimplemented(self, *args, **kwds):
+ "Method should not be called for a system random number generator."
+ raise NotImplementedError('System entropy source does not have state.')
+ getstate = setstate = _notimplemented
+
+
+# ----------------------------------------------------------------------
+# Create one instance, seeded from current time, and export its methods
+# as module-level functions. The functions share state across all uses
+# (both in the user's code and in the Python libraries), but that's fine
+# for most programs and is easier for the casual user than making them
+# instantiate their own Random() instance.
+
+_inst = Random()
+seed = _inst.seed
+random = _inst.random
+uniform = _inst.uniform
+triangular = _inst.triangular
+randint = _inst.randint
+choice = _inst.choice
+randrange = _inst.randrange
+sample = _inst.sample
+shuffle = _inst.shuffle
+choices = _inst.choices
+normalvariate = _inst.normalvariate
+lognormvariate = _inst.lognormvariate
+expovariate = _inst.expovariate
+vonmisesvariate = _inst.vonmisesvariate
+gammavariate = _inst.gammavariate
+gauss = _inst.gauss
+betavariate = _inst.betavariate
+paretovariate = _inst.paretovariate
+weibullvariate = _inst.weibullvariate
+getstate = _inst.getstate
+setstate = _inst.setstate
+getrandbits = _inst.getrandbits
+randbytes = _inst.randbytes
+
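+# Editorial sketch (not part of the module): because these names are bound
+# methods of one hidden Random() instance, seeding is global to the module:
+#
+#   >>> seed(42); first = random()
+#   >>> seed(42); random() == first
+#   True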
+
+## ------------------------------------------------------
+## ----------------- test program -----------------------
+
+def _test_generator(n, func, args):
+ from statistics import stdev, fmean as mean
+ from time import perf_counter
+
+ t0 = perf_counter()
+ data = [func(*args) for i in _repeat(None, n)]
+ t1 = perf_counter()
+
+ xbar = mean(data)
+ sigma = stdev(data, xbar)
+ low = min(data)
+ high = max(data)
+
+ print(f'{t1 - t0:.3f} sec, {n} times {func.__name__}')
+ print('avg %g, stddev %g, min %g, max %g\n' % (xbar, sigma, low, high))
+
+
+def _test(N=2000):
+ _test_generator(N, random, ())
+ _test_generator(N, normalvariate, (0.0, 1.0))
+ _test_generator(N, lognormvariate, (0.0, 1.0))
+ _test_generator(N, vonmisesvariate, (0.0, 1.0))
+ _test_generator(N, gammavariate, (0.01, 1.0))
+ _test_generator(N, gammavariate, (0.1, 1.0))
+ _test_generator(N, gammavariate, (0.1, 2.0))
+ _test_generator(N, gammavariate, (0.5, 1.0))
+ _test_generator(N, gammavariate, (0.9, 1.0))
+ _test_generator(N, gammavariate, (1.0, 1.0))
+ _test_generator(N, gammavariate, (2.0, 1.0))
+ _test_generator(N, gammavariate, (20.0, 1.0))
+ _test_generator(N, gammavariate, (200.0, 1.0))
+ _test_generator(N, gauss, (0.0, 1.0))
+ _test_generator(N, betavariate, (3.0, 3.0))
+ _test_generator(N, triangular, (0.0, 1.0, 1.0 / 3.0))
+
+
+## ------------------------------------------------------
+## ------------------ fork support ---------------------
+
+if hasattr(_os, "fork"):
+ _os.register_at_fork(after_in_child=_inst.seed)
+
+
+if __name__ == '__main__':
+ _test()
diff --git a/evalkit_cambrian/lib/python3.10/runpy.py b/evalkit_cambrian/lib/python3.10/runpy.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7d3d8caad1611ed52f1be8d517ad2ac906f04db
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/runpy.py
@@ -0,0 +1,321 @@
+"""runpy.py - locating and running Python code using the module namespace
+
+Provides support for locating and running Python scripts using the Python
+module namespace instead of the native filesystem.
+
+This allows Python code to play nicely with non-filesystem based PEP 302
+importers when locating support scripts as well as when importing modules.
+"""
+# Written by Nick Coghlan
+# to implement PEP 338 (Executing Modules as Scripts)
+
+
+import sys
+import importlib.machinery # importlib first so we can test #15386 via -m
+import importlib.util
+import io
+import types
+import os
+
+__all__ = [
+ "run_module", "run_path",
+]
+
+class _TempModule(object):
+ """Temporarily replace a module in sys.modules with an empty namespace"""
+ def __init__(self, mod_name):
+ self.mod_name = mod_name
+ self.module = types.ModuleType(mod_name)
+ self._saved_module = []
+
+ def __enter__(self):
+ mod_name = self.mod_name
+ try:
+ self._saved_module.append(sys.modules[mod_name])
+ except KeyError:
+ pass
+ sys.modules[mod_name] = self.module
+ return self
+
+ def __exit__(self, *args):
+ if self._saved_module:
+ sys.modules[self.mod_name] = self._saved_module[0]
+ else:
+ del sys.modules[self.mod_name]
+ self._saved_module = []
+
+class _ModifiedArgv0(object):
+ def __init__(self, value):
+ self.value = value
+ self._saved_value = self._sentinel = object()
+
+ def __enter__(self):
+ if self._saved_value is not self._sentinel:
+ raise RuntimeError("Already preserving saved value")
+ self._saved_value = sys.argv[0]
+ sys.argv[0] = self.value
+
+ def __exit__(self, *args):
+ self.value = self._sentinel
+ sys.argv[0] = self._saved_value
+
+# TODO: Replace these helpers with importlib._bootstrap_external functions.
+def _run_code(code, run_globals, init_globals=None,
+ mod_name=None, mod_spec=None,
+ pkg_name=None, script_name=None):
+ """Helper to run code in nominated namespace"""
+ if init_globals is not None:
+ run_globals.update(init_globals)
+ if mod_spec is None:
+ loader = None
+ fname = script_name
+ cached = None
+ else:
+ loader = mod_spec.loader
+ fname = mod_spec.origin
+ cached = mod_spec.cached
+ if pkg_name is None:
+ pkg_name = mod_spec.parent
+ run_globals.update(__name__ = mod_name,
+ __file__ = fname,
+ __cached__ = cached,
+ __doc__ = None,
+ __loader__ = loader,
+ __package__ = pkg_name,
+ __spec__ = mod_spec)
+ exec(code, run_globals)
+ return run_globals
+
+def _run_module_code(code, init_globals=None,
+ mod_name=None, mod_spec=None,
+ pkg_name=None, script_name=None):
+ """Helper to run code in new namespace with sys modified"""
+ fname = script_name if mod_spec is None else mod_spec.origin
+ with _TempModule(mod_name) as temp_module, _ModifiedArgv0(fname):
+ mod_globals = temp_module.module.__dict__
+ _run_code(code, mod_globals, init_globals,
+ mod_name, mod_spec, pkg_name, script_name)
+ # Copy the globals of the temporary module, as they
+ # may be cleared when the temporary module goes away
+ return mod_globals.copy()
+
+# Helper to get the full name, spec and code for a module
+def _get_module_details(mod_name, error=ImportError):
+ if mod_name.startswith("."):
+ raise error("Relative module names not supported")
+ pkg_name, _, _ = mod_name.rpartition(".")
+ if pkg_name:
+ # Try importing the parent to avoid catching initialization errors
+ try:
+ __import__(pkg_name)
+ except ImportError as e:
+ # If the parent or higher ancestor package is missing, let the
+ # error be raised by find_spec() below and then be caught. But do
+ # not allow other errors to be caught.
+ if e.name is None or (e.name != pkg_name and
+ not pkg_name.startswith(e.name + ".")):
+ raise
+ # Warn if the module has already been imported under its normal name
+ existing = sys.modules.get(mod_name)
+ if existing is not None and not hasattr(existing, "__path__"):
+ from warnings import warn
+ msg = "{mod_name!r} found in sys.modules after import of " \
+ "package {pkg_name!r}, but prior to execution of " \
+ "{mod_name!r}; this may result in unpredictable " \
+ "behaviour".format(mod_name=mod_name, pkg_name=pkg_name)
+ warn(RuntimeWarning(msg))
+
+ try:
+ spec = importlib.util.find_spec(mod_name)
+ except (ImportError, AttributeError, TypeError, ValueError) as ex:
+ # This hack fixes an impedance mismatch between pkgutil and
+ # importlib, where the latter raises other errors for cases where
+ # pkgutil previously raised ImportError
+ msg = "Error while finding module specification for {!r} ({}: {})"
+ if mod_name.endswith(".py"):
+ msg += (f". Try using '{mod_name[:-3]}' instead of "
+ f"'{mod_name}' as the module name.")
+ raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex
+ if spec is None:
+ raise error("No module named %s" % mod_name)
+ if spec.submodule_search_locations is not None:
+ if mod_name == "__main__" or mod_name.endswith(".__main__"):
+ raise error("Cannot use package as __main__ module")
+ try:
+ pkg_main_name = mod_name + ".__main__"
+ return _get_module_details(pkg_main_name, error)
+ except error as e:
+ if mod_name not in sys.modules:
+ raise # No module loaded; being a package is irrelevant
+ raise error(("%s; %r is a package and cannot " +
+ "be directly executed") %(e, mod_name))
+ loader = spec.loader
+ if loader is None:
+ raise error("%r is a namespace package and cannot be executed"
+ % mod_name)
+ try:
+ code = loader.get_code(mod_name)
+ except ImportError as e:
+ raise error(format(e)) from e
+ if code is None:
+ raise error("No code object available for %s" % mod_name)
+ return mod_name, spec, code
+
+class _Error(Exception):
+ """Error that _run_module_as_main() should report without a traceback"""
+
+# XXX ncoghlan: Should this be documented and made public?
+# (Current thoughts: don't repeat the mistake that lead to its
+# creation when run_module() no longer met the needs of
+# mainmodule.c, but couldn't be changed because it was public)
+def _run_module_as_main(mod_name, alter_argv=True):
+ """Runs the designated module in the __main__ namespace
+
+ Note that the executed module will have full access to the
+ __main__ namespace. If this is not desirable, the run_module()
+ function should be used to run the module code in a fresh namespace.
+
+ At the very least, these variables in __main__ will be overwritten:
+ __name__
+ __file__
+ __cached__
+ __loader__
+ __package__
+ """
+ try:
+ if alter_argv or mod_name != "__main__": # i.e. -m switch
+ mod_name, mod_spec, code = _get_module_details(mod_name, _Error)
+ else: # i.e. directory or zipfile execution
+ mod_name, mod_spec, code = _get_main_module_details(_Error)
+ except _Error as exc:
+ msg = "%s: %s" % (sys.executable, exc)
+ sys.exit(msg)
+ main_globals = sys.modules["__main__"].__dict__
+ if alter_argv:
+ sys.argv[0] = mod_spec.origin
+ return _run_code(code, main_globals, None,
+ "__main__", mod_spec)
+
+def run_module(mod_name, init_globals=None,
+ run_name=None, alter_sys=False):
+ """Execute a module's code without importing it.
+
+ mod_name -- an absolute module name or package name.
+
+ Optional arguments:
+       init_globals -- dictionary used to pre-populate the module's
+ globals dictionary before the code is executed.
+
+ run_name -- if not None, this will be used for setting __name__;
+       otherwise, __name__ will be set to mod_name + '.__main__' if the
+ named module is a package and to just mod_name otherwise.
+
+ alter_sys -- if True, sys.argv[0] is updated with the value of
+ __file__ and sys.modules[__name__] is updated with a temporary
+ module object for the module being executed. Both are
+ restored to their original values before the function returns.
+
+ Returns the resulting module globals dictionary.
+ """
+ mod_name, mod_spec, code = _get_module_details(mod_name)
+ if run_name is None:
+ run_name = mod_name
+ if alter_sys:
+ return _run_module_code(code, init_globals, run_name, mod_spec)
+ else:
+ # Leave the sys module alone
+ return _run_code(code, {}, init_globals, run_name, mod_spec)
+
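+# Editorial sketch ('mypkg.task' is a hypothetical module name):
+#
+#   import runpy
+#   globs = runpy.run_module('mypkg.task', run_name='__main__')
+#   # globs is the executed module's globals dictionary
+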
+def _get_main_module_details(error=ImportError):
+ # Helper that gives a nicer error message when attempting to
+ # execute a zipfile or directory by invoking __main__.py
+ # Also moves the standard __main__ out of the way so that the
+ # preexisting __loader__ entry doesn't cause issues
+ main_name = "__main__"
+ saved_main = sys.modules[main_name]
+ del sys.modules[main_name]
+ try:
+ return _get_module_details(main_name)
+ except ImportError as exc:
+ if main_name in str(exc):
+ raise error("can't find %r module in %r" %
+ (main_name, sys.path[0])) from exc
+ raise
+ finally:
+ sys.modules[main_name] = saved_main
+
+
+def _get_code_from_file(run_name, fname):
+ # Check for a compiled file first
+ from pkgutil import read_code
+ decoded_path = os.path.abspath(os.fsdecode(fname))
+ with io.open_code(decoded_path) as f:
+ code = read_code(f)
+ if code is None:
+ # That didn't work, so try it as normal source code
+ with io.open_code(decoded_path) as f:
+ code = compile(f.read(), fname, 'exec')
+ return code, fname
+
+def run_path(path_name, init_globals=None, run_name=None):
+ """Execute code located at the specified filesystem location.
+
+ path_name -- filesystem location of a Python script, zipfile,
+ or directory containing a top level __main__.py script.
+
+ Optional arguments:
+       init_globals -- dictionary used to pre-populate the module's
+ globals dictionary before the code is executed.
+
+ run_name -- if not None, this will be used to set __name__;
+       otherwise, '<run_path>' will be used for __name__.
+
+ Returns the resulting module globals dictionary.
+ """
+ if run_name is None:
+        run_name = "<run_path>"
+ pkg_name = run_name.rpartition(".")[0]
+ from pkgutil import get_importer
+ importer = get_importer(path_name)
+    # Trying to avoid importing imp, so as not to trigger its deprecation warning.
+ is_NullImporter = False
+ if type(importer).__module__ == 'imp':
+ if type(importer).__name__ == 'NullImporter':
+ is_NullImporter = True
+ if isinstance(importer, type(None)) or is_NullImporter:
+ # Not a valid sys.path entry, so run the code directly
+ # execfile() doesn't help as we want to allow compiled files
+ code, fname = _get_code_from_file(run_name, path_name)
+ return _run_module_code(code, init_globals, run_name,
+ pkg_name=pkg_name, script_name=fname)
+ else:
+ # Finder is defined for path, so add it to
+ # the start of sys.path
+ sys.path.insert(0, path_name)
+ try:
+ # Here's where things are a little different from the run_module
+ # case. There, we only had to replace the module in sys while the
+ # code was running and doing so was somewhat optional. Here, we
+ # have no choice and we have to remove it even while we read the
+ # code. If we don't do this, a __loader__ attribute in the
+ # existing __main__ module may prevent location of the new module.
+ mod_name, mod_spec, code = _get_main_module_details()
+ with _TempModule(run_name) as temp_module, \
+ _ModifiedArgv0(path_name):
+ mod_globals = temp_module.module.__dict__
+ return _run_code(code, mod_globals, init_globals,
+ run_name, mod_spec, pkg_name).copy()
+ finally:
+ try:
+ sys.path.remove(path_name)
+ except ValueError:
+ pass
+
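+# Editorial sketch ('scripts/build.py' is a hypothetical path; a zipfile or
+# a directory containing a __main__.py would work the same way):
+#
+#   import runpy
+#   globs = runpy.run_path('scripts/build.py', run_name='__main__')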
+
+if __name__ == "__main__":
+ # Run the module specified as the next command line argument
+ if len(sys.argv) < 2:
+ print("No module specified for execution", file=sys.stderr)
+ else:
+ del sys.argv[0] # Make the requested module sys.argv[0]
+ _run_module_as_main(sys.argv[0])
diff --git a/evalkit_cambrian/lib/python3.10/sched.py b/evalkit_cambrian/lib/python3.10/sched.py
new file mode 100644
index 0000000000000000000000000000000000000000..14613cf29874da539490accd7d83abce51499964
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/sched.py
@@ -0,0 +1,167 @@
+"""A generally useful event scheduler class.
+
+Each instance of this class manages its own queue.
+No multi-threading is implied; you are supposed to hack that
+yourself, or use a single instance per application.
+
+Each instance is parametrized with two functions, one that is
+supposed to return the current time, one that is supposed to
+implement a delay. You can implement real-time scheduling by
+substituting time and sleep from built-in module time, or you can
+implement simulated time by writing your own functions. This can
+also be used to integrate scheduling with STDWIN events; the delay
+function is allowed to modify the queue. Time can be expressed as
+integers or floating point numbers, as long as it is consistent.
+
+Events are specified by tuples (time, priority, action, argument, kwargs).
+As in UNIX, lower priority numbers mean higher priority; in this
+way the queue can be maintained as a priority queue. Execution of the
+event means calling the action function, passing it the argument
+sequence in "argument" (remember that in Python, multiple function
+arguments are packed in a sequence) and keyword parameters in "kwargs".
+The action function may be an instance method so it
+has another way to reference private data (besides global variables).
+"""
+
+import time
+import heapq
+from collections import namedtuple
+from itertools import count
+import threading
+from time import monotonic as _time
+
+__all__ = ["scheduler"]
+
+Event = namedtuple('Event', 'time, priority, sequence, action, argument, kwargs')
+Event.time.__doc__ = ('''Numeric type compatible with the return value of the
+timefunc function passed to the constructor.''')
+Event.priority.__doc__ = ('''Events scheduled for the same time will be executed
+in the order of their priority.''')
+Event.sequence.__doc__ = ('''A continually increasing sequence number that
+ separates events if time and priority are equal.''')
+Event.action.__doc__ = ('''Executing the event means executing
+action(*argument, **kwargs)''')
+Event.argument.__doc__ = ('''argument is a sequence holding the positional
+arguments for the action.''')
+Event.kwargs.__doc__ = ('''kwargs is a dictionary holding the keyword
+arguments for the action.''')
+
+_sentinel = object()
+
+class scheduler:
+
+ def __init__(self, timefunc=_time, delayfunc=time.sleep):
+ """Initialize a new instance, passing the time and delay
+ functions"""
+ self._queue = []
+ self._lock = threading.RLock()
+ self.timefunc = timefunc
+ self.delayfunc = delayfunc
+ self._sequence_generator = count()
+
+ def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel):
+ """Enter a new event in the queue at an absolute time.
+
+ Returns an ID for the event which can be used to remove it,
+ if necessary.
+
+ """
+ if kwargs is _sentinel:
+ kwargs = {}
+
+ with self._lock:
+ event = Event(time, priority, next(self._sequence_generator),
+ action, argument, kwargs)
+ heapq.heappush(self._queue, event)
+ return event # The ID
+
+ def enter(self, delay, priority, action, argument=(), kwargs=_sentinel):
+ """A variant that specifies the time as a relative time.
+
+ This is actually the more commonly used interface.
+
+ """
+ time = self.timefunc() + delay
+ return self.enterabs(time, priority, action, argument, kwargs)
+
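+    # Editorial sketch: relative scheduling with the default real-time
+    # functions (time.monotonic and time.sleep):
+    #
+    #   s = scheduler()
+    #   s.enter(5, 1, print, ('five seconds',))
+    #   s.enter(10, 1, print, ('ten seconds',))
+    #   s.run()   # blocks, running each action when it comes due
+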
+ def cancel(self, event):
+ """Remove an event from the queue.
+
+ This must be presented the ID as returned by enter().
+ If the event is not in the queue, this raises ValueError.
+
+ """
+ with self._lock:
+ self._queue.remove(event)
+ heapq.heapify(self._queue)
+
+ def empty(self):
+ """Check whether the queue is empty."""
+ with self._lock:
+ return not self._queue
+
+ def run(self, blocking=True):
+ """Execute events until the queue is empty.
+        If blocking is False, execute the scheduled events due to
+        expire soonest (if any) and then return the deadline of the
+        next scheduled call in the scheduler.
+
+ When there is a positive delay until the first event, the
+ delay function is called and the event is left in the queue;
+ otherwise, the event is removed from the queue and executed
+ (its action function is called, passing it the argument). If
+ the delay function returns prematurely, it is simply
+ restarted.
+
+ It is legal for both the delay function and the action
+ function to modify the queue or to raise an exception;
+ exceptions are not caught but the scheduler's state remains
+ well-defined so run() may be called again.
+
+ A questionable hack is added to allow other threads to run:
+ just after an event is executed, a delay of 0 is executed, to
+ avoid monopolizing the CPU when other threads are also
+ runnable.
+
+ """
+ # localize variable access to minimize overhead
+ # and to improve thread safety
+ lock = self._lock
+ q = self._queue
+ delayfunc = self.delayfunc
+ timefunc = self.timefunc
+ pop = heapq.heappop
+ while True:
+ with lock:
+ if not q:
+ break
+ (time, priority, sequence, action,
+ argument, kwargs) = q[0]
+ now = timefunc()
+ if time > now:
+ delay = True
+ else:
+ delay = False
+ pop(q)
+ if delay:
+ if not blocking:
+ return time - now
+ delayfunc(time - now)
+ else:
+ action(*argument, **kwargs)
+ delayfunc(0) # Let other threads run
+
+ @property
+ def queue(self):
+ """An ordered list of upcoming events.
+
+ Events are named tuples with fields for:
+ time, priority, action, arguments, kwargs
+
+ """
+ # Use heapq to sort the queue rather than using 'sorted(self._queue)'.
+ # With heapq, two events scheduled at the same time will show in
+ # the actual order they would be retrieved.
+ with self._lock:
+ events = self._queue[:]
+ return list(map(heapq.heappop, [events]*len(events)))
diff --git a/evalkit_cambrian/lib/python3.10/signal.py b/evalkit_cambrian/lib/python3.10/signal.py
new file mode 100644
index 0000000000000000000000000000000000000000..50b215b29d2fadf6ccc38e860242d04e5d946fb5
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/signal.py
@@ -0,0 +1,92 @@
+import _signal
+from _signal import *
+from enum import IntEnum as _IntEnum
+
+_globals = globals()
+
+_IntEnum._convert_(
+ 'Signals', __name__,
+ lambda name:
+ name.isupper()
+ and (name.startswith('SIG') and not name.startswith('SIG_'))
+ or name.startswith('CTRL_'))
+
+_IntEnum._convert_(
+ 'Handlers', __name__,
+ lambda name: name in ('SIG_DFL', 'SIG_IGN'))
+
+if 'pthread_sigmask' in _globals:
+ _IntEnum._convert_(
+ 'Sigmasks', __name__,
+ lambda name: name in ('SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'))
+
+
+def _int_to_enum(value, enum_klass):
+ """Convert a numeric value to an IntEnum member.
+ If it's not a known member, return the numeric value itself.
+ """
+ try:
+ return enum_klass(value)
+ except ValueError:
+ return value
+
+
+def _enum_to_int(value):
+ """Convert an IntEnum member to a numeric value.
+ If it's not an IntEnum member return the value itself.
+ """
+ try:
+ return int(value)
+ except (ValueError, TypeError):
+ return value
+
+
+# Similar to functools.wraps(), but only assign __doc__.
+# __module__ should be preserved,
+# __name__ and __qualname__ are already fine,
+# __annotations__ is not set.
+def _wraps(wrapped):
+ def decorator(wrapper):
+ wrapper.__doc__ = wrapped.__doc__
+ return wrapper
+ return decorator
+
+@_wraps(_signal.signal)
+def signal(signalnum, handler):
+ handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler))
+ return _int_to_enum(handler, Handlers)
+
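+# Editorial sketch: the wrappers translate between plain ints and the enums
+# defined above, so in a fresh interpreter one would typically see:
+#
+#   import signal
+#   old = signal.signal(signal.SIGINT, signal.SIG_IGN)
+#   # old is usually <Handlers.SIG_DFL: 0> rather than the bare int 0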
+
+@_wraps(_signal.getsignal)
+def getsignal(signalnum):
+ handler = _signal.getsignal(signalnum)
+ return _int_to_enum(handler, Handlers)
+
+
+if 'pthread_sigmask' in _globals:
+ @_wraps(_signal.pthread_sigmask)
+ def pthread_sigmask(how, mask):
+ sigs_set = _signal.pthread_sigmask(how, mask)
+ return set(_int_to_enum(x, Signals) for x in sigs_set)
+
+
+if 'sigpending' in _globals:
+ @_wraps(_signal.sigpending)
+ def sigpending():
+ return {_int_to_enum(x, Signals) for x in _signal.sigpending()}
+
+
+if 'sigwait' in _globals:
+ @_wraps(_signal.sigwait)
+ def sigwait(sigset):
+ retsig = _signal.sigwait(sigset)
+ return _int_to_enum(retsig, Signals)
+
+
+if 'valid_signals' in _globals:
+ @_wraps(_signal.valid_signals)
+ def valid_signals():
+ return {_int_to_enum(x, Signals) for x in _signal.valid_signals()}
+
+
+del _globals, _wraps
diff --git a/evalkit_cambrian/lib/python3.10/sndhdr.py b/evalkit_cambrian/lib/python3.10/sndhdr.py
new file mode 100644
index 0000000000000000000000000000000000000000..96595c6974468213e0a93414af95f4981bb609c5
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/sndhdr.py
@@ -0,0 +1,257 @@
+"""Routines to help recognizing sound files.
+
+Function whathdr() recognizes various types of sound file headers.
+It understands almost all headers that SOX can decode.
+
+The return tuple contains the following items, in this order:
+- file type (as SOX understands it)
+- sampling rate (0 if unknown or hard to decode)
+- number of channels (0 if unknown or hard to decode)
+- number of frames in the file (-1 if unknown or hard to decode)
+- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
+
+If the file doesn't have a recognizable type, it returns None.
+If the file can't be opened, OSError is raised.
+
+To compute the total time, divide the number of frames by the
+sampling rate (a frame contains a sample for each channel).
+
+Function what() calls whathdr(). (It used to also use some
+heuristics for raw data, but this doesn't work very well.)
+
+Finally, the function test() is a simple main program that calls
+what() for all files mentioned on the argument list. For directory
+arguments it calls what() for all files in that directory. Default
+argument is "." (testing all files in the current directory). The
+option -r tells it to recurse down directories found inside
+explicitly given directories.
+"""
+
+# The file structure is top-down except that the test program and its
+# subroutine come last.
+
+__all__ = ['what', 'whathdr']
+
+from collections import namedtuple
+
+SndHeaders = namedtuple('SndHeaders',
+ 'filetype framerate nchannels nframes sampwidth')
+
+SndHeaders.filetype.__doc__ = ("""The value for type indicates the data type
+and will be one of the strings 'aifc', 'aiff', 'au', 'hcom',
+'sndr', 'sndt', 'voc', 'wav', '8svx', 'sb', 'ub', or 'ul'.""")
+SndHeaders.framerate.__doc__ = ("""The sampling_rate will be either the actual
+value or 0 if unknown or difficult to decode.""")
+SndHeaders.nchannels.__doc__ = ("""The number of channels or 0 if it cannot be
+determined or if the value is difficult to decode.""")
+SndHeaders.nframes.__doc__ = ("""The value for frames will be either the number
+of frames or -1.""")
+SndHeaders.sampwidth.__doc__ = ("""Either the sample size in bits or
+'A' for A-LAW or 'U' for u-LAW.""")
+
+def what(filename):
+ """Guess the type of a sound file."""
+ res = whathdr(filename)
+ return res
+
+
+def whathdr(filename):
+ """Recognize sound headers."""
+ with open(filename, 'rb') as f:
+ h = f.read(512)
+ for tf in tests:
+ res = tf(h, f)
+ if res:
+ return SndHeaders(*res)
+ return None
+
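+# Editorial sketch ('bell.wav' and the header values are hypothetical):
+#
+#   import sndhdr
+#   hdr = sndhdr.what('bell.wav')
+#   # e.g. SndHeaders(filetype='wav', framerate=8000, nchannels=1,
+#   #                 nframes=4000, sampwidth=16)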
+
+#-----------------------------------#
+# Subroutines per sound header type #
+#-----------------------------------#
+
+tests = []
+
+def test_aifc(h, f):
+ import aifc
+ if not h.startswith(b'FORM'):
+ return None
+ if h[8:12] == b'AIFC':
+ fmt = 'aifc'
+ elif h[8:12] == b'AIFF':
+ fmt = 'aiff'
+ else:
+ return None
+ f.seek(0)
+ try:
+ a = aifc.open(f, 'r')
+ except (EOFError, aifc.Error):
+ return None
+ return (fmt, a.getframerate(), a.getnchannels(),
+ a.getnframes(), 8 * a.getsampwidth())
+
+tests.append(test_aifc)
+
+
+def test_au(h, f):
+ if h.startswith(b'.snd'):
+ func = get_long_be
+ elif h[:4] in (b'\0ds.', b'dns.'):
+ func = get_long_le
+ else:
+ return None
+ filetype = 'au'
+ hdr_size = func(h[4:8])
+ data_size = func(h[8:12])
+ encoding = func(h[12:16])
+ rate = func(h[16:20])
+ nchannels = func(h[20:24])
+ sample_size = 1 # default
+ if encoding == 1:
+ sample_bits = 'U'
+ elif encoding == 2:
+ sample_bits = 8
+ elif encoding == 3:
+ sample_bits = 16
+ sample_size = 2
+ else:
+ sample_bits = '?'
+ frame_size = sample_size * nchannels
+ if frame_size:
+ nframe = data_size / frame_size
+ else:
+ nframe = -1
+ return filetype, rate, nchannels, nframe, sample_bits
+
+tests.append(test_au)
+
+
+def test_hcom(h, f):
+ if h[65:69] != b'FSSD' or h[128:132] != b'HCOM':
+ return None
+ divisor = get_long_be(h[144:148])
+ if divisor:
+ rate = 22050 / divisor
+ else:
+ rate = 0
+ return 'hcom', rate, 1, -1, 8
+
+tests.append(test_hcom)
+
+
+def test_voc(h, f):
+ if not h.startswith(b'Creative Voice File\032'):
+ return None
+ sbseek = get_short_le(h[20:22])
+ rate = 0
+ if 0 <= sbseek < 500 and h[sbseek] == 1:
+ ratecode = 256 - h[sbseek+4]
+ if ratecode:
+ rate = int(1000000.0 / ratecode)
+ return 'voc', rate, 1, -1, 8
+
+tests.append(test_voc)
+
+
+def test_wav(h, f):
+ import wave
+ # 'RIFF' 'WAVE' 'fmt '
+ if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ':
+ return None
+ f.seek(0)
+ try:
+ w = wave.open(f, 'r')
+ except (EOFError, wave.Error):
+ return None
+ return ('wav', w.getframerate(), w.getnchannels(),
+ w.getnframes(), 8*w.getsampwidth())
+
+tests.append(test_wav)
+
+
+def test_8svx(h, f):
+ if not h.startswith(b'FORM') or h[8:12] != b'8SVX':
+ return None
+ # Should decode it to get #channels -- assume always 1
+ return '8svx', 0, 1, 0, 8
+
+tests.append(test_8svx)
+
+
+def test_sndt(h, f):
+ if h.startswith(b'SOUND'):
+ nsamples = get_long_le(h[8:12])
+ rate = get_short_le(h[20:22])
+ return 'sndt', rate, 1, nsamples, 8
+
+tests.append(test_sndt)
+
+
+def test_sndr(h, f):
+ if h.startswith(b'\0\0'):
+ rate = get_short_le(h[2:4])
+ if 4000 <= rate <= 25000:
+ return 'sndr', rate, 1, -1, 8
+
+tests.append(test_sndr)
+
+
+#-------------------------------------------#
+# Subroutines to extract numbers from bytes #
+#-------------------------------------------#
+
+def get_long_be(b):
+ return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]
+
+def get_long_le(b):
+ return (b[3] << 24) | (b[2] << 16) | (b[1] << 8) | b[0]
+
+def get_short_be(b):
+ return (b[0] << 8) | b[1]
+
+def get_short_le(b):
+ return (b[1] << 8) | b[0]
+
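+# Editorial sketch: the helpers differ only in byte order, e.g.
+# get_long_be(b'\x00\x00\x01\x00') == get_long_le(b'\x00\x01\x00\x00') == 256.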
+
+#--------------------#
+# Small test program #
+#--------------------#
+
+def test():
+ import sys
+ recursive = 0
+ if sys.argv[1:] and sys.argv[1] == '-r':
+ del sys.argv[1:2]
+ recursive = 1
+ try:
+ if sys.argv[1:]:
+ testall(sys.argv[1:], recursive, 1)
+ else:
+ testall(['.'], recursive, 1)
+ except KeyboardInterrupt:
+ sys.stderr.write('\n[Interrupted]\n')
+ sys.exit(1)
+
+def testall(list, recursive, toplevel):
+ import sys
+ import os
+ for filename in list:
+ if os.path.isdir(filename):
+ print(filename + '/:', end=' ')
+ if recursive or toplevel:
+ print('recursing down:')
+ import glob
+ names = glob.glob(os.path.join(glob.escape(filename), '*'))
+ testall(names, recursive, 0)
+ else:
+ print('*** directory (use -r) ***')
+ else:
+ print(filename + ':', end=' ')
+ sys.stdout.flush()
+ try:
+ print(what(filename))
+ except OSError:
+ print('*** not found ***')
+
+if __name__ == '__main__':
+ test()
diff --git a/evalkit_cambrian/lib/python3.10/socketserver.py b/evalkit_cambrian/lib/python3.10/socketserver.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d9583d56a4d742aa04b075426cdb66b781ef0c5
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/socketserver.py
@@ -0,0 +1,844 @@
+"""Generic socket server classes.
+
+This module tries to capture the various aspects of defining a server:
+
+For socket-based servers:
+
+- address family:
+ - AF_INET{,6}: IP (Internet Protocol) sockets (default)
+ - AF_UNIX: Unix domain sockets
+    - others, e.g. AF_DECNET are conceivable (see <socket.h>)
+- socket type:
+ - SOCK_STREAM (reliable stream, e.g. TCP)
+ - SOCK_DGRAM (datagrams, e.g. UDP)
+
+For request-based servers (including socket-based):
+
+- client address verification before further looking at the request
+ (This is actually a hook for any processing that needs to look
+ at the request before anything else, e.g. logging)
+- how to handle multiple requests:
+ - synchronous (one request is handled at a time)
+ - forking (each request is handled by a new process)
+ - threading (each request is handled by a new thread)
+
+The classes in this module favor the server type that is simplest to
+write: a synchronous TCP/IP server. This is bad class design, but
+saves some typing. (There's also the issue that a deep class hierarchy
+slows down method lookups.)
+
+There are five classes in an inheritance diagram, four of which represent
+synchronous servers of four types:
+
+ +------------+
+ | BaseServer |
+ +------------+
+ |
+ v
+ +-----------+ +------------------+
+ | TCPServer |------->| UnixStreamServer |
+ +-----------+ +------------------+
+ |
+ v
+ +-----------+ +--------------------+
+ | UDPServer |------->| UnixDatagramServer |
+ +-----------+ +--------------------+
+
+Note that UnixDatagramServer derives from UDPServer, not from
+UnixStreamServer -- the only difference between an IP and a Unix
+stream server is the address family, which is simply repeated in both
+unix server classes.
+
+Forking and threading versions of each type of server can be created
+using the ForkingMixIn and ThreadingMixIn mix-in classes. For
+instance, a threading UDP server class is created as follows:
+
+ class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+
+The Mix-in class must come first, since it overrides a method defined
+in UDPServer! Setting the various member variables also changes
+the behavior of the underlying server mechanism.
+
+To implement a service, you must derive a class from
+BaseRequestHandler and redefine its handle() method. You can then run
+various versions of the service by combining one of the server classes
+with your request handler class.
+
+The request handler class must be different for datagram or stream
+services. This can be hidden by using the request handler
+subclasses StreamRequestHandler or DatagramRequestHandler.
+
+Of course, you still have to use your head!
+
+For instance, it makes no sense to use a forking server if the service
+contains state in memory that can be modified by requests (since the
+modifications in the child process would never reach the initial state
+kept in the parent process and passed to each child). In this case,
+you can use a threading server, but you will probably have to use
+locks to keep two requests that arrive nearly simultaneously from applying
+conflicting changes to the server state.
+
+On the other hand, if you are building e.g. an HTTP server, where all
+data is stored externally (e.g. in the file system), a synchronous
+class will essentially render the service "deaf" while one request is
+being handled -- which may be for a very long time if a client is slow
+to read all the data it has requested. Here a threading or forking
+server is appropriate.
+
+In some cases, it may be appropriate to process part of a request
+synchronously, but to finish processing in a forked child depending on
+the request data. This can be implemented by using a synchronous
+server and doing an explicit fork in the request handler class
+handle() method.
+
+Another approach to handling multiple simultaneous requests in an
+environment that supports neither threads nor fork (or where these are
+too expensive or inappropriate for the service) is to maintain an
+explicit table of partially finished requests and to use a selector to
+decide which request to work on next (or whether to handle a new
+incoming request). This is particularly important for stream services
+where each client can potentially be connected for a long time (if
+threads or subprocesses cannot be used).
+
+Future work:
+- Standard classes for Sun RPC (which uses either UDP or TCP)
+- Standard mix-in classes to implement various authentication
+ and encryption schemes
+
+XXX Open problems:
+- What to do with out-of-band data?
+
+BaseServer:
+- split generic "request" functionality out into BaseServer class.
+ Copyright (C) 2000 Luke Kenneth Casson Leighton
+
+ example: read entries from a SQL database (requires overriding
+ get_request() to return a table entry from the database).
+  Each entry is processed by a RequestHandlerClass.
+
+"""
+
+# Author of the BaseServer patch: Luke Kenneth Casson Leighton
+
+__version__ = "0.4"
+
+
+import socket
+import selectors
+import os
+import sys
+import threading
+from io import BufferedIOBase
+from time import monotonic as time
+
+__all__ = ["BaseServer", "TCPServer", "UDPServer",
+ "ThreadingUDPServer", "ThreadingTCPServer",
+ "BaseRequestHandler", "StreamRequestHandler",
+ "DatagramRequestHandler", "ThreadingMixIn"]
+if hasattr(os, "fork"):
+ __all__.extend(["ForkingUDPServer","ForkingTCPServer", "ForkingMixIn"])
+if hasattr(socket, "AF_UNIX"):
+ __all__.extend(["UnixStreamServer","UnixDatagramServer",
+ "ThreadingUnixStreamServer",
+ "ThreadingUnixDatagramServer"])
+
+# poll/select have the advantage of not requiring any extra file descriptor,
+# unlike epoll/kqueue (they also need only a single syscall).
+if hasattr(selectors, 'PollSelector'):
+ _ServerSelector = selectors.PollSelector
+else:
+ _ServerSelector = selectors.SelectSelector
+
+
+class BaseServer:
+
+ """Base class for server classes.
+
+ Methods for the caller:
+
+ - __init__(server_address, RequestHandlerClass)
+ - serve_forever(poll_interval=0.5)
+ - shutdown()
+ - handle_request() # if you do not use serve_forever()
+ - fileno() -> int # for selector
+
+ Methods that may be overridden:
+
+ - server_bind()
+ - server_activate()
+ - get_request() -> request, client_address
+ - handle_timeout()
+ - verify_request(request, client_address)
+ - server_close()
+ - process_request(request, client_address)
+ - shutdown_request(request)
+ - close_request(request)
+ - service_actions()
+ - handle_error()
+
+ Methods for derived classes:
+
+ - finish_request(request, client_address)
+
+ Class variables that may be overridden by derived classes or
+ instances:
+
+ - timeout
+ - address_family
+ - socket_type
+ - allow_reuse_address
+
+ Instance variables:
+
+ - RequestHandlerClass
+ - socket
+
+ """
+
+ timeout = None
+
+ def __init__(self, server_address, RequestHandlerClass):
+ """Constructor. May be extended, do not override."""
+ self.server_address = server_address
+ self.RequestHandlerClass = RequestHandlerClass
+ self.__is_shut_down = threading.Event()
+ self.__shutdown_request = False
+
+ def server_activate(self):
+ """Called by constructor to activate the server.
+
+ May be overridden.
+
+ """
+ pass
+
+ def serve_forever(self, poll_interval=0.5):
+ """Handle one request at a time until shutdown.
+
+ Polls for shutdown every poll_interval seconds. Ignores
+ self.timeout. If you need to do periodic tasks, do them in
+ another thread.
+ """
+ self.__is_shut_down.clear()
+ try:
+ # XXX: Consider using another file descriptor or connecting to the
+ # socket to wake this up instead of polling. Polling reduces our
+ # responsiveness to a shutdown request and wastes cpu at all other
+ # times.
+ with _ServerSelector() as selector:
+ selector.register(self, selectors.EVENT_READ)
+
+ while not self.__shutdown_request:
+ ready = selector.select(poll_interval)
+ # bpo-35017: shutdown() called during select(), exit immediately.
+ if self.__shutdown_request:
+ break
+ if ready:
+ self._handle_request_noblock()
+
+ self.service_actions()
+ finally:
+ self.__shutdown_request = False
+ self.__is_shut_down.set()
+
+ def shutdown(self):
+ """Stops the serve_forever loop.
+
+ Blocks until the loop has finished. This must be called while
+ serve_forever() is running in another thread, or it will
+ deadlock.
+ """
+ self.__shutdown_request = True
+ self.__is_shut_down.wait()
+
+ def service_actions(self):
+ """Called by the serve_forever() loop.
+
+ May be overridden by a subclass / Mixin to implement any code that
+ needs to be run during the loop.
+ """
+ pass
+
+ # The distinction between handling, getting, processing and finishing a
+ # request is fairly arbitrary. Remember:
+ #
+ # - handle_request() is the top-level call. It calls selector.select(),
+ # get_request(), verify_request() and process_request()
+ # - get_request() is different for stream or datagram sockets
+ # - process_request() is the place that may fork a new process or create a
+ # new thread to finish the request
+ # - finish_request() instantiates the request handler class; this
+ # constructor will handle the request all by itself
+
+ def handle_request(self):
+ """Handle one request, possibly blocking.
+
+ Respects self.timeout.
+ """
+ # Support people who used socket.settimeout() to escape
+ # handle_request before self.timeout was available.
+ timeout = self.socket.gettimeout()
+ if timeout is None:
+ timeout = self.timeout
+ elif self.timeout is not None:
+ timeout = min(timeout, self.timeout)
+ if timeout is not None:
+ deadline = time() + timeout
+
+ # Wait until a request arrives or the timeout expires - the loop is
+ # necessary to accommodate early wakeups due to EINTR.
+ with _ServerSelector() as selector:
+ selector.register(self, selectors.EVENT_READ)
+
+ while True:
+ ready = selector.select(timeout)
+ if ready:
+ return self._handle_request_noblock()
+ else:
+ if timeout is not None:
+ timeout = deadline - time()
+ if timeout < 0:
+ return self.handle_timeout()
+
+ def _handle_request_noblock(self):
+ """Handle one request, without blocking.
+
+ I assume that selector.select() has returned that the socket is
+ readable before this function was called, so there should be no risk of
+ blocking in get_request().
+ """
+ try:
+ request, client_address = self.get_request()
+ except OSError:
+ return
+ if self.verify_request(request, client_address):
+ try:
+ self.process_request(request, client_address)
+ except Exception:
+ self.handle_error(request, client_address)
+ self.shutdown_request(request)
+ except:
+ self.shutdown_request(request)
+ raise
+ else:
+ self.shutdown_request(request)
+
+ def handle_timeout(self):
+ """Called if no new request arrives within self.timeout.
+
+ Overridden by ForkingMixIn.
+ """
+ pass
+
+ def verify_request(self, request, client_address):
+ """Verify the request. May be overridden.
+
+ Return True if we should proceed with this request.
+
+ """
+ return True
+
+ def process_request(self, request, client_address):
+ """Call finish_request.
+
+ Overridden by ForkingMixIn and ThreadingMixIn.
+
+ """
+ self.finish_request(request, client_address)
+ self.shutdown_request(request)
+
+ def server_close(self):
+ """Called to clean-up the server.
+
+ May be overridden.
+
+ """
+ pass
+
+ def finish_request(self, request, client_address):
+ """Finish one request by instantiating RequestHandlerClass."""
+ self.RequestHandlerClass(request, client_address, self)
+
+ def shutdown_request(self, request):
+ """Called to shutdown and close an individual request."""
+ self.close_request(request)
+
+ def close_request(self, request):
+ """Called to clean up an individual request."""
+ pass
+
+ def handle_error(self, request, client_address):
+ """Handle an error gracefully. May be overridden.
+
+ The default is to print a traceback and continue.
+
+ """
+ print('-'*40, file=sys.stderr)
+ print('Exception occurred during processing of request from',
+ client_address, file=sys.stderr)
+ import traceback
+ traceback.print_exc()
+ print('-'*40, file=sys.stderr)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.server_close()
+
+
+class TCPServer(BaseServer):
+
+ """Base class for various socket-based server classes.
+
+ Defaults to synchronous IP stream (i.e., TCP).
+
+ Methods for the caller:
+
+ - __init__(server_address, RequestHandlerClass, bind_and_activate=True)
+ - serve_forever(poll_interval=0.5)
+ - shutdown()
+ - handle_request() # if you don't use serve_forever()
+ - fileno() -> int # for selector
+
+ Methods that may be overridden:
+
+ - server_bind()
+ - server_activate()
+ - get_request() -> request, client_address
+ - handle_timeout()
+ - verify_request(request, client_address)
+ - process_request(request, client_address)
+ - shutdown_request(request)
+ - close_request(request)
+ - handle_error()
+
+ Methods for derived classes:
+
+ - finish_request(request, client_address)
+
+ Class variables that may be overridden by derived classes or
+ instances:
+
+ - timeout
+ - address_family
+ - socket_type
+ - request_queue_size (only for stream sockets)
+ - allow_reuse_address
+
+ Instance variables:
+
+ - server_address
+ - RequestHandlerClass
+ - socket
+
+ """
+
+ address_family = socket.AF_INET
+
+ socket_type = socket.SOCK_STREAM
+
+ request_queue_size = 5
+
+ allow_reuse_address = False
+
+ def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
+ """Constructor. May be extended, do not override."""
+ BaseServer.__init__(self, server_address, RequestHandlerClass)
+ self.socket = socket.socket(self.address_family,
+ self.socket_type)
+ if bind_and_activate:
+ try:
+ self.server_bind()
+ self.server_activate()
+ except:
+ self.server_close()
+ raise
+
+ def server_bind(self):
+ """Called by constructor to bind the socket.
+
+ May be overridden.
+
+ """
+ if self.allow_reuse_address:
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.socket.bind(self.server_address)
+ self.server_address = self.socket.getsockname()
+
+ def server_activate(self):
+ """Called by constructor to activate the server.
+
+ May be overridden.
+
+ """
+ self.socket.listen(self.request_queue_size)
+
+ def server_close(self):
+ """Called to clean-up the server.
+
+ May be overridden.
+
+ """
+ self.socket.close()
+
+ def fileno(self):
+ """Return socket file number.
+
+ Interface required by selector.
+
+ """
+ return self.socket.fileno()
+
+ def get_request(self):
+ """Get the request and client address from the socket.
+
+ May be overridden.
+
+ """
+ return self.socket.accept()
+
+ def shutdown_request(self, request):
+ """Called to shutdown and close an individual request."""
+ try:
+            # Explicitly shutdown. socket.close() merely releases
+            # the socket and waits for GC to perform the actual close.
+ request.shutdown(socket.SHUT_WR)
+ except OSError:
+            pass  # some platforms may raise ENOTCONN here
+ self.close_request(request)
+
+ def close_request(self, request):
+ """Called to clean up an individual request."""
+ request.close()
+
+
+class UDPServer(TCPServer):
+
+ """UDP server class."""
+
+ allow_reuse_address = False
+
+ socket_type = socket.SOCK_DGRAM
+
+ max_packet_size = 8192
+
+ def get_request(self):
+ data, client_addr = self.socket.recvfrom(self.max_packet_size)
+ return (data, self.socket), client_addr
+
+ def server_activate(self):
+ # No need to call listen() for UDP.
+ pass
+
+ def shutdown_request(self, request):
+ # No need to shutdown anything.
+ self.close_request(request)
+
+ def close_request(self, request):
+ # No need to close anything.
+ pass
+
+if hasattr(os, "fork"):
+ class ForkingMixIn:
+ """Mix-in class to handle each request in a new process."""
+
+ timeout = 300
+ active_children = None
+ max_children = 40
+ # If true, server_close() waits until all child processes complete.
+ block_on_close = True
+
+ def collect_children(self, *, blocking=False):
+ """Internal routine to wait for children that have exited."""
+ if self.active_children is None:
+ return
+
+            # If we're above the max number of children, wait and reap them until
+            # we go back below threshold. Note that we use waitpid(-1) below to be
+            # able to collect children in size(<defunct children>) syscalls instead
+            # of size(<children>): the downside is that this might reap children
+            # which we didn't spawn, which is why we only resort to this when we're
+            # above max_children.
+ while len(self.active_children) >= self.max_children:
+ try:
+ pid, _ = os.waitpid(-1, 0)
+ self.active_children.discard(pid)
+ except ChildProcessError:
+ # we don't have any children, we're done
+ self.active_children.clear()
+ except OSError:
+ break
+
+ # Now reap all defunct children.
+ for pid in self.active_children.copy():
+ try:
+ flags = 0 if blocking else os.WNOHANG
+ pid, _ = os.waitpid(pid, flags)
+ # if the child hasn't exited yet, pid will be 0 and ignored by
+ # discard() below
+ self.active_children.discard(pid)
+ except ChildProcessError:
+ # someone else reaped it
+ self.active_children.discard(pid)
+ except OSError:
+ pass
+
+ def handle_timeout(self):
+ """Wait for zombies after self.timeout seconds of inactivity.
+
+ May be extended, do not override.
+ """
+ self.collect_children()
+
+ def service_actions(self):
+ """Collect the zombie child processes regularly in the ForkingMixIn.
+
+ service_actions is called in the BaseServer's serve_forever loop.
+ """
+ self.collect_children()
+
+ def process_request(self, request, client_address):
+ """Fork a new subprocess to process the request."""
+ pid = os.fork()
+ if pid:
+ # Parent process
+ if self.active_children is None:
+ self.active_children = set()
+ self.active_children.add(pid)
+ self.close_request(request)
+ return
+ else:
+ # Child process.
+ # This must never return, hence os._exit()!
+ status = 1
+ try:
+ self.finish_request(request, client_address)
+ status = 0
+ except Exception:
+ self.handle_error(request, client_address)
+ finally:
+ try:
+ self.shutdown_request(request)
+ finally:
+ os._exit(status)
+
+ def server_close(self):
+ super().server_close()
+ self.collect_children(blocking=self.block_on_close)
+
+
+class _Threads(list):
+ """
+ Joinable list of all non-daemon threads.
+ """
+ def append(self, thread):
+ self.reap()
+ if thread.daemon:
+ return
+ super().append(thread)
+
+ def pop_all(self):
+ self[:], result = [], self[:]
+ return result
+
+ def join(self):
+ for thread in self.pop_all():
+ thread.join()
+
+ def reap(self):
+ self[:] = (thread for thread in self if thread.is_alive())
+
+
+class _NoThreads:
+ """
+ Degenerate version of _Threads.
+ """
+ def append(self, thread):
+ pass
+
+ def join(self):
+ pass
+
+
+class ThreadingMixIn:
+ """Mix-in class to handle each request in a new thread."""
+
+ # Decides how threads will act upon termination of the
+ # main process
+ daemon_threads = False
+ # If true, server_close() waits until all non-daemonic threads terminate.
+ block_on_close = True
+ # Threads object
+    # used by server_close() to wait for completion of all threads.
+ _threads = _NoThreads()
+
+ def process_request_thread(self, request, client_address):
+ """Same as in BaseServer but as a thread.
+
+ In addition, exception handling is done here.
+
+ """
+ try:
+ self.finish_request(request, client_address)
+ except Exception:
+ self.handle_error(request, client_address)
+ finally:
+ self.shutdown_request(request)
+
+ def process_request(self, request, client_address):
+ """Start a new thread to process the request."""
+ if self.block_on_close:
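+            # Lazily give this instance its own joinable _Threads list, so
+            # the shared class-level _NoThreads placeholder is never mutated.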
+ vars(self).setdefault('_threads', _Threads())
+        t = threading.Thread(target=self.process_request_thread,
+                             args=(request, client_address))
+ t.daemon = self.daemon_threads
+ self._threads.append(t)
+ t.start()
+
+ def server_close(self):
+ super().server_close()
+ self._threads.join()
+
+
+if hasattr(os, "fork"):
+ class ForkingUDPServer(ForkingMixIn, UDPServer): pass
+ class ForkingTCPServer(ForkingMixIn, TCPServer): pass
+
+class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
+
+if hasattr(socket, 'AF_UNIX'):
+
+ class UnixStreamServer(TCPServer):
+ address_family = socket.AF_UNIX
+
+ class UnixDatagramServer(UDPServer):
+ address_family = socket.AF_UNIX
+
+ class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
+
+ class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
+
+class BaseRequestHandler:
+
+ """Base class for request handler classes.
+
+ This class is instantiated for each request to be handled. The
+ constructor sets the instance variables request, client_address
+ and server, and then calls the handle() method. To implement a
+ specific service, all you need to do is to derive a class which
+ defines a handle() method.
+
+ The handle() method can find the request as self.request, the
+ client address as self.client_address, and the server (in case it
+ needs access to per-server information) as self.server. Since a
+ separate instance is created for each request, the handle() method
+ can define other arbitrary instance variables.
+
+ """
+
+ def __init__(self, request, client_address, server):
+ self.request = request
+ self.client_address = client_address
+ self.server = server
+ self.setup()
+ try:
+ self.handle()
+ finally:
+ self.finish()
+
+ def setup(self):
+ pass
+
+ def handle(self):
+ pass
+
+ def finish(self):
+ pass
+
+
+# The following two classes make it possible to use the same service
+# class for stream or datagram servers.
+# Each class sets up these instance variables:
+# - rfile: a file object from which the request is read
+# - wfile: a file object to which the reply is written
+# When the handle() method returns, wfile is flushed properly
+
+
+class StreamRequestHandler(BaseRequestHandler):
+
+ """Define self.rfile and self.wfile for stream sockets."""
+
+ # Default buffer sizes for rfile, wfile.
+ # We default rfile to buffered because otherwise it could be
+ # really slow for large data (a getc() call per byte); we make
+ # wfile unbuffered because (a) often after a write() we want to
+ # read and we need to flush the line; (b) big writes to unbuffered
+ # files are typically optimized by stdio even when big reads
+ # aren't.
+ rbufsize = -1
+ wbufsize = 0
+
+ # A timeout to apply to the request socket, if not None.
+ timeout = None
+
+    # Disable the Nagle algorithm for this socket, if True.
+ # Use only when wbufsize != 0, to avoid small packets.
+ disable_nagle_algorithm = False
+
+ def setup(self):
+ self.connection = self.request
+ if self.timeout is not None:
+ self.connection.settimeout(self.timeout)
+ if self.disable_nagle_algorithm:
+ self.connection.setsockopt(socket.IPPROTO_TCP,
+ socket.TCP_NODELAY, True)
+ self.rfile = self.connection.makefile('rb', self.rbufsize)
+ if self.wbufsize == 0:
+ self.wfile = _SocketWriter(self.connection)
+ else:
+ self.wfile = self.connection.makefile('wb', self.wbufsize)
+
+ def finish(self):
+ if not self.wfile.closed:
+ try:
+ self.wfile.flush()
+ except socket.error:
+ # A final socket error may have occurred here, such as
+ # the local error ECONNABORTED.
+ pass
+ self.wfile.close()
+ self.rfile.close()
+
+class _SocketWriter(BufferedIOBase):
+ """Simple writable BufferedIOBase implementation for a socket
+
+ Does not hold data in a buffer, avoiding any need to call flush()."""
+
+ def __init__(self, sock):
+ self._sock = sock
+
+ def writable(self):
+ return True
+
+ def write(self, b):
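+        # sendall() transmits all of b, so the full size of b is what gets
+        # reported as the number of bytes written.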
+ self._sock.sendall(b)
+ with memoryview(b) as view:
+ return view.nbytes
+
+ def fileno(self):
+ return self._sock.fileno()
+
+class DatagramRequestHandler(BaseRequestHandler):
+
+ """Define self.rfile and self.wfile for datagram sockets."""
+
+ def setup(self):
+ from io import BytesIO
+ self.packet, self.socket = self.request
+ self.rfile = BytesIO(self.packet)
+ self.wfile = BytesIO()
+
+ def finish(self):
+ self.socket.sendto(self.wfile.getvalue(), self.client_address)
diff --git a/evalkit_cambrian/lib/python3.10/string.py b/evalkit_cambrian/lib/python3.10/string.py
new file mode 100644
index 0000000000000000000000000000000000000000..489777b10c25df7ea1c53444f0df800022af56b4
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/string.py
@@ -0,0 +1,280 @@
+"""A collection of string constants.
+
+Public module variables:
+
+whitespace -- a string containing all ASCII whitespace
+ascii_lowercase -- a string containing all ASCII lowercase letters
+ascii_uppercase -- a string containing all ASCII uppercase letters
+ascii_letters -- a string containing all ASCII letters
+digits -- a string containing all ASCII decimal digits
+hexdigits -- a string containing all ASCII hexadecimal digits
+octdigits -- a string containing all ASCII octal digits
+punctuation -- a string containing all ASCII punctuation characters
+printable -- a string containing all ASCII characters considered printable
+
+"""
+
+__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords",
+ "digits", "hexdigits", "octdigits", "printable", "punctuation",
+ "whitespace", "Formatter", "Template"]
+
+import _string
+
+# Some strings for ctype-style character classification
+whitespace = ' \t\n\r\v\f'
+ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
+ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ascii_letters = ascii_lowercase + ascii_uppercase
+digits = '0123456789'
+hexdigits = digits + 'abcdef' + 'ABCDEF'
+octdigits = '01234567'
+punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
+printable = digits + ascii_letters + punctuation + whitespace
+
+# Functions which aren't available as string methods.
+
+# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
+def capwords(s, sep=None):
+ """capwords(s [,sep]) -> string
+
+ Split the argument into words using split, capitalize each
+ word using capitalize, and join the capitalized words using
+ join. If the optional second argument sep is absent or None,
+ runs of whitespace characters are replaced by a single space
+ and leading and trailing whitespace are removed, otherwise
+ sep is used to split and join the words.
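+
+    For example, capwords(" aBc  dEf ") returns 'Abc Def', and
+    capwords("a-b-c", "-") returns 'A-B-C'.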
+
+ """
+ return (sep or ' ').join(x.capitalize() for x in s.split(sep))
+
+
+####################################################################
+import re as _re
+from collections import ChainMap as _ChainMap
+
+_sentinel_dict = {}
+
+class Template:
+ """A string class for supporting $-substitutions."""
+
+ delimiter = '$'
+    # r'[a-z]' matches non-ASCII letters when used with IGNORECASE but
+ # without the ASCII flag. We can't add re.ASCII to flags because of
+ # backward compatibility. So we use the ?a local flag and [a-z] pattern.
+ # See https://bugs.python.org/issue31672
+ idpattern = r'(?a:[_a-z][_a-z0-9]*)'
+ braceidpattern = None
+ flags = _re.IGNORECASE
+
+ def __init_subclass__(cls):
+ super().__init_subclass__()
+ if 'pattern' in cls.__dict__:
+ pattern = cls.pattern
+ else:
+ delim = _re.escape(cls.delimiter)
+ id = cls.idpattern
+ bid = cls.braceidpattern or cls.idpattern
+            pattern = fr"""
+            {delim}(?:
+              (?P<escaped>{delim})  |   # Escape sequence of two delimiters
+              (?P<named>{id})       |   # delimiter and a Python identifier
+              {{(?P<braced>{bid})}} |   # delimiter and a braced identifier
+              (?P<invalid>)             # Other ill-formed delimiter exprs
+            )
+            """
+ cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
+
+ def __init__(self, template):
+ self.template = template
+
+ # Search for $$, $identifier, ${identifier}, and any bare $'s
+
+ def _invalid(self, mo):
+ i = mo.start('invalid')
+ lines = self.template[:i].splitlines(keepends=True)
+ if not lines:
+ colno = 1
+ lineno = 1
+ else:
+ colno = i - len(''.join(lines[:-1]))
+ lineno = len(lines)
+ raise ValueError('Invalid placeholder in string: line %d, col %d' %
+ (lineno, colno))
+
+ def substitute(self, mapping=_sentinel_dict, /, **kws):
+ if mapping is _sentinel_dict:
+ mapping = kws
+ elif kws:
+ mapping = _ChainMap(kws, mapping)
+ # Helper function for .sub()
+ def convert(mo):
+ # Check the most common path first.
+ named = mo.group('named') or mo.group('braced')
+ if named is not None:
+ return str(mapping[named])
+ if mo.group('escaped') is not None:
+ return self.delimiter
+ if mo.group('invalid') is not None:
+ self._invalid(mo)
+ raise ValueError('Unrecognized named group in pattern',
+ self.pattern)
+ return self.pattern.sub(convert, self.template)
+
+ def safe_substitute(self, mapping=_sentinel_dict, /, **kws):
+ if mapping is _sentinel_dict:
+ mapping = kws
+ elif kws:
+ mapping = _ChainMap(kws, mapping)
+ # Helper function for .sub()
+ def convert(mo):
+ named = mo.group('named') or mo.group('braced')
+ if named is not None:
+ try:
+ return str(mapping[named])
+ except KeyError:
+ return mo.group()
+ if mo.group('escaped') is not None:
+ return self.delimiter
+ if mo.group('invalid') is not None:
+ return mo.group()
+ raise ValueError('Unrecognized named group in pattern',
+ self.pattern)
+ return self.pattern.sub(convert, self.template)
+
+# Initialize Template.pattern. __init_subclass__() is automatically called
+# only for subclasses, not for the Template class itself.
+Template.__init_subclass__()
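+
+# A minimal usage sketch (values are illustrative):
+#
+#     Template('Hello, $name!').substitute(name='World')  # -> 'Hello, World!'
+#     Template('$$ and $who').substitute(who='me')        # -> '$ and me'
+#     Template('$x and $y').safe_substitute(x=1)          # -> '1 and $y'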
+
+
+########################################################################
+# the Formatter class
+# see PEP 3101 for details and purpose of this class
+
+# The hard parts are reused from the C implementation. They're exposed as "_"
+# prefixed methods of str.
+
+# The overall parser is implemented in _string.formatter_parser.
+# The field name parser is implemented in _string.formatter_field_name_split.
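+#
+# A usage sketch (mirrors str.format semantics; for illustration only):
+#
+#     Formatter().format("{0} + {1} = {sum}", 2, 3, sum=5)  # -> '2 + 3 = 5'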
+
+class Formatter:
+ def format(self, format_string, /, *args, **kwargs):
+ return self.vformat(format_string, args, kwargs)
+
+ def vformat(self, format_string, args, kwargs):
+ used_args = set()
+ result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
+ self.check_unused_args(used_args, args, kwargs)
+ return result
+
+ def _vformat(self, format_string, args, kwargs, used_args, recursion_depth,
+ auto_arg_index=0):
+ if recursion_depth < 0:
+ raise ValueError('Max string recursion exceeded')
+ result = []
+ for literal_text, field_name, format_spec, conversion in \
+ self.parse(format_string):
+
+ # output the literal text
+ if literal_text:
+ result.append(literal_text)
+
+ # if there's a field, output it
+ if field_name is not None:
+ # this is some markup, find the object and do
+ # the formatting
+
+ # handle arg indexing when empty field_names are given.
+ if field_name == '':
+ if auto_arg_index is False:
+ raise ValueError('cannot switch from manual field '
+ 'specification to automatic field '
+ 'numbering')
+ field_name = str(auto_arg_index)
+ auto_arg_index += 1
+ elif field_name.isdigit():
+ if auto_arg_index:
+ raise ValueError('cannot switch from manual field '
+ 'specification to automatic field '
+ 'numbering')
+ # disable auto arg incrementing, if it gets
+ # used later on, then an exception will be raised
+ auto_arg_index = False
+
+ # given the field_name, find the object it references
+ # and the argument it came from
+ obj, arg_used = self.get_field(field_name, args, kwargs)
+ used_args.add(arg_used)
+
+ # do any conversion on the resulting object
+ obj = self.convert_field(obj, conversion)
+
+ # expand the format spec, if needed
+ format_spec, auto_arg_index = self._vformat(
+ format_spec, args, kwargs,
+ used_args, recursion_depth-1,
+ auto_arg_index=auto_arg_index)
+
+ # format the object and append to the result
+ result.append(self.format_field(obj, format_spec))
+
+ return ''.join(result), auto_arg_index
+
+
+ def get_value(self, key, args, kwargs):
+ if isinstance(key, int):
+ return args[key]
+ else:
+ return kwargs[key]
+
+
+ def check_unused_args(self, used_args, args, kwargs):
+ pass
+
+
+ def format_field(self, value, format_spec):
+ return format(value, format_spec)
+
+
+ def convert_field(self, value, conversion):
+ # do any conversion on the resulting object
+ if conversion is None:
+ return value
+ elif conversion == 's':
+ return str(value)
+ elif conversion == 'r':
+ return repr(value)
+ elif conversion == 'a':
+ return ascii(value)
+ raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
+
+
+ # returns an iterable that contains tuples of the form:
+ # (literal_text, field_name, format_spec, conversion)
+ # literal_text can be zero length
+ # field_name can be None, in which case there's no
+ # object to format and output
+ # if field_name is not None, it is looked up, formatted
+ # with format_spec and conversion and then used
+ def parse(self, format_string):
+ return _string.formatter_parser(format_string)
+
+
+ # given a field_name, find the object it references.
+ # field_name: the field being looked up, e.g. "0.name"
+ # or "lookup[3]"
+ # used_args: a set of which args have been used
+ # args, kwargs: as passed in to vformat
+ def get_field(self, field_name, args, kwargs):
+ first, rest = _string.formatter_field_name_split(field_name)
+
+ obj = self.get_value(first, args, kwargs)
+
+ # loop through the rest of the field_name, doing
+ # getattr or getitem as needed
+ for is_attr, i in rest:
+ if is_attr:
+ obj = getattr(obj, i)
+ else:
+ obj = obj[i]
+
+ return obj, first
diff --git a/evalkit_cambrian/lib/python3.10/textwrap.py b/evalkit_cambrian/lib/python3.10/textwrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..841de9baecf5d8a497b26d1648081d69a4612e5e
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/textwrap.py
@@ -0,0 +1,494 @@
+"""Text wrapping and filling.
+"""
+
+# Copyright (C) 1999-2001 Gregory P. Ward.
+# Copyright (C) 2002, 2003 Python Software Foundation.
+# Written by Greg Ward
+
+import re
+
+__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten']
+
+# Hardcode the recognized whitespace characters to the US-ASCII
+# whitespace characters. The main reason for doing this is that
+# some Unicode spaces (like \u00a0) are non-breaking whitespaces.
+_whitespace = '\t\n\x0b\x0c\r '
+
+class TextWrapper:
+ """
+ Object for wrapping/filling text. The public interface consists of
+ the wrap() and fill() methods; the other methods are just there for
+ subclasses to override in order to tweak the default behaviour.
+ If you want to completely replace the main wrapping algorithm,
+ you'll probably have to override _wrap_chunks().
+
+ Several instance attributes control various aspects of wrapping:
+ width (default: 70)
+ the maximum width of wrapped lines (unless break_long_words
+ is false)
+ initial_indent (default: "")
+ string that will be prepended to the first line of wrapped
+ output. Counts towards the line's width.
+ subsequent_indent (default: "")
+ string that will be prepended to all lines save the first
+ of wrapped output; also counts towards each line's width.
+ expand_tabs (default: true)
+ Expand tabs in input text to spaces before further processing.
+ Each tab will become 0 .. 'tabsize' spaces, depending on its position
+ in its line. If false, each tab is treated as a single character.
+ tabsize (default: 8)
+ Expand tabs in input text to 0 .. 'tabsize' spaces, unless
+ 'expand_tabs' is false.
+ replace_whitespace (default: true)
+ Replace all whitespace characters in the input text by spaces
+ after tab expansion. Note that if expand_tabs is false and
+ replace_whitespace is true, every tab will be converted to a
+ single space!
+ fix_sentence_endings (default: false)
+ Ensure that sentence-ending punctuation is always followed
+ by two spaces. Off by default because the algorithm is
+ (unavoidably) imperfect.
+ break_long_words (default: true)
+ Break words longer than 'width'. If false, those words will not
+ be broken, and some lines might be longer than 'width'.
+ break_on_hyphens (default: true)
+        Allow breaking hyphenated words. If true, wrapping will occur
+        preferably on whitespace and right after hyphens that are part
+        of compound words.
+ drop_whitespace (default: true)
+ Drop leading and trailing whitespace from lines.
+ max_lines (default: None)
+ Truncate wrapped lines.
+ placeholder (default: ' [...]')
+ Append to the last line of truncated text.
+ """
+
+ unicode_whitespace_trans = {}
+ uspace = ord(' ')
+ for x in _whitespace:
+ unicode_whitespace_trans[ord(x)] = uspace
+
+ # This funky little regex is just the trick for splitting
+ # text up into word-wrappable chunks. E.g.
+ # "Hello there -- you goof-ball, use the -b option!"
+ # splits into
+ # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
+ # (after stripping out empty strings).
+ word_punct = r'[\w!"\'&.,?]'
+ letter = r'[^\d\W]'
+ whitespace = r'[%s]' % re.escape(_whitespace)
+ nowhitespace = '[^' + whitespace[1:]
+ wordsep_re = re.compile(r'''
+ ( # any whitespace
+ %(ws)s+
+ | # em-dash between words
+ (?<=%(wp)s) -{2,} (?=\w)
+ | # word, possibly hyphenated
+ %(nws)s+? (?:
+ # hyphenated word
+ -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-))
+ (?= %(lt)s -? %(lt)s)
+ | # end of word
+ (?=%(ws)s|\Z)
+ | # em-dash
+ (?<=%(wp)s) (?=-{2,}\w)
+ )
+ )''' % {'wp': word_punct, 'lt': letter,
+ 'ws': whitespace, 'nws': nowhitespace},
+ re.VERBOSE)
+ del word_punct, letter, nowhitespace
+
+    # This less funky little regex just splits on recognized spaces. E.g.
+ # "Hello there -- you goof-ball, use the -b option!"
+ # splits into
+ # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
+ wordsep_simple_re = re.compile(r'(%s+)' % whitespace)
+ del whitespace
+
+ # XXX this is not locale- or charset-aware -- string.lowercase
+ # is US-ASCII only (and therefore English-only)
+ sentence_end_re = re.compile(r'[a-z]' # lowercase letter
+ r'[\.\!\?]' # sentence-ending punct.
+ r'[\"\']?' # optional end-of-quote
+ r'\Z') # end of chunk
+
+ def __init__(self,
+ width=70,
+ initial_indent="",
+ subsequent_indent="",
+ expand_tabs=True,
+ replace_whitespace=True,
+ fix_sentence_endings=False,
+ break_long_words=True,
+ drop_whitespace=True,
+ break_on_hyphens=True,
+ tabsize=8,
+ *,
+ max_lines=None,
+ placeholder=' [...]'):
+ self.width = width
+ self.initial_indent = initial_indent
+ self.subsequent_indent = subsequent_indent
+ self.expand_tabs = expand_tabs
+ self.replace_whitespace = replace_whitespace
+ self.fix_sentence_endings = fix_sentence_endings
+ self.break_long_words = break_long_words
+ self.drop_whitespace = drop_whitespace
+ self.break_on_hyphens = break_on_hyphens
+ self.tabsize = tabsize
+ self.max_lines = max_lines
+ self.placeholder = placeholder
+
+
+ # -- Private methods -----------------------------------------------
+ # (possibly useful for subclasses to override)
+
+ def _munge_whitespace(self, text):
+ """_munge_whitespace(text : string) -> string
+
+ Munge whitespace in text: expand tabs and convert all other
+ whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz"
+ becomes " foo bar baz".
+ """
+ if self.expand_tabs:
+ text = text.expandtabs(self.tabsize)
+ if self.replace_whitespace:
+ text = text.translate(self.unicode_whitespace_trans)
+ return text
+
+
+ def _split(self, text):
+ """_split(text : string) -> [string]
+
+ Split the text to wrap into indivisible chunks. Chunks are
+ not quite the same as words; see _wrap_chunks() for full
+ details. As an example, the text
+ Look, goof-ball -- use the -b option!
+ breaks into the following chunks:
+ 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
+ 'use', ' ', 'the', ' ', '-b', ' ', 'option!'
+ if break_on_hyphens is True, or in:
+ 'Look,', ' ', 'goof-ball', ' ', '--', ' ',
+              'use', ' ', 'the', ' ', '-b', ' ', 'option!'
+ otherwise.
+ """
+ if self.break_on_hyphens is True:
+ chunks = self.wordsep_re.split(text)
+ else:
+ chunks = self.wordsep_simple_re.split(text)
+ chunks = [c for c in chunks if c]
+ return chunks
+
+ def _fix_sentence_endings(self, chunks):
+ """_fix_sentence_endings(chunks : [string])
+
+ Correct for sentence endings buried in 'chunks'. Eg. when the
+ original text contains "... foo.\\nBar ...", munge_whitespace()
+ and split() will convert that to [..., "foo.", " ", "Bar", ...]
+ which has one too few spaces; this method simply changes the one
+ space to two.
+ """
+ i = 0
+ patsearch = self.sentence_end_re.search
+ while i < len(chunks)-1:
+ if chunks[i+1] == " " and patsearch(chunks[i]):
+ chunks[i+1] = " "
+ i += 2
+ else:
+ i += 1
+
+ def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+ """_handle_long_word(chunks : [string],
+ cur_line : [string],
+ cur_len : int, width : int)
+
+ Handle a chunk of text (most likely a word, not whitespace) that
+ is too long to fit in any line.
+ """
+ # Figure out when indent is larger than the specified width, and make
+ # sure at least one character is stripped off on every pass
+ if width < 1:
+ space_left = 1
+ else:
+ space_left = width - cur_len
+
+ # If we're allowed to break long words, then do so: put as much
+ # of the next chunk onto the current line as will fit.
+ if self.break_long_words:
+ end = space_left
+ chunk = reversed_chunks[-1]
+ if self.break_on_hyphens and len(chunk) > space_left:
+ # break after last hyphen, but only if there are
+ # non-hyphens before it
+ hyphen = chunk.rfind('-', 0, space_left)
+ if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]):
+ end = hyphen + 1
+ cur_line.append(chunk[:end])
+ reversed_chunks[-1] = chunk[end:]
+
+ # Otherwise, we have to preserve the long word intact. Only add
+ # it to the current line if there's nothing already there --
+ # that minimizes how much we violate the width constraint.
+ elif not cur_line:
+ cur_line.append(reversed_chunks.pop())
+
+ # If we're not allowed to break long words, and there's already
+ # text on the current line, do nothing. Next time through the
+ # main loop of _wrap_chunks(), we'll wind up here again, but
+ # cur_len will be zero, so the next line will be entirely
+ # devoted to the long word that we can't handle right now.
+
+ def _wrap_chunks(self, chunks):
+ """_wrap_chunks(chunks : [string]) -> [string]
+
+ Wrap a sequence of text chunks and return a list of lines of
+ length 'self.width' or less. (If 'break_long_words' is false,
+ some lines may be longer than this.) Chunks correspond roughly
+ to words and the whitespace between them: each chunk is
+ indivisible (modulo 'break_long_words'), but a line break can
+ come between any two chunks. Chunks should not have internal
+ whitespace; ie. a chunk is either all whitespace or a "word".
+ Whitespace chunks will be removed from the beginning and end of
+ lines, but apart from that whitespace is preserved.
+ """
+ lines = []
+ if self.width <= 0:
+ raise ValueError("invalid width %r (must be > 0)" % self.width)
+ if self.max_lines is not None:
+ if self.max_lines > 1:
+ indent = self.subsequent_indent
+ else:
+ indent = self.initial_indent
+ if len(indent) + len(self.placeholder.lstrip()) > self.width:
+ raise ValueError("placeholder too large for max width")
+
+ # Arrange in reverse order so items can be efficiently popped
+        # from a stack of chunks.
+ chunks.reverse()
+
+ while chunks:
+
+ # Start the list of chunks that will make up the current line.
+ # cur_len is just the length of all the chunks in cur_line.
+ cur_line = []
+ cur_len = 0
+
+ # Figure out which static string will prefix this line.
+ if lines:
+ indent = self.subsequent_indent
+ else:
+ indent = self.initial_indent
+
+ # Maximum width for this line.
+ width = self.width - len(indent)
+
+ # First chunk on line is whitespace -- drop it, unless this
+ # is the very beginning of the text (ie. no lines started yet).
+ if self.drop_whitespace and chunks[-1].strip() == '' and lines:
+ del chunks[-1]
+
+ while chunks:
+ l = len(chunks[-1])
+
+ # Can at least squeeze this chunk onto the current line.
+ if cur_len + l <= width:
+ cur_line.append(chunks.pop())
+ cur_len += l
+
+ # Nope, this line is full.
+ else:
+ break
+
+ # The current line is full, and the next chunk is too big to
+ # fit on *any* line (not just this one).
+ if chunks and len(chunks[-1]) > width:
+ self._handle_long_word(chunks, cur_line, cur_len, width)
+ cur_len = sum(map(len, cur_line))
+
+ # If the last chunk on this line is all whitespace, drop it.
+ if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
+ cur_len -= len(cur_line[-1])
+ del cur_line[-1]
+
+ if cur_line:
+ if (self.max_lines is None or
+ len(lines) + 1 < self.max_lines or
+ (not chunks or
+ self.drop_whitespace and
+ len(chunks) == 1 and
+ not chunks[0].strip()) and cur_len <= width):
+ # Convert current line back to a string and store it in
+ # list of all lines (return value).
+ lines.append(indent + ''.join(cur_line))
+ else:
+ while cur_line:
+ if (cur_line[-1].strip() and
+ cur_len + len(self.placeholder) <= width):
+ cur_line.append(self.placeholder)
+ lines.append(indent + ''.join(cur_line))
+ break
+ cur_len -= len(cur_line[-1])
+ del cur_line[-1]
+ else:
+ if lines:
+ prev_line = lines[-1].rstrip()
+ if (len(prev_line) + len(self.placeholder) <=
+ self.width):
+ lines[-1] = prev_line + self.placeholder
+ break
+ lines.append(indent + self.placeholder.lstrip())
+ break
+
+ return lines
+
+ def _split_chunks(self, text):
+ text = self._munge_whitespace(text)
+ return self._split(text)
+
+ # -- Public interface ----------------------------------------------
+
+ def wrap(self, text):
+ """wrap(text : string) -> [string]
+
+ Reformat the single paragraph in 'text' so it fits in lines of
+ no more than 'self.width' columns, and return a list of wrapped
+ lines. Tabs in 'text' are expanded with string.expandtabs(),
+ and all other whitespace characters (including newline) are
+ converted to space.
+ """
+ chunks = self._split_chunks(text)
+ if self.fix_sentence_endings:
+ self._fix_sentence_endings(chunks)
+ return self._wrap_chunks(chunks)
+
+ def fill(self, text):
+ """fill(text : string) -> string
+
+ Reformat the single paragraph in 'text' to fit in lines of no
+ more than 'self.width' columns, and return a new string
+ containing the entire wrapped paragraph.
+ """
+ return "\n".join(self.wrap(text))
+
+
+# -- Convenience interface ---------------------------------------------
+
+def wrap(text, width=70, **kwargs):
+ """Wrap a single paragraph of text, returning a list of wrapped lines.
+
+ Reformat the single paragraph in 'text' so it fits in lines of no
+ more than 'width' columns, and return a list of wrapped lines. By
+ default, tabs in 'text' are expanded with string.expandtabs(), and
+ all other whitespace characters (including newline) are converted to
+ space. See TextWrapper class for available keyword args to customize
+ wrapping behaviour.
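+
+    For example, wrap("The quick brown fox", width=10) returns
+    ['The quick', 'brown fox'].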
+ """
+ w = TextWrapper(width=width, **kwargs)
+ return w.wrap(text)
+
+def fill(text, width=70, **kwargs):
+ """Fill a single paragraph of text, returning a new string.
+
+ Reformat the single paragraph in 'text' to fit in lines of no more
+ than 'width' columns, and return a new string containing the entire
+ wrapped paragraph. As with wrap(), tabs are expanded and other
+ whitespace characters converted to space. See TextWrapper class for
+ available keyword args to customize wrapping behaviour.
+ """
+ w = TextWrapper(width=width, **kwargs)
+ return w.fill(text)
+
+def shorten(text, width, **kwargs):
+ """Collapse and truncate the given text to fit in the given width.
+
+ The text first has its whitespace collapsed. If it then fits in
+ the *width*, it is returned as is. Otherwise, as many words
+ as possible are joined and then the placeholder is appended::
+
+ >>> textwrap.shorten("Hello world!", width=12)
+ 'Hello world!'
+ >>> textwrap.shorten("Hello world!", width=11)
+ 'Hello [...]'
+ """
+ w = TextWrapper(width=width, max_lines=1, **kwargs)
+ return w.fill(' '.join(text.strip().split()))
+
+
+# -- Loosely related functionality -------------------------------------
+
+_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
+_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
+
+def dedent(text):
+ """Remove any common leading whitespace from every line in `text`.
+
+ This can be used to make triple-quoted strings line up with the left
+ edge of the display, while still presenting them in the source code
+ in indented form.
+
+ Note that tabs and spaces are both treated as whitespace, but they
+ are not equal: the lines " hello" and "\\thello" are
+ considered to have no common leading whitespace.
+
+ Entirely blank lines are normalized to a newline character.
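+
+    For example, dedent("    hello\\n      world\\n") returns
+    "hello\\n  world\\n".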
+ """
+ # Look for the longest leading string of spaces and tabs common to
+ # all lines.
+ margin = None
+ text = _whitespace_only_re.sub('', text)
+ indents = _leading_whitespace_re.findall(text)
+ for indent in indents:
+ if margin is None:
+ margin = indent
+
+ # Current line more deeply indented than previous winner:
+ # no change (previous winner is still on top).
+ elif indent.startswith(margin):
+ pass
+
+ # Current line consistent with and no deeper than previous winner:
+ # it's the new winner.
+ elif margin.startswith(indent):
+ margin = indent
+
+ # Find the largest common whitespace between current line and previous
+ # winner.
+ else:
+ for i, (x, y) in enumerate(zip(margin, indent)):
+ if x != y:
+ margin = margin[:i]
+ break
+
+ # sanity check (testing/debugging only)
+ if 0 and margin:
+ for line in text.split("\n"):
+ assert not line or line.startswith(margin), \
+ "line = %r, margin = %r" % (line, margin)
+
+ if margin:
+ text = re.sub(r'(?m)^' + margin, '', text)
+ return text
+
+
+def indent(text, prefix, predicate=None):
+ """Adds 'prefix' to the beginning of selected lines in 'text'.
+
+ If 'predicate' is provided, 'prefix' will only be added to the lines
+ where 'predicate(line)' is True. If 'predicate' is not provided,
+ it will default to adding 'prefix' to all non-empty lines that do not
+ consist solely of whitespace characters.
+ """
+ if predicate is None:
+ def predicate(line):
+ return line.strip()
+
+ def prefixed_lines():
+ for line in text.splitlines(True):
+ yield (prefix + line if predicate(line) else line)
+ return ''.join(prefixed_lines())
+
+
+if __name__ == "__main__":
+ #print dedent("\tfoo\n\tbar")
+ #print dedent(" \thello there\n \t how are you?")
+ print(dedent("Hello there.\n This is indented."))
diff --git a/evalkit_cambrian/lib/python3.10/this.py b/evalkit_cambrian/lib/python3.10/this.py
new file mode 100644
index 0000000000000000000000000000000000000000..e68dd3ff39b04ff857420b98889bee590b344024
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/this.py
@@ -0,0 +1,28 @@
+s = """Gur Mra bs Clguba, ol Gvz Crgref
+
+Ornhgvshy vf orggre guna htyl.
+Rkcyvpvg vf orggre guna vzcyvpvg.
+Fvzcyr vf orggre guna pbzcyrk.
+Pbzcyrk vf orggre guna pbzcyvpngrq.
+Syng vf orggre guna arfgrq.
+Fcnefr vf orggre guna qrafr.
+Ernqnovyvgl pbhagf.
+Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
+Nygubhtu cenpgvpnyvgl orngf chevgl.
+Reebef fubhyq arire cnff fvyragyl.
+Hayrff rkcyvpvgyl fvyraprq.
+Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
+Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
+Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
+Abj vf orggre guna arire.
+Nygubhtu arire vf bsgra orggre guna *evtug* abj.
+Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
+Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
+Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
+
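+# Build a ROT13 translation table: for each alphabet (uppercase starting at
+# 65, lowercase at 97), map every letter to the one 13 places further on,
+# wrapping around within the 26 letters.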
+d = {}
+for c in (65, 97):
+ for i in range(26):
+ d[chr(i+c)] = chr((i+13) % 26 + c)
+
+print("".join([d.get(c, c) for c in s]))
diff --git a/evalkit_cambrian/lib/python3.10/token.py b/evalkit_cambrian/lib/python3.10/token.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d0c0bf0fb0368e15170336a0c60b17f5fa4da40
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/token.py
@@ -0,0 +1,137 @@
+"""Token constants."""
+# Auto-generated by Tools/scripts/generate_token.py
+
+__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
+
+ENDMARKER = 0
+NAME = 1
+NUMBER = 2
+STRING = 3
+NEWLINE = 4
+INDENT = 5
+DEDENT = 6
+LPAR = 7
+RPAR = 8
+LSQB = 9
+RSQB = 10
+COLON = 11
+COMMA = 12
+SEMI = 13
+PLUS = 14
+MINUS = 15
+STAR = 16
+SLASH = 17
+VBAR = 18
+AMPER = 19
+LESS = 20
+GREATER = 21
+EQUAL = 22
+DOT = 23
+PERCENT = 24
+LBRACE = 25
+RBRACE = 26
+EQEQUAL = 27
+NOTEQUAL = 28
+LESSEQUAL = 29
+GREATEREQUAL = 30
+TILDE = 31
+CIRCUMFLEX = 32
+LEFTSHIFT = 33
+RIGHTSHIFT = 34
+DOUBLESTAR = 35
+PLUSEQUAL = 36
+MINEQUAL = 37
+STAREQUAL = 38
+SLASHEQUAL = 39
+PERCENTEQUAL = 40
+AMPEREQUAL = 41
+VBAREQUAL = 42
+CIRCUMFLEXEQUAL = 43
+LEFTSHIFTEQUAL = 44
+RIGHTSHIFTEQUAL = 45
+DOUBLESTAREQUAL = 46
+DOUBLESLASH = 47
+DOUBLESLASHEQUAL = 48
+AT = 49
+ATEQUAL = 50
+RARROW = 51
+ELLIPSIS = 52
+COLONEQUAL = 53
+OP = 54
+AWAIT = 55
+ASYNC = 56
+TYPE_IGNORE = 57
+TYPE_COMMENT = 58
+SOFT_KEYWORD = 59
+# These aren't used by the C tokenizer but are needed for tokenize.py
+ERRORTOKEN = 60
+COMMENT = 61
+NL = 62
+ENCODING = 63
+N_TOKENS = 64
+# Special definitions for cooperation with parser
+NT_OFFSET = 256
+
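+# Reverse mapping from token number to name, e.g. tok_name[NAME] == 'NAME'.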
+tok_name = {value: name
+ for name, value in globals().items()
+ if isinstance(value, int) and not name.startswith('_')}
+__all__.extend(tok_name.values())
+
+EXACT_TOKEN_TYPES = {
+ '!=': NOTEQUAL,
+ '%': PERCENT,
+ '%=': PERCENTEQUAL,
+ '&': AMPER,
+ '&=': AMPEREQUAL,
+ '(': LPAR,
+ ')': RPAR,
+ '*': STAR,
+ '**': DOUBLESTAR,
+ '**=': DOUBLESTAREQUAL,
+ '*=': STAREQUAL,
+ '+': PLUS,
+ '+=': PLUSEQUAL,
+ ',': COMMA,
+ '-': MINUS,
+ '-=': MINEQUAL,
+ '->': RARROW,
+ '.': DOT,
+ '...': ELLIPSIS,
+ '/': SLASH,
+ '//': DOUBLESLASH,
+ '//=': DOUBLESLASHEQUAL,
+ '/=': SLASHEQUAL,
+ ':': COLON,
+ ':=': COLONEQUAL,
+ ';': SEMI,
+ '<': LESS,
+ '<<': LEFTSHIFT,
+ '<<=': LEFTSHIFTEQUAL,
+ '<=': LESSEQUAL,
+ '=': EQUAL,
+ '==': EQEQUAL,
+ '>': GREATER,
+ '>=': GREATEREQUAL,
+ '>>': RIGHTSHIFT,
+ '>>=': RIGHTSHIFTEQUAL,
+ '@': AT,
+ '@=': ATEQUAL,
+ '[': LSQB,
+ ']': RSQB,
+ '^': CIRCUMFLEX,
+ '^=': CIRCUMFLEXEQUAL,
+ '{': LBRACE,
+ '|': VBAR,
+ '|=': VBAREQUAL,
+ '}': RBRACE,
+ '~': TILDE,
+}
+
+def ISTERMINAL(x):
+ return x < NT_OFFSET
+
+def ISNONTERMINAL(x):
+ return x >= NT_OFFSET
+
+def ISEOF(x):
+ return x == ENDMARKER
diff --git a/evalkit_cambrian/lib/python3.10/turtle.py b/evalkit_cambrian/lib/python3.10/turtle.py
new file mode 100644
index 0000000000000000000000000000000000000000..d287c15543528a8c24609ea1ef7b479ffc82d35f
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/turtle.py
@@ -0,0 +1,4141 @@
+#
+# turtle.py: a Tkinter based turtle graphics module for Python
+# Version 1.1b - 4. 5. 2009
+#
+# Copyright (C) 2006 - 2010 Gregor Lingl
+# email: glingl@aon.at
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+
+"""
+Turtle graphics is a popular way for introducing programming to
+kids. It was part of the original Logo programming language developed
+by Wally Feurzig and Seymour Papert in 1966.
+
+Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it
+the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
+the direction it is facing, drawing a line as it moves. Give it the
+command turtle.right(25), and it rotates in-place 25 degrees clockwise.
+
+By combining these and similar commands, intricate shapes and
+pictures can easily be drawn.
+
+----- turtle.py
+
+This module is an extended reimplementation of turtle.py from the
+Python standard distribution up to Python 2.5. (See: https://www.python.org)
+
+It tries to keep the merits of turtle.py and to be (nearly) 100%
+compatible with it. This means, first of all, enabling the
+learning programmer to use all the commands, classes and methods
+interactively when using the module from within IDLE run with
+the -n switch.
+
+Roughly it has the following features added:
+
+- Better animation of the turtle movements, especially of turning the
+ turtle. So the turtles can more easily be used as a visual feedback
+ instrument by the (beginning) programmer.
+
+- Different turtle shapes, gif-images as turtle shapes, user defined
+ and user controllable turtle shapes, among them compound
+ (multicolored) shapes. Turtle shapes can be stretched and tilted, which
+ makes turtles very versatile geometrical objects.
+
+- Fine control over turtle movement and screen updates via delay(),
+ and enhanced tracer() and speed() methods.
+
+- Aliases for the most commonly used commands, like fd for forward etc.,
+ following the early Logo traditions. This reduces the boring work of
+ typing long sequences of commands, which often occur in a natural way
+ when kids try to program fancy pictures on their first encounter with
+ turtle graphics.
+
+- Turtles now have an undo()-method with configurable undo-buffer.
+
+- Some simple commands/methods for creating event driven programs
+ (mouse-, key-, timer-events). Especially useful for programming games.
+
+- A scrollable Canvas class. The default scrollable Canvas can be
+ extended interactively as needed while playing around with the turtle(s).
+
+- A TurtleScreen class with methods controlling background color or
+ background image, window and canvas size and other properties of the
+ TurtleScreen.
+
+- There is a method, setworldcoordinates(), to install a user defined
+ coordinate-system for the TurtleScreen.
+
+- The implementation uses a 2-vector class named Vec2D, derived from tuple.
+ This class is public, so it can be imported by the application programmer,
+ which makes certain types of computations very natural and compact.
+
+- Appearance of the TurtleScreen and the Turtles at startup/import can be
+ configured by means of a turtle.cfg configuration file.
+ The default configuration mimics the appearance of the old turtle module.
+
+- If configured appropriately, the module reads in docstrings from a docstring
+  dictionary in a different language, supplied separately, and replaces
+  the English ones with those read in. There is a utility function
+ write_docstringdict() to write a dictionary with the original (English)
+ docstrings to disc, so it can serve as a template for translations.
+
+Behind the scenes there are some features included with possible
+extensions in mind. These will be commented and documented elsewhere.
+
+"""
+
+_ver = "turtle 1.1b- - for Python 3.1 - 4. 5. 2009"
+
+# print(_ver)
+
+import tkinter as TK
+import types
+import math
+import time
+import inspect
+import sys
+
+from os.path import isfile, split, join
+from copy import deepcopy
+from tkinter import simpledialog
+
+_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen',
+ 'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D']
+_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye',
+ 'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas',
+ 'getshapes', 'listen', 'mainloop', 'mode', 'numinput',
+ 'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer',
+ 'register_shape', 'resetscreen', 'screensize', 'setup',
+ 'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update',
+ 'window_height', 'window_width']
+_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk',
+ 'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color',
+ 'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd',
+ 'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly',
+ 'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown',
+ 'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd',
+ 'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position',
+ 'pu', 'radians', 'right', 'reset', 'resizemode', 'rt',
+ 'seth', 'setheading', 'setpos', 'setposition', 'settiltangle',
+ 'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle',
+ 'speed', 'st', 'stamp', 'tilt', 'tiltangle', 'towards',
+ 'turtlesize', 'undo', 'undobufferentries', 'up', 'width',
+ 'write', 'xcor', 'ycor']
+_tg_utilities = ['write_docstringdict', 'done']
+
+__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions +
+ _tg_utilities + ['Terminator']) # + _math_functions)
+
+_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos',
+ 'pu', 'rt', 'seth', 'setpos', 'setposition', 'st',
+ 'turtlesize', 'up', 'width']
+
+_CFG = {"width" : 0.5, # Screen
+ "height" : 0.75,
+ "canvwidth" : 400,
+ "canvheight": 300,
+ "leftright": None,
+ "topbottom": None,
+ "mode": "standard", # TurtleScreen
+ "colormode": 1.0,
+ "delay": 10,
+ "undobuffersize": 1000, # RawTurtle
+ "shape": "classic",
+ "pencolor" : "black",
+ "fillcolor" : "black",
+ "resizemode" : "noresize",
+ "visible" : True,
+ "language": "english", # docstrings
+ "exampleturtle": "turtle",
+ "examplescreen": "screen",
+ "title": "Python Turtle Graphics",
+ "using_IDLE": False
+ }
+
+def config_dict(filename):
+ """Convert content of config-file into dictionary."""
+ with open(filename, "r") as f:
+ cfglines = f.readlines()
+ cfgdict = {}
+ for line in cfglines:
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+ try:
+ key, value = line.split("=")
+ except ValueError:
+ print("Bad line in config-file %s:\n%s" % (filename,line))
+ continue
+ key = key.strip()
+ value = value.strip()
+ if value in ("True", "False", "None", "''", '""'):
+ # Map the known literals directly rather than calling eval();
+ # behavior is identical for exactly these five strings.
+ value = {"True": True, "False": False, "None": None,
+ "''": "", '""': ""}[value]
+ else:
+ try:
+ if "." in value:
+ value = float(value)
+ else:
+ value = int(value)
+ except ValueError:
+ pass # value need not be converted
+ cfgdict[key] = value
+ return cfgdict
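+
+# Illustration only (not part of the module): a hypothetical turtle.cfg
+# containing
+#
+# width = 0.9
+# delay = 5
+# shape = turtle
+# using_IDLE = False
+#
+# would be parsed by config_dict() into roughly
+# {"width": 0.9, "delay": 5, "shape": "turtle", "using_IDLE": False}:
+# values containing "." become floats, values parseable as int become
+# ints, the literals True/False/None/''/"" become the corresponding
+# Python objects, and everything else stays a string.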
+
+def readconfig(cfgdict):
+ """Read config-files, change configuration-dict accordingly.
+
+ If there is a turtle.cfg file in the current working directory,
+ read it from there. If it contains an importconfig value,
+ say 'myway', construct the filename turtle_myway.cfg; otherwise use
+ turtle.cfg. Read that file from the import directory, where
+ turtle.py is located.
+ Update the configuration dictionary first according to the config
+ file in the import directory, then according to the config file in
+ the current working directory.
+ If no config file is found, the default configuration is used.
+ """
+ default_cfg = "turtle.cfg"
+ cfgdict1 = {}
+ cfgdict2 = {}
+ if isfile(default_cfg):
+ cfgdict1 = config_dict(default_cfg)
+ if "importconfig" in cfgdict1:
+ default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"]
+ try:
+ head, tail = split(__file__)
+ cfg_file2 = join(head, default_cfg)
+ except Exception:
+ cfg_file2 = ""
+ if isfile(cfg_file2):
+ cfgdict2 = config_dict(cfg_file2)
+ _CFG.update(cfgdict2)
+ _CFG.update(cfgdict1)
+
+try:
+ readconfig(_CFG)
+except Exception:
+ print ("No configfile read, reason unknown")
+
+
+class Vec2D(tuple):
+ """A 2 dimensional vector class, used as a helper class
+ for implementing turtle graphics.
+ May be useful for turtle graphics programs also.
+ Derived from tuple, so a vector is a tuple!
+
+ Provides (for a, b vectors, k number):
+ a+b vector addition
+ a-b vector subtraction
+ a*b inner product
+ k*a and a*k multiplication with scalar
+ |a| absolute value of a
+ a.rotate(angle) rotation
+ """
+ def __new__(cls, x, y):
+ return tuple.__new__(cls, (x, y))
+ def __add__(self, other):
+ return Vec2D(self[0]+other[0], self[1]+other[1])
+ def __mul__(self, other):
+ if isinstance(other, Vec2D):
+ return self[0]*other[0]+self[1]*other[1]
+ return Vec2D(self[0]*other, self[1]*other)
+ def __rmul__(self, other):
+ if isinstance(other, int) or isinstance(other, float):
+ return Vec2D(self[0]*other, self[1]*other)
+ return NotImplemented
+ def __sub__(self, other):
+ return Vec2D(self[0]-other[0], self[1]-other[1])
+ def __neg__(self):
+ return Vec2D(-self[0], -self[1])
+ def __abs__(self):
+ return math.hypot(*self)
+ def rotate(self, angle):
+ """rotate self counterclockwise by angle
+ """
+ perp = Vec2D(-self[1], self[0])
+ angle = math.radians(angle)
+ c, s = math.cos(angle), math.sin(angle)
+ return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)
+ def __getnewargs__(self):
+ return (self[0], self[1])
+ def __repr__(self):
+ return "(%.2f,%.2f)" % self
+
+
+##############################################################################
+### From here up to line : Tkinter - Interface for turtle.py ###
+### May be replaced by an interface to some different graphics toolkit ###
+##############################################################################
+
+## helper functions for Scrolled Canvas, to forward Canvas-methods
+## to ScrolledCanvas class
+
+def __methodDict(cls, _dict):
+ """helper function for Scrolled Canvas"""
+ baseList = list(cls.__bases__)
+ baseList.reverse()
+ for _super in baseList:
+ __methodDict(_super, _dict)
+ for key, value in cls.__dict__.items():
+ if type(value) == types.FunctionType:
+ _dict[key] = value
+
+def __methods(cls):
+ """helper function for Scrolled Canvas"""
+ _dict = {}
+ __methodDict(cls, _dict)
+ return _dict.keys()
+
+__stringBody = (
+ 'def %(method)s(self, *args, **kw): return ' +
+ 'self.%(attribute)s.%(method)s(*args, **kw)')
+
+def __forwardmethods(fromClass, toClass, toPart, exclude = ()):
+ ### MANY CHANGES ###
+ _dict_1 = {}
+ __methodDict(toClass, _dict_1)
+ _dict = {}
+ mfc = __methods(fromClass)
+ for ex in _dict_1.keys():
+ if ex[:1] == '_' or ex[-1:] == '_' or ex in exclude or ex in mfc:
+ pass
+ else:
+ _dict[ex] = _dict_1[ex]
+
+ for method, func in _dict.items():
+ d = {'method': method, 'func': func}
+ if isinstance(toPart, str):
+ execString = \
+ __stringBody % {'method' : method, 'attribute' : toPart}
+ exec(execString, d)
+ setattr(fromClass, method, d[method]) ### NEW
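+
+# Illustration: for a method name such as 'coords' and toPart '_canvas'
+# (as in the ScrolledCanvas call below), the exec above builds the
+# equivalent of
+# def coords(self, *args, **kw):
+# return self._canvas.coords(*args, **kw)
+# and installs it on fromClass, so the wrapper class transparently
+# exposes the wrapped widget's methods.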
+
+
+class ScrolledCanvas(TK.Frame):
+ """Modeled after the scrolled canvas class from Grayons's Tkinter book.
+
+ Used as the default canvas, which pops up automatically when
+ using turtle graphics functions or the Turtle class.
+ """
+ def __init__(self, master, width=500, height=350,
+ canvwidth=600, canvheight=500):
+ TK.Frame.__init__(self, master, width=width, height=height)
+ self._rootwindow = self.winfo_toplevel()
+ self.width, self.height = width, height
+ self.canvwidth, self.canvheight = canvwidth, canvheight
+ self.bg = "white"
+ self._canvas = TK.Canvas(master, width=width, height=height,
+ bg=self.bg, relief=TK.SUNKEN, borderwidth=2)
+ self.hscroll = TK.Scrollbar(master, command=self._canvas.xview,
+ orient=TK.HORIZONTAL)
+ self.vscroll = TK.Scrollbar(master, command=self._canvas.yview)
+ self._canvas.configure(xscrollcommand=self.hscroll.set,
+ yscrollcommand=self.vscroll.set)
+ self.rowconfigure(0, weight=1, minsize=0)
+ self.columnconfigure(0, weight=1, minsize=0)
+ self._canvas.grid(padx=1, in_ = self, pady=1, row=0,
+ column=0, rowspan=1, columnspan=1, sticky='news')
+ self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
+ column=1, rowspan=1, columnspan=1, sticky='news')
+ self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
+ column=0, rowspan=1, columnspan=1, sticky='news')
+ self.reset()
+ self._rootwindow.bind('<Configure>', self.onResize)
+
+ def reset(self, canvwidth=None, canvheight=None, bg = None):
+ """Adjust canvas and scrollbars according to given canvas size."""
+ if canvwidth:
+ self.canvwidth = canvwidth
+ if canvheight:
+ self.canvheight = canvheight
+ if bg:
+ self.bg = bg
+ self._canvas.config(bg=bg,
+ scrollregion=(-self.canvwidth//2, -self.canvheight//2,
+ self.canvwidth//2, self.canvheight//2))
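+ # Scroll so the view is centered on the canvas; the extra 30 pixels
+ # presumably allow for borders and scrollbar width.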
+ self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /
+ self.canvwidth)
+ self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /
+ self.canvheight)
+ self.adjustScrolls()
+
+
+ def adjustScrolls(self):
+ """ Adjust scrollbars according to window- and canvas-size.
+ """
+ cwidth = self._canvas.winfo_width()
+ cheight = self._canvas.winfo_height()
+ self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)
+ self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)
+ if cwidth < self.canvwidth or cheight < self.canvheight:
+ self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
+ column=0, rowspan=1, columnspan=1, sticky='news')
+ self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
+ column=1, rowspan=1, columnspan=1, sticky='news')
+ else:
+ self.hscroll.grid_forget()
+ self.vscroll.grid_forget()
+
+ def onResize(self, event):
+ """self-explanatory"""
+ self.adjustScrolls()
+
+ def bbox(self, *args):
+ """ 'forward' method, which canvas itself has inherited...
+ """
+ return self._canvas.bbox(*args)
+
+ def cget(self, *args, **kwargs):
+ """ 'forward' method, which canvas itself has inherited...
+ """
+ return self._canvas.cget(*args, **kwargs)
+
+ def config(self, *args, **kwargs):
+ """ 'forward' method, which canvas itself has inherited...
+ """
+ self._canvas.config(*args, **kwargs)
+
+ def bind(self, *args, **kwargs):
+ """ 'forward' method, which canvas itself has inherited...
+ """
+ self._canvas.bind(*args, **kwargs)
+
+ def unbind(self, *args, **kwargs):
+ """ 'forward' method, which canvas itself has inherited...
+ """
+ self._canvas.unbind(*args, **kwargs)
+
+ def focus_force(self):
+ """ 'forward' method, which canvas itself has inherited...
+ """
+ self._canvas.focus_force()
+
+__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas')
+
+
+class _Root(TK.Tk):
+ """Root class for Screen based on Tkinter."""
+ def __init__(self):
+ TK.Tk.__init__(self)
+
+ def setupcanvas(self, width, height, cwidth, cheight):
+ self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight)
+ self._canvas.pack(expand=1, fill="both")
+
+ def _getcanvas(self):
+ return self._canvas
+
+ def set_geometry(self, width, height, startx, starty):
+ self.geometry("%dx%d%+d%+d"%(width, height, startx, starty))
+
+ def ondestroy(self, destroy):
+ self.wm_protocol("WM_DELETE_WINDOW", destroy)
+
+ def win_width(self):
+ return self.winfo_screenwidth()
+
+ def win_height(self):
+ return self.winfo_screenheight()
+
+Canvas = TK.Canvas
+
+
+class TurtleScreenBase(object):
+ """Provide the basic graphics functionality.
+ Interface between Tkinter and turtle.py.
+
+ To port turtle.py to some different graphics toolkit
+ a corresponding TurtleScreenBase class has to be implemented.
+ """
+
+ def _blankimage(self):
+ """return a blank image object
+ """
+ img = TK.PhotoImage(width=1, height=1, master=self.cv)
+ img.blank()
+ return img
+
+ def _image(self, filename):
+ """return an image object containing the
+ imagedata from a gif-file named filename.
+ """
+ return TK.PhotoImage(file=filename, master=self.cv)
+
+ def __init__(self, cv):
+ self.cv = cv
+ if isinstance(cv, ScrolledCanvas):
+ w = self.cv.canvwidth
+ h = self.cv.canvheight
+ else: # expected: ordinary TK.Canvas
+ w = int(self.cv.cget("width"))
+ h = int(self.cv.cget("height"))
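+ # Put the origin at the canvas center: the scroll region spans
+ # [-w//2, w//2] x [-h//2, h//2], matching turtle coordinates.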
+ self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 ))
+ self.canvwidth = w
+ self.canvheight = h
+ self.xscale = self.yscale = 1.0
+
+ def _createpoly(self):
+ """Create an invisible polygon item on canvas self.cv)
+ """
+ return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="")
+
+ def _drawpoly(self, polyitem, coordlist, fill=None,
+ outline=None, width=None, top=False):
+ """Configure polygonitem polyitem according to provided
+ arguments:
+ coordlist is sequence of coordinates
+ fill is filling color
+ outline is outline color
+ top is a boolean value, which specifies if polyitem
+ will be put on top of the canvas' displaylist so it
+ will not be covered by other items.
+ """
+ cl = []
+ for x, y in coordlist:
+ cl.append(x * self.xscale)
+ cl.append(-y * self.yscale)
+ self.cv.coords(polyitem, *cl)
+ if fill is not None:
+ self.cv.itemconfigure(polyitem, fill=fill)
+ if outline is not None:
+ self.cv.itemconfigure(polyitem, outline=outline)
+ if width is not None:
+ self.cv.itemconfigure(polyitem, width=width)
+ if top:
+ self.cv.tag_raise(polyitem)
+
+ def _createline(self):
+ """Create an invisible line item on canvas self.cv)
+ """
+ return self.cv.create_line(0, 0, 0, 0, fill="", width=2,
+ capstyle = TK.ROUND)
+
+ def _drawline(self, lineitem, coordlist=None,
+ fill=None, width=None, top=False):
+ """Configure lineitem according to provided arguments:
+ coordlist is sequence of coordinates
+ fill is drawing color
+ width is width of drawn line.
+ top is a boolean value, which specifies if lineitem
+ will be put on top of the canvas' displaylist so it
+ will not be covered by other items.
+ """
+ if coordlist is not None:
+ cl = []
+ for x, y in coordlist:
+ cl.append(x * self.xscale)
+ cl.append(-y * self.yscale)
+ self.cv.coords(lineitem, *cl)
+ if fill is not None:
+ self.cv.itemconfigure(lineitem, fill=fill)
+ if width is not None:
+ self.cv.itemconfigure(lineitem, width=width)
+ if top:
+ self.cv.tag_raise(lineitem)
+
+ def _delete(self, item):
+ """Delete graphics item from canvas.
+ If item is"all" delete all graphics items.
+ """
+ self.cv.delete(item)
+
+ def _update(self):
+ """Redraw graphics items on canvas
+ """
+ self.cv.update()
+
+ def _delay(self, delay):
+ """Delay subsequent canvas actions for delay ms."""
+ self.cv.after(delay)
+
+ def _iscolorstring(self, color):
+ """Check if the string color is a legal Tkinter color string.
+ """
+ try:
+ rgb = self.cv.winfo_rgb(color)
+ ok = True
+ except TK.TclError:
+ ok = False
+ return ok
+
+ def _bgcolor(self, color=None):
+ """Set canvas' backgroundcolor if color is not None,
+ else return backgroundcolor."""
+ if color is not None:
+ self.cv.config(bg = color)
+ self._update()
+ else:
+ return self.cv.cget("bg")
+
+ def _write(self, pos, txt, align, font, pencolor):
+ """Write txt at pos in canvas with specified font
+ and color.
+ Return text item and x-coord of right bottom corner
+ of text's bounding box."""
+ x, y = pos
+ x = x * self.xscale
+ y = y * self.yscale
+ anchor = {"left":"sw", "center":"s", "right":"se" }
+ item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align],
+ fill = pencolor, font = font)
+ x0, y0, x1, y1 = self.cv.bbox(item)
+ return item, x1-1
+
+## def _dot(self, pos, size, color):
+## """may be implemented for some other graphics toolkit"""
+
+ def _onclick(self, item, fun, num=1, add=None):
+ """Bind fun to mouse-click event on turtle.
+ fun must be a function with two arguments, the coordinates
+ of the clicked point on the canvas.
+ num, the number of the mouse-button, defaults to 1
+ """
+ if fun is None:
+ self.cv.tag_unbind(item, "" % num)
+ else:
+ def eventfun(event):
+ x, y = (self.cv.canvasx(event.x)/self.xscale,
+ -self.cv.canvasy(event.y)/self.yscale)
+ fun(x, y)
+ self.cv.tag_bind(item, "" % num, eventfun, add)
+
+ def _onrelease(self, item, fun, num=1, add=None):
+ """Bind fun to mouse-button-release event on turtle.
+ fun must be a function with two arguments, the coordinates
+ of the point on the canvas where mouse button is released.
+ num, the number of the mouse-button, defaults to 1
+
+ If a turtle is clicked, first the _onclick-event will be performed,
+ then the _onscreenclick-event.
+ """
+ if fun is None:
+ self.cv.tag_unbind(item, "