diff --git a/.gitattributes b/.gitattributes
index 225eb77511fdc00b79e181cc69c3b04bd9cca115..1cf55e55b97f98d9364059d6690836ca97ae9468 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -259,3 +259,10 @@ evalkit_cambrian/lib/libtinfo.so.6 filter=lfs diff=lfs merge=lfs -text
 evalkit_cambrian/lib/libtinfow.so.6.4 filter=lfs diff=lfs merge=lfs -text
 evalkit_cambrian/bin/unxz filter=lfs diff=lfs merge=lfs -text
 evalkit_cambrian/lib/libtinfo.so filter=lfs diff=lfs merge=lfs -text
+evalkit_cambrian/lib/libssl.so filter=lfs diff=lfs merge=lfs -text
+evalkit_llava/lib/python3.10/lib-dynload/_decimal.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+evalkit_cambrian/lib/libz.so.1.2.13 filter=lfs diff=lfs merge=lfs -text
+evalkit_cambrian/bin/xz filter=lfs diff=lfs merge=lfs -text
+evalkit_cambrian/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
+evalkit_cambrian/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_cambrian/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
diff --git a/evalkit_cambrian/bin/xz b/evalkit_cambrian/bin/xz
new file mode 100644
index 0000000000000000000000000000000000000000..8dd9f2116d6a8588f795dfc3e1b583fdc1f7c0c3
--- /dev/null
+++ b/evalkit_cambrian/bin/xz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a127c4faacf3bcd0d328c3eb0cc7096176e28a0f61306173489f619827c98276
+size 108336
diff --git a/evalkit_cambrian/lib/libssl.so b/evalkit_cambrian/lib/libssl.so
new file mode 100644
index 0000000000000000000000000000000000000000..53b6e92c255c102b29b845c3cc906034d8461529
--- /dev/null
+++ b/evalkit_cambrian/lib/libssl.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3480c91df4e0c1a33514955568641405e37924f680e8ba42f494a209640516c6
+size 775712
diff --git a/evalkit_cambrian/lib/libz.so.1.2.13 b/evalkit_cambrian/lib/libz.so.1.2.13
new file mode 100644
index 0000000000000000000000000000000000000000..64cd6a309bad00dc38040a577191f6681e18bf89
--- /dev/null
+++ b/evalkit_cambrian/lib/libz.so.1.2.13
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b0e682a9dc7fd4895a6783288f851b793dc89633f28714027974fa4d66f3914
+size 124744
diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/bdist_dumb.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/bdist_dumb.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e74082c221b5ef54152324398e03b50b5767ecb
Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/bdist_dumb.cpython-310.pyc differ
diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/build_clib.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/build_clib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25ee79d272f9683bcb9b152013312470ab01a0ff
Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/build_clib.cpython-310.pyc differ
diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/check.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/check.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5756d2eda5b5a7b5047594578deed0a96ea655b1
Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/check.cpython-310.pyc differ
diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/sdist.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/sdist.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d66678232c2effd5e98742f7131219e76ba60d87
Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/distutils/command/__pycache__/sdist.cpython-310.pyc differ
diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/bdist.py b/evalkit_cambrian/lib/python3.10/distutils/command/bdist.py
new file mode 100644
index 0000000000000000000000000000000000000000..d580a8090bd201879a25819324d8192ed72193d3
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/distutils/command/bdist.py
@@ -0,0 +1,141 @@
+"""distutils.command.bdist
+
+Implements the Distutils 'bdist' command (create a built [binary]
+distribution)."""
+
+import os
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import get_platform
+
+
+def show_formats():
+    """Print list of available formats (arguments to "--format" option).
+    """
+    from distutils.fancy_getopt import FancyGetopt
+    formats = []
+    for format in bdist.format_commands:
+        formats.append(("formats=" + format, None,
+                        bdist.format_command[format][1]))
+    pretty_printer = FancyGetopt(formats)
+    pretty_printer.print_help("List of available distribution formats:")
+
+
+class bdist(Command):
+
+    description = "create a built (binary) distribution"
+
+    user_options = [('bdist-base=', 'b',
+                     "temporary directory for creating built distributions"),
+                    ('plat-name=', 'p',
+                     "platform name to embed in generated filenames "
+                     "(default: %s)" % get_platform()),
+                    ('formats=', None,
+                     "formats for distribution (comma-separated list)"),
+                    ('dist-dir=', 'd',
+                     "directory to put final built distributions in "
+                     "[default: dist]"),
+                    ('skip-build', None,
+                     "skip rebuilding everything (for testing/debugging)"),
+                    ('owner=', 'u',
+                     "Owner name used when creating a tar file"
+                     " [default: current user]"),
+                    ('group=', 'g',
+                     "Group name used when creating a tar file"
+                     " [default: current group]"),
+                   ]
+
+    boolean_options = ['skip-build']
+
+    help_options = [
+        ('help-formats', None,
+         "lists available distribution formats", show_formats),
+        ]
+
+    # The following commands do not take a format option from bdist
+    no_format_option = ('bdist_rpm',)
+
+    # This won't do in reality: will need to distinguish RPM-ish Linux,
+    # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
+    default_format = {'posix': 'gztar',
+                      'nt': 'zip'}
+
+    # Establish the preferred order (for the --help-formats option).
+    format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar',
+                       'zip', 'msi']
+
+    # And the real information.
+    format_command = {'rpm':   ('bdist_rpm',  "RPM distribution"),
+                      'gztar': ('bdist_dumb', "gzip'ed tar file"),
+                      'bztar': ('bdist_dumb', "bzip2'ed tar file"),
+                      'xztar': ('bdist_dumb', "xz'ed tar file"),
+                      'ztar':  ('bdist_dumb', "compressed tar file"),
+                      'tar':   ('bdist_dumb', "tar file"),
+                      'zip':   ('bdist_dumb', "ZIP file"),
+                      'msi':   ('bdist_msi',  "Microsoft Installer")
+                      }
+
+
+    def initialize_options(self):
+        self.bdist_base = None
+        self.plat_name = None
+        self.formats = None
+        self.dist_dir = None
+        self.skip_build = 0
+        self.group = None
+        self.owner = None
+
+    def finalize_options(self):
+        # have to finalize 'plat_name' before 'bdist_base'
+        if self.plat_name is None:
+            if self.skip_build:
+                self.plat_name = get_platform()
+            else:
+                self.plat_name = self.get_finalized_command('build').plat_name
+
+        # 'bdist_base' -- parent of per-built-distribution-format
+        # temporary directories (eg. we'll probably have
+        # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
+        if self.bdist_base is None:
+            build_base = self.get_finalized_command('build').build_base
+            self.bdist_base = os.path.join(build_base,
+                                           'bdist.' + self.plat_name)
+
+        self.ensure_string_list('formats')
+        if self.formats is None:
+            try:
+                self.formats = [self.default_format[os.name]]
+            except KeyError:
+                raise DistutilsPlatformError(
+                    "don't know how to create built distributions "
+                    "on platform %s" % os.name)
+
+        if self.dist_dir is None:
+            self.dist_dir = "dist"
+
+    def run(self):
+        # Figure out which sub-commands we need to run.
+        commands = []
+        for format in self.formats:
+            try:
+                commands.append(self.format_command[format][0])
+            except KeyError:
+                raise DistutilsOptionError("invalid format '%s'" % format)
+
+        # Reinitialize and run each command.
+        for i in range(len(self.formats)):
+            cmd_name = commands[i]
+            sub_cmd = self.reinitialize_command(cmd_name)
+            if cmd_name not in self.no_format_option:
+                sub_cmd.format = self.formats[i]
+
+            # passing the owner and group names for tar archiving
+            if cmd_name == 'bdist_dumb':
+                sub_cmd.owner = self.owner
+                sub_cmd.group = self.group
+
+            # If we're going to need to run this command again, tell it to
+            # keep its temporary files around so subsequent runs go faster.
+            if cmd_name in commands[i+1:]:
+                sub_cmd.keep_temp = 1
+            self.run_command(cmd_name)
diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/build_py.py b/evalkit_cambrian/lib/python3.10/distutils/command/build_py.py
new file mode 100644
index 0000000000000000000000000000000000000000..edc2171cd122dda26a96a2770d2cfa69ccab417b
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/distutils/command/build_py.py
@@ -0,0 +1,416 @@
+"""distutils.command.build_py
+
+Implements the Distutils 'build_py' command."""
+
+import os
+import importlib.util
+import sys
+import glob
+
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import convert_path, Mixin2to3
+from distutils import log
+
+class build_py (Command):
+
+    description = "\"build\" pure Python modules (copy to build directory)"
+
+    user_options = [
+        ('build-lib=', 'd', "directory to \"build\" (copy) to"),
+        ('compile', 'c', "compile .py to .pyc"),
+        ('no-compile', None, "don't compile .py files [default]"),
+        ('optimize=', 'O',
+         "also compile with optimization: -O1 for \"python -O\", "
+         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+        ]
+
+    boolean_options = ['compile', 'force']
+    negative_opt = {'no-compile' : 'compile'}
+
+    def initialize_options(self):
+        self.build_lib = None
+        self.py_modules = None
+        self.package = None
+        self.package_data = None
+        self.package_dir = None
+        self.compile = 0
+        self.optimize = 0
+        self.force = None
+
+    def finalize_options(self):
+        self.set_undefined_options('build',
+                                   ('build_lib', 'build_lib'),
+                                   ('force', 'force'))
+
+        # Get the distribution options that are aliases for build_py
+        # options -- list of packages and list of modules.
+        self.packages = self.distribution.packages
+        self.py_modules = self.distribution.py_modules
+        self.package_data = self.distribution.package_data
+        self.package_dir = {}
+        if self.distribution.package_dir:
+            for name, path in self.distribution.package_dir.items():
+                self.package_dir[name] = convert_path(path)
+        self.data_files = self.get_data_files()
+
+        # Ick, copied straight from install_lib.py (fancy_getopt needs a
+        # type system!  Hell, *everything* needs a type system!!!)
+        if not isinstance(self.optimize, int):
+            try:
+                self.optimize = int(self.optimize)
+                assert 0 <= self.optimize <= 2
+            except (ValueError, AssertionError):
+                raise DistutilsOptionError("optimize must be 0, 1, or 2")
+
+    def run(self):
+        # XXX copy_file by default preserves atime and mtime.  IMHO this is
+        # the right thing to do, but perhaps it should be an option -- in
+        # particular, a site administrator might want installed files to
+        # reflect the time of installation rather than the last
+        # modification time before the installed release.
+
+        # XXX copy_file by default preserves mode, which appears to be the
+        # wrong thing to do: if a file is read-only in the working
+        # directory, we want it to be installed read/write so that the next
+        # installation of the same module distribution can overwrite it
+        # without problems.  (This might be a Unix-specific issue.)  Thus
+        # we turn off 'preserve_mode' when copying to the build directory,
+        # since the build directory is supposed to be exactly what the
+        # installation will look like (ie. we preserve mode when
+        # installing).
+
+        # Two options control which modules will be installed: 'packages'
+        # and 'py_modules'.
The former lets us work with whole packages, not + # specifying individual modules at all; the latter is for + # specifying modules one-at-a-time. + + if self.py_modules: + self.build_modules() + if self.packages: + self.build_packages() + self.build_package_data() + + self.byte_compile(self.get_outputs(include_bytecode=0)) + + def get_data_files(self): + """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" + data = [] + if not self.packages: + return data + for package in self.packages: + # Locate package source directory + src_dir = self.get_package_dir(package) + + # Compute package build directory + build_dir = os.path.join(*([self.build_lib] + package.split('.'))) + + # Length of path to strip from found files + plen = 0 + if src_dir: + plen = len(src_dir)+1 + + # Strip directory from globbed filenames + filenames = [ + file[plen:] for file in self.find_data_files(package, src_dir) + ] + data.append((package, src_dir, build_dir, filenames)) + return data + + def find_data_files(self, package, src_dir): + """Return filenames for package's data files in 'src_dir'""" + globs = (self.package_data.get('', []) + + self.package_data.get(package, [])) + files = [] + for pattern in globs: + # Each pattern has to be converted to a platform-specific path + filelist = glob.glob(os.path.join(glob.escape(src_dir), convert_path(pattern))) + # Files that match more than one pattern are only added once + files.extend([fn for fn in filelist if fn not in files + and os.path.isfile(fn)]) + return files + + def build_package_data(self): + """Copy data files into build directory""" + lastdir = None + for package, src_dir, build_dir, filenames in self.data_files: + for filename in filenames: + target = os.path.join(build_dir, filename) + self.mkpath(os.path.dirname(target)) + self.copy_file(os.path.join(src_dir, filename), target, + preserve_mode=False) + + def get_package_dir(self, package): + """Return the directory, relative to the top of the source + distribution, where package 'package' should be found + (at least according to the 'package_dir' option, if any).""" + path = package.split('.') + + if not self.package_dir: + if path: + return os.path.join(*path) + else: + return '' + else: + tail = [] + while path: + try: + pdir = self.package_dir['.'.join(path)] + except KeyError: + tail.insert(0, path[-1]) + del path[-1] + else: + tail.insert(0, pdir) + return os.path.join(*tail) + else: + # Oops, got all the way through 'path' without finding a + # match in package_dir. If package_dir defines a directory + # for the root (nameless) package, then fallback on it; + # otherwise, we might as well have not consulted + # package_dir at all, as we just use the directory implied + # by 'tail' (which should be the same as the original value + # of 'path' at this point). + pdir = self.package_dir.get('') + if pdir is not None: + tail.insert(0, pdir) + + if tail: + return os.path.join(*tail) + else: + return '' + + def check_package(self, package, package_dir): + # Empty dir name means current directory, which we can probably + # assume exists. Also, os.path.exists and isdir don't know about + # my "empty string means current dir" convention, so we have to + # circumvent them. 
+ if package_dir != "": + if not os.path.exists(package_dir): + raise DistutilsFileError( + "package directory '%s' does not exist" % package_dir) + if not os.path.isdir(package_dir): + raise DistutilsFileError( + "supposed package directory '%s' exists, " + "but is not a directory" % package_dir) + + # Require __init__.py for all but the "root package" + if package: + init_py = os.path.join(package_dir, "__init__.py") + if os.path.isfile(init_py): + return init_py + else: + log.warn(("package init file '%s' not found " + + "(or not a regular file)"), init_py) + + # Either not in a package at all (__init__.py not expected), or + # __init__.py doesn't exist -- so don't return the filename. + return None + + def check_module(self, module, module_file): + if not os.path.isfile(module_file): + log.warn("file %s (for module %s) not found", module_file, module) + return False + else: + return True + + def find_package_modules(self, package, package_dir): + self.check_package(package, package_dir) + module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py")) + modules = [] + setup_script = os.path.abspath(self.distribution.script_name) + + for f in module_files: + abs_f = os.path.abspath(f) + if abs_f != setup_script: + module = os.path.splitext(os.path.basename(f))[0] + modules.append((package, module, f)) + else: + self.debug_print("excluding %s" % setup_script) + return modules + + def find_modules(self): + """Finds individually-specified Python modules, ie. those listed by + module name in 'self.py_modules'. Returns a list of tuples (package, + module_base, filename): 'package' is a tuple of the path through + package-space to the module; 'module_base' is the bare (no + packages, no dots) module name, and 'filename' is the path to the + ".py" file (relative to the distribution root) that implements the + module. + """ + # Map package names to tuples of useful info about the package: + # (package_dir, checked) + # package_dir - the directory where we'll find source files for + # this package + # checked - true if we have checked that the package directory + # is valid (exists, contains __init__.py, ... ?) + packages = {} + + # List of (package, module, filename) tuples to return + modules = [] + + # We treat modules-in-packages almost the same as toplevel modules, + # just the "package" for a toplevel is empty (either an empty + # string or empty list, depending on context). Differences: + # - don't check for __init__.py in directory for empty package + for module in self.py_modules: + path = module.split('.') + package = '.'.join(path[0:-1]) + module_base = path[-1] + + try: + (package_dir, checked) = packages[package] + except KeyError: + package_dir = self.get_package_dir(package) + checked = 0 + + if not checked: + init_py = self.check_package(package, package_dir) + packages[package] = (package_dir, 1) + if init_py: + modules.append((package, "__init__", init_py)) + + # XXX perhaps we should also check for just .pyc files + # (so greedy closed-source bastards can distribute Python + # modules too) + module_file = os.path.join(package_dir, module_base + ".py") + if not self.check_module(module, module_file): + continue + + modules.append((package, module_base, module_file)) + + return modules + + def find_all_modules(self): + """Compute the list of all modules that will be built, whether + they are specified one-module-at-a-time ('self.py_modules') or + by whole packages ('self.packages'). 
Return a list of tuples + (package, module, module_file), just like 'find_modules()' and + 'find_package_modules()' do.""" + modules = [] + if self.py_modules: + modules.extend(self.find_modules()) + if self.packages: + for package in self.packages: + package_dir = self.get_package_dir(package) + m = self.find_package_modules(package, package_dir) + modules.extend(m) + return modules + + def get_source_files(self): + return [module[-1] for module in self.find_all_modules()] + + def get_module_outfile(self, build_dir, package, module): + outfile_path = [build_dir] + list(package) + [module + ".py"] + return os.path.join(*outfile_path) + + def get_outputs(self, include_bytecode=1): + modules = self.find_all_modules() + outputs = [] + for (package, module, module_file) in modules: + package = package.split('.') + filename = self.get_module_outfile(self.build_lib, package, module) + outputs.append(filename) + if include_bytecode: + if self.compile: + outputs.append(importlib.util.cache_from_source( + filename, optimization='')) + if self.optimize > 0: + outputs.append(importlib.util.cache_from_source( + filename, optimization=self.optimize)) + + outputs += [ + os.path.join(build_dir, filename) + for package, src_dir, build_dir, filenames in self.data_files + for filename in filenames + ] + + return outputs + + def build_module(self, module, module_file, package): + if isinstance(package, str): + package = package.split('.') + elif not isinstance(package, (list, tuple)): + raise TypeError( + "'package' must be a string (dot-separated), list, or tuple") + + # Now put the module source file into the "build" area -- this is + # easy, we just copy it somewhere under self.build_lib (the build + # directory for Python source). + outfile = self.get_module_outfile(self.build_lib, package, module) + dir = os.path.dirname(outfile) + self.mkpath(dir) + return self.copy_file(module_file, outfile, preserve_mode=0) + + def build_modules(self): + modules = self.find_modules() + for (package, module, module_file) in modules: + # Now "build" the module -- ie. copy the source file to + # self.build_lib (the build directory for Python source). + # (Actually, it gets copied to the directory for this package + # under self.build_lib.) + self.build_module(module, module_file, package) + + def build_packages(self): + for package in self.packages: + # Get list of (package, module, module_file) tuples based on + # scanning the package directory. 'package' is only included + # in the tuple so that 'find_modules()' and + # 'find_package_tuples()' have a consistent interface; it's + # ignored here (apart from a sanity check). Also, 'module' is + # the *unqualified* module name (ie. no dots, no package -- we + # already know its package!), and 'module_file' is the path to + # the .py file, relative to the current directory + # (ie. including 'package_dir'). + package_dir = self.get_package_dir(package) + modules = self.find_package_modules(package, package_dir) + + # Now loop over the modules we found, "building" each one (just + # copy it to self.build_lib). 
+                for (package_, module, module_file) in modules:
+                    assert package == package_
+                    self.build_module(module, module_file, package)
+
+    def byte_compile(self, files):
+        if sys.dont_write_bytecode:
+            self.warn('byte-compiling is disabled, skipping.')
+            return
+
+        from distutils.util import byte_compile
+        prefix = self.build_lib
+        if prefix[-1] != os.sep:
+            prefix = prefix + os.sep
+
+        # XXX this code is essentially the same as the 'byte_compile()
+        # method of the "install_lib" command, except for the determination
+        # of the 'prefix' string.  Hmmm.
+        if self.compile:
+            byte_compile(files, optimize=0,
+                         force=self.force, prefix=prefix, dry_run=self.dry_run)
+        if self.optimize > 0:
+            byte_compile(files, optimize=self.optimize,
+                         force=self.force, prefix=prefix, dry_run=self.dry_run)
+
+class build_py_2to3(build_py, Mixin2to3):
+    def run(self):
+        self.updated_files = []
+
+        # Base class code
+        if self.py_modules:
+            self.build_modules()
+        if self.packages:
+            self.build_packages()
+            self.build_package_data()
+
+        # 2to3
+        self.run_2to3(self.updated_files)
+
+        # Remaining base class code
+        self.byte_compile(self.get_outputs(include_bytecode=0))
+
+    def build_module(self, module, module_file, package):
+        res = build_py.build_module(self, module, module_file, package)
+        if res[1]:
+            # file was copied
+            self.updated_files.append(res[0])
+        return res
diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/build_scripts.py b/evalkit_cambrian/lib/python3.10/distutils/command/build_scripts.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccc70e6465016ef2bbde845ea7eb99b6078eb9d8
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/distutils/command/build_scripts.py
@@ -0,0 +1,160 @@
+"""distutils.command.build_scripts
+
+Implements the Distutils 'build_scripts' command."""
+
+import os, re
+from stat import ST_MODE
+from distutils import sysconfig
+from distutils.core import Command
+from distutils.dep_util import newer
+from distutils.util import convert_path, Mixin2to3
+from distutils import log
+import tokenize
+
+# check if Python is called on the first line with this expression
+first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$')
+
+class build_scripts(Command):
+
+    description = "\"build\" scripts (copy and fixup #! line)"
+
+    user_options = [
+        ('build-dir=', 'd', "directory to \"build\" (copy) to"),
+        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+        ('executable=', 'e', "specify final destination interpreter path"),
+        ]
+
+    boolean_options = ['force']
+
+
+    def initialize_options(self):
+        self.build_dir = None
+        self.scripts = None
+        self.force = None
+        self.executable = None
+        self.outfiles = None
+
+    def finalize_options(self):
+        self.set_undefined_options('build',
+                                   ('build_scripts', 'build_dir'),
+                                   ('force', 'force'),
+                                   ('executable', 'executable'))
+        self.scripts = self.distribution.scripts
+
+    def get_source_files(self):
+        return self.scripts
+
+    def run(self):
+        if not self.scripts:
+            return
+        self.copy_scripts()
+
+
+    def copy_scripts(self):
+        r"""Copy each script listed in 'self.scripts'; if it's marked as a
+        Python script in the Unix way (first line matches 'first_line_re',
+        ie. starts with "\#!" and contains "python"), then adjust the first
+        line to refer to the current Python interpreter as we copy.
+ """ + self.mkpath(self.build_dir) + outfiles = [] + updated_files = [] + for script in self.scripts: + adjust = False + script = convert_path(script) + outfile = os.path.join(self.build_dir, os.path.basename(script)) + outfiles.append(outfile) + + if not self.force and not newer(script, outfile): + log.debug("not copying %s (up-to-date)", script) + continue + + # Always open the file, but ignore failures in dry-run mode -- + # that way, we'll get accurate feedback if we can read the + # script. + try: + f = open(script, "rb") + except OSError: + if not self.dry_run: + raise + f = None + else: + encoding, lines = tokenize.detect_encoding(f.readline) + f.seek(0) + first_line = f.readline() + if not first_line: + self.warn("%s is an empty file (skipping)" % script) + continue + + match = first_line_re.match(first_line) + if match: + adjust = True + post_interp = match.group(1) or b'' + + if adjust: + log.info("copying and adjusting %s -> %s", script, + self.build_dir) + updated_files.append(outfile) + if not self.dry_run: + if not sysconfig.python_build: + executable = self.executable + else: + executable = os.path.join( + sysconfig.get_config_var("BINDIR"), + "python%s%s" % (sysconfig.get_config_var("VERSION"), + sysconfig.get_config_var("EXE"))) + executable = os.fsencode(executable) + shebang = b"#!" + executable + post_interp + b"\n" + # Python parser starts to read a script using UTF-8 until + # it gets a #coding:xxx cookie. The shebang has to be the + # first line of a file, the #coding:xxx cookie cannot be + # written before. So the shebang has to be decodable from + # UTF-8. + try: + shebang.decode('utf-8') + except UnicodeDecodeError: + raise ValueError( + "The shebang ({!r}) is not decodable " + "from utf-8".format(shebang)) + # If the script is encoded to a custom encoding (use a + # #coding:xxx cookie), the shebang has to be decodable from + # the script encoding too. + try: + shebang.decode(encoding) + except UnicodeDecodeError: + raise ValueError( + "The shebang ({!r}) is not decodable " + "from the script encoding ({})" + .format(shebang, encoding)) + with open(outfile, "wb") as outf: + outf.write(shebang) + outf.writelines(f.readlines()) + if f: + f.close() + else: + if f: + f.close() + updated_files.append(outfile) + self.copy_file(script, outfile) + + if os.name == 'posix': + for file in outfiles: + if self.dry_run: + log.info("changing mode of %s", file) + else: + oldmode = os.stat(file)[ST_MODE] & 0o7777 + newmode = (oldmode | 0o555) & 0o7777 + if newmode != oldmode: + log.info("changing mode of %s from %o to %o", + file, oldmode, newmode) + os.chmod(file, newmode) + # XXX should we modify self.outfiles? + return outfiles, updated_files + +class build_scripts_2to3(build_scripts, Mixin2to3): + + def copy_scripts(self): + outfiles, updated_files = build_scripts.copy_scripts(self) + if not self.dry_run: + self.run_2to3(updated_files) + return outfiles, updated_files diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/command_template b/evalkit_cambrian/lib/python3.10/distutils/command/command_template new file mode 100644 index 0000000000000000000000000000000000000000..6106819db843b5d93e85c4ee6929825d45c2f08a --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/distutils/command/command_template @@ -0,0 +1,33 @@ +"""distutils.command.x + +Implements the Distutils 'x' command. 
+""" + +# created 2000/mm/dd, John Doe + +__revision__ = "$Id$" + +from distutils.core import Command + + +class x(Command): + + # Brief (40-50 characters) description of the command + description = "" + + # List of option tuples: long name, short name (None if no short + # name), and help string. + user_options = [('', '', + ""), + ] + + def initialize_options(self): + self. = None + self. = None + self. = None + + def finalize_options(self): + if self.x is None: + self.x = + + def run(self): diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/install_headers.py b/evalkit_cambrian/lib/python3.10/distutils/command/install_headers.py new file mode 100644 index 0000000000000000000000000000000000000000..9bb0b18dc0d809dbc03d9ca355818b3bb0af573b --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/distutils/command/install_headers.py @@ -0,0 +1,47 @@ +"""distutils.command.install_headers + +Implements the Distutils 'install_headers' command, to install C/C++ header +files to the Python include directory.""" + +from distutils.core import Command + + +# XXX force is never used +class install_headers(Command): + + description = "install C/C++ header files" + + user_options = [('install-dir=', 'd', + "directory to install header files to"), + ('force', 'f', + "force installation (overwrite existing files)"), + ] + + boolean_options = ['force'] + + def initialize_options(self): + self.install_dir = None + self.force = 0 + self.outfiles = [] + + def finalize_options(self): + self.set_undefined_options('install', + ('install_headers', 'install_dir'), + ('force', 'force')) + + + def run(self): + headers = self.distribution.headers + if not headers: + return + + self.mkpath(self.install_dir) + for header in headers: + (out, _) = self.copy_file(header, self.install_dir) + self.outfiles.append(out) + + def get_inputs(self): + return self.distribution.headers or [] + + def get_outputs(self): + return self.outfiles diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/install_scripts.py b/evalkit_cambrian/lib/python3.10/distutils/command/install_scripts.py new file mode 100644 index 0000000000000000000000000000000000000000..31a1130ee549371dffc668e515d2ae5d91799aac --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/distutils/command/install_scripts.py @@ -0,0 +1,60 @@ +"""distutils.command.install_scripts + +Implements the Distutils 'install_scripts' command, for installing +Python scripts.""" + +# contributed by Bastian Kleineidam + +import os +from distutils.core import Command +from distutils import log +from stat import ST_MODE + + +class install_scripts(Command): + + description = "install scripts (Python or otherwise)" + + user_options = [ + ('install-dir=', 'd', "directory to install scripts to"), + ('build-dir=','b', "build directory (where to install from)"), + ('force', 'f', "force installation (overwrite existing files)"), + ('skip-build', None, "skip the build steps"), + ] + + boolean_options = ['force', 'skip-build'] + + def initialize_options(self): + self.install_dir = None + self.force = 0 + self.build_dir = None + self.skip_build = None + + def finalize_options(self): + self.set_undefined_options('build', ('build_scripts', 'build_dir')) + self.set_undefined_options('install', + ('install_scripts', 'install_dir'), + ('force', 'force'), + ('skip_build', 'skip_build'), + ) + + def run(self): + if not self.skip_build: + self.run_command('build_scripts') + self.outfiles = self.copy_tree(self.build_dir, self.install_dir) + if os.name == 'posix': + # Set the executable bits 
(owner, group, and world) on + # all the scripts we just installed. + for file in self.get_outputs(): + if self.dry_run: + log.info("changing mode of %s", file) + else: + mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777 + log.info("changing mode of %s to %o", file, mode) + os.chmod(file, mode) + + def get_inputs(self): + return self.distribution.scripts or [] + + def get_outputs(self): + return self.outfiles or [] diff --git a/evalkit_cambrian/lib/python3.10/distutils/command/upload.py b/evalkit_cambrian/lib/python3.10/distutils/command/upload.py new file mode 100644 index 0000000000000000000000000000000000000000..e0ecb655b93fafaeaad82062591e055219921f7d --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/distutils/command/upload.py @@ -0,0 +1,215 @@ +""" +distutils.command.upload + +Implements the Distutils 'upload' subcommand (upload package to a package +index). +""" + +import os +import io +import hashlib +from base64 import standard_b64encode +from urllib.error import HTTPError +from urllib.request import urlopen, Request +from urllib.parse import urlparse +from distutils.errors import DistutilsError, DistutilsOptionError +from distutils.core import PyPIRCCommand +from distutils.spawn import spawn +from distutils import log + + +# PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256) +# https://bugs.python.org/issue40698 +_FILE_CONTENT_DIGESTS = { + "md5_digest": getattr(hashlib, "md5", None), + "sha256_digest": getattr(hashlib, "sha256", None), + "blake2_256_digest": getattr(hashlib, "blake2b", None), +} + + +class upload(PyPIRCCommand): + + description = "upload binary package to PyPI" + + user_options = PyPIRCCommand.user_options + [ + ('sign', 's', + 'sign files to upload using gpg'), + ('identity=', 'i', 'GPG identity used to sign files'), + ] + + boolean_options = PyPIRCCommand.boolean_options + ['sign'] + + def initialize_options(self): + PyPIRCCommand.initialize_options(self) + self.username = '' + self.password = '' + self.show_response = 0 + self.sign = False + self.identity = None + + def finalize_options(self): + PyPIRCCommand.finalize_options(self) + if self.identity and not self.sign: + raise DistutilsOptionError( + "Must use --sign for --identity to have meaning" + ) + config = self._read_pypirc() + if config != {}: + self.username = config['username'] + self.password = config['password'] + self.repository = config['repository'] + self.realm = config['realm'] + + # getting the password from the distribution + # if previously set by the register command + if not self.password and self.distribution.password: + self.password = self.distribution.password + + def run(self): + if not self.distribution.dist_files: + msg = ("Must create and upload files in one command " + "(e.g. 
setup.py sdist upload)") + raise DistutilsOptionError(msg) + for command, pyversion, filename in self.distribution.dist_files: + self.upload_file(command, pyversion, filename) + + def upload_file(self, command, pyversion, filename): + # Makes sure the repository URL is compliant + schema, netloc, url, params, query, fragments = \ + urlparse(self.repository) + if params or query or fragments: + raise AssertionError("Incompatible url %s" % self.repository) + + if schema not in ('http', 'https'): + raise AssertionError("unsupported schema " + schema) + + # Sign if requested + if self.sign: + gpg_args = ["gpg", "--detach-sign", "-a", filename] + if self.identity: + gpg_args[2:2] = ["--local-user", self.identity] + spawn(gpg_args, + dry_run=self.dry_run) + + # Fill in the data - send all the meta-data in case we need to + # register a new release + f = open(filename,'rb') + try: + content = f.read() + finally: + f.close() + + meta = self.distribution.metadata + data = { + # action + ':action': 'file_upload', + 'protocol_version': '1', + + # identify release + 'name': meta.get_name(), + 'version': meta.get_version(), + + # file content + 'content': (os.path.basename(filename),content), + 'filetype': command, + 'pyversion': pyversion, + + # additional meta-data + 'metadata_version': '1.0', + 'summary': meta.get_description(), + 'home_page': meta.get_url(), + 'author': meta.get_contact(), + 'author_email': meta.get_contact_email(), + 'license': meta.get_licence(), + 'description': meta.get_long_description(), + 'keywords': meta.get_keywords(), + 'platform': meta.get_platforms(), + 'classifiers': meta.get_classifiers(), + 'download_url': meta.get_download_url(), + # PEP 314 + 'provides': meta.get_provides(), + 'requires': meta.get_requires(), + 'obsoletes': meta.get_obsoletes(), + } + + data['comment'] = '' + + # file content digests + for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items(): + if digest_cons is None: + continue + try: + data[digest_name] = digest_cons(content).hexdigest() + except ValueError: + # hash digest not available or blocked by security policy + pass + + if self.sign: + with open(filename + ".asc", "rb") as f: + data['gpg_signature'] = (os.path.basename(filename) + ".asc", + f.read()) + + # set up the authentication + user_pass = (self.username + ":" + self.password).encode('ascii') + # The exact encoding of the authentication string is debated. + # Anyway PyPI only accepts ascii for both username or password. 
+ auth = "Basic " + standard_b64encode(user_pass).decode('ascii') + + # Build up the MIME payload for the POST data + boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' + sep_boundary = b'\r\n--' + boundary.encode('ascii') + end_boundary = sep_boundary + b'--\r\n' + body = io.BytesIO() + for key, value in data.items(): + title = '\r\nContent-Disposition: form-data; name="%s"' % key + # handle multiple entries for the same name + if not isinstance(value, list): + value = [value] + for value in value: + if type(value) is tuple: + title += '; filename="%s"' % value[0] + value = value[1] + else: + value = str(value).encode('utf-8') + body.write(sep_boundary) + body.write(title.encode('utf-8')) + body.write(b"\r\n\r\n") + body.write(value) + body.write(end_boundary) + body = body.getvalue() + + msg = "Submitting %s to %s" % (filename, self.repository) + self.announce(msg, log.INFO) + + # build the Request + headers = { + 'Content-type': 'multipart/form-data; boundary=%s' % boundary, + 'Content-length': str(len(body)), + 'Authorization': auth, + } + + request = Request(self.repository, data=body, + headers=headers) + # send the data + try: + result = urlopen(request) + status = result.getcode() + reason = result.msg + except HTTPError as e: + status = e.code + reason = e.msg + except OSError as e: + self.announce(str(e), log.ERROR) + raise + + if status == 200: + self.announce('Server response (%s): %s' % (status, reason), + log.INFO) + if self.show_response: + text = self._read_pypi_response(result) + msg = '\n'.join(('-' * 75, text, '-' * 75)) + self.announce(msg, log.INFO) + else: + msg = 'Upload failed (%s): %s' % (status, reason) + self.announce(msg, log.ERROR) + raise DistutilsError(msg) diff --git a/evalkit_cambrian/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl b/evalkit_cambrian/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..c7e03d464c9687907f77dc23718365528a05ac0b --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f +size 2055563 diff --git a/evalkit_cambrian/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl b/evalkit_cambrian/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..673f289f117f53c03ab13e13af450df69b012431 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f62ea9da9ed6289bfe868cd6845968a2c854d1427f8548d52cae02a42b4f0356 +size 1232695 diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/Grammar.txt b/evalkit_cambrian/lib/python3.10/lib2to3/Grammar.txt new file mode 100644 index 0000000000000000000000000000000000000000..fa7b15061d941c4d952c8d6446fe33c40890a264 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/Grammar.txt @@ -0,0 +1,196 @@ +# Grammar for 2to3. This grammar supports Python 2.x and 3.x. + +# NOTE WELL: You should also follow all the steps listed at +# https://devguide.python.org/grammar/ + +# Start symbols for the grammar: +# file_input is a module or sequence of commands read from an input file; +# single_input is a single interactive statement; +# eval_input is the input for the eval() and input() functions. 
+# NB: compound_stmt in single_input is followed by extra NEWLINE! +file_input: (NEWLINE | stmt)* ENDMARKER +single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) +async_funcdef: ASYNC funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite +parameters: '(' [typedargslist] ')' + +# The following definition for typedarglist is equivalent to this set of rules: +# +# arguments = argument (',' argument)* +# argument = tfpdef ['=' test] +# kwargs = '**' tname [','] +# args = '*' [tname] +# kwonly_kwargs = (',' argument)* [',' [kwargs]] +# args_kwonly_kwargs = args kwonly_kwargs | kwargs +# poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]] +# typedargslist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs +# typedarglist = arguments ',' '/' [',' [typedargslist_no_posonly]])|(typedargslist_no_posonly)" +# +# It needs to be fully expanded to allow our LL(1) parser to work on it. + +typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [ + ',' [((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])* + [',' ['**' tname [',']]] | '**' tname [',']) + | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])] + ] | ((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])* + [',' ['**' tname [',']]] | '**' tname [',']) + | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) + +tname: NAME [':' test] +tfpdef: tname | '(' tfplist ')' +tfplist: tfpdef (',' tfpdef)* [','] + +# The following definition for varargslist is equivalent to this set of rules: +# +# arguments = argument (',' argument )* +# argument = vfpdef ['=' test] +# kwargs = '**' vname [','] +# args = '*' [vname] +# kwonly_kwargs = (',' argument )* [',' [kwargs]] +# args_kwonly_kwargs = args kwonly_kwargs | kwargs +# poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]] +# vararglist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs +# varargslist = arguments ',' '/' [','[(vararglist_no_posonly)]] | (vararglist_no_posonly) +# +# It needs to be fully expanded to allow our LL(1) parser to work on it. 
+ +varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ + ((vfpdef ['=' test] ',')* ('*' [vname] (',' vname ['=' test])* + [',' ['**' vname [',']]] | '**' vname [',']) + | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) + ]] | ((vfpdef ['=' test] ',')* + ('*' [vname] (',' vname ['=' test])* [',' ['**' vname [',']]]| '**' vname [',']) + | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) + +vname: NAME +vfpdef: vname | '(' vfplist ')' +vfplist: vfpdef (',' vfpdef)* [','] + +stmt: simple_stmt | compound_stmt +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | exec_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' test] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +print_stmt: 'print' ( [ test (',' test)* [','] ] | + '>>' test [ (',' test)+ [','] ] ) +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist_star_expr] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +import_from: ('from' ('.'* dotted_name | '.'+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' 
NAME)* +global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* +exec_stmt: 'exec' expr ['in' test [',' test]] +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: ASYNC (funcdef | with_stmt | for_stmt) +if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] +while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +with_var: 'as' expr +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test [(',' | 'as') test]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +# Backward compatibility cruft to support: +# [ x for x in lambda: True, lambda: False if x() ] +# even while also allowing: +# lambda x: 5 if x else 2 +# (But not a mix of the two) +testlist_safe: old_test [(',' old_test)+ [',']] +old_test: or_test | old_lambdef +old_lambdef: 'lambda' [varargslist] ':' old_test + +namedexpr_test: test [':=' test] +test: or_test ['if' or_test 'else' test] | lambdef +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: [AWAIT] atom trailer* ['**' factor] +atom: ('(' [yield_expr|testlist_gexp] ')' | + '[' [listmaker] ']' | + '{' [dictsetmaker] '}' | + '`' testlist1 '`' | + NAME | NUMBER | STRING+ | '.' '.' '.') +listmaker: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] ) +testlist_gexp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] ) +lambdef: 'lambda' [varargslist] ':' test +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test | star_expr) + (comp_for | (',' (test | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. 
+argument: ( test [comp_for] |
+            test ':=' test |
+            test '=' test |
+            '**' test |
+            '*' test )
+
+comp_iter: comp_for | comp_if
+comp_for: [ASYNC] 'for' exprlist 'in' testlist_safe [comp_iter]
+comp_if: 'if' old_test [comp_iter]
+
+testlist1: test (',' test)*
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist_star_expr
diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/PatternGrammar.txt b/evalkit_cambrian/lib/python3.10/lib2to3/PatternGrammar.txt
new file mode 100644
index 0000000000000000000000000000000000000000..36bf8148273bd7a27b4a76817a776fe2ab234562
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/lib2to3/PatternGrammar.txt
@@ -0,0 +1,28 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+# A grammar to describe tree matching patterns.
+# Not shown here:
+# - 'TOKEN' stands for any token (leaf node)
+# - 'any' stands for any node (leaf or interior)
+# With 'any' we can still specify the sub-structure.
+
+# The start symbol is 'Matcher'.
+
+Matcher: Alternatives ENDMARKER
+
+Alternatives: Alternative ('|' Alternative)*
+
+Alternative: (Unit | NegatedUnit)+
+
+Unit: [NAME '='] ( STRING [Repeater]
+                 | NAME [Details] [Repeater]
+                 | '(' Alternatives ')' [Repeater]
+                 | '[' Alternatives ']'
+                 )
+
+NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
+
+Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
+
+Details: '<' Alternatives '>'
diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/__init__.py b/evalkit_cambrian/lib/python3.10/lib2to3/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4224dffef429570a3aaca3c1cc3a258a8f322251
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/lib2to3/__init__.py
@@ -0,0 +1,8 @@
+import warnings
+
+
+warnings.warn(
+    "lib2to3 package is deprecated and may not be able to parse Python 3.10+",
+    PendingDeprecationWarning,
+    stacklevel=2,
+)
diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/__main__.py b/evalkit_cambrian/lib/python3.10/lib2to3/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..80688baf27abfcaf99ea680787f17212747db5f6
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/lib2to3/__main__.py
@@ -0,0 +1,4 @@
+import sys
+from .main import main
+
+sys.exit(main("lib2to3.fixes"))
diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/btm_matcher.py b/evalkit_cambrian/lib/python3.10/lib2to3/btm_matcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b78868038bda0733f03995e3f31c517c17bdc00
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/lib2to3/btm_matcher.py
@@ -0,0 +1,163 @@
+"""A bottom-up tree matching algorithm implementation meant to speed
+up 2to3's matching process. After the tree patterns are reduced to
+their rarest linear path, a linear Aho-Corasick automaton is
+created. The linear automaton traverses the linear paths from the
+leaves to the root of the AST and returns a set of nodes for further
+matching. This reduces significantly the number of candidate nodes."""
+
+__author__ = "George Boutsioukis <gboutsioukis@gmail.com>"
+
+import logging
+import itertools
+from collections import defaultdict
+
+from .
import pytree +from .btm_utils import reduce_tree + +class BMNode(object): + """Class for a node of the Aho-Corasick automaton used in matching""" + count = itertools.count() + def __init__(self): + self.transition_table = {} + self.fixers = [] + self.id = next(BMNode.count) + self.content = '' + +class BottomMatcher(object): + """The main matcher class. After instantiating the patterns should + be added using the add_fixer method""" + + def __init__(self): + self.match = set() + self.root = BMNode() + self.nodes = [self.root] + self.fixers = [] + self.logger = logging.getLogger("RefactoringTool") + + def add_fixer(self, fixer): + """Reduces a fixer's pattern tree to a linear path and adds it + to the matcher(a common Aho-Corasick automaton). The fixer is + appended on the matching states and called when they are + reached""" + self.fixers.append(fixer) + tree = reduce_tree(fixer.pattern_tree) + linear = tree.get_linear_subpattern() + match_nodes = self.add(linear, start=self.root) + for match_node in match_nodes: + match_node.fixers.append(fixer) + + def add(self, pattern, start): + "Recursively adds a linear pattern to the AC automaton" + #print("adding pattern", pattern, "to", start) + if not pattern: + #print("empty pattern") + return [start] + if isinstance(pattern[0], tuple): + #alternatives + #print("alternatives") + match_nodes = [] + for alternative in pattern[0]: + #add all alternatives, and add the rest of the pattern + #to each end node + end_nodes = self.add(alternative, start=start) + for end in end_nodes: + match_nodes.extend(self.add(pattern[1:], end)) + return match_nodes + else: + #single token + #not last + if pattern[0] not in start.transition_table: + #transition did not exist, create new + next_node = BMNode() + start.transition_table[pattern[0]] = next_node + else: + #transition exists already, follow + next_node = start.transition_table[pattern[0]] + + if pattern[1:]: + end_nodes = self.add(pattern[1:], start=next_node) + else: + end_nodes = [next_node] + return end_nodes + + def run(self, leaves): + """The main interface with the bottom matcher. The tree is + traversed from the bottom using the constructed + automaton. Nodes are only checked once as the tree is + retraversed. When the automaton fails, we give it one more + shot(in case the above tree matches as a whole with the + rejected leaf), then we break for the next leaf. 
There is the + special case of multiple arguments(see code comments) where we + recheck the nodes + + Args: + The leaves of the AST tree to be matched + + Returns: + A dictionary of node matches with fixers as the keys + """ + current_ac_node = self.root + results = defaultdict(list) + for leaf in leaves: + current_ast_node = leaf + while current_ast_node: + current_ast_node.was_checked = True + for child in current_ast_node.children: + # multiple statements, recheck + if isinstance(child, pytree.Leaf) and child.value == ";": + current_ast_node.was_checked = False + break + if current_ast_node.type == 1: + #name + node_token = current_ast_node.value + else: + node_token = current_ast_node.type + + if node_token in current_ac_node.transition_table: + #token matches + current_ac_node = current_ac_node.transition_table[node_token] + for fixer in current_ac_node.fixers: + results[fixer].append(current_ast_node) + else: + #matching failed, reset automaton + current_ac_node = self.root + if (current_ast_node.parent is not None + and current_ast_node.parent.was_checked): + #the rest of the tree upwards has been checked, next leaf + break + + #recheck the rejected node once from the root + if node_token in current_ac_node.transition_table: + #token matches + current_ac_node = current_ac_node.transition_table[node_token] + for fixer in current_ac_node.fixers: + results[fixer].append(current_ast_node) + + current_ast_node = current_ast_node.parent + return results + + def print_ac(self): + "Prints a graphviz diagram of the BM automaton(for debugging)" + print("digraph g{") + def print_node(node): + for subnode_key in node.transition_table.keys(): + subnode = node.transition_table[subnode_key] + print("%d -> %d [label=%s] //%s" % + (node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers))) + if subnode_key == 1: + print(subnode.content) + print_node(subnode) + print_node(self.root) + print("}") + +# taken from pytree.py for debugging; only used by print_ac +_type_reprs = {} +def type_repr(type_num): + global _type_reprs + if not _type_reprs: + from .pygram import python_symbols + # printing tokens is possible but not as useful + # from .pgen2 import token // token.__dict__.items(): + for name, val in python_symbols.__dict__.items(): + if type(val) == int: _type_reprs[val] = name + return _type_reprs.setdefault(type_num, type_num) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/btm_utils.py b/evalkit_cambrian/lib/python3.10/lib2to3/btm_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ff76ba340470729f7e44ec2ce19afc7cc1ca931a --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/btm_utils.py @@ -0,0 +1,281 @@ +"Utility functions used by the btm_matcher module" + +from . import pytree +from .pgen2 import grammar, token +from .pygram import pattern_symbols, python_symbols + +syms = pattern_symbols +pysyms = python_symbols +tokens = grammar.opmap +token_labels = token + +TYPE_ANY = -1 +TYPE_ALTERNATIVES = -2 +TYPE_GROUP = -3 + +class MinNode(object): + """This class serves as an intermediate representation of the + pattern tree during the conversion to sets of leaf-to-root + subpatterns""" + + def __init__(self, type=None, name=None): + self.type = type + self.name = name + self.children = [] + self.leaf = False + self.parent = None + self.alternatives = [] + self.group = [] + + def __repr__(self): + return str(self.type) + ' ' + str(self.name) + + def leaf_to_root(self): + """Internal method. Returns a characteristic path of the + pattern tree. 
This method must be run for all leaves until the + linear subpatterns are merged into a single""" + node = self + subp = [] + while node: + if node.type == TYPE_ALTERNATIVES: + node.alternatives.append(subp) + if len(node.alternatives) == len(node.children): + #last alternative + subp = [tuple(node.alternatives)] + node.alternatives = [] + node = node.parent + continue + else: + node = node.parent + subp = None + break + + if node.type == TYPE_GROUP: + node.group.append(subp) + #probably should check the number of leaves + if len(node.group) == len(node.children): + subp = get_characteristic_subpattern(node.group) + node.group = [] + node = node.parent + continue + else: + node = node.parent + subp = None + break + + if node.type == token_labels.NAME and node.name: + #in case of type=name, use the name instead + subp.append(node.name) + else: + subp.append(node.type) + + node = node.parent + return subp + + def get_linear_subpattern(self): + """Drives the leaf_to_root method. The reason that + leaf_to_root must be run multiple times is because we need to + reject 'group' matches; for example the alternative form + (a | b c) creates a group [b c] that needs to be matched. Since + matching multiple linear patterns overcomes the automaton's + capabilities, leaf_to_root merges each group into a single + choice based on 'characteristic'ity, + + i.e. (a|b c) -> (a|b) if b more characteristic than c + + Returns: The most 'characteristic'(as defined by + get_characteristic_subpattern) path for the compiled pattern + tree. + """ + + for l in self.leaves(): + subp = l.leaf_to_root() + if subp: + return subp + + def leaves(self): + "Generator that returns the leaves of the tree" + for child in self.children: + yield from child.leaves() + if not self.children: + yield self + +def reduce_tree(node, parent=None): + """ + Internal function. Reduces a compiled pattern tree to an + intermediate representation suitable for feeding the + automaton. This also trims off any optional pattern elements(like + [a], a*). 
+ """ + + new_node = None + #switch on the node type + if node.type == syms.Matcher: + #skip + node = node.children[0] + + if node.type == syms.Alternatives : + #2 cases + if len(node.children) <= 2: + #just a single 'Alternative', skip this node + new_node = reduce_tree(node.children[0], parent) + else: + #real alternatives + new_node = MinNode(type=TYPE_ALTERNATIVES) + #skip odd children('|' tokens) + for child in node.children: + if node.children.index(child)%2: + continue + reduced = reduce_tree(child, new_node) + if reduced is not None: + new_node.children.append(reduced) + elif node.type == syms.Alternative: + if len(node.children) > 1: + + new_node = MinNode(type=TYPE_GROUP) + for child in node.children: + reduced = reduce_tree(child, new_node) + if reduced: + new_node.children.append(reduced) + if not new_node.children: + # delete the group if all of the children were reduced to None + new_node = None + + else: + new_node = reduce_tree(node.children[0], parent) + + elif node.type == syms.Unit: + if (isinstance(node.children[0], pytree.Leaf) and + node.children[0].value == '('): + #skip parentheses + return reduce_tree(node.children[1], parent) + if ((isinstance(node.children[0], pytree.Leaf) and + node.children[0].value == '[') + or + (len(node.children)>1 and + hasattr(node.children[1], "value") and + node.children[1].value == '[')): + #skip whole unit if its optional + return None + + leaf = True + details_node = None + alternatives_node = None + has_repeater = False + repeater_node = None + has_variable_name = False + + for child in node.children: + if child.type == syms.Details: + leaf = False + details_node = child + elif child.type == syms.Repeater: + has_repeater = True + repeater_node = child + elif child.type == syms.Alternatives: + alternatives_node = child + if hasattr(child, 'value') and child.value == '=': # variable name + has_variable_name = True + + #skip variable name + if has_variable_name: + #skip variable name, '=' + name_leaf = node.children[2] + if hasattr(name_leaf, 'value') and name_leaf.value == '(': + # skip parenthesis + name_leaf = node.children[3] + else: + name_leaf = node.children[0] + + #set node type + if name_leaf.type == token_labels.NAME: + #(python) non-name or wildcard + if name_leaf.value == 'any': + new_node = MinNode(type=TYPE_ANY) + else: + if hasattr(token_labels, name_leaf.value): + new_node = MinNode(type=getattr(token_labels, name_leaf.value)) + else: + new_node = MinNode(type=getattr(pysyms, name_leaf.value)) + + elif name_leaf.type == token_labels.STRING: + #(python) name or character; remove the apostrophes from + #the string value + name = name_leaf.value.strip("'") + if name in tokens: + new_node = MinNode(type=tokens[name]) + else: + new_node = MinNode(type=token_labels.NAME, name=name) + elif name_leaf.type == syms.Alternatives: + new_node = reduce_tree(alternatives_node, parent) + + #handle repeaters + if has_repeater: + if repeater_node.children[0].value == '*': + #reduce to None + new_node = None + elif repeater_node.children[0].value == '+': + #reduce to a single occurrence i.e. 
do nothing + pass + else: + #TODO: handle {min, max} repeaters + raise NotImplementedError + pass + + #add children + if details_node and new_node is not None: + for child in details_node.children[1:-1]: + #skip '<', '>' markers + reduced = reduce_tree(child, new_node) + if reduced is not None: + new_node.children.append(reduced) + if new_node: + new_node.parent = parent + return new_node + + +def get_characteristic_subpattern(subpatterns): + """Picks the most characteristic from a list of linear patterns + Current order used is: + names > common_names > common_chars + """ + if not isinstance(subpatterns, list): + return subpatterns + if len(subpatterns)==1: + return subpatterns[0] + + # first pick out the ones containing variable names + subpatterns_with_names = [] + subpatterns_with_common_names = [] + common_names = ['in', 'for', 'if' , 'not', 'None'] + subpatterns_with_common_chars = [] + common_chars = "[]().,:" + for subpattern in subpatterns: + if any(rec_test(subpattern, lambda x: type(x) is str)): + if any(rec_test(subpattern, + lambda x: isinstance(x, str) and x in common_chars)): + subpatterns_with_common_chars.append(subpattern) + elif any(rec_test(subpattern, + lambda x: isinstance(x, str) and x in common_names)): + subpatterns_with_common_names.append(subpattern) + + else: + subpatterns_with_names.append(subpattern) + + if subpatterns_with_names: + subpatterns = subpatterns_with_names + elif subpatterns_with_common_names: + subpatterns = subpatterns_with_common_names + elif subpatterns_with_common_chars: + subpatterns = subpatterns_with_common_chars + # of the remaining subpatterns pick out the longest one + return max(subpatterns, key=len) + +def rec_test(sequence, test_func): + """Tests test_func on all items of sequence and items of included + sub-iterables""" + for x in sequence: + if isinstance(x, (list, tuple)): + yield from rec_test(x, test_func) + else: + yield test_func(x) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixer_base.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixer_base.py new file mode 100644 index 0000000000000000000000000000000000000000..df581a4deab9e93aade8591bf13f949050d7e50d --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixer_base.py @@ -0,0 +1,186 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Base class for fixers (optional, but recommended).""" + +# Python imports +import itertools + +# Local imports +from .patcomp import PatternCompiler +from . import pygram +from .fixer_util import does_tree_import + +class BaseFix(object): + + """Optional base class for fixers. + + The subclass name must be FixFooBar where FooBar is the result of + removing underscores and capitalizing the words of the fix name. + For example, the class name for a fixer named 'has_key' should be + FixHasKey. + """ + + PATTERN = None # Most subclasses should override with a string literal + pattern = None # Compiled pattern, set by compile_pattern() + pattern_tree = None # Tree representation of the pattern + options = None # Options object passed to initializer + filename = None # The filename (set by set_filename) + numbers = itertools.count(1) # For new_name() + used_names = set() # A set of all used NAMEs + order = "post" # Does the fixer prefer pre- or post-order traversal + explicit = False # Is this ignored by refactor.py -f all? + run_order = 5 # Fixers will be sorted by run order before execution + # Lower numbers will be run first. 
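+    # Aside: in practice a concrete fixer needs little more than PATTERN
+    # and transform().  A hypothetical minimal example, modeled on
+    # fix_basestring.py later in this diff (it would rewrite `unicode`
+    # to `str`):
+    #
+    #     from lib2to3 import fixer_base
+    #     from lib2to3.fixer_util import Name
+    #
+    #     class FixUnicodeType(fixer_base.BaseFix):
+    #         BM_compatible = True
+    #         PATTERN = "'unicode'"
+    #
+    #         def transform(self, node, results):
+    #             return Name("str", prefix=node.prefix)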
+ _accept_type = None # [Advanced and not public] This tells RefactoringTool + # which node type to accept when there's not a pattern. + + keep_line_order = False # For the bottom matcher: match with the + # original line order + BM_compatible = False # Compatibility with the bottom matching + # module; every fixer should set this + # manually + + # Shortcut for access to Python grammar symbols + syms = pygram.python_symbols + + def __init__(self, options, log): + """Initializer. Subclass may override. + + Args: + options: a dict containing the options passed to RefactoringTool + that could be used to customize the fixer through the command line. + log: a list to append warnings and other messages to. + """ + self.options = options + self.log = log + self.compile_pattern() + + def compile_pattern(self): + """Compiles self.PATTERN into self.pattern. + + Subclass may override if it doesn't want to use + self.{pattern,PATTERN} in .match(). + """ + if self.PATTERN is not None: + PC = PatternCompiler() + self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN, + with_tree=True) + + def set_filename(self, filename): + """Set the filename. + + The main refactoring tool should call this. + """ + self.filename = filename + + def match(self, node): + """Returns match for a given parse tree node. + + Should return a true or false object (not necessarily a bool). + It may return a non-empty dict of matching sub-nodes as + returned by a matching pattern. + + Subclass may override. + """ + results = {"node": node} + return self.pattern.match(node, results) and results + + def transform(self, node, results): + """Returns the transformation for a given parse tree node. + + Args: + node: the root of the parse tree that matched the fixer. + results: a dict mapping symbolic names to part of the match. + + Returns: + None, or a node that is a modified copy of the + argument node. The node argument may also be modified in-place to + effect the same change. + + Subclass *must* override. + """ + raise NotImplementedError() + + def new_name(self, template="xxx_todo_changeme"): + """Return a string suitable for use as an identifier + + The new name is guaranteed not to conflict with other identifiers. + """ + name = template + while name in self.used_names: + name = template + str(next(self.numbers)) + self.used_names.add(name) + return name + + def log_message(self, message): + if self.first_log: + self.first_log = False + self.log.append("### In file %s ###" % self.filename) + self.log.append(message) + + def cannot_convert(self, node, reason=None): + """Warn the user that a given chunk of code is not valid Python 3, + but that it cannot be converted automatically. + + First argument is the top-level node for the code in question. + Optional second argument is why it can't be converted. + """ + lineno = node.get_lineno() + for_output = node.clone() + for_output.prefix = "" + msg = "Line %d: could not convert: %s" + self.log_message(msg % (lineno, for_output)) + if reason: + self.log_message(reason) + + def warning(self, node, reason): + """Used for warning the user about possible uncertainty in the + translation. + + First argument is the top-level node for the code in question. + Optional second argument is why it can't be converted. + """ + lineno = node.get_lineno() + self.log_message("Line %d: %s" % (lineno, reason)) + + def start_tree(self, tree, filename): + """Some fixers need to maintain tree-wide state. + This method is called once, at the start of tree fix-up. 
+ + tree - the root node of the tree to be processed. + filename - the name of the file the tree came from. + """ + self.used_names = tree.used_names + self.set_filename(filename) + self.numbers = itertools.count(1) + self.first_log = True + + def finish_tree(self, tree, filename): + """Some fixers need to maintain tree-wide state. + This method is called once, at the conclusion of tree fix-up. + + tree - the root node of the tree to be processed. + filename - the name of the file the tree came from. + """ + pass + + +class ConditionalFix(BaseFix): + """ Base class for fixers which not execute if an import is found. """ + + # This is the name of the import which, if found, will cause the test to be skipped + skip_on = None + + def start_tree(self, *args): + super(ConditionalFix, self).start_tree(*args) + self._should_skip = None + + def should_skip(self, node): + if self._should_skip is not None: + return self._should_skip + pkg = self.skip_on.split(".") + name = pkg[-1] + pkg = ".".join(pkg[:-1]) + self._should_skip = does_tree_import(pkg, name, node) + return self._should_skip diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixer_util.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixer_util.py new file mode 100644 index 0000000000000000000000000000000000000000..c2a3a47f503286e05e9bd9d69759e6a2ca214b2c --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixer_util.py @@ -0,0 +1,453 @@ +"""Utility functions, node construction macros, etc.""" +# Author: Collin Winter + +# Local imports +from .pgen2 import token +from .pytree import Leaf, Node +from .pygram import python_symbols as syms +from . import patcomp + + +########################################################### +### Common node-construction "macros" +########################################################### + +def KeywordArg(keyword, value): + return Node(syms.argument, + [keyword, Leaf(token.EQUAL, "="), value]) + +def LParen(): + return Leaf(token.LPAR, "(") + +def RParen(): + return Leaf(token.RPAR, ")") + +def Assign(target, source): + """Build an assignment statement""" + if not isinstance(target, list): + target = [target] + if not isinstance(source, list): + source.prefix = " " + source = [source] + + return Node(syms.atom, + target + [Leaf(token.EQUAL, "=", prefix=" ")] + source) + +def Name(name, prefix=None): + """Return a NAME leaf""" + return Leaf(token.NAME, name, prefix=prefix) + +def Attr(obj, attr): + """A node tuple for obj.attr""" + return [obj, Node(syms.trailer, [Dot(), attr])] + +def Comma(): + """A comma leaf""" + return Leaf(token.COMMA, ",") + +def Dot(): + """A period (.) 
leaf""" + return Leaf(token.DOT, ".") + +def ArgList(args, lparen=LParen(), rparen=RParen()): + """A parenthesised argument list, used by Call()""" + node = Node(syms.trailer, [lparen.clone(), rparen.clone()]) + if args: + node.insert_child(1, Node(syms.arglist, args)) + return node + +def Call(func_name, args=None, prefix=None): + """A function call""" + node = Node(syms.power, [func_name, ArgList(args)]) + if prefix is not None: + node.prefix = prefix + return node + +def Newline(): + """A newline literal""" + return Leaf(token.NEWLINE, "\n") + +def BlankLine(): + """A blank line""" + return Leaf(token.NEWLINE, "") + +def Number(n, prefix=None): + return Leaf(token.NUMBER, n, prefix=prefix) + +def Subscript(index_node): + """A numeric or string subscript""" + return Node(syms.trailer, [Leaf(token.LBRACE, "["), + index_node, + Leaf(token.RBRACE, "]")]) + +def String(string, prefix=None): + """A string leaf""" + return Leaf(token.STRING, string, prefix=prefix) + +def ListComp(xp, fp, it, test=None): + """A list comprehension of the form [xp for fp in it if test]. + + If test is None, the "if test" part is omitted. + """ + xp.prefix = "" + fp.prefix = " " + it.prefix = " " + for_leaf = Leaf(token.NAME, "for") + for_leaf.prefix = " " + in_leaf = Leaf(token.NAME, "in") + in_leaf.prefix = " " + inner_args = [for_leaf, fp, in_leaf, it] + if test: + test.prefix = " " + if_leaf = Leaf(token.NAME, "if") + if_leaf.prefix = " " + inner_args.append(Node(syms.comp_if, [if_leaf, test])) + inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)]) + return Node(syms.atom, + [Leaf(token.LBRACE, "["), + inner, + Leaf(token.RBRACE, "]")]) + +def FromImport(package_name, name_leafs): + """ Return an import statement in the form: + from package import name_leafs""" + # XXX: May not handle dotted imports properly (eg, package_name='foo.bar') + #assert package_name == '.' or '.' not in package_name, "FromImport has "\ + # "not been tested with dotted package names -- use at your own "\ + # "peril!" 
+ + for leaf in name_leafs: + # Pull the leaves out of their old tree + leaf.remove() + + children = [Leaf(token.NAME, "from"), + Leaf(token.NAME, package_name, prefix=" "), + Leaf(token.NAME, "import", prefix=" "), + Node(syms.import_as_names, name_leafs)] + imp = Node(syms.import_from, children) + return imp + +def ImportAndCall(node, results, names): + """Returns an import statement and calls a method + of the module: + + import module + module.name()""" + obj = results["obj"].clone() + if obj.type == syms.arglist: + newarglist = obj.clone() + else: + newarglist = Node(syms.arglist, [obj.clone()]) + after = results["after"] + if after: + after = [n.clone() for n in after] + new = Node(syms.power, + Attr(Name(names[0]), Name(names[1])) + + [Node(syms.trailer, + [results["lpar"].clone(), + newarglist, + results["rpar"].clone()])] + after) + new.prefix = node.prefix + return new + + +########################################################### +### Determine whether a node represents a given literal +########################################################### + +def is_tuple(node): + """Does the node represent a tuple literal?""" + if isinstance(node, Node) and node.children == [LParen(), RParen()]: + return True + return (isinstance(node, Node) + and len(node.children) == 3 + and isinstance(node.children[0], Leaf) + and isinstance(node.children[1], Node) + and isinstance(node.children[2], Leaf) + and node.children[0].value == "(" + and node.children[2].value == ")") + +def is_list(node): + """Does the node represent a list literal?""" + return (isinstance(node, Node) + and len(node.children) > 1 + and isinstance(node.children[0], Leaf) + and isinstance(node.children[-1], Leaf) + and node.children[0].value == "[" + and node.children[-1].value == "]") + + +########################################################### +### Misc +########################################################### + +def parenthesize(node): + return Node(syms.atom, [LParen(), node, RParen()]) + + +consuming_calls = {"sorted", "list", "set", "any", "all", "tuple", "sum", + "min", "max", "enumerate"} + +def attr_chain(obj, attr): + """Follow an attribute chain. + + If you have a chain of objects where a.foo -> b, b.foo-> c, etc, + use this to iterate over all objects in the chain. Iteration is + terminated by getattr(x, attr) is None. + + Args: + obj: the starting object + attr: the name of the chaining attribute + + Yields: + Each successive object in the chain. + """ + next = getattr(obj, attr) + while next: + yield next + next = getattr(next, attr) + +p0 = """for_stmt< 'for' any 'in' node=any ':' any* > + | comp_for< 'for' any 'in' node=any any* > + """ +p1 = """ +power< + ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' | + 'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) ) + trailer< '(' node=any ')' > + any* +> +""" +p2 = """ +power< + ( 'sorted' | 'enumerate' ) + trailer< '(' arglist ')' > + any* +> +""" +pats_built = False +def in_special_context(node): + """ Returns true if node is in an environment where all that is required + of it is being iterable (ie, it doesn't matter if it returns a list + or an iterator). + See test_map_nochange in test_fixers.py for some examples and tests. 
+ """ + global p0, p1, p2, pats_built + if not pats_built: + p0 = patcomp.compile_pattern(p0) + p1 = patcomp.compile_pattern(p1) + p2 = patcomp.compile_pattern(p2) + pats_built = True + patterns = [p0, p1, p2] + for pattern, parent in zip(patterns, attr_chain(node, "parent")): + results = {} + if pattern.match(parent, results) and results["node"] is node: + return True + return False + +def is_probably_builtin(node): + """ + Check that something isn't an attribute or function name etc. + """ + prev = node.prev_sibling + if prev is not None and prev.type == token.DOT: + # Attribute lookup. + return False + parent = node.parent + if parent.type in (syms.funcdef, syms.classdef): + return False + if parent.type == syms.expr_stmt and parent.children[0] is node: + # Assignment. + return False + if parent.type == syms.parameters or \ + (parent.type == syms.typedargslist and ( + (prev is not None and prev.type == token.COMMA) or + parent.children[0] is node + )): + # The name of an argument. + return False + return True + +def find_indentation(node): + """Find the indentation of *node*.""" + while node is not None: + if node.type == syms.suite and len(node.children) > 2: + indent = node.children[1] + if indent.type == token.INDENT: + return indent.value + node = node.parent + return "" + +########################################################### +### The following functions are to find bindings in a suite +########################################################### + +def make_suite(node): + if node.type == syms.suite: + return node + node = node.clone() + parent, node.parent = node.parent, None + suite = Node(syms.suite, [node]) + suite.parent = parent + return suite + +def find_root(node): + """Find the top level namespace.""" + # Scamper up to the top level namespace + while node.type != syms.file_input: + node = node.parent + if not node: + raise ValueError("root found before file_input node was found.") + return node + +def does_tree_import(package, name, node): + """ Returns true if name is imported from package at the + top level of the tree which node belongs to. + To cover the case of an import like 'import foo', use + None for the package and 'foo' for the name. """ + binding = find_binding(name, find_root(node), package) + return bool(binding) + +def is_import(node): + """Returns true if the node is an import statement.""" + return node.type in (syms.import_name, syms.import_from) + +def touch_import(package, name, node): + """ Works like `does_tree_import` but adds an import statement + if it was not imported. """ + def is_import_stmt(node): + return (node.type == syms.simple_stmt and node.children and + is_import(node.children[0])) + + root = find_root(node) + + if does_tree_import(package, name, root): + return + + # figure out where to insert the new import. First try to find + # the first import and then skip to the last one. + insert_pos = offset = 0 + for idx, node in enumerate(root.children): + if not is_import_stmt(node): + continue + for offset, node2 in enumerate(root.children[idx:]): + if not is_import_stmt(node2): + break + insert_pos = idx + offset + break + + # if there are no imports where we can insert, find the docstring. 
+ # if that also fails, we stick to the beginning of the file + if insert_pos == 0: + for idx, node in enumerate(root.children): + if (node.type == syms.simple_stmt and node.children and + node.children[0].type == token.STRING): + insert_pos = idx + 1 + break + + if package is None: + import_ = Node(syms.import_name, [ + Leaf(token.NAME, "import"), + Leaf(token.NAME, name, prefix=" ") + ]) + else: + import_ = FromImport(package, [Leaf(token.NAME, name, prefix=" ")]) + + children = [import_, Newline()] + root.insert_child(insert_pos, Node(syms.simple_stmt, children)) + + +_def_syms = {syms.classdef, syms.funcdef} +def find_binding(name, node, package=None): + """ Returns the node which binds variable name, otherwise None. + If optional argument package is supplied, only imports will + be returned. + See test cases for examples.""" + for child in node.children: + ret = None + if child.type == syms.for_stmt: + if _find(name, child.children[1]): + return child + n = find_binding(name, make_suite(child.children[-1]), package) + if n: ret = n + elif child.type in (syms.if_stmt, syms.while_stmt): + n = find_binding(name, make_suite(child.children[-1]), package) + if n: ret = n + elif child.type == syms.try_stmt: + n = find_binding(name, make_suite(child.children[2]), package) + if n: + ret = n + else: + for i, kid in enumerate(child.children[3:]): + if kid.type == token.COLON and kid.value == ":": + # i+3 is the colon, i+4 is the suite + n = find_binding(name, make_suite(child.children[i+4]), package) + if n: ret = n + elif child.type in _def_syms and child.children[1].value == name: + ret = child + elif _is_import_binding(child, name, package): + ret = child + elif child.type == syms.simple_stmt: + ret = find_binding(name, child, package) + elif child.type == syms.expr_stmt: + if _find(name, child.children[0]): + ret = child + + if ret: + if not package: + return ret + if is_import(ret): + return ret + return None + +_block_syms = {syms.funcdef, syms.classdef, syms.trailer} +def _find(name, node): + nodes = [node] + while nodes: + node = nodes.pop() + if node.type > 256 and node.type not in _block_syms: + nodes.extend(node.children) + elif node.type == token.NAME and node.value == name: + return node + return None + +def _is_import_binding(node, name, package=None): + """ Will return node if node will import name, or node + will import * from package. None is returned otherwise. + See test cases for examples. """ + + if node.type == syms.import_name and not package: + imp = node.children[1] + if imp.type == syms.dotted_as_names: + for child in imp.children: + if child.type == syms.dotted_as_name: + if child.children[2].value == name: + return node + elif child.type == token.NAME and child.value == name: + return node + elif imp.type == syms.dotted_as_name: + last = imp.children[-1] + if last.type == token.NAME and last.value == name: + return node + elif imp.type == token.NAME and imp.value == name: + return node + elif node.type == syms.import_from: + # str(...) is used to make life easier here, because + # from a.b import parses to ['import', ['a', '.', 'b'], ...] 
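+        # (illustrative: for `from a.b import x`, str(node.children[1])
+        #  yields " a.b" including the leading prefix, hence the .strip()
+        #  below)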
+ if package and str(node.children[1]).strip() != package: + return None + n = node.children[3] + if package and _find("as", n): + # See test_from_import_as for explanation + return None + elif n.type == syms.import_as_names and _find(name, n): + return node + elif n.type == syms.import_as_name: + child = n.children[2] + if child.type == token.NAME and child.value == name: + return node + elif n.type == token.NAME and n.value == name: + return node + elif package and n.type == token.STAR: + return node + return None diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_basestring.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_basestring.py new file mode 100644 index 0000000000000000000000000000000000000000..5fe69a0f03b1b885e28e508a8670406c67a4896a --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_basestring.py @@ -0,0 +1,14 @@ +"""Fixer for basestring -> str.""" +# Author: Christian Heimes + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + +class FixBasestring(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = "'basestring'" + + def transform(self, node, results): + return Name("str", prefix=node.prefix) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_buffer.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..f9a1958ad3b93e502126396e02caca773f1f01fa --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_buffer.py @@ -0,0 +1,22 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that changes buffer(...) into memoryview(...).""" + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + + +class FixBuffer(fixer_base.BaseFix): + BM_compatible = True + + explicit = True # The user must ask for this fixer + + PATTERN = """ + power< name='buffer' trailer< '(' [any] ')' > any* > + """ + + def transform(self, node, results): + name = results["name"] + name.replace(Name("memoryview", prefix=name.prefix)) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_dict.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..d3655c9f1b2d9bf82d8ce3a2140cfeff9ba97323 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_dict.py @@ -0,0 +1,106 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for dict methods. + +d.keys() -> list(d.keys()) +d.items() -> list(d.items()) +d.values() -> list(d.values()) + +d.iterkeys() -> iter(d.keys()) +d.iteritems() -> iter(d.items()) +d.itervalues() -> iter(d.values()) + +d.viewkeys() -> d.keys() +d.viewitems() -> d.items() +d.viewvalues() -> d.values() + +Except in certain very specific contexts: the iter() can be dropped +when the context is list(), sorted(), iter() or for...in; the list() +can be dropped when the context is list() or sorted() (but not iter() +or for...in!). Special contexts that apply to both: list(), sorted(), tuple() +set(), any(), all(), sum(). + +Note: iter(d.keys()) could be written as iter(d) but since the +original d.iterkeys() was also redundant we don't fix this. And there +are (rare) contexts where it makes a difference (e.g. when passing it +as an argument to a function that introspects the argument). +""" + +# Local imports +from .. import pytree +from .. import patcomp +from .. 
import fixer_base +from ..fixer_util import Name, Call, Dot +from .. import fixer_util + + +iter_exempt = fixer_util.consuming_calls | {"iter"} + + +class FixDict(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + power< head=any+ + trailer< '.' method=('keys'|'items'|'values'| + 'iterkeys'|'iteritems'|'itervalues'| + 'viewkeys'|'viewitems'|'viewvalues') > + parens=trailer< '(' ')' > + tail=any* + > + """ + + def transform(self, node, results): + head = results["head"] + method = results["method"][0] # Extract node for method name + tail = results["tail"] + syms = self.syms + method_name = method.value + isiter = method_name.startswith("iter") + isview = method_name.startswith("view") + if isiter or isview: + method_name = method_name[4:] + assert method_name in ("keys", "items", "values"), repr(method) + head = [n.clone() for n in head] + tail = [n.clone() for n in tail] + special = not tail and self.in_special_context(node, isiter) + args = head + [pytree.Node(syms.trailer, + [Dot(), + Name(method_name, + prefix=method.prefix)]), + results["parens"].clone()] + new = pytree.Node(syms.power, args) + if not (special or isview): + new.prefix = "" + new = Call(Name("iter" if isiter else "list"), [new]) + if tail: + new = pytree.Node(syms.power, [new] + tail) + new.prefix = node.prefix + return new + + P1 = "power< func=NAME trailer< '(' node=any ')' > any* >" + p1 = patcomp.compile_pattern(P1) + + P2 = """for_stmt< 'for' any 'in' node=any ':' any* > + | comp_for< 'for' any 'in' node=any any* > + """ + p2 = patcomp.compile_pattern(P2) + + def in_special_context(self, node, isiter): + if node.parent is None: + return False + results = {} + if (node.parent.parent is not None and + self.p1.match(node.parent.parent, results) and + results["node"] is node): + if isiter: + # iter(d.iterkeys()) -> iter(d.keys()), etc. + return results["func"].value in iter_exempt + else: + # list(d.keys()) -> list(d.keys()), etc. + return results["func"].value in fixer_util.consuming_calls + if not isiter: + return False + # for ... in d.iterkeys() -> for ... in d.keys(), etc. + return self.p2.match(node.parent, results) and results["node"] is node diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_execfile.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_execfile.py new file mode 100644 index 0000000000000000000000000000000000000000..b6c786fd4e8b6a141ab3619ba2b2576db158fcd4 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_execfile.py @@ -0,0 +1,53 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for execfile. + +This converts usages of the execfile function into calls to the built-in +exec() function. +""" + +from .. import fixer_base +from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node, + ArgList, String, syms) + + +class FixExecfile(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > > + | + power< 'execfile' trailer< '(' filename=any ')' > > + """ + + def transform(self, node, results): + assert results + filename = results["filename"] + globals = results.get("globals") + locals = results.get("locals") + + # Copy over the prefix from the right parentheses end of the execfile + # call. + execfile_paren = node.children[-1].children[-1].clone() + # Construct open().read(). 
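+        # (For an execfile(fn) call the replacement built below is, in
+        #  effect, exec(compile(open(fn, "rb").read(), fn, 'exec')), with
+        #  any globals/locals arguments appended at the end.)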
+ open_args = ArgList([filename.clone(), Comma(), String('"rb"', ' ')], + rparen=execfile_paren) + open_call = Node(syms.power, [Name("open"), open_args]) + read = [Node(syms.trailer, [Dot(), Name('read')]), + Node(syms.trailer, [LParen(), RParen()])] + open_expr = [open_call] + read + # Wrap the open call in a compile call. This is so the filename will be + # preserved in the execed code. + filename_arg = filename.clone() + filename_arg.prefix = " " + exec_str = String("'exec'", " ") + compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str] + compile_call = Call(Name("compile"), compile_args, "") + # Finally, replace the execfile call with an exec call. + args = [compile_call] + if globals is not None: + args.extend([Comma(), globals.clone()]) + if locals is not None: + args.extend([Comma(), locals.clone()]) + return Call(Name("exec"), args, prefix=node.prefix) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_filter.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..38e9078f11ac88f346b7ba2468c69dc5a4f74dd5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_filter.py @@ -0,0 +1,94 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that changes filter(F, X) into list(filter(F, X)). + +We avoid the transformation if the filter() call is directly contained +in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or +for V in <>:. + +NOTE: This is still not correct if the original code was depending on +filter(F, X) to return a string if X is a string and a tuple if X is a +tuple. That would require type inference, which we don't do. Let +Python 2.6 figure it out. +""" + +# Local imports +from .. 
import fixer_base +from ..pytree import Node +from ..pygram import python_symbols as syms +from ..fixer_util import Name, ArgList, ListComp, in_special_context, parenthesize + + +class FixFilter(fixer_base.ConditionalFix): + BM_compatible = True + + PATTERN = """ + filter_lambda=power< + 'filter' + trailer< + '(' + arglist< + lambdef< 'lambda' + (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any + > + ',' + it=any + > + ')' + > + [extra_trailers=trailer*] + > + | + power< + 'filter' + trailer< '(' arglist< none='None' ',' seq=any > ')' > + [extra_trailers=trailer*] + > + | + power< + 'filter' + args=trailer< '(' [any] ')' > + [extra_trailers=trailer*] + > + """ + + skip_on = "future_builtins.filter" + + def transform(self, node, results): + if self.should_skip(node): + return + + trailers = [] + if 'extra_trailers' in results: + for t in results['extra_trailers']: + trailers.append(t.clone()) + + if "filter_lambda" in results: + xp = results.get("xp").clone() + if xp.type == syms.test: + xp.prefix = "" + xp = parenthesize(xp) + + new = ListComp(results.get("fp").clone(), + results.get("fp").clone(), + results.get("it").clone(), xp) + new = Node(syms.power, [new] + trailers, prefix="") + + elif "none" in results: + new = ListComp(Name("_f"), + Name("_f"), + results["seq"].clone(), + Name("_f")) + new = Node(syms.power, [new] + trailers, prefix="") + + else: + if in_special_context(node): + return None + + args = results['args'].clone() + new = Node(syms.power, [Name("filter"), args], prefix="") + new = Node(syms.power, [Name("list"), ArgList([new])] + trailers) + new.prefix = "" + new.prefix = node.prefix + return new diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_getcwdu.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_getcwdu.py new file mode 100644 index 0000000000000000000000000000000000000000..087eaedcb26f9cfa139f524a67464154a23d79ac --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_getcwdu.py @@ -0,0 +1,19 @@ +""" +Fixer that changes os.getcwdu() to os.getcwd(). +""" +# Author: Victor Stinner + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + +class FixGetcwdu(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + power< 'os' trailer< dot='.' name='getcwdu' > any* > + """ + + def transform(self, node, results): + name = results["name"] + name.replace(Name("getcwd", prefix=name.prefix)) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_idioms.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_idioms.py new file mode 100644 index 0000000000000000000000000000000000000000..6905913d7cb79d2ead79ca7c0c88ea6c9b98d79d --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_idioms.py @@ -0,0 +1,152 @@ +"""Adjust some old Python 2 idioms to their modern counterparts. + +* Change some type comparisons to isinstance() calls: + type(x) == T -> isinstance(x, T) + type(x) is T -> isinstance(x, T) + type(x) != T -> not isinstance(x, T) + type(x) is not T -> not isinstance(x, T) + +* Change "while 1:" into "while True:". + +* Change both + + v = list(EXPR) + v.sort() + foo(v) + +and the more general + + v = EXPR + v.sort() + foo(v) + +into + + v = sorted(EXPR) + foo(v) +""" +# Author: Jacques Frechet, Collin Winter + +# Local imports +from .. 
import fixer_base +from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms + +CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)" +TYPE = "power< 'type' trailer< '(' x=any ')' > >" + +class FixIdioms(fixer_base.BaseFix): + explicit = True # The user must ask for this fixer + + PATTERN = r""" + isinstance=comparison< %s %s T=any > + | + isinstance=comparison< T=any %s %s > + | + while_stmt< 'while' while='1' ':' any+ > + | + sorted=any< + any* + simple_stmt< + expr_stmt< id1=any '=' + power< list='list' trailer< '(' (not arglist) any ')' > > + > + '\n' + > + sort= + simple_stmt< + power< id2=any + trailer< '.' 'sort' > trailer< '(' ')' > + > + '\n' + > + next=any* + > + | + sorted=any< + any* + simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' > + sort= + simple_stmt< + power< id2=any + trailer< '.' 'sort' > trailer< '(' ')' > + > + '\n' + > + next=any* + > + """ % (TYPE, CMP, CMP, TYPE) + + def match(self, node): + r = super(FixIdioms, self).match(node) + # If we've matched one of the sort/sorted subpatterns above, we + # want to reject matches where the initial assignment and the + # subsequent .sort() call involve different identifiers. + if r and "sorted" in r: + if r["id1"] == r["id2"]: + return r + return None + return r + + def transform(self, node, results): + if "isinstance" in results: + return self.transform_isinstance(node, results) + elif "while" in results: + return self.transform_while(node, results) + elif "sorted" in results: + return self.transform_sort(node, results) + else: + raise RuntimeError("Invalid match") + + def transform_isinstance(self, node, results): + x = results["x"].clone() # The thing inside of type() + T = results["T"].clone() # The type being compared against + x.prefix = "" + T.prefix = " " + test = Call(Name("isinstance"), [x, Comma(), T]) + if "n" in results: + test.prefix = " " + test = Node(syms.not_test, [Name("not"), test]) + test.prefix = node.prefix + return test + + def transform_while(self, node, results): + one = results["while"] + one.replace(Name("True", prefix=one.prefix)) + + def transform_sort(self, node, results): + sort_stmt = results["sort"] + next_stmt = results["next"] + list_call = results.get("list") + simple_expr = results.get("expr") + + if list_call: + list_call.replace(Name("sorted", prefix=list_call.prefix)) + elif simple_expr: + new = simple_expr.clone() + new.prefix = "" + simple_expr.replace(Call(Name("sorted"), [new], + prefix=simple_expr.prefix)) + else: + raise RuntimeError("should not have reached here") + sort_stmt.remove() + + btwn = sort_stmt.prefix + # Keep any prefix lines between the sort_stmt and the list_call and + # shove them right after the sorted() call. + if "\n" in btwn: + if next_stmt: + # The new prefix should be everything from the sort_stmt's + # prefix up to the last newline, then the old prefix after a new + # line. + prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix) + next_stmt[0].prefix = "\n".join(prefix_lines) + else: + assert list_call.parent + assert list_call.next_sibling is None + # Put a blank line after list_call and set its prefix. + end_line = BlankLine() + list_call.parent.append_child(end_line) + assert list_call.next_sibling is end_line + # The new prefix should be everything up to the first new line + # of sort_stmt's prefix. 
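+                # (note that rpartition keeps everything up to the *last*
+                #  newline of btwn)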
+ end_line.prefix = btwn.rpartition("\n")[0] diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_imports.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..aaf4f2f642efb52d50eabdca67527a3cbf60014e --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_imports.py @@ -0,0 +1,145 @@ +"""Fix incompatible imports and module references.""" +# Authors: Collin Winter, Nick Edds + +# Local imports +from .. import fixer_base +from ..fixer_util import Name, attr_chain + +MAPPING = {'StringIO': 'io', + 'cStringIO': 'io', + 'cPickle': 'pickle', + '__builtin__' : 'builtins', + 'copy_reg': 'copyreg', + 'Queue': 'queue', + 'SocketServer': 'socketserver', + 'ConfigParser': 'configparser', + 'repr': 'reprlib', + 'FileDialog': 'tkinter.filedialog', + 'tkFileDialog': 'tkinter.filedialog', + 'SimpleDialog': 'tkinter.simpledialog', + 'tkSimpleDialog': 'tkinter.simpledialog', + 'tkColorChooser': 'tkinter.colorchooser', + 'tkCommonDialog': 'tkinter.commondialog', + 'Dialog': 'tkinter.dialog', + 'Tkdnd': 'tkinter.dnd', + 'tkFont': 'tkinter.font', + 'tkMessageBox': 'tkinter.messagebox', + 'ScrolledText': 'tkinter.scrolledtext', + 'Tkconstants': 'tkinter.constants', + 'Tix': 'tkinter.tix', + 'ttk': 'tkinter.ttk', + 'Tkinter': 'tkinter', + 'markupbase': '_markupbase', + '_winreg': 'winreg', + 'thread': '_thread', + 'dummy_thread': '_dummy_thread', + # anydbm and whichdb are handled by fix_imports2 + 'dbhash': 'dbm.bsd', + 'dumbdbm': 'dbm.dumb', + 'dbm': 'dbm.ndbm', + 'gdbm': 'dbm.gnu', + 'xmlrpclib': 'xmlrpc.client', + 'DocXMLRPCServer': 'xmlrpc.server', + 'SimpleXMLRPCServer': 'xmlrpc.server', + 'httplib': 'http.client', + 'htmlentitydefs' : 'html.entities', + 'HTMLParser' : 'html.parser', + 'Cookie': 'http.cookies', + 'cookielib': 'http.cookiejar', + 'BaseHTTPServer': 'http.server', + 'SimpleHTTPServer': 'http.server', + 'CGIHTTPServer': 'http.server', + #'test.test_support': 'test.support', + 'commands': 'subprocess', + 'UserString' : 'collections', + 'UserList' : 'collections', + 'urlparse' : 'urllib.parse', + 'robotparser' : 'urllib.robotparser', +} + + +def alternates(members): + return "(" + "|".join(map(repr, members)) + ")" + + +def build_pattern(mapping=MAPPING): + mod_list = ' | '.join(["module_name='%s'" % key for key in mapping]) + bare_names = alternates(mapping.keys()) + + yield """name_import=import_name< 'import' ((%s) | + multiple_imports=dotted_as_names< any* (%s) any* >) > + """ % (mod_list, mod_list) + yield """import_from< 'from' (%s) 'import' ['('] + ( any | import_as_name< any 'as' any > | + import_as_names< any* >) [')'] > + """ % mod_list + yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > | + multiple_imports=dotted_as_names< + any* dotted_as_name< (%s) 'as' any > any* >) > + """ % (mod_list, mod_list) + + # Find usages of module members in code e.g. thread.foo(bar) + yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names + + +class FixImports(fixer_base.BaseFix): + + BM_compatible = True + keep_line_order = True + # This is overridden in fix_imports2. + mapping = MAPPING + + # We want to run this fixer late, so fix_import doesn't try to make stdlib + # renames into relative imports. + run_order = 6 + + def build_pattern(self): + return "|".join(build_pattern(self.mapping)) + + def compile_pattern(self): + # We override this, so MAPPING can be pragmatically altered and the + # changes will be reflected in PATTERN. 
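+        # (fix_imports2, for example, reuses this class with a different
+        #  `mapping` and inherits this compile step unchanged)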
+        self.PATTERN = self.build_pattern()
+        super(FixImports, self).compile_pattern()
+
+    # Don't match the node if it's within another match.
+    def match(self, node):
+        match = super(FixImports, self).match
+        results = match(node)
+        if results:
+            # Module usage could be in the trailer of an attribute lookup, so we
+            # might have nested matches when "bare_with_attr" is present.
+            if "bare_with_attr" not in results and \
+                    any(match(obj) for obj in attr_chain(node, "parent")):
+                return False
+            return results
+        return False
+
+    def start_tree(self, tree, filename):
+        super(FixImports, self).start_tree(tree, filename)
+        self.replace = {}
+
+    def transform(self, node, results):
+        import_mod = results.get("module_name")
+        if import_mod:
+            mod_name = import_mod.value
+            new_name = self.mapping[mod_name]
+            import_mod.replace(Name(new_name, prefix=import_mod.prefix))
+            if "name_import" in results:
+                # If it's not a "from x import x, y" or "import x as y" import,
+                # marked its usage to be replaced.
+                self.replace[mod_name] = new_name
+            if "multiple_imports" in results:
+                # This is a nasty hack to fix multiple imports on a line (e.g.,
+                # "import StringIO, urlparse"). The problem is that I can't
+                # figure out an easy way to make a pattern recognize the keys of
+                # MAPPING randomly sprinkled in an import statement.
+                results = self.match(node)
+                if results:
+                    self.transform(node, results)
+        else:
+            # Replace usage of the module.
+            bare_name = results["bare_with_attr"][0]
+            new_name = self.replace.get(bare_name.value)
+            if new_name:
+                bare_name.replace(Name(new_name, prefix=bare_name.prefix))
diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_intern.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_intern.py
new file mode 100644
index 0000000000000000000000000000000000000000..d752843092aacd8bc6e84c80f5bc6116563b9d25
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_intern.py
@@ -0,0 +1,39 @@
+# Copyright 2006 Georg Brandl.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for intern().
+
+intern(s) -> sys.intern(s)"""
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import ImportAndCall, touch_import
+
+
+class FixIntern(fixer_base.BaseFix):
+    BM_compatible = True
+    order = "pre"
+
+    PATTERN = """
+    power< 'intern'
+           trailer< lpar='('
+                    ( not(arglist | argument<any '=' any>) obj=any
+                      | obj=arglist<(not argument<any '=' any>) any ','> )
+                    rpar=')' >
+           after=any*
+    >
+    """
+
+    def transform(self, node, results):
+        if results:
+            # I feel like we should be able to express this logic in the
+            # PATTERN above but I don't know how to do it so...
+            obj = results['obj']
+            if obj:
+                if (obj.type == self.syms.argument and
+                        obj.children[0].value in {'**', '*'}):
+                    return  # Make no change.
+        names = ('sys', 'intern')
+        new = ImportAndCall(node, results, names)
+        touch_import(None, 'sys', node)
+        return new
diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_itertools.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_itertools.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e78d6c689f4396421414248f15b39e01a844833
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_itertools.py
@@ -0,0 +1,43 @@
+""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
+    itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
+
+    imports from itertools are fixed in fix_itertools_import.py
+
+    If itertools is imported as something else (ie: import itertools as it;
+    it.izip(spam, eggs)) method calls will not get fixed.
+    """
+
+# Local imports
+from ..
import fixer_base +from ..fixer_util import Name + +class FixItertools(fixer_base.BaseFix): + BM_compatible = True + it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')" + PATTERN = """ + power< it='itertools' + trailer< + dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > > + | + power< func=%(it_funcs)s trailer< '(' [any] ')' > > + """ %(locals()) + + # Needs to be run after fix_(map|zip|filter) + run_order = 6 + + def transform(self, node, results): + prefix = None + func = results['func'][0] + if ('it' in results and + func.value not in ('ifilterfalse', 'izip_longest')): + dot, it = (results['dot'], results['it']) + # Remove the 'itertools' + prefix = it.prefix + it.remove() + # Replace the node which contains ('.', 'function') with the + # function (to be consistent with the second part of the pattern) + dot.remove() + func.parent.replace(func) + + prefix = prefix or func.prefix + func.replace(Name(func.value[1:], prefix=prefix)) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_itertools_imports.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_itertools_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..0ddbc7b8422991bacc398d8641091a478ee84f55 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_itertools_imports.py @@ -0,0 +1,57 @@ +""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """ + +# Local imports +from lib2to3 import fixer_base +from lib2to3.fixer_util import BlankLine, syms, token + + +class FixItertoolsImports(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + import_from< 'from' 'itertools' 'import' imports=any > + """ %(locals()) + + def transform(self, node, results): + imports = results['imports'] + if imports.type == syms.import_as_name or not imports.children: + children = [imports] + else: + children = imports.children + for child in children[::2]: + if child.type == token.NAME: + member = child.value + name_node = child + elif child.type == token.STAR: + # Just leave the import as is. + return + else: + assert child.type == syms.import_as_name + name_node = child.children[0] + member_name = name_node.value + if member_name in ('imap', 'izip', 'ifilter'): + child.value = None + child.remove() + elif member_name in ('ifilterfalse', 'izip_longest'): + node.changed() + name_node.value = ('filterfalse' if member_name[1] == 'f' + else 'zip_longest') + + # Make sure the import statement is still sane + children = imports.children[:] or [imports] + remove_comma = True + for child in children: + if remove_comma and child.type == token.COMMA: + child.remove() + else: + remove_comma ^= True + + while children and children[-1].type == token.COMMA: + children.pop().remove() + + # If there are no imports left, just get rid of the entire statement + if (not (imports.children or getattr(imports, 'value', None)) or + imports.parent is None): + p = node.prefix + node = BlankLine() + node.prefix = p + return node diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_map.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_map.py new file mode 100644 index 0000000000000000000000000000000000000000..78cf81c6f94098aad11edf351ff0f31da9cdbccd --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_map.py @@ -0,0 +1,110 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that changes map(F, ...) 
into list(map(F, ...)) unless there +exists a 'from future_builtins import map' statement in the top-level +namespace. + +As a special case, map(None, X) is changed into list(X). (This is +necessary because the semantics are changed in this case -- the new +map(None, X) is equivalent to [(x,) for x in X].) + +We avoid the transformation (except for the special case mentioned +above) if the map() call is directly contained in iter(<>), list(<>), +tuple(<>), sorted(<>), ...join(<>), or for V in <>:. + +NOTE: This is still not correct if the original code was depending on +map(F, X, Y, ...) to go on until the longest argument is exhausted, +substituting None for missing values -- like zip(), it now stops as +soon as the shortest argument is exhausted. +""" + +# Local imports +from ..pgen2 import token +from .. import fixer_base +from ..fixer_util import Name, ArgList, Call, ListComp, in_special_context +from ..pygram import python_symbols as syms +from ..pytree import Node + + +class FixMap(fixer_base.ConditionalFix): + BM_compatible = True + + PATTERN = """ + map_none=power< + 'map' + trailer< '(' arglist< 'None' ',' arg=any [','] > ')' > + [extra_trailers=trailer*] + > + | + map_lambda=power< + 'map' + trailer< + '(' + arglist< + lambdef< 'lambda' + (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any + > + ',' + it=any + > + ')' + > + [extra_trailers=trailer*] + > + | + power< + 'map' args=trailer< '(' [any] ')' > + [extra_trailers=trailer*] + > + """ + + skip_on = 'future_builtins.map' + + def transform(self, node, results): + if self.should_skip(node): + return + + trailers = [] + if 'extra_trailers' in results: + for t in results['extra_trailers']: + trailers.append(t.clone()) + + if node.parent.type == syms.simple_stmt: + self.warning(node, "You should use a for loop here") + new = node.clone() + new.prefix = "" + new = Call(Name("list"), [new]) + elif "map_lambda" in results: + new = ListComp(results["xp"].clone(), + results["fp"].clone(), + results["it"].clone()) + new = Node(syms.power, [new] + trailers, prefix="") + + else: + if "map_none" in results: + new = results["arg"].clone() + new.prefix = "" + else: + if "args" in results: + args = results["args"] + if args.type == syms.trailer and \ + args.children[1].type == syms.arglist and \ + args.children[1].children[0].type == token.NAME and \ + args.children[1].children[0].value == "None": + self.warning(node, "cannot convert map(None, ...) " + "with multiple arguments because map() " + "now truncates to the shortest sequence") + return + + new = Node(syms.power, [Name("map"), args.clone()]) + new.prefix = "" + + if in_special_context(node): + return None + + new = Node(syms.power, [Name("list"), ArgList([new])] + trailers) + new.prefix = "" + + new.prefix = node.prefix + return new diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_ne.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_ne.py new file mode 100644 index 0000000000000000000000000000000000000000..e3ee10f4a63e0c211d2fb430095dfd784d68518e --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_ne.py @@ -0,0 +1,23 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that turns <> into !=.""" + +# Local imports +from .. import pytree +from ..pgen2 import token +from .. import fixer_base + + +class FixNe(fixer_base.BaseFix): + # This is so simple that we don't need the pattern compiler. 
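+    # (_accept_type tells RefactoringTool to offer this fixer only
+    #  NOTEQUAL tokens, so match() just has to check the literal text)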
+ + _accept_type = token.NOTEQUAL + + def match(self, node): + # Override + return node.value == "<>" + + def transform(self, node, results): + new = pytree.Leaf(token.NOTEQUAL, "!=", prefix=node.prefix) + return new diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_next.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_next.py new file mode 100644 index 0000000000000000000000000000000000000000..9f6305e1d49dc5327338912f2e6ed0b5794c5062 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_next.py @@ -0,0 +1,103 @@ +"""Fixer for it.next() -> next(it), per PEP 3114.""" +# Author: Collin Winter + +# Things that currently aren't covered: +# - listcomp "next" names aren't warned +# - "with" statement targets aren't checked + +# Local imports +from ..pgen2 import token +from ..pygram import python_symbols as syms +from .. import fixer_base +from ..fixer_util import Name, Call, find_binding + +bind_warning = "Calls to builtin next() possibly shadowed by global binding" + + +class FixNext(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > > + | + power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > > + | + classdef< 'class' any+ ':' + suite< any* + funcdef< 'def' + name='next' + parameters< '(' NAME ')' > any+ > + any* > > + | + global=global_stmt< 'global' any* 'next' any* > + """ + + order = "pre" # Pre-order tree traversal + + def start_tree(self, tree, filename): + super(FixNext, self).start_tree(tree, filename) + + n = find_binding('next', tree) + if n: + self.warning(n, bind_warning) + self.shadowed_next = True + else: + self.shadowed_next = False + + def transform(self, node, results): + assert results + + base = results.get("base") + attr = results.get("attr") + name = results.get("name") + + if base: + if self.shadowed_next: + attr.replace(Name("__next__", prefix=attr.prefix)) + else: + base = [n.clone() for n in base] + base[0].prefix = "" + node.replace(Call(Name("next", prefix=node.prefix), base)) + elif name: + n = Name("__next__", prefix=name.prefix) + name.replace(n) + elif attr: + # We don't do this transformation if we're assigning to "x.next". + # Unfortunately, it doesn't seem possible to do this in PATTERN, + # so it's being done here. + if is_assign_target(node): + head = results["head"] + if "".join([str(n) for n in head]).strip() == '__builtin__': + self.warning(node, bind_warning) + return + attr.replace(Name("__next__")) + elif "global" in results: + self.warning(node, bind_warning) + self.shadowed_next = True + + +### The following functions help test if node is part of an assignment +### target. 
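+# (illustrative: in `x.next = 42` the name `next` is an assignment
+#  target, so the attribute branch of transform() must leave it alone)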
+ +def is_assign_target(node): + assign = find_assign(node) + if assign is None: + return False + + for child in assign.children: + if child.type == token.EQUAL: + return False + elif is_subtree(child, node): + return True + return False + +def find_assign(node): + if node.type == syms.expr_stmt: + return node + if node.type == syms.simple_stmt or node.parent is None: + return None + return find_assign(node.parent) + +def is_subtree(root, node): + if root == node: + return True + return any(is_subtree(c, node) for c in root.children) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_nonzero.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_nonzero.py new file mode 100644 index 0000000000000000000000000000000000000000..c2295969a7728f78617677b01aac8122fa15539c --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_nonzero.py @@ -0,0 +1,21 @@ +"""Fixer for __nonzero__ -> __bool__ methods.""" +# Author: Collin Winter + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + +class FixNonzero(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + classdef< 'class' any+ ':' + suite< any* + funcdef< 'def' name='__nonzero__' + parameters< '(' NAME ')' > any+ > + any* > > + """ + + def transform(self, node, results): + name = results["name"] + new = Name("__bool__", prefix=name.prefix) + name.replace(new) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_numliterals.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_numliterals.py new file mode 100644 index 0000000000000000000000000000000000000000..79207d4aa368aee20a2ac9ae1e44072aabb2afd2 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_numliterals.py @@ -0,0 +1,28 @@ +"""Fixer that turns 1L into 1, 0755 into 0o755. +""" +# Copyright 2007 Georg Brandl. +# Licensed to PSF under a Contributor Agreement. + +# Local imports +from ..pgen2 import token +from .. import fixer_base +from ..fixer_util import Number + + +class FixNumliterals(fixer_base.BaseFix): + # This is so simple that we don't need the pattern compiler. + + _accept_type = token.NUMBER + + def match(self, node): + # Override + return (node.value.startswith("0") or node.value[-1] in "Ll") + + def transform(self, node, results): + val = node.value + if val[-1] in 'Ll': + val = val[:-1] + elif val.startswith('0') and val.isdigit() and len(set(val)) > 1: + val = "0o" + val[1:] + + return Number(val, prefix=node.prefix) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_reduce.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_reduce.py new file mode 100644 index 0000000000000000000000000000000000000000..00e5aa1c33d4826e976cccee05b8f7398954459f --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_reduce.py @@ -0,0 +1,35 @@ +# Copyright 2008 Armin Ronacher. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for reduce(). + +Makes sure reduce() is imported from the functools module if reduce is +used in that module. 
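+
+For example (illustrative addition, not part of the upstream docstring),
+a module containing
+
+    total = reduce(lambda a, b: a + b, items)
+
+keeps the call itself unchanged but gains "from functools import reduce"
+at the top of the file, via touch_import() below.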
+""" + +from lib2to3 import fixer_base +from lib2to3.fixer_util import touch_import + + + +class FixReduce(fixer_base.BaseFix): + + BM_compatible = True + order = "pre" + + PATTERN = """ + power< 'reduce' + trailer< '(' + arglist< ( + (not(argument) any ',' + not(argument + > + """ + + def transform(self, node, results): + touch_import('functools', 'reduce', node) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_reload.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_reload.py new file mode 100644 index 0000000000000000000000000000000000000000..b30841131c51f9b6311d1e10629dd9e5049fd6ab --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_reload.py @@ -0,0 +1,36 @@ +"""Fixer for reload(). + +reload(s) -> importlib.reload(s)""" + +# Local imports +from .. import fixer_base +from ..fixer_util import ImportAndCall, touch_import + + +class FixReload(fixer_base.BaseFix): + BM_compatible = True + order = "pre" + + PATTERN = """ + power< 'reload' + trailer< lpar='(' + ( not(arglist | argument) any ','> ) + rpar=')' > + after=any* + > + """ + + def transform(self, node, results): + if results: + # I feel like we should be able to express this logic in the + # PATTERN above but I don't know how to do it so... + obj = results['obj'] + if obj: + if (obj.type == self.syms.argument and + obj.children[0].value in {'**', '*'}): + return # Make no change. + names = ('importlib', 'reload') + new = ImportAndCall(node, results, names) + touch_import(None, 'importlib', node) + return new diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_renames.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_renames.py new file mode 100644 index 0000000000000000000000000000000000000000..c0e3705ab7be19c037b833bc43e76eafc21067d8 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_renames.py @@ -0,0 +1,70 @@ +"""Fix incompatible renames + +Fixes: + * sys.maxint -> sys.maxsize +""" +# Author: Christian Heimes +# based on Collin Winter's fix_import + +# Local imports +from .. import fixer_base +from ..fixer_util import Name, attr_chain + +MAPPING = {"sys": {"maxint" : "maxsize"}, + } +LOOKUP = {} + +def alternates(members): + return "(" + "|".join(map(repr, members)) + ")" + + +def build_pattern(): + #bare = set() + for module, replace in list(MAPPING.items()): + for old_attr, new_attr in list(replace.items()): + LOOKUP[(module, old_attr)] = new_attr + #bare.add(module) + #bare.add(old_attr) + #yield """ + # import_name< 'import' (module=%r + # | dotted_as_names< any* module=%r any* >) > + # """ % (module, module) + yield """ + import_from< 'from' module_name=%r 'import' + ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > + """ % (module, old_attr, old_attr) + yield """ + power< module_name=%r trailer< '.' 
attr_name=%r > any* > + """ % (module, old_attr) + #yield """bare_name=%s""" % alternates(bare) + + +class FixRenames(fixer_base.BaseFix): + BM_compatible = True + PATTERN = "|".join(build_pattern()) + + order = "pre" # Pre-order tree traversal + + # Don't match the node if it's within another match + def match(self, node): + match = super(FixRenames, self).match + results = match(node) + if results: + if any(match(obj) for obj in attr_chain(node, "parent")): + return False + return results + return False + + #def start_tree(self, tree, filename): + # super(FixRenames, self).start_tree(tree, filename) + # self.replace = {} + + def transform(self, node, results): + mod_name = results.get("module_name") + attr_name = results.get("attr_name") + #bare_name = results.get("bare_name") + #import_mod = results.get("module") + + if mod_name and attr_name: + new_attr = LOOKUP[(mod_name.value, attr_name.value)] + attr_name.replace(Name(new_attr, prefix=attr_name.prefix)) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_standarderror.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_standarderror.py new file mode 100644 index 0000000000000000000000000000000000000000..dc742167e6e9d4680afb7afcfc64524f4c14aca3 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_standarderror.py @@ -0,0 +1,18 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for StandardError -> Exception.""" + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + + +class FixStandarderror(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + 'StandardError' + """ + + def transform(self, node, results): + return Name("Exception", prefix=node.prefix) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_types.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_types.py new file mode 100644 index 0000000000000000000000000000000000000000..67bf51f2f5b85a6a2f6a4c6427a5d9893f0c4350 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_types.py @@ -0,0 +1,61 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for removing uses of the types module. + +These work for only the known names in the types module. The forms above +can include types. or not. ie, It is assumed the module is imported either as: + + import types + from types import ... # either * or specific types + +The import statements are not modified. + +There should be another fixer that handles at least the following constants: + + type([]) -> list + type(()) -> tuple + type('') -> str + +""" + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + +_TYPE_MAPPING = { + 'BooleanType' : 'bool', + 'BufferType' : 'memoryview', + 'ClassType' : 'type', + 'ComplexType' : 'complex', + 'DictType': 'dict', + 'DictionaryType' : 'dict', + 'EllipsisType' : 'type(Ellipsis)', + #'FileType' : 'io.IOBase', + 'FloatType': 'float', + 'IntType': 'int', + 'ListType': 'list', + 'LongType': 'int', + 'ObjectType' : 'object', + 'NoneType': 'type(None)', + 'NotImplementedType' : 'type(NotImplemented)', + 'SliceType' : 'slice', + 'StringType': 'bytes', # XXX ? + 'StringTypes' : '(str,)', # XXX ? + 'TupleType': 'tuple', + 'TypeType' : 'type', + 'UnicodeType': 'str', + 'XRangeType' : 'range', + } + +_pats = ["power< 'types' trailer< '.' 
name='%s' > >" % t for t in _TYPE_MAPPING] + +class FixTypes(fixer_base.BaseFix): + BM_compatible = True + PATTERN = '|'.join(_pats) + + def transform(self, node, results): + new_value = _TYPE_MAPPING.get(results["name"].value) + if new_value: + return Name(new_value, prefix=node.prefix) + return None diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_unicode.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_unicode.py new file mode 100644 index 0000000000000000000000000000000000000000..c7982c2b97c3e1cacf5812a9ddd9d20fdda66496 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_unicode.py @@ -0,0 +1,42 @@ +r"""Fixer for unicode. + +* Changes unicode to str and unichr to chr. + +* If "...\u..." is not unicode literal change it into "...\\u...". + +* Change u"..." into "...". + +""" + +from ..pgen2 import token +from .. import fixer_base + +_mapping = {"unichr" : "chr", "unicode" : "str"} + +class FixUnicode(fixer_base.BaseFix): + BM_compatible = True + PATTERN = "STRING | 'unicode' | 'unichr'" + + def start_tree(self, tree, filename): + super(FixUnicode, self).start_tree(tree, filename) + self.unicode_literals = 'unicode_literals' in tree.future_features + + def transform(self, node, results): + if node.type == token.NAME: + new = node.clone() + new.value = _mapping[node.value] + return new + elif node.type == token.STRING: + val = node.value + if not self.unicode_literals and val[0] in '\'"' and '\\' in val: + val = r'\\'.join([ + v.replace('\\u', r'\\u').replace('\\U', r'\\U') + for v in val.split(r'\\') + ]) + if val[0] in 'uU': + val = val[1:] + if val == node.value: + return node + new = node.clone() + new.value = val + return new diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_xreadlines.py b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_xreadlines.py new file mode 100644 index 0000000000000000000000000000000000000000..3e3f71ab045573d9438bcbe5da6115b77dc3d7e9 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/fixes/fix_xreadlines.py @@ -0,0 +1,25 @@ +"""Fix "for x in f.xreadlines()" -> "for x in f". + +This fixer will also convert g(f.xreadlines) into g(f.__iter__).""" +# Author: Collin Winter + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + + +class FixXreadlines(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > > + | + power< any+ trailer< '.' no_call='xreadlines' > > + """ + + def transform(self, node, results): + no_call = results.get("no_call") + + if no_call: + no_call.replace(Name("__iter__", prefix=no_call.prefix)) + else: + node.replace([x.clone() for x in results["call"]]) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/main.py b/evalkit_cambrian/lib/python3.10/lib2to3/main.py new file mode 100644 index 0000000000000000000000000000000000000000..f2849fd6be3fd93d0f0c48768f09270aea77a209 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/main.py @@ -0,0 +1,273 @@ +""" +Main program for 2to3. +""" + +from __future__ import with_statement, print_function + +import sys +import os +import difflib +import logging +import shutil +import optparse + +from . 
import refactor + + +def diff_texts(a, b, filename): + """Return a unified diff of two strings.""" + a = a.splitlines() + b = b.splitlines() + return difflib.unified_diff(a, b, filename, filename, + "(original)", "(refactored)", + lineterm="") + + +class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool): + """ + A refactoring tool that can avoid overwriting its input files. + Prints output to stdout. + + Output files can optionally be written to a different directory and or + have an extra file suffix appended to their name for use in situations + where you do not want to replace the input files. + """ + + def __init__(self, fixers, options, explicit, nobackups, show_diffs, + input_base_dir='', output_dir='', append_suffix=''): + """ + Args: + fixers: A list of fixers to import. + options: A dict with RefactoringTool configuration. + explicit: A list of fixers to run even if they are explicit. + nobackups: If true no backup '.bak' files will be created for those + files that are being refactored. + show_diffs: Should diffs of the refactoring be printed to stdout? + input_base_dir: The base directory for all input files. This class + will strip this path prefix off of filenames before substituting + it with output_dir. Only meaningful if output_dir is supplied. + All files processed by refactor() must start with this path. + output_dir: If supplied, all converted files will be written into + this directory tree instead of input_base_dir. + append_suffix: If supplied, all files output by this tool will have + this appended to their filename. Useful for changing .py to + .py3 for example by passing append_suffix='3'. + """ + self.nobackups = nobackups + self.show_diffs = show_diffs + if input_base_dir and not input_base_dir.endswith(os.sep): + input_base_dir += os.sep + self._input_base_dir = input_base_dir + self._output_dir = output_dir + self._append_suffix = append_suffix + super(StdoutRefactoringTool, self).__init__(fixers, options, explicit) + + def log_error(self, msg, *args, **kwargs): + self.errors.append((msg, args, kwargs)) + self.logger.error(msg, *args, **kwargs) + + def write_file(self, new_text, filename, old_text, encoding): + orig_filename = filename + if self._output_dir: + if filename.startswith(self._input_base_dir): + filename = os.path.join(self._output_dir, + filename[len(self._input_base_dir):]) + else: + raise ValueError('filename %s does not start with the ' + 'input_base_dir %s' % ( + filename, self._input_base_dir)) + if self._append_suffix: + filename += self._append_suffix + if orig_filename != filename: + output_dir = os.path.dirname(filename) + if not os.path.isdir(output_dir) and output_dir: + os.makedirs(output_dir) + self.log_message('Writing converted %s to %s.', orig_filename, + filename) + if not self.nobackups: + # Make backup + backup = filename + ".bak" + if os.path.lexists(backup): + try: + os.remove(backup) + except OSError: + self.log_message("Can't remove backup %s", backup) + try: + os.rename(filename, backup) + except OSError: + self.log_message("Can't rename %s to %s", filename, backup) + # Actually write the new file + write = super(StdoutRefactoringTool, self).write_file + write(new_text, filename, old_text, encoding) + if not self.nobackups: + shutil.copymode(backup, filename) + if orig_filename != filename: + # Preserve the file mode in the new output directory. 
+ shutil.copymode(orig_filename, filename) + + def print_output(self, old, new, filename, equal): + if equal: + self.log_message("No changes to %s", filename) + else: + self.log_message("Refactored %s", filename) + if self.show_diffs: + diff_lines = diff_texts(old, new, filename) + try: + if self.output_lock is not None: + with self.output_lock: + for line in diff_lines: + print(line) + sys.stdout.flush() + else: + for line in diff_lines: + print(line) + except UnicodeEncodeError: + warn("couldn't encode %s's diff for your terminal" % + (filename,)) + return + +def warn(msg): + print("WARNING: %s" % (msg,), file=sys.stderr) + + +def main(fixer_pkg, args=None): + """Main program. + + Args: + fixer_pkg: the name of a package where the fixers are located. + args: optional; a list of command line arguments. If omitted, + sys.argv[1:] is used. + + Returns a suggested exit status (0, 1, 2). + """ + # Set up option parser + parser = optparse.OptionParser(usage="2to3 [options] file|dir ...") + parser.add_option("-d", "--doctests_only", action="store_true", + help="Fix up doctests only") + parser.add_option("-f", "--fix", action="append", default=[], + help="Each FIX specifies a transformation; default: all") + parser.add_option("-j", "--processes", action="store", default=1, + type="int", help="Run 2to3 concurrently") + parser.add_option("-x", "--nofix", action="append", default=[], + help="Prevent a transformation from being run") + parser.add_option("-l", "--list-fixes", action="store_true", + help="List available transformations") + parser.add_option("-p", "--print-function", action="store_true", + help="Modify the grammar so that print() is a function") + parser.add_option("-e", "--exec-function", action="store_true", + help="Modify the grammar so that exec() is a function") + parser.add_option("-v", "--verbose", action="store_true", + help="More verbose logging") + parser.add_option("--no-diffs", action="store_true", + help="Don't show diffs of the refactoring") + parser.add_option("-w", "--write", action="store_true", + help="Write back modified files") + parser.add_option("-n", "--nobackups", action="store_true", default=False, + help="Don't write backups for modified files") + parser.add_option("-o", "--output-dir", action="store", type="str", + default="", help="Put output files in this directory " + "instead of overwriting the input files. Requires -n.") + parser.add_option("-W", "--write-unchanged-files", action="store_true", + help="Also write files even if no changes were required" + " (useful with --output-dir); implies -w.") + parser.add_option("--add-suffix", action="store", type="str", default="", + help="Append this string to all output filenames." + " Requires -n if non-empty. " + "ex: --add-suffix='3' will generate .py3 files.") + + # Parse command line arguments + refactor_stdin = False + flags = {} + options, args = parser.parse_args(args) + if options.write_unchanged_files: + flags["write_unchanged_files"] = True + if not options.write: + warn("--write-unchanged-files/-W implies -w.") + options.write = True + # If we allowed these, the original files would be renamed to backup names + # but not replaced. 
+ if options.output_dir and not options.nobackups: + parser.error("Can't use --output-dir/-o without -n.") + if options.add_suffix and not options.nobackups: + parser.error("Can't use --add-suffix without -n.") + + if not options.write and options.no_diffs: + warn("not writing files and not printing diffs; that's not very useful") + if not options.write and options.nobackups: + parser.error("Can't use -n without -w") + if options.list_fixes: + print("Available transformations for the -f/--fix option:") + for fixname in refactor.get_all_fix_names(fixer_pkg): + print(fixname) + if not args: + return 0 + if not args: + print("At least one file or directory argument required.", file=sys.stderr) + print("Use --help to show usage.", file=sys.stderr) + return 2 + if "-" in args: + refactor_stdin = True + if options.write: + print("Can't write to stdin.", file=sys.stderr) + return 2 + if options.print_function: + flags["print_function"] = True + + if options.exec_function: + flags["exec_function"] = True + + # Set up logging handler + level = logging.DEBUG if options.verbose else logging.INFO + logging.basicConfig(format='%(name)s: %(message)s', level=level) + logger = logging.getLogger('lib2to3.main') + + # Initialize the refactoring tool + avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg)) + unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix) + explicit = set() + if options.fix: + all_present = False + for fix in options.fix: + if fix == "all": + all_present = True + else: + explicit.add(fixer_pkg + ".fix_" + fix) + requested = avail_fixes.union(explicit) if all_present else explicit + else: + requested = avail_fixes.union(explicit) + fixer_names = requested.difference(unwanted_fixes) + input_base_dir = os.path.commonprefix(args) + if (input_base_dir and not input_base_dir.endswith(os.sep) + and not os.path.isdir(input_base_dir)): + # One or more similar names were passed, their directory is the base. + # os.path.commonprefix() is ignorant of path elements, this corrects + # for that weird API. + input_base_dir = os.path.dirname(input_base_dir) + if options.output_dir: + input_base_dir = input_base_dir.rstrip(os.sep) + logger.info('Output in %r will mirror the input directory %r layout.', + options.output_dir, input_base_dir) + rt = StdoutRefactoringTool( + sorted(fixer_names), flags, sorted(explicit), + options.nobackups, not options.no_diffs, + input_base_dir=input_base_dir, + output_dir=options.output_dir, + append_suffix=options.add_suffix) + + # Refactor all files and directories passed as arguments + if not rt.errors: + if refactor_stdin: + rt.refactor_stdin() + else: + try: + rt.refactor(args, options.write, options.doctests_only, + options.processes) + except refactor.MultiprocessingUnsupported: + assert options.processes > 1 + print("Sorry, -j isn't supported on this platform.", + file=sys.stderr) + return 1 + rt.summarize() + + # Return error status (0 if rt.errors is zero) + return int(bool(rt.errors)) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/patcomp.py b/evalkit_cambrian/lib/python3.10/lib2to3/patcomp.py new file mode 100644 index 0000000000000000000000000000000000000000..f57f4954b26ce7f59c44ce7cd67bf966fd594cd5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/patcomp.py @@ -0,0 +1,204 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Pattern compiler. + +The grammar is taken from PatternGrammar.txt. + +The compiler compiles a pattern to a pytree.*Pattern instance. 
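+
+Example (illustrative addition, not part of the upstream docstring):
+
+    from lib2to3.patcomp import PatternCompiler
+    pat = PatternCompiler().compile_pattern("power< 'reload' trailer< '(' any* ')' > >")
+
+yields a pytree.NodePattern whose match() method can then be run against
+nodes of a lib2to3 parse tree.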
+""" + +__author__ = "Guido van Rossum " + +# Python imports +import io + +# Fairly local imports +from .pgen2 import driver, literals, token, tokenize, parse, grammar + +# Really local imports +from . import pytree +from . import pygram + + +class PatternSyntaxError(Exception): + pass + + +def tokenize_wrapper(input): + """Tokenizes a string suppressing significant whitespace.""" + skip = {token.NEWLINE, token.INDENT, token.DEDENT} + tokens = tokenize.generate_tokens(io.StringIO(input).readline) + for quintuple in tokens: + type, value, start, end, line_text = quintuple + if type not in skip: + yield quintuple + + +class PatternCompiler(object): + + def __init__(self, grammar_file=None): + """Initializer. + + Takes an optional alternative filename for the pattern grammar. + """ + if grammar_file is None: + self.grammar = pygram.pattern_grammar + self.syms = pygram.pattern_symbols + else: + self.grammar = driver.load_grammar(grammar_file) + self.syms = pygram.Symbols(self.grammar) + self.pygrammar = pygram.python_grammar + self.pysyms = pygram.python_symbols + self.driver = driver.Driver(self.grammar, convert=pattern_convert) + + def compile_pattern(self, input, debug=False, with_tree=False): + """Compiles a pattern string to a nested pytree.*Pattern object.""" + tokens = tokenize_wrapper(input) + try: + root = self.driver.parse_tokens(tokens, debug=debug) + except parse.ParseError as e: + raise PatternSyntaxError(str(e)) from None + if with_tree: + return self.compile_node(root), root + else: + return self.compile_node(root) + + def compile_node(self, node): + """Compiles a node, recursively. + + This is one big switch on the node type. + """ + # XXX Optimize certain Wildcard-containing-Wildcard patterns + # that can be merged + if node.type == self.syms.Matcher: + node = node.children[0] # Avoid unneeded recursion + + if node.type == self.syms.Alternatives: + # Skip the odd children since they are just '|' tokens + alts = [self.compile_node(ch) for ch in node.children[::2]] + if len(alts) == 1: + return alts[0] + p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1) + return p.optimize() + + if node.type == self.syms.Alternative: + units = [self.compile_node(ch) for ch in node.children] + if len(units) == 1: + return units[0] + p = pytree.WildcardPattern([units], min=1, max=1) + return p.optimize() + + if node.type == self.syms.NegatedUnit: + pattern = self.compile_basic(node.children[1:]) + p = pytree.NegatedPattern(pattern) + return p.optimize() + + assert node.type == self.syms.Unit + + name = None + nodes = node.children + if len(nodes) >= 3 and nodes[1].type == token.EQUAL: + name = nodes[0].value + nodes = nodes[2:] + repeat = None + if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater: + repeat = nodes[-1] + nodes = nodes[:-1] + + # Now we've reduced it to: STRING | NAME [Details] | (...) | [...] 
+ pattern = self.compile_basic(nodes, repeat) + + if repeat is not None: + assert repeat.type == self.syms.Repeater + children = repeat.children + child = children[0] + if child.type == token.STAR: + min = 0 + max = pytree.HUGE + elif child.type == token.PLUS: + min = 1 + max = pytree.HUGE + elif child.type == token.LBRACE: + assert children[-1].type == token.RBRACE + assert len(children) in (3, 5) + min = max = self.get_int(children[1]) + if len(children) == 5: + max = self.get_int(children[3]) + else: + assert False + if min != 1 or max != 1: + pattern = pattern.optimize() + pattern = pytree.WildcardPattern([[pattern]], min=min, max=max) + + if name is not None: + pattern.name = name + return pattern.optimize() + + def compile_basic(self, nodes, repeat=None): + # Compile STRING | NAME [Details] | (...) | [...] + assert len(nodes) >= 1 + node = nodes[0] + if node.type == token.STRING: + value = str(literals.evalString(node.value)) + return pytree.LeafPattern(_type_of_literal(value), value) + elif node.type == token.NAME: + value = node.value + if value.isupper(): + if value not in TOKEN_MAP: + raise PatternSyntaxError("Invalid token: %r" % value) + if nodes[1:]: + raise PatternSyntaxError("Can't have details for token") + return pytree.LeafPattern(TOKEN_MAP[value]) + else: + if value == "any": + type = None + elif not value.startswith("_"): + type = getattr(self.pysyms, value, None) + if type is None: + raise PatternSyntaxError("Invalid symbol: %r" % value) + if nodes[1:]: # Details present + content = [self.compile_node(nodes[1].children[1])] + else: + content = None + return pytree.NodePattern(type, content) + elif node.value == "(": + return self.compile_node(nodes[1]) + elif node.value == "[": + assert repeat is None + subpattern = self.compile_node(nodes[1]) + return pytree.WildcardPattern([[subpattern]], min=0, max=1) + assert False, node + + def get_int(self, node): + assert node.type == token.NUMBER + return int(node.value) + + +# Map named tokens to the type value for a LeafPattern +TOKEN_MAP = {"NAME": token.NAME, + "STRING": token.STRING, + "NUMBER": token.NUMBER, + "TOKEN": None} + + +def _type_of_literal(value): + if value[0].isalpha(): + return token.NAME + elif value in grammar.opmap: + return grammar.opmap[value] + else: + return None + + +def pattern_convert(grammar, raw_node_info): + """Converts raw node information to a Node or Leaf instance.""" + type, value, context, children = raw_node_info + if children or type in grammar.number2symbol: + return pytree.Node(type, children, context=context) + else: + return pytree.Leaf(type, value, context=context) + + +def compile_pattern(pattern): + return PatternCompiler().compile_pattern(pattern) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/pygram.py b/evalkit_cambrian/lib/python3.10/lib2to3/pygram.py new file mode 100644 index 0000000000000000000000000000000000000000..24d9db9217f131adf50f6eca7d571ce69cbcbc3e --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/pygram.py @@ -0,0 +1,43 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Export the Python grammar and symbols.""" + +# Python imports +import os + +# Local imports +from .pgen2 import token +from .pgen2 import driver +from . import pytree + +# The grammar file +_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") +_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), + "PatternGrammar.txt") + + +class Symbols(object): + + def __init__(self, grammar): + """Initializer. 
+ + Creates an attribute for each grammar symbol (nonterminal), + whose value is the symbol's type (an int >= 256). + """ + for name, symbol in grammar.symbol2number.items(): + setattr(self, name, symbol) + + +python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE) + +python_symbols = Symbols(python_grammar) + +python_grammar_no_print_statement = python_grammar.copy() +del python_grammar_no_print_statement.keywords["print"] + +python_grammar_no_print_and_exec_statement = python_grammar_no_print_statement.copy() +del python_grammar_no_print_and_exec_statement.keywords["exec"] + +pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE) +pattern_symbols = Symbols(pattern_grammar) diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/pytree.py b/evalkit_cambrian/lib/python3.10/lib2to3/pytree.py new file mode 100644 index 0000000000000000000000000000000000000000..729023df0284e15ad8d7ac9f30cf7ed8d42db272 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/pytree.py @@ -0,0 +1,853 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +""" +Python parse tree definitions. + +This is a very concrete parse tree; we need to keep every token and +even the comments and whitespace between tokens. + +There's also a pattern matching implementation here. +""" + +__author__ = "Guido van Rossum " + +import sys +from io import StringIO + +HUGE = 0x7FFFFFFF # maximum repeat count, default max + +_type_reprs = {} +def type_repr(type_num): + global _type_reprs + if not _type_reprs: + from .pygram import python_symbols + # printing tokens is possible but not as useful + # from .pgen2 import token // token.__dict__.items(): + for name, val in python_symbols.__dict__.items(): + if type(val) == int: _type_reprs[val] = name + return _type_reprs.setdefault(type_num, type_num) + +class Base(object): + + """ + Abstract base class for Node and Leaf. + + This provides some default functionality and boilerplate using the + template pattern. + + A node may be a subnode of at most one parent. + """ + + # Default values for instance variables + type = None # int: token number (< 256) or symbol number (>= 256) + parent = None # Parent node pointer, or None + children = () # Tuple of subnodes + was_changed = False + was_checked = False + + def __new__(cls, *args, **kwds): + """Constructor that prevents Base from being instantiated.""" + assert cls is not Base, "Cannot instantiate Base" + return object.__new__(cls) + + def __eq__(self, other): + """ + Compare two nodes for equality. + + This calls the method _eq(). + """ + if self.__class__ is not other.__class__: + return NotImplemented + return self._eq(other) + + __hash__ = None # For Py3 compatibility. + + def _eq(self, other): + """ + Compare two nodes for equality. + + This is called by __eq__ and __ne__. It is only called if the two nodes + have the same type. This must be implemented by the concrete subclass. + Nodes should be considered equal if they have the same structure, + ignoring the prefix string and other context information. + """ + raise NotImplementedError + + def clone(self): + """ + Return a cloned (deep) copy of self. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def post_order(self): + """ + Return a post-order iterator for the tree. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def pre_order(self): + """ + Return a pre-order iterator for the tree. 
+ + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def replace(self, new): + """Replace this node with a new one in the parent.""" + assert self.parent is not None, str(self) + assert new is not None + if not isinstance(new, list): + new = [new] + l_children = [] + found = False + for ch in self.parent.children: + if ch is self: + assert not found, (self.parent.children, self, new) + if new is not None: + l_children.extend(new) + found = True + else: + l_children.append(ch) + assert found, (self.children, self, new) + self.parent.changed() + self.parent.children = l_children + for x in new: + x.parent = self.parent + self.parent = None + + def get_lineno(self): + """Return the line number which generated the invocant node.""" + node = self + while not isinstance(node, Leaf): + if not node.children: + return + node = node.children[0] + return node.lineno + + def changed(self): + if self.parent: + self.parent.changed() + self.was_changed = True + + def remove(self): + """ + Remove the node from the tree. Returns the position of the node in its + parent's children before it was removed. + """ + if self.parent: + for i, node in enumerate(self.parent.children): + if node is self: + self.parent.changed() + del self.parent.children[i] + self.parent = None + return i + + @property + def next_sibling(self): + """ + The node immediately following the invocant in their parent's children + list. If the invocant does not have a next sibling, it is None + """ + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + try: + return self.parent.children[i+1] + except IndexError: + return None + + @property + def prev_sibling(self): + """ + The node immediately preceding the invocant in their parent's children + list. If the invocant does not have a previous sibling, it is None. + """ + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i-1] + + def leaves(self): + for child in self.children: + yield from child.leaves() + + def depth(self): + if self.parent is None: + return 0 + return 1 + self.parent.depth() + + def get_suffix(self): + """ + Return the string immediately following the invocant node. This is + effectively equivalent to node.next_sibling.prefix + """ + next_sib = self.next_sibling + if next_sib is None: + return "" + return next_sib.prefix + + if sys.version_info < (3, 0): + def __str__(self): + return str(self).encode("ascii") + +class Node(Base): + + """Concrete implementation for interior nodes.""" + + def __init__(self,type, children, + context=None, + prefix=None, + fixers_applied=None): + """ + Initializer. + + Takes a type constant (a symbol number >= 256), a sequence of + child nodes, and an optional context keyword argument. + + As a side effect, the parent pointers of the children are updated. 
+ """ + assert type >= 256, type + self.type = type + self.children = list(children) + for ch in self.children: + assert ch.parent is None, repr(ch) + ch.parent = self + if prefix is not None: + self.prefix = prefix + if fixers_applied: + self.fixers_applied = fixers_applied[:] + else: + self.fixers_applied = None + + def __repr__(self): + """Return a canonical string representation.""" + return "%s(%s, %r)" % (self.__class__.__name__, + type_repr(self.type), + self.children) + + def __unicode__(self): + """ + Return a pretty string representation. + + This reproduces the input source exactly. + """ + return "".join(map(str, self.children)) + + if sys.version_info > (3, 0): + __str__ = __unicode__ + + def _eq(self, other): + """Compare two nodes for equality.""" + return (self.type, self.children) == (other.type, other.children) + + def clone(self): + """Return a cloned (deep) copy of self.""" + return Node(self.type, [ch.clone() for ch in self.children], + fixers_applied=self.fixers_applied) + + def post_order(self): + """Return a post-order iterator for the tree.""" + for child in self.children: + yield from child.post_order() + yield self + + def pre_order(self): + """Return a pre-order iterator for the tree.""" + yield self + for child in self.children: + yield from child.pre_order() + + @property + def prefix(self): + """ + The whitespace and comments preceding this node in the input. + """ + if not self.children: + return "" + return self.children[0].prefix + + @prefix.setter + def prefix(self, prefix): + if self.children: + self.children[0].prefix = prefix + + def set_child(self, i, child): + """ + Equivalent to 'node.children[i] = child'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children[i].parent = None + self.children[i] = child + self.changed() + + def insert_child(self, i, child): + """ + Equivalent to 'node.children.insert(i, child)'. This method also sets + the child's parent attribute appropriately. + """ + child.parent = self + self.children.insert(i, child) + self.changed() + + def append_child(self, child): + """ + Equivalent to 'node.children.append(child)'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children.append(child) + self.changed() + + +class Leaf(Base): + + """Concrete implementation for leaf nodes.""" + + # Default values for instance variables + _prefix = "" # Whitespace and comments preceding this token in the input + lineno = 0 # Line where this token starts in the input + column = 0 # Column where this token tarts in the input + + def __init__(self, type, value, + context=None, + prefix=None, + fixers_applied=[]): + """ + Initializer. + + Takes a type constant (a token number < 256), a string value, and an + optional context keyword argument. + """ + assert 0 <= type < 256, type + if context is not None: + self._prefix, (self.lineno, self.column) = context + self.type = type + self.value = value + if prefix is not None: + self._prefix = prefix + self.fixers_applied = fixers_applied[:] + + def __repr__(self): + """Return a canonical string representation.""" + return "%s(%r, %r)" % (self.__class__.__name__, + self.type, + self.value) + + def __unicode__(self): + """ + Return a pretty string representation. + + This reproduces the input source exactly. 
+ """ + return self.prefix + str(self.value) + + if sys.version_info > (3, 0): + __str__ = __unicode__ + + def _eq(self, other): + """Compare two nodes for equality.""" + return (self.type, self.value) == (other.type, other.value) + + def clone(self): + """Return a cloned (deep) copy of self.""" + return Leaf(self.type, self.value, + (self.prefix, (self.lineno, self.column)), + fixers_applied=self.fixers_applied) + + def leaves(self): + yield self + + def post_order(self): + """Return a post-order iterator for the tree.""" + yield self + + def pre_order(self): + """Return a pre-order iterator for the tree.""" + yield self + + @property + def prefix(self): + """ + The whitespace and comments preceding this token in the input. + """ + return self._prefix + + @prefix.setter + def prefix(self, prefix): + self.changed() + self._prefix = prefix + +def convert(gr, raw_node): + """ + Convert raw node information to a Node or Leaf instance. + + This is passed to the parser driver which calls it whenever a reduction of a + grammar rule produces a new complete node, so that the tree is build + strictly bottom-up. + """ + type, value, context, children = raw_node + if children or type in gr.number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. + if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value, context=context) + + +class BasePattern(object): + + """ + A pattern is a tree matching pattern. + + It looks for a specific node type (token or symbol), and + optionally for a specific content. + + This is an abstract base class. There are three concrete + subclasses: + + - LeafPattern matches a single leaf node; + - NodePattern matches a single node (usually non-leaf); + - WildcardPattern matches a sequence of nodes of variable length. + """ + + # Defaults for instance variables + type = None # Node type (token if < 256, symbol if >= 256) + content = None # Optional content matching pattern + name = None # Optional name used to store match in results dict + + def __new__(cls, *args, **kwds): + """Constructor that prevents BasePattern from being instantiated.""" + assert cls is not BasePattern, "Cannot instantiate BasePattern" + return object.__new__(cls) + + def __repr__(self): + args = [type_repr(self.type), self.content, self.name] + while args and args[-1] is None: + del args[-1] + return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) + + def optimize(self): + """ + A subclass can define this as a hook for optimizations. + + Returns either self or another node with the same effect. + """ + return self + + def match(self, node, results=None): + """ + Does this pattern exactly match a node? + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + Default implementation for non-wildcard patterns. + """ + if self.type is not None and node.type != self.type: + return False + if self.content is not None: + r = None + if results is not None: + r = {} + if not self._submatch(node, r): + return False + if r: + results.update(r) + if results is not None and self.name: + results[self.name] = node + return True + + def match_seq(self, nodes, results=None): + """ + Does this pattern exactly match a sequence of nodes? + + Default implementation for non-wildcard patterns. 
+ """ + if len(nodes) != 1: + return False + return self.match(nodes[0], results) + + def generate_matches(self, nodes): + """ + Generator yielding all matches for this pattern. + + Default implementation for non-wildcard patterns. + """ + r = {} + if nodes and self.match(nodes[0], r): + yield 1, r + + +class LeafPattern(BasePattern): + + def __init__(self, type=None, content=None, name=None): + """ + Initializer. Takes optional type, content, and name. + + The type, if given must be a token type (< 256). If not given, + this matches any *leaf* node; the content may still be required. + + The content, if given, must be a string. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert 0 <= type < 256, type + if content is not None: + assert isinstance(content, str), repr(content) + self.type = type + self.content = content + self.name = name + + def match(self, node, results=None): + """Override match() to insist on a leaf node.""" + if not isinstance(node, Leaf): + return False + return BasePattern.match(self, node, results) + + def _submatch(self, node, results=None): + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + return self.content == node.value + + +class NodePattern(BasePattern): + + wildcards = False + + def __init__(self, type=None, content=None, name=None): + """ + Initializer. Takes optional type, content, and name. + + The type, if given, must be a symbol type (>= 256). If the + type is None this matches *any* single node (leaf or not), + except if content is not None, in which it only matches + non-leaf nodes that also match the content pattern. + + The content, if not None, must be a sequence of Patterns that + must match the node's children exactly. If the content is + given, the type must not be None. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert type >= 256, type + if content is not None: + assert not isinstance(content, str), repr(content) + content = list(content) + for i, item in enumerate(content): + assert isinstance(item, BasePattern), (i, item) + if isinstance(item, WildcardPattern): + self.wildcards = True + self.type = type + self.content = content + self.name = name + + def _submatch(self, node, results=None): + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + if self.wildcards: + for c, r in generate_matches(self.content, node.children): + if c == len(node.children): + if results is not None: + results.update(r) + return True + return False + if len(self.content) != len(node.children): + return False + for subpattern, child in zip(self.content, node.children): + if not subpattern.match(child, results): + return False + return True + + +class WildcardPattern(BasePattern): + + """ + A wildcard pattern can match zero or more nodes. 
+
+    This has all the flexibility needed to implement patterns like:
+
+    .* .+ .? .{m,n}
+    (a b c | d e | f)
+    (...)* (...)+ (...)? (...){m,n}
+
+    except it always uses non-greedy matching.
+    """
+
+    def __init__(self, content=None, min=0, max=HUGE, name=None):
+        """
+        Initializer.
+
+        Args:
+            content: optional sequence of subsequences of patterns;
+                     if absent, matches one node;
+                     if present, each subsequence is an alternative [*]
+            min: optional minimum number of times to match, default 0
+            max: optional maximum number of times to match, default HUGE
+            name: optional name assigned to this match
+
+        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
+            equivalent to (a b c | d e | f g h); if content is None,
+            this is equivalent to '.' in regular expression terms.
+            The min and max parameters work as follows:
+                min=0, max=maxint: .*
+                min=1, max=maxint: .+
+                min=0, max=1: .?
+                min=1, max=1: .
+            If content is not None, replace the dot with the parenthesized
+            list of alternatives, e.g. (a b c | d e | f g h)*
+        """
+        assert 0 <= min <= max <= HUGE, (min, max)
+        if content is not None:
+            content = tuple(map(tuple, content))  # Protect against alterations
+            # Check sanity of alternatives
+            assert len(content), repr(content)  # Can't have zero alternatives
+            for alt in content:
+                assert len(alt), repr(alt)  # Can't have empty alternatives
+        self.content = content
+        self.min = min
+        self.max = max
+        self.name = name
+
+    def optimize(self):
+        """Optimize certain stacked wildcard patterns."""
+        subpattern = None
+        if (self.content is not None and
+            len(self.content) == 1 and len(self.content[0]) == 1):
+            subpattern = self.content[0][0]
+        if self.min == 1 and self.max == 1:
+            if self.content is None:
+                return NodePattern(name=self.name)
+            if subpattern is not None and self.name == subpattern.name:
+                return subpattern.optimize()
+        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
+            subpattern.min <= 1 and self.name == subpattern.name):
+            return WildcardPattern(subpattern.content,
+                                   self.min*subpattern.min,
+                                   self.max*subpattern.max,
+                                   subpattern.name)
+        return self
+
+    def match(self, node, results=None):
+        """Does this pattern exactly match a node?"""
+        return self.match_seq([node], results)
+
+    def match_seq(self, nodes, results=None):
+        """Does this pattern exactly match a sequence of nodes?"""
+        for c, r in self.generate_matches(nodes):
+            if c == len(nodes):
+                if results is not None:
+                    results.update(r)
+                    if self.name:
+                        results[self.name] = list(nodes)
+                return True
+        return False
+
+    def generate_matches(self, nodes):
+        """
+        Generator yielding matches for a sequence of nodes.
+
+        Args:
+            nodes: sequence of nodes
+
+        Yields:
+            (count, results) tuples where:
+            count: the match comprises nodes[:count];
+            results: dict containing named submatches.
+        """
+        if self.content is None:
+            # Shortcut for special case (see __init__.__doc__)
+            for count in range(self.min, 1 + min(len(nodes), self.max)):
+                r = {}
+                if self.name:
+                    r[self.name] = nodes[:count]
+                yield count, r
+        elif self.name == "bare_name":
+            yield self._bare_name_matches(nodes)
+        else:
+            # The reason for this is that hitting the recursion limit usually
+            # results in some ugly messages about how RuntimeErrors are being
+            # ignored. We only have to do this on CPython, though, because other
+            # implementations don't have this nasty bug in the first place.
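+            # (Added note, not from the original source) hasattr(sys,
+            # "getrefcount") serves as a CPython check below, since other
+            # implementations such as PyPy do not define it; the temporary
+            # stderr swap hides the "ignored RuntimeError" noise printed
+            # while the recursive matcher exceeds the recursion limit.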
+ if hasattr(sys, "getrefcount"): + save_stderr = sys.stderr + sys.stderr = StringIO() + try: + for count, r in self._recursive_matches(nodes, 0): + if self.name: + r[self.name] = nodes[:count] + yield count, r + except RuntimeError: + # Fall back to the iterative pattern matching scheme if the + # recursive scheme hits the recursion limit (RecursionError). + for count, r in self._iterative_matches(nodes): + if self.name: + r[self.name] = nodes[:count] + yield count, r + finally: + if hasattr(sys, "getrefcount"): + sys.stderr = save_stderr + + def _iterative_matches(self, nodes): + """Helper to iteratively yield the matches.""" + nodelen = len(nodes) + if 0 >= self.min: + yield 0, {} + + results = [] + # generate matches that use just one alt from self.content + for alt in self.content: + for c, r in generate_matches(alt, nodes): + yield c, r + results.append((c, r)) + + # for each match, iterate down the nodes + while results: + new_results = [] + for c0, r0 in results: + # stop if the entire set of nodes has been matched + if c0 < nodelen and c0 <= self.max: + for alt in self.content: + for c1, r1 in generate_matches(alt, nodes[c0:]): + if c1 > 0: + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + new_results.append((c0 + c1, r)) + results = new_results + + def _bare_name_matches(self, nodes): + """Special optimized matcher for bare_name.""" + count = 0 + r = {} + done = False + max = len(nodes) + while not done and count < max: + done = True + for leaf in self.content: + if leaf[0].match(nodes[count], r): + count += 1 + done = False + break + r[self.name] = nodes[:count] + return count, r + + def _recursive_matches(self, nodes, count): + """Helper to recursively yield the matches.""" + assert self.content is not None + if count >= self.min: + yield 0, {} + if count < self.max: + for alt in self.content: + for c0, r0 in generate_matches(alt, nodes): + for c1, r1 in self._recursive_matches(nodes[c0:], count+1): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + + +class NegatedPattern(BasePattern): + + def __init__(self, content=None): + """ + Initializer. + + The argument is either a pattern or None. If it is None, this + only matches an empty sequence (effectively '$' in regex + lingo). If it is not None, this matches whenever the argument + pattern doesn't have any matches. + """ + if content is not None: + assert isinstance(content, BasePattern), repr(content) + self.content = content + + def match(self, node): + # We never match a node in its entirety + return False + + def match_seq(self, nodes): + # We only match an empty sequence of nodes in its entirety + return len(nodes) == 0 + + def generate_matches(self, nodes): + if self.content is None: + # Return a match if there is an empty sequence + if len(nodes) == 0: + yield 0, {} + else: + # Return a match if the argument pattern has no matches + for c, r in self.content.generate_matches(nodes): + return + yield 0, {} + + +def generate_matches(patterns, nodes): + """ + Generator yielding matches for a sequence of patterns and nodes. + + Args: + patterns: a sequence of patterns + nodes: a sequence of nodes + + Yields: + (count, results) tuples where: + count: the entire sequence of patterns matches nodes[:count]; + results: dict containing named submatches. 
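+
+    Illustrative example (added): matching patterns [p1, p2] against
+    nodes [n1, n2, n3] can yield (2, {...}) when p1 consumes n1 and p2
+    consumes n2; the caller decides what to do with the unmatched tail.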
+ """ + if not patterns: + yield 0, {} + else: + p, rest = patterns[0], patterns[1:] + for c0, r0 in p.generate_matches(nodes): + if not rest: + yield c0, r0 + else: + for c1, r1 in generate_matches(rest, nodes[c0:]): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r diff --git a/evalkit_cambrian/lib/python3.10/lib2to3/refactor.py b/evalkit_cambrian/lib/python3.10/lib2to3/refactor.py new file mode 100644 index 0000000000000000000000000000000000000000..3a5aafffc6df07462dac0829ccfc2eb51c6938e5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/lib2to3/refactor.py @@ -0,0 +1,732 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Refactoring framework. + +Used as a main program, this can refactor any number of files and/or +recursively descend down directories. Imported as a module, this +provides infrastructure to write your own refactoring tool. +""" + +__author__ = "Guido van Rossum " + + +# Python imports +import io +import os +import pkgutil +import sys +import logging +import operator +import collections +from itertools import chain + +# Local imports +from .pgen2 import driver, tokenize, token +from .fixer_util import find_root +from . import pytree, pygram +from . import btm_matcher as bm + + +def get_all_fix_names(fixer_pkg, remove_prefix=True): + """Return a sorted list of all available fix names in the given package.""" + pkg = __import__(fixer_pkg, [], [], ["*"]) + fix_names = [] + for finder, name, ispkg in pkgutil.iter_modules(pkg.__path__): + if name.startswith("fix_"): + if remove_prefix: + name = name[4:] + fix_names.append(name) + return fix_names + + +class _EveryNode(Exception): + pass + + +def _get_head_types(pat): + """ Accepts a pytree Pattern Node and returns a set + of the pattern types which will match first. """ + + if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)): + # NodePatters must either have no type and no content + # or a type and content -- so they don't get any farther + # Always return leafs + if pat.type is None: + raise _EveryNode + return {pat.type} + + if isinstance(pat, pytree.NegatedPattern): + if pat.content: + return _get_head_types(pat.content) + raise _EveryNode # Negated Patterns don't have a type + + if isinstance(pat, pytree.WildcardPattern): + # Recurse on each node in content + r = set() + for p in pat.content: + for x in p: + r.update(_get_head_types(x)) + return r + + raise Exception("Oh no! I don't understand pattern %s" %(pat)) + + +def _get_headnode_dict(fixer_list): + """ Accepts a list of fixers and returns a dictionary + of head node type --> fixer list. """ + head_nodes = collections.defaultdict(list) + every = [] + for fixer in fixer_list: + if fixer.pattern: + try: + heads = _get_head_types(fixer.pattern) + except _EveryNode: + every.append(fixer) + else: + for node_type in heads: + head_nodes[node_type].append(fixer) + else: + if fixer._accept_type is not None: + head_nodes[fixer._accept_type].append(fixer) + else: + every.append(fixer) + for node_type in chain(pygram.python_grammar.symbol2number.values(), + pygram.python_grammar.tokens): + head_nodes[node_type].extend(every) + return dict(head_nodes) + + +def get_fixers_from_package(pkg_name): + """ + Return the fully qualified names for fixers in the package pkg_name. + """ + return [pkg_name + "." 
+ fix_name + for fix_name in get_all_fix_names(pkg_name, False)] + +def _identity(obj): + return obj + + +def _detect_future_features(source): + have_docstring = False + gen = tokenize.generate_tokens(io.StringIO(source).readline) + def advance(): + tok = next(gen) + return tok[0], tok[1] + ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT}) + features = set() + try: + while True: + tp, value = advance() + if tp in ignore: + continue + elif tp == token.STRING: + if have_docstring: + break + have_docstring = True + elif tp == token.NAME and value == "from": + tp, value = advance() + if tp != token.NAME or value != "__future__": + break + tp, value = advance() + if tp != token.NAME or value != "import": + break + tp, value = advance() + if tp == token.OP and value == "(": + tp, value = advance() + while tp == token.NAME: + features.add(value) + tp, value = advance() + if tp != token.OP or value != ",": + break + tp, value = advance() + else: + break + except StopIteration: + pass + return frozenset(features) + + +class FixerError(Exception): + """A fixer could not be loaded.""" + + +class RefactoringTool(object): + + _default_options = {"print_function" : False, + "exec_function": False, + "write_unchanged_files" : False} + + CLASS_PREFIX = "Fix" # The prefix for fixer classes + FILE_PREFIX = "fix_" # The prefix for modules with a fixer within + + def __init__(self, fixer_names, options=None, explicit=None): + """Initializer. + + Args: + fixer_names: a list of fixers to import + options: a dict with configuration. + explicit: a list of fixers to run even if they are explicit. + """ + self.fixers = fixer_names + self.explicit = explicit or [] + self.options = self._default_options.copy() + if options is not None: + self.options.update(options) + self.grammar = pygram.python_grammar.copy() + + if self.options['print_function']: + del self.grammar.keywords["print"] + elif self.options['exec_function']: + del self.grammar.keywords["exec"] + + # When this is True, the refactor*() methods will call write_file() for + # files processed even if they were not changed during refactoring. If + # and only if the refactor method's write parameter was True. + self.write_unchanged_files = self.options.get("write_unchanged_files") + self.errors = [] + self.logger = logging.getLogger("RefactoringTool") + self.fixer_log = [] + self.wrote = False + self.driver = driver.Driver(self.grammar, + convert=pytree.convert, + logger=self.logger) + self.pre_order, self.post_order = self.get_fixers() + + + self.files = [] # List of files that were or should be modified + + self.BM = bm.BottomMatcher() + self.bmi_pre_order = [] # Bottom Matcher incompatible fixers + self.bmi_post_order = [] + + for fixer in chain(self.post_order, self.pre_order): + if fixer.BM_compatible: + self.BM.add_fixer(fixer) + # remove fixers that will be handled by the bottom-up + # matcher + elif fixer in self.pre_order: + self.bmi_pre_order.append(fixer) + elif fixer in self.post_order: + self.bmi_post_order.append(fixer) + + self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order) + self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order) + + + + def get_fixers(self): + """Inspects the options to load the requested patterns and handlers. + + Returns: + (pre_order, post_order), where pre_order is the list of fixers that + want a pre-order AST traversal, and post_order is the list that want + post-order traversal. 
+ """ + pre_order_fixers = [] + post_order_fixers = [] + for fix_mod_path in self.fixers: + mod = __import__(fix_mod_path, {}, {}, ["*"]) + fix_name = fix_mod_path.rsplit(".", 1)[-1] + if fix_name.startswith(self.FILE_PREFIX): + fix_name = fix_name[len(self.FILE_PREFIX):] + parts = fix_name.split("_") + class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts]) + try: + fix_class = getattr(mod, class_name) + except AttributeError: + raise FixerError("Can't find %s.%s" % (fix_name, class_name)) from None + fixer = fix_class(self.options, self.fixer_log) + if fixer.explicit and self.explicit is not True and \ + fix_mod_path not in self.explicit: + self.log_message("Skipping optional fixer: %s", fix_name) + continue + + self.log_debug("Adding transformation: %s", fix_name) + if fixer.order == "pre": + pre_order_fixers.append(fixer) + elif fixer.order == "post": + post_order_fixers.append(fixer) + else: + raise FixerError("Illegal fixer order: %r" % fixer.order) + + key_func = operator.attrgetter("run_order") + pre_order_fixers.sort(key=key_func) + post_order_fixers.sort(key=key_func) + return (pre_order_fixers, post_order_fixers) + + def log_error(self, msg, *args, **kwds): + """Called when an error occurs.""" + raise + + def log_message(self, msg, *args): + """Hook to log a message.""" + if args: + msg = msg % args + self.logger.info(msg) + + def log_debug(self, msg, *args): + if args: + msg = msg % args + self.logger.debug(msg) + + def print_output(self, old_text, new_text, filename, equal): + """Called with the old version, new version, and filename of a + refactored file.""" + pass + + def refactor(self, items, write=False, doctests_only=False): + """Refactor a list of files and directories.""" + + for dir_or_file in items: + if os.path.isdir(dir_or_file): + self.refactor_dir(dir_or_file, write, doctests_only) + else: + self.refactor_file(dir_or_file, write, doctests_only) + + def refactor_dir(self, dir_name, write=False, doctests_only=False): + """Descends down a directory and refactor every Python file found. + + Python files are assumed to have a .py extension. + + Files and subdirectories starting with '.' are skipped. + """ + py_ext = os.extsep + "py" + for dirpath, dirnames, filenames in os.walk(dir_name): + self.log_debug("Descending into %s", dirpath) + dirnames.sort() + filenames.sort() + for name in filenames: + if (not name.startswith(".") and + os.path.splitext(name)[1] == py_ext): + fullname = os.path.join(dirpath, name) + self.refactor_file(fullname, write, doctests_only) + # Modify dirnames in-place to remove subdirs with leading dots + dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")] + + def _read_python_source(self, filename): + """ + Do our best to decode a Python source file correctly. + """ + try: + f = open(filename, "rb") + except OSError as err: + self.log_error("Can't open %s: %s", filename, err) + return None, None + try: + encoding = tokenize.detect_encoding(f.readline)[0] + finally: + f.close() + with io.open(filename, "r", encoding=encoding, newline='') as f: + return f.read(), encoding + + def refactor_file(self, filename, write=False, doctests_only=False): + """Refactors a file.""" + input, encoding = self._read_python_source(filename) + if input is None: + # Reading the file failed. 
+ return + input += "\n" # Silence certain parse errors + if doctests_only: + self.log_debug("Refactoring doctests in %s", filename) + output = self.refactor_docstring(input, filename) + if self.write_unchanged_files or output != input: + self.processed_file(output, filename, input, write, encoding) + else: + self.log_debug("No doctest changes in %s", filename) + else: + tree = self.refactor_string(input, filename) + if self.write_unchanged_files or (tree and tree.was_changed): + # The [:-1] is to take off the \n we added earlier + self.processed_file(str(tree)[:-1], filename, + write=write, encoding=encoding) + else: + self.log_debug("No changes in %s", filename) + + def refactor_string(self, data, name): + """Refactor a given input string. + + Args: + data: a string holding the code to be refactored. + name: a human-readable name for use in error/log messages. + + Returns: + An AST corresponding to the refactored input stream; None if + there were errors during the parse. + """ + features = _detect_future_features(data) + if "print_function" in features: + self.driver.grammar = pygram.python_grammar_no_print_statement + try: + tree = self.driver.parse_string(data) + except Exception as err: + self.log_error("Can't parse %s: %s: %s", + name, err.__class__.__name__, err) + return + finally: + self.driver.grammar = self.grammar + tree.future_features = features + self.log_debug("Refactoring %s", name) + self.refactor_tree(tree, name) + return tree + + def refactor_stdin(self, doctests_only=False): + input = sys.stdin.read() + if doctests_only: + self.log_debug("Refactoring doctests in stdin") + output = self.refactor_docstring(input, "") + if self.write_unchanged_files or output != input: + self.processed_file(output, "", input) + else: + self.log_debug("No doctest changes in stdin") + else: + tree = self.refactor_string(input, "") + if self.write_unchanged_files or (tree and tree.was_changed): + self.processed_file(str(tree), "", input) + else: + self.log_debug("No changes in stdin") + + def refactor_tree(self, tree, name): + """Refactors a parse tree (modifying the tree in place). + + For compatible patterns the bottom matcher module is + used. Otherwise the tree is traversed node-to-node for + matches. + + Args: + tree: a pytree.Node instance representing the root of the tree + to be refactored. + name: a human-readable name for this tree. + + Returns: + True if the tree was modified, False otherwise. 
+ """ + + for fixer in chain(self.pre_order, self.post_order): + fixer.start_tree(tree, name) + + #use traditional matching for the incompatible fixers + self.traverse_by(self.bmi_pre_order_heads, tree.pre_order()) + self.traverse_by(self.bmi_post_order_heads, tree.post_order()) + + # obtain a set of candidate nodes + match_set = self.BM.run(tree.leaves()) + + while any(match_set.values()): + for fixer in self.BM.fixers: + if fixer in match_set and match_set[fixer]: + #sort by depth; apply fixers from bottom(of the AST) to top + match_set[fixer].sort(key=pytree.Base.depth, reverse=True) + + if fixer.keep_line_order: + #some fixers(eg fix_imports) must be applied + #with the original file's line order + match_set[fixer].sort(key=pytree.Base.get_lineno) + + for node in list(match_set[fixer]): + if node in match_set[fixer]: + match_set[fixer].remove(node) + + try: + find_root(node) + except ValueError: + # this node has been cut off from a + # previous transformation ; skip + continue + + if node.fixers_applied and fixer in node.fixers_applied: + # do not apply the same fixer again + continue + + results = fixer.match(node) + + if results: + new = fixer.transform(node, results) + if new is not None: + node.replace(new) + #new.fixers_applied.append(fixer) + for node in new.post_order(): + # do not apply the fixer again to + # this or any subnode + if not node.fixers_applied: + node.fixers_applied = [] + node.fixers_applied.append(fixer) + + # update the original match set for + # the added code + new_matches = self.BM.run(new.leaves()) + for fxr in new_matches: + if not fxr in match_set: + match_set[fxr]=[] + + match_set[fxr].extend(new_matches[fxr]) + + for fixer in chain(self.pre_order, self.post_order): + fixer.finish_tree(tree, name) + return tree.was_changed + + def traverse_by(self, fixers, traversal): + """Traverse an AST, applying a set of fixers to each node. + + This is a helper method for refactor_tree(). + + Args: + fixers: a list of fixer instances. + traversal: a generator that yields AST nodes. + + Returns: + None + """ + if not fixers: + return + for node in traversal: + for fixer in fixers[node.type]: + results = fixer.match(node) + if results: + new = fixer.transform(node, results) + if new is not None: + node.replace(new) + node = new + + def processed_file(self, new_text, filename, old_text=None, write=False, + encoding=None): + """ + Called when a file has been refactored and there may be changes. + """ + self.files.append(filename) + if old_text is None: + old_text = self._read_python_source(filename)[0] + if old_text is None: + return + equal = old_text == new_text + self.print_output(old_text, new_text, filename, equal) + if equal: + self.log_debug("No changes to %s", filename) + if not self.write_unchanged_files: + return + if write: + self.write_file(new_text, filename, old_text, encoding) + else: + self.log_debug("Not writing changes to %s", filename) + + def write_file(self, new_text, filename, old_text, encoding=None): + """Writes a string to a file. + + It first shows a unified diff between the old text and the new text, and + then rewrites the file; the latter is only done if the write option is + set. 
+ """ + try: + fp = io.open(filename, "w", encoding=encoding, newline='') + except OSError as err: + self.log_error("Can't create %s: %s", filename, err) + return + + with fp: + try: + fp.write(new_text) + except OSError as err: + self.log_error("Can't write %s: %s", filename, err) + self.log_debug("Wrote changes to %s", filename) + self.wrote = True + + PS1 = ">>> " + PS2 = "... " + + def refactor_docstring(self, input, filename): + """Refactors a docstring, looking for doctests. + + This returns a modified version of the input string. It looks + for doctests, which start with a ">>>" prompt, and may be + continued with "..." prompts, as long as the "..." is indented + the same as the ">>>". + + (Unfortunately we can't use the doctest module's parser, + since, like most parsers, it is not geared towards preserving + the original source.) + """ + result = [] + block = None + block_lineno = None + indent = None + lineno = 0 + for line in input.splitlines(keepends=True): + lineno += 1 + if line.lstrip().startswith(self.PS1): + if block is not None: + result.extend(self.refactor_doctest(block, block_lineno, + indent, filename)) + block_lineno = lineno + block = [line] + i = line.find(self.PS1) + indent = line[:i] + elif (indent is not None and + (line.startswith(indent + self.PS2) or + line == indent + self.PS2.rstrip() + "\n")): + block.append(line) + else: + if block is not None: + result.extend(self.refactor_doctest(block, block_lineno, + indent, filename)) + block = None + indent = None + result.append(line) + if block is not None: + result.extend(self.refactor_doctest(block, block_lineno, + indent, filename)) + return "".join(result) + + def refactor_doctest(self, block, lineno, indent, filename): + """Refactors one doctest. + + A doctest is given as a block of lines, the first of which starts + with ">>>" (possibly indented), while the remaining lines start + with "..." (identically indented). + + """ + try: + tree = self.parse_block(block, lineno, indent) + except Exception as err: + if self.logger.isEnabledFor(logging.DEBUG): + for line in block: + self.log_debug("Source: %s", line.rstrip("\n")) + self.log_error("Can't parse docstring in %s line %s: %s: %s", + filename, lineno, err.__class__.__name__, err) + return block + if self.refactor_tree(tree, filename): + new = str(tree).splitlines(keepends=True) + # Undo the adjustment of the line numbers in wrap_toks() below. + clipped, new = new[:lineno-1], new[lineno-1:] + assert clipped == ["\n"] * (lineno-1), clipped + if not new[-1].endswith("\n"): + new[-1] += "\n" + block = [indent + self.PS1 + new.pop(0)] + if new: + block += [indent + self.PS2 + line for line in new] + return block + + def summarize(self): + if self.wrote: + were = "were" + else: + were = "need to be" + if not self.files: + self.log_message("No files %s modified.", were) + else: + self.log_message("Files that %s modified:", were) + for file in self.files: + self.log_message(file) + if self.fixer_log: + self.log_message("Warnings/messages while refactoring:") + for message in self.fixer_log: + self.log_message(message) + if self.errors: + if len(self.errors) == 1: + self.log_message("There was 1 error:") + else: + self.log_message("There were %d errors:", len(self.errors)) + for msg, args, kwds in self.errors: + self.log_message(msg, *args, **kwds) + + def parse_block(self, block, lineno, indent): + """Parses a block into a tree. + + This is necessary to get correct line number / offset information + in the parser diagnostics and embedded into the parse tree. 
+ """ + tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent)) + tree.future_features = frozenset() + return tree + + def wrap_toks(self, block, lineno, indent): + """Wraps a tokenize stream to systematically modify start/end.""" + tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__) + for type, value, (line0, col0), (line1, col1), line_text in tokens: + line0 += lineno - 1 + line1 += lineno - 1 + # Don't bother updating the columns; this is too complicated + # since line_text would also have to be updated and it would + # still break for tokens spanning lines. Let the user guess + # that the column numbers for doctests are relative to the + # end of the prompt string (PS1 or PS2). + yield type, value, (line0, col0), (line1, col1), line_text + + + def gen_lines(self, block, indent): + """Generates lines as expected by tokenize from a list of lines. + + This strips the first len(indent + self.PS1) characters off each line. + """ + prefix1 = indent + self.PS1 + prefix2 = indent + self.PS2 + prefix = prefix1 + for line in block: + if line.startswith(prefix): + yield line[len(prefix):] + elif line == prefix.rstrip() + "\n": + yield "\n" + else: + raise AssertionError("line=%r, prefix=%r" % (line, prefix)) + prefix = prefix2 + while True: + yield "" + + +class MultiprocessingUnsupported(Exception): + pass + + +class MultiprocessRefactoringTool(RefactoringTool): + + def __init__(self, *args, **kwargs): + super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs) + self.queue = None + self.output_lock = None + + def refactor(self, items, write=False, doctests_only=False, + num_processes=1): + if num_processes == 1: + return super(MultiprocessRefactoringTool, self).refactor( + items, write, doctests_only) + try: + import multiprocessing + except ImportError: + raise MultiprocessingUnsupported + if self.queue is not None: + raise RuntimeError("already doing multiple processes") + self.queue = multiprocessing.JoinableQueue() + self.output_lock = multiprocessing.Lock() + processes = [multiprocessing.Process(target=self._child) + for i in range(num_processes)] + try: + for p in processes: + p.start() + super(MultiprocessRefactoringTool, self).refactor(items, write, + doctests_only) + finally: + self.queue.join() + for i in range(num_processes): + self.queue.put(None) + for p in processes: + if p.is_alive(): + p.join() + self.queue = None + + def _child(self): + task = self.queue.get() + while task is not None: + args, kwargs = task + try: + super(MultiprocessRefactoringTool, self).refactor_file( + *args, **kwargs) + finally: + self.queue.task_done() + task = self.queue.get() + + def refactor_file(self, *args, **kwargs): + if self.queue is not None: + self.queue.put((args, kwargs)) + else: + return super(MultiprocessRefactoringTool, self).refactor_file( + *args, **kwargs) diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3afb227d27837d4ba552e197ac92728ce216450f Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/connection.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/connection.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d66419c9751d3e7029aa75ef6783e906be376fed Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/connection.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/context.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..827f7c33d29511435ce643491fab0d4f4d299a23 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/context.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/forkserver.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/forkserver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2b51b0702ca555fed15ceaa1094c1a3e734ed34 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/forkserver.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/heap.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/heap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..126e7df85bf7544d1429df400adb916ecf276251 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/heap.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/managers.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/managers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db4079072ee1d1e1b0aa84c92dfaa687b0014b0d Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/managers.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/pool.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33601e294177d4da423cea290d3844b372309537 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/pool.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_fork.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_fork.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a67f5ee39776bc86a7fa51b1b2e9fa43b58f5531 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_fork.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_forkserver.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_forkserver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c70f0c2eb6152ca67262795b81487b94dc28398 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_forkserver.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_spawn_posix.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_spawn_posix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68323dbb601e36ba06467dfe4439638b84e25fb0 Binary files /dev/null and 
b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_spawn_posix.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_spawn_win32.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_spawn_win32.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e38646523cabef75f992558619cbc86aa34788f Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/popen_spawn_win32.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/process.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..418416c0447308e18371ae62a8b430ef04a0da63 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/process.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/queues.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/queues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1d66715b3e82eb054160d17d631a9451d855530 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/queues.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/reduction.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..060c9ce60397f7a09b12a874b416f16d74ca798a Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/reduction.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/resource_sharer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/resource_sharer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9456cfbd9cc06470a6070428283d379b4f74a15c Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/resource_sharer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/resource_tracker.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/resource_tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3a424afd5062508a9a790306375972d5ac43d29 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/resource_tracker.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/shared_memory.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/shared_memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f357bab3f29384e65f5bcbf5548bdcdc470f364 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/shared_memory.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/sharedctypes.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/sharedctypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad51c76a5c907028405ca0f5cea9f8a2ba55527f Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/sharedctypes.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/spawn.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/spawn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1e2885cd40a86be4222bf26e1f642b1213e5d56 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/spawn.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/synchronize.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/synchronize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9ab2c477ada866618b51c0ff8a614fcb9ce0b15 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/synchronize.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/util.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb81f02a5febd322f9cf6d6426b0850cf506ab64 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/__pycache__/util.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/__init__.py b/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1468609e347b3a0b9281e5c9e6ec311fcb37e5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/__init__.py @@ -0,0 +1,126 @@ +# +# Support for the API of the multiprocessing package using threads +# +# multiprocessing/dummy/__init__.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
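+#
+# Editor's note (not part of the upstream module): a minimal usage sketch,
+# assuming this vendored copy behaves like the stock multiprocessing.dummy.
+# The thread-backed Pool mirrors the process-based Pool API:
+#
+#     from multiprocessing.dummy import Pool
+#
+#     with Pool(4) as pool:                         # 4 worker threads
+#         print(pool.map(len, ["a", "bb", "ccc"]))  # -> [1, 2, 3]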
+# + +__all__ = [ + 'Process', 'current_process', 'active_children', 'freeze_support', + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', + 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' + ] + +# +# Imports +# + +import threading +import sys +import weakref +import array + +from .connection import Pipe +from threading import Lock, RLock, Semaphore, BoundedSemaphore +from threading import Event, Condition, Barrier +from queue import Queue + +# +# +# + +class DummyProcess(threading.Thread): + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): + threading.Thread.__init__(self, group, target, name, args, kwargs) + self._pid = None + self._children = weakref.WeakKeyDictionary() + self._start_called = False + self._parent = current_process() + + def start(self): + if self._parent is not current_process(): + raise RuntimeError( + "Parent is {0!r} but current_process is {1!r}".format( + self._parent, current_process())) + self._start_called = True + if hasattr(self._parent, '_children'): + self._parent._children[self] = None + threading.Thread.start(self) + + @property + def exitcode(self): + if self._start_called and not self.is_alive(): + return 0 + else: + return None + +# +# +# + +Process = DummyProcess +current_process = threading.current_thread +current_process()._children = weakref.WeakKeyDictionary() + +def active_children(): + children = current_process()._children + for p in list(children): + if not p.is_alive(): + children.pop(p, None) + return list(children) + +def freeze_support(): + pass + +# +# +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +dict = dict +list = list + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + + @property + def value(self): + return self._value + + @value.setter + def value(self, value): + self._value = value + + def __repr__(self): + return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) + +def Manager(): + return sys.modules[__name__] + +def shutdown(): + pass + +def Pool(processes=None, initializer=None, initargs=()): + from ..pool import ThreadPool + return ThreadPool(processes, initializer, initargs) + +JoinableQueue = Queue diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9839598a2bf9f1e53a4655eea2ebf0c3e37b5ac0 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/__pycache__/connection.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..526bf54053fd0b4e3e82bea79f703ad06508ab63 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/__pycache__/connection.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/connection.py b/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ce320fcf514083f3a6477e87abf40e9719285a --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/multiprocessing/dummy/connection.py @@ -0,0 +1,75 @@ +# +# Analogue of `multiprocessing.connection` which uses queues instead of sockets +# +# multiprocessing/dummy/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'Client', 'Listener', 'Pipe' ] + +from queue import Queue + + +families = [None] + + +class Listener(object): + + def __init__(self, address=None, family=None, backlog=1): + self._backlog_queue = Queue(backlog) + + def accept(self): + return Connection(*self._backlog_queue.get()) + + def close(self): + self._backlog_queue = None + + @property + def address(self): + return self._backlog_queue + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address): + _in, _out = Queue(), Queue() + address.put((_out, _in)) + return Connection(_in, _out) + + +def Pipe(duplex=True): + a, b = Queue(), Queue() + return Connection(a, b), Connection(b, a) + + +class Connection(object): + + def __init__(self, _in, _out): + self._out = _out + self._in = _in + self.send = self.send_bytes = _out.put + self.recv = self.recv_bytes = _in.get + + def poll(self, timeout=0.0): + if self._in.qsize() > 0: + return True + if timeout <= 0.0: + return False + with self._in.not_empty: + self._in.not_empty.wait(timeout) + return self._in.qsize() > 0 + + def close(self): + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/pool.py b/evalkit_cambrian/lib/python3.10/multiprocessing/pool.py new file mode 100644 index 0000000000000000000000000000000000000000..961d7e5991847af1ea8034a5a37d9159cf4614ae --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/multiprocessing/pool.py @@ -0,0 +1,957 @@ +# +# Module providing the `Pool` class for managing a process pool +# +# multiprocessing/pool.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Pool', 'ThreadPool'] + +# +# Imports +# + +import collections +import itertools +import os +import queue +import threading +import time +import traceback +import types +import warnings + +# If threading is available then ThreadPool should be provided. Therefore +# we avoid top-level imports which are liable to fail on some systems. +from . import util +from . 
import get_context, TimeoutError +from .connection import wait + +# +# Constants representing the state of a pool +# + +INIT = "INIT" +RUN = "RUN" +CLOSE = "CLOSE" +TERMINATE = "TERMINATE" + +# +# Miscellaneous +# + +job_counter = itertools.count() + +def mapstar(args): + return list(map(*args)) + +def starmapstar(args): + return list(itertools.starmap(args[0], args[1])) + +# +# Hack to embed stringification of remote traceback in local traceback +# + +class RemoteTraceback(Exception): + def __init__(self, tb): + self.tb = tb + def __str__(self): + return self.tb + +class ExceptionWithTraceback: + def __init__(self, exc, tb): + tb = traceback.format_exception(type(exc), exc, tb) + tb = ''.join(tb) + self.exc = exc + self.tb = '\n"""\n%s"""' % tb + def __reduce__(self): + return rebuild_exc, (self.exc, self.tb) + +def rebuild_exc(exc, tb): + exc.__cause__ = RemoteTraceback(tb) + return exc + +# +# Code run by worker processes +# + +class MaybeEncodingError(Exception): + """Wraps possible unpickleable errors, so they can be + safely sent through the socket.""" + + def __init__(self, exc, value): + self.exc = repr(exc) + self.value = repr(value) + super(MaybeEncodingError, self).__init__(self.exc, self.value) + + def __str__(self): + return "Error sending result: '%s'. Reason: '%s'" % (self.value, + self.exc) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + +def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, + wrap_exception=False): + if (maxtasks is not None) and not (isinstance(maxtasks, int) + and maxtasks >= 1): + raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) + put = outqueue.put + get = inqueue.get + if hasattr(inqueue, '_writer'): + inqueue._writer.close() + outqueue._reader.close() + + if initializer is not None: + initializer(*initargs) + + completed = 0 + while maxtasks is None or (maxtasks and completed < maxtasks): + try: + task = get() + except (EOFError, OSError): + util.debug('worker got EOFError or OSError -- exiting') + break + + if task is None: + util.debug('worker got sentinel -- exiting') + break + + job, i, func, args, kwds = task + try: + result = (True, func(*args, **kwds)) + except Exception as e: + if wrap_exception and func is not _helper_reraises_exception: + e = ExceptionWithTraceback(e, e.__traceback__) + result = (False, e) + try: + put((job, i, result)) + except Exception as e: + wrapped = MaybeEncodingError(e, result[1]) + util.debug("Possible encoding error while sending result: %s" % ( + wrapped)) + put((job, i, (False, wrapped))) + + task = job = result = func = args = kwds = None + completed += 1 + util.debug('worker exiting after %d tasks' % completed) + +def _helper_reraises_exception(ex): + 'Pickle-able helper function for use by _guarded_task_generation.' + raise ex + +# +# Class representing a process pool +# + +class _PoolCache(dict): + """ + Class that implements a cache for the Pool class that will notify + the pool management threads every time the cache is emptied. The + notification is done by the use of a queue that is provided when + instantiating the cache. + """ + def __init__(self, /, *args, notifier=None, **kwds): + self.notifier = notifier + super().__init__(*args, **kwds) + + def __delitem__(self, item): + super().__delitem__(item) + + # Notify that the cache is empty. This is important because the + # pool keeps maintaining workers until the cache gets drained. 
This + # eliminates a race condition in which a task is finished after + # the pool's _handle_workers method has entered another iteration of the + # loop. In this situation, the only event that can wake up the pool + # is the cache to be emptied (no more tasks available). + if not self: + self.notifier.put(None) + +class Pool(object): + ''' + Class which supports an async version of applying functions to arguments. + ''' + _wrap_exception = True + + @staticmethod + def Process(ctx, *args, **kwds): + return ctx.Process(*args, **kwds) + + def __init__(self, processes=None, initializer=None, initargs=(), + maxtasksperchild=None, context=None): + # Attributes initialized early to make sure that they exist in + # __del__() if __init__() raises an exception + self._pool = [] + self._state = INIT + + self._ctx = context or get_context() + self._setup_queues() + self._taskqueue = queue.SimpleQueue() + # The _change_notifier queue exists to wake up self._handle_workers() + # when the cache (self._cache) is empty or when there is a change in + # the _state variable of the thread that runs _handle_workers. + self._change_notifier = self._ctx.SimpleQueue() + self._cache = _PoolCache(notifier=self._change_notifier) + self._maxtasksperchild = maxtasksperchild + self._initializer = initializer + self._initargs = initargs + + if processes is None: + processes = os.cpu_count() or 1 + if processes < 1: + raise ValueError("Number of processes must be at least 1") + if maxtasksperchild is not None: + if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: + raise ValueError("maxtasksperchild must be a positive int or None") + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + self._processes = processes + try: + self._repopulate_pool() + except Exception: + for p in self._pool: + if p.exitcode is None: + p.terminate() + for p in self._pool: + p.join() + raise + + sentinels = self._get_sentinels() + + self._worker_handler = threading.Thread( + target=Pool._handle_workers, + args=(self._cache, self._taskqueue, self._ctx, self.Process, + self._processes, self._pool, self._inqueue, self._outqueue, + self._initializer, self._initargs, self._maxtasksperchild, + self._wrap_exception, sentinels, self._change_notifier) + ) + self._worker_handler.daemon = True + self._worker_handler._state = RUN + self._worker_handler.start() + + + self._task_handler = threading.Thread( + target=Pool._handle_tasks, + args=(self._taskqueue, self._quick_put, self._outqueue, + self._pool, self._cache) + ) + self._task_handler.daemon = True + self._task_handler._state = RUN + self._task_handler.start() + + self._result_handler = threading.Thread( + target=Pool._handle_results, + args=(self._outqueue, self._quick_get, self._cache) + ) + self._result_handler.daemon = True + self._result_handler._state = RUN + self._result_handler.start() + + self._terminate = util.Finalize( + self, self._terminate_pool, + args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, + self._change_notifier, self._worker_handler, self._task_handler, + self._result_handler, self._cache), + exitpriority=15 + ) + self._state = RUN + + # Copy globals as function locals to make sure that they are available + # during Python shutdown when the Pool is destroyed. 
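+    # (Binding _warn and RUN as keyword defaults on __del__ captures them at
+    # function-definition time, so they remain usable even while module
+    # globals are being torn down.)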
+ def __del__(self, _warn=warnings.warn, RUN=RUN): + if self._state == RUN: + _warn(f"unclosed running multiprocessing pool {self!r}", + ResourceWarning, source=self) + if getattr(self, '_change_notifier', None) is not None: + self._change_notifier.put(None) + + def __repr__(self): + cls = self.__class__ + return (f'<{cls.__module__}.{cls.__qualname__} ' + f'state={self._state} ' + f'pool_size={len(self._pool)}>') + + def _get_sentinels(self): + task_queue_sentinels = [self._outqueue._reader] + self_notifier_sentinels = [self._change_notifier._reader] + return [*task_queue_sentinels, *self_notifier_sentinels] + + @staticmethod + def _get_worker_sentinels(workers): + return [worker.sentinel for worker in + workers if hasattr(worker, "sentinel")] + + @staticmethod + def _join_exited_workers(pool): + """Cleanup after any worker processes which have exited due to reaching + their specified lifetime. Returns True if any workers were cleaned up. + """ + cleaned = False + for i in reversed(range(len(pool))): + worker = pool[i] + if worker.exitcode is not None: + # worker exited + util.debug('cleaning up worker %d' % i) + worker.join() + cleaned = True + del pool[i] + return cleaned + + def _repopulate_pool(self): + return self._repopulate_pool_static(self._ctx, self.Process, + self._processes, + self._pool, self._inqueue, + self._outqueue, self._initializer, + self._initargs, + self._maxtasksperchild, + self._wrap_exception) + + @staticmethod + def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, + outqueue, initializer, initargs, + maxtasksperchild, wrap_exception): + """Bring the number of pool processes up to the specified number, + for use after reaping workers which have exited. + """ + for i in range(processes - len(pool)): + w = Process(ctx, target=worker, + args=(inqueue, outqueue, + initializer, + initargs, maxtasksperchild, + wrap_exception)) + w.name = w.name.replace('Process', 'PoolWorker') + w.daemon = True + w.start() + pool.append(w) + util.debug('added worker') + + @staticmethod + def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, + initializer, initargs, maxtasksperchild, + wrap_exception): + """Clean up any exited workers and start replacements for them. + """ + if Pool._join_exited_workers(pool): + Pool._repopulate_pool_static(ctx, Process, processes, pool, + inqueue, outqueue, initializer, + initargs, maxtasksperchild, + wrap_exception) + + def _setup_queues(self): + self._inqueue = self._ctx.SimpleQueue() + self._outqueue = self._ctx.SimpleQueue() + self._quick_put = self._inqueue._writer.send + self._quick_get = self._outqueue._reader.recv + + def _check_running(self): + if self._state != RUN: + raise ValueError("Pool not running") + + def apply(self, func, args=(), kwds={}): + ''' + Equivalent of `func(*args, **kwds)`. + Pool must be running. + ''' + return self.apply_async(func, args, kwds).get() + + def map(self, func, iterable, chunksize=None): + ''' + Apply `func` to each element in `iterable`, collecting the results + in a list that is returned. + ''' + return self._map_async(func, iterable, mapstar, chunksize).get() + + def starmap(self, func, iterable, chunksize=None): + ''' + Like `map()` method but the elements of the `iterable` are expected to + be iterables as well and will be unpacked as arguments. Hence + `func` and (a, b) becomes func(a, b). 
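+
+        For example, starmap(pow, [(2, 3), (10, 2)]) evaluates pow(2, 3)
+        and pow(10, 2) and returns [8, 100].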
+ ''' + return self._map_async(func, iterable, starmapstar, chunksize).get() + + def starmap_async(self, func, iterable, chunksize=None, callback=None, + error_callback=None): + ''' + Asynchronous version of `starmap()` method. + ''' + return self._map_async(func, iterable, starmapstar, chunksize, + callback, error_callback) + + def _guarded_task_generation(self, result_job, func, iterable): + '''Provides a generator of tasks for imap and imap_unordered with + appropriate handling for iterables which throw exceptions during + iteration.''' + try: + i = -1 + for i, x in enumerate(iterable): + yield (result_job, i, func, (x,), {}) + except Exception as e: + yield (result_job, i+1, _helper_reraises_exception, (e,), {}) + + def imap(self, func, iterable, chunksize=1): + ''' + Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. + ''' + self._check_running() + if chunksize == 1: + result = IMapIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, func, iterable), + result._set_length + )) + return result + else: + if chunksize < 1: + raise ValueError( + "Chunksize must be 1+, not {0:n}".format( + chunksize)) + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapstar, + task_batches), + result._set_length + )) + return (item for chunk in result for item in chunk) + + def imap_unordered(self, func, iterable, chunksize=1): + ''' + Like `imap()` method but ordering of results is arbitrary. + ''' + self._check_running() + if chunksize == 1: + result = IMapUnorderedIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, func, iterable), + result._set_length + )) + return result + else: + if chunksize < 1: + raise ValueError( + "Chunksize must be 1+, not {0!r}".format(chunksize)) + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapUnorderedIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapstar, + task_batches), + result._set_length + )) + return (item for chunk in result for item in chunk) + + def apply_async(self, func, args=(), kwds={}, callback=None, + error_callback=None): + ''' + Asynchronous version of `apply()` method. + ''' + self._check_running() + result = ApplyResult(self, callback, error_callback) + self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) + return result + + def map_async(self, func, iterable, chunksize=None, callback=None, + error_callback=None): + ''' + Asynchronous version of `map()` method. + ''' + return self._map_async(func, iterable, mapstar, chunksize, callback, + error_callback) + + def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, + error_callback=None): + ''' + Helper function to implement map, starmap and their async counterparts. 
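+
+        When chunksize is None it is derived from the input length as
+        roughly len(iterable) / (4 * number of workers), rounded up.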
+ ''' + self._check_running() + if not hasattr(iterable, '__len__'): + iterable = list(iterable) + + if chunksize is None: + chunksize, extra = divmod(len(iterable), len(self._pool) * 4) + if extra: + chunksize += 1 + if len(iterable) == 0: + chunksize = 0 + + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = MapResult(self, chunksize, len(iterable), callback, + error_callback=error_callback) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapper, + task_batches), + None + ) + ) + return result + + @staticmethod + def _wait_for_updates(sentinels, change_notifier, timeout=None): + wait(sentinels, timeout=timeout) + while not change_notifier.empty(): + change_notifier.get() + + @classmethod + def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, + pool, inqueue, outqueue, initializer, initargs, + maxtasksperchild, wrap_exception, sentinels, + change_notifier): + thread = threading.current_thread() + + # Keep maintaining workers until the cache gets drained, unless the pool + # is terminated. + while thread._state == RUN or (cache and thread._state != TERMINATE): + cls._maintain_pool(ctx, Process, processes, pool, inqueue, + outqueue, initializer, initargs, + maxtasksperchild, wrap_exception) + + current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] + + cls._wait_for_updates(current_sentinels, change_notifier) + # send sentinel to stop workers + taskqueue.put(None) + util.debug('worker handler exiting') + + @staticmethod + def _handle_tasks(taskqueue, put, outqueue, pool, cache): + thread = threading.current_thread() + + for taskseq, set_length in iter(taskqueue.get, None): + task = None + try: + # iterating taskseq cannot fail + for task in taskseq: + if thread._state != RUN: + util.debug('task handler found thread._state != RUN') + break + try: + put(task) + except Exception as e: + job, idx = task[:2] + try: + cache[job]._set(idx, (False, e)) + except KeyError: + pass + else: + if set_length: + util.debug('doing set_length()') + idx = task[1] if task else -1 + set_length(idx + 1) + continue + break + finally: + task = taskseq = job = None + else: + util.debug('task handler got sentinel') + + try: + # tell result handler to finish when cache is empty + util.debug('task handler sending sentinel to result handler') + outqueue.put(None) + + # tell workers there is no more work + util.debug('task handler sending sentinel to workers') + for p in pool: + put(None) + except OSError: + util.debug('task handler got OSError when sending sentinels') + + util.debug('task handler exiting') + + @staticmethod + def _handle_results(outqueue, get, cache): + thread = threading.current_thread() + + while 1: + try: + task = get() + except (OSError, EOFError): + util.debug('result handler got EOFError/OSError -- exiting') + return + + if thread._state != RUN: + assert thread._state == TERMINATE, "Thread not in TERMINATE" + util.debug('result handler found thread._state=TERMINATE') + break + + if task is None: + util.debug('result handler got sentinel') + break + + job, i, obj = task + try: + cache[job]._set(i, obj) + except KeyError: + pass + task = job = obj = None + + while cache and thread._state != TERMINATE: + try: + task = get() + except (OSError, EOFError): + util.debug('result handler got EOFError/OSError -- exiting') + return + + if task is None: + util.debug('result handler ignoring extra sentinel') + continue + job, i, obj = task + try: + cache[job]._set(i, obj) + except KeyError: + pass + task = job = obj = None + + if 
hasattr(outqueue, '_reader'): + util.debug('ensuring that outqueue is not full') + # If we don't make room available in outqueue then + # attempts to add the sentinel (None) to outqueue may + # block. There is guaranteed to be no more than 2 sentinels. + try: + for i in range(10): + if not outqueue._reader.poll(): + break + get() + except (OSError, EOFError): + pass + + util.debug('result handler exiting: len(cache)=%s, thread._state=%s', + len(cache), thread._state) + + @staticmethod + def _get_tasks(func, it, size): + it = iter(it) + while 1: + x = tuple(itertools.islice(it, size)) + if not x: + return + yield (func, x) + + def __reduce__(self): + raise NotImplementedError( + 'pool objects cannot be passed between processes or pickled' + ) + + def close(self): + util.debug('closing pool') + if self._state == RUN: + self._state = CLOSE + self._worker_handler._state = CLOSE + self._change_notifier.put(None) + + def terminate(self): + util.debug('terminating pool') + self._state = TERMINATE + self._terminate() + + def join(self): + util.debug('joining pool') + if self._state == RUN: + raise ValueError("Pool is still running") + elif self._state not in (CLOSE, TERMINATE): + raise ValueError("In unknown state") + self._worker_handler.join() + self._task_handler.join() + self._result_handler.join() + for p in self._pool: + p.join() + + @staticmethod + def _help_stuff_finish(inqueue, task_handler, size): + # task_handler may be blocked trying to put items on inqueue + util.debug('removing tasks from inqueue until task handler finished') + inqueue._rlock.acquire() + while task_handler.is_alive() and inqueue._reader.poll(): + inqueue._reader.recv() + time.sleep(0) + + @classmethod + def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, + worker_handler, task_handler, result_handler, cache): + # this is guaranteed to only be called once + util.debug('finalizing pool') + + # Notify that the worker_handler state has been changed so the + # _handle_workers loop can be unblocked (and exited) in order to + # send the finalization sentinel to all the workers. + worker_handler._state = TERMINATE + change_notifier.put(None) + + task_handler._state = TERMINATE + + util.debug('helping task handler/workers to finish') + cls._help_stuff_finish(inqueue, task_handler, len(pool)) + + if (not result_handler.is_alive()) and (len(cache) != 0): + raise AssertionError( + "Cannot have cache with result_handler not alive") + + result_handler._state = TERMINATE + change_notifier.put(None) + outqueue.put(None) # sentinel + + # We must wait for the worker handler to exit before terminating + # workers because we don't want workers to be restarted behind our back. + util.debug('joining worker handler') + if threading.current_thread() is not worker_handler: + worker_handler.join() + + # Terminate workers which haven't already finished. 
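+        # (ThreadPool workers are thread-based DummyProcess objects with no
+        # terminate() method, hence the hasattr() checks below.)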
+ if pool and hasattr(pool[0], 'terminate'): + util.debug('terminating workers') + for p in pool: + if p.exitcode is None: + p.terminate() + + util.debug('joining task handler') + if threading.current_thread() is not task_handler: + task_handler.join() + + util.debug('joining result handler') + if threading.current_thread() is not result_handler: + result_handler.join() + + if pool and hasattr(pool[0], 'terminate'): + util.debug('joining pool workers') + for p in pool: + if p.is_alive(): + # worker has not yet exited + util.debug('cleaning up worker %d' % p.pid) + p.join() + + def __enter__(self): + self._check_running() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.terminate() + +# +# Class whose instances are returned by `Pool.apply_async()` +# + +class ApplyResult(object): + + def __init__(self, pool, callback, error_callback): + self._pool = pool + self._event = threading.Event() + self._job = next(job_counter) + self._cache = pool._cache + self._callback = callback + self._error_callback = error_callback + self._cache[self._job] = self + + def ready(self): + return self._event.is_set() + + def successful(self): + if not self.ready(): + raise ValueError("{0!r} not ready".format(self)) + return self._success + + def wait(self, timeout=None): + self._event.wait(timeout) + + def get(self, timeout=None): + self.wait(timeout) + if not self.ready(): + raise TimeoutError + if self._success: + return self._value + else: + raise self._value + + def _set(self, i, obj): + self._success, self._value = obj + if self._callback and self._success: + self._callback(self._value) + if self._error_callback and not self._success: + self._error_callback(self._value) + self._event.set() + del self._cache[self._job] + self._pool = None + + __class_getitem__ = classmethod(types.GenericAlias) + +AsyncResult = ApplyResult # create alias -- see #17805 + +# +# Class whose instances are returned by `Pool.map_async()` +# + +class MapResult(ApplyResult): + + def __init__(self, pool, chunksize, length, callback, error_callback): + ApplyResult.__init__(self, pool, callback, + error_callback=error_callback) + self._success = True + self._value = [None] * length + self._chunksize = chunksize + if chunksize <= 0: + self._number_left = 0 + self._event.set() + del self._cache[self._job] + else: + self._number_left = length//chunksize + bool(length % chunksize) + + def _set(self, i, success_result): + self._number_left -= 1 + success, result = success_result + if success and self._success: + self._value[i*self._chunksize:(i+1)*self._chunksize] = result + if self._number_left == 0: + if self._callback: + self._callback(self._value) + del self._cache[self._job] + self._event.set() + self._pool = None + else: + if not success and self._success: + # only store first exception + self._success = False + self._value = result + if self._number_left == 0: + # only consider the result ready once all jobs are done + if self._error_callback: + self._error_callback(self._value) + del self._cache[self._job] + self._event.set() + self._pool = None + +# +# Class whose instances are returned by `Pool.imap()` +# + +class IMapIterator(object): + + def __init__(self, pool): + self._pool = pool + self._cond = threading.Condition(threading.Lock()) + self._job = next(job_counter) + self._cache = pool._cache + self._items = collections.deque() + self._index = 0 + self._length = None + self._unsorted = {} + self._cache[self._job] = self + + def __iter__(self): + return self + + def next(self, timeout=None): + with 
self._cond: + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._pool = None + raise StopIteration from None + self._cond.wait(timeout) + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._pool = None + raise StopIteration from None + raise TimeoutError from None + + success, value = item + if success: + return value + raise value + + __next__ = next # XXX + + def _set(self, i, obj): + with self._cond: + if self._index == i: + self._items.append(obj) + self._index += 1 + while self._index in self._unsorted: + obj = self._unsorted.pop(self._index) + self._items.append(obj) + self._index += 1 + self._cond.notify() + else: + self._unsorted[i] = obj + + if self._index == self._length: + del self._cache[self._job] + self._pool = None + + def _set_length(self, length): + with self._cond: + self._length = length + if self._index == self._length: + self._cond.notify() + del self._cache[self._job] + self._pool = None + +# +# Class whose instances are returned by `Pool.imap_unordered()` +# + +class IMapUnorderedIterator(IMapIterator): + + def _set(self, i, obj): + with self._cond: + self._items.append(obj) + self._index += 1 + self._cond.notify() + if self._index == self._length: + del self._cache[self._job] + self._pool = None + +# +# +# + +class ThreadPool(Pool): + _wrap_exception = False + + @staticmethod + def Process(ctx, *args, **kwds): + from .dummy import Process + return Process(*args, **kwds) + + def __init__(self, processes=None, initializer=None, initargs=()): + Pool.__init__(self, processes, initializer, initargs) + + def _setup_queues(self): + self._inqueue = queue.SimpleQueue() + self._outqueue = queue.SimpleQueue() + self._quick_put = self._inqueue.put + self._quick_get = self._outqueue.get + + def _get_sentinels(self): + return [self._change_notifier._reader] + + @staticmethod + def _get_worker_sentinels(workers): + return [] + + @staticmethod + def _help_stuff_finish(inqueue, task_handler, size): + # drain inqueue, and put sentinels at its head to make workers finish + try: + while True: + inqueue.get(block=False) + except queue.Empty: + pass + for i in range(size): + inqueue.put(None) + + def _wait_for_updates(self, sentinels, change_notifier, timeout): + time.sleep(timeout) diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/sharedctypes.py b/evalkit_cambrian/lib/python3.10/multiprocessing/sharedctypes.py new file mode 100644 index 0000000000000000000000000000000000000000..6071707027bea46dccfc4d92fb3195060faf07ce --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/multiprocessing/sharedctypes.py @@ -0,0 +1,240 @@ +# +# Module which supports allocation of ctypes objects from shared memory +# +# multiprocessing/sharedctypes.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import ctypes +import weakref + +from . import heap +from . 
import get_context + +from .context import reduction, assert_spawning +_ForkingPickler = reduction.ForkingPickler + +__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] + +# +# +# + +typecode_to_type = { + 'c': ctypes.c_char, 'u': ctypes.c_wchar, + 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, + 'h': ctypes.c_short, 'H': ctypes.c_ushort, + 'i': ctypes.c_int, 'I': ctypes.c_uint, + 'l': ctypes.c_long, 'L': ctypes.c_ulong, + 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, + 'f': ctypes.c_float, 'd': ctypes.c_double + } + +# +# +# + +def _new_value(type_): + size = ctypes.sizeof(type_) + wrapper = heap.BufferWrapper(size) + return rebuild_ctype(type_, wrapper, None) + +def RawValue(typecode_or_type, *args): + ''' + Returns a ctypes object allocated from shared memory + ''' + type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) + obj = _new_value(type_) + ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) + obj.__init__(*args) + return obj + +def RawArray(typecode_or_type, size_or_initializer): + ''' + Returns a ctypes array allocated from shared memory + ''' + type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) + if isinstance(size_or_initializer, int): + type_ = type_ * size_or_initializer + obj = _new_value(type_) + ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) + return obj + else: + type_ = type_ * len(size_or_initializer) + result = _new_value(type_) + result.__init__(*size_or_initializer) + return result + +def Value(typecode_or_type, *args, lock=True, ctx=None): + ''' + Return a synchronization wrapper for a Value + ''' + obj = RawValue(typecode_or_type, *args) + if lock is False: + return obj + if lock in (True, None): + ctx = ctx or get_context() + lock = ctx.RLock() + if not hasattr(lock, 'acquire'): + raise AttributeError("%r has no method 'acquire'" % lock) + return synchronized(obj, lock, ctx=ctx) + +def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): + ''' + Return a synchronization wrapper for a RawArray + ''' + obj = RawArray(typecode_or_type, size_or_initializer) + if lock is False: + return obj + if lock in (True, None): + ctx = ctx or get_context() + lock = ctx.RLock() + if not hasattr(lock, 'acquire'): + raise AttributeError("%r has no method 'acquire'" % lock) + return synchronized(obj, lock, ctx=ctx) + +def copy(obj): + new_obj = _new_value(type(obj)) + ctypes.pointer(new_obj)[0] = obj + return new_obj + +def synchronized(obj, lock=None, ctx=None): + assert not isinstance(obj, SynchronizedBase), 'object already synchronized' + ctx = ctx or get_context() + + if isinstance(obj, ctypes._SimpleCData): + return Synchronized(obj, lock, ctx) + elif isinstance(obj, ctypes.Array): + if obj._type_ is ctypes.c_char: + return SynchronizedString(obj, lock, ctx) + return SynchronizedArray(obj, lock, ctx) + else: + cls = type(obj) + try: + scls = class_cache[cls] + except KeyError: + names = [field[0] for field in cls._fields_] + d = {name: make_property(name) for name in names} + classname = 'Synchronized' + cls.__name__ + scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) + return scls(obj, lock, ctx) + +# +# Functions for pickling/unpickling +# + +def reduce_ctype(obj): + assert_spawning(obj) + if isinstance(obj, ctypes.Array): + return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) + else: + return rebuild_ctype, (type(obj), obj._wrapper, None) + +def rebuild_ctype(type_, wrapper, length): + if length is not None: + type_ = type_ * length + 
_ForkingPickler.register(type_, reduce_ctype) + buf = wrapper.create_memoryview() + obj = type_.from_buffer(buf) + obj._wrapper = wrapper + return obj + +# +# Function to create properties +# + +def make_property(name): + try: + return prop_cache[name] + except KeyError: + d = {} + exec(template % ((name,)*7), d) + prop_cache[name] = d[name] + return d[name] + +template = ''' +def get%s(self): + self.acquire() + try: + return self._obj.%s + finally: + self.release() +def set%s(self, value): + self.acquire() + try: + self._obj.%s = value + finally: + self.release() +%s = property(get%s, set%s) +''' + +prop_cache = {} +class_cache = weakref.WeakKeyDictionary() + +# +# Synchronized wrappers +# + +class SynchronizedBase(object): + + def __init__(self, obj, lock=None, ctx=None): + self._obj = obj + if lock: + self._lock = lock + else: + ctx = ctx or get_context(force=True) + self._lock = ctx.RLock() + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def __reduce__(self): + assert_spawning(self) + return synchronized, (self._obj, self._lock) + + def get_obj(self): + return self._obj + + def get_lock(self): + return self._lock + + def __repr__(self): + return '<%s wrapper for %s>' % (type(self).__name__, self._obj) + + +class Synchronized(SynchronizedBase): + value = make_property('value') + + +class SynchronizedArray(SynchronizedBase): + + def __len__(self): + return len(self._obj) + + def __getitem__(self, i): + with self: + return self._obj[i] + + def __setitem__(self, i, value): + with self: + self._obj[i] = value + + def __getslice__(self, start, stop): + with self: + return self._obj[start:stop] + + def __setslice__(self, start, stop, values): + with self: + self._obj[start:stop] = values + + +class SynchronizedString(SynchronizedArray): + value = make_property('value') + raw = make_property('raw') diff --git a/evalkit_cambrian/lib/python3.10/multiprocessing/synchronize.py b/evalkit_cambrian/lib/python3.10/multiprocessing/synchronize.py new file mode 100644 index 0000000000000000000000000000000000000000..d0be48f1fd7a8f14b935f74d89c168d831b84dbc --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/multiprocessing/synchronize.py @@ -0,0 +1,394 @@ +# +# Module implementing synchronization primitives +# +# multiprocessing/synchronize.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' + ] + +import threading +import sys +import tempfile +import _multiprocessing +import time + +from . import context +from . import process +from . import util + +# Try to import the mp.synchronize module cleanly, if it fails +# raise ImportError for platforms lacking a working sem_open implementation. 
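+# (sem_open is unimplemented or broken on a few platforms, e.g. some older
+# FreeBSD releases, in which case _multiprocessing exposes no SemLock.)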
+# See issue 3770 +try: + from _multiprocessing import SemLock, sem_unlink +except (ImportError): + raise ImportError("This platform lacks a functioning sem_open" + + " implementation, therefore, the required" + + " synchronization primitives needed will not" + + " function, see issue 3770.") + +# +# Constants +# + +RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) +SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX + +# +# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` +# + +class SemLock(object): + + _rand = tempfile._RandomNameSequence() + + def __init__(self, kind, value, maxvalue, *, ctx): + if ctx is None: + ctx = context._default_context.get_context() + name = ctx.get_start_method() + unlink_now = sys.platform == 'win32' or name == 'fork' + for i in range(100): + try: + sl = self._semlock = _multiprocessing.SemLock( + kind, value, maxvalue, self._make_name(), + unlink_now) + except FileExistsError: + pass + else: + break + else: + raise FileExistsError('cannot find name for semaphore') + + util.debug('created semlock with handle %s' % sl.handle) + self._make_methods() + + if sys.platform != 'win32': + def _after_fork(obj): + obj._semlock._after_fork() + util.register_after_fork(self, _after_fork) + + if self._semlock.name is not None: + # We only get here if we are on Unix with forking + # disabled. When the object is garbage collected or the + # process shuts down we unlink the semaphore name + from .resource_tracker import register + register(self._semlock.name, "semaphore") + util.Finalize(self, SemLock._cleanup, (self._semlock.name,), + exitpriority=0) + + @staticmethod + def _cleanup(name): + from .resource_tracker import unregister + sem_unlink(name) + unregister(name, "semaphore") + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def __enter__(self): + return self._semlock.__enter__() + + def __exit__(self, *args): + return self._semlock.__exit__(*args) + + def __getstate__(self): + context.assert_spawning(self) + sl = self._semlock + if sys.platform == 'win32': + h = context.get_spawning_popen().duplicate_for_child(sl.handle) + else: + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) + + def __setstate__(self, state): + self._semlock = _multiprocessing.SemLock._rebuild(*state) + util.debug('recreated blocker with handle %r' % state[0]) + self._make_methods() + + @staticmethod + def _make_name(): + return '%s-%s' % (process.current_process()._config['semprefix'], + next(SemLock._rand)) + +# +# Semaphore +# + +class Semaphore(SemLock): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) + + def get_value(self): + return self._semlock._get_value() + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s)>' % (self.__class__.__name__, value) + +# +# Bounded semaphore +# + +class BoundedSemaphore(Semaphore): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s, maxvalue=%s)>' % \ + (self.__class__.__name__, value, self._semlock.maxvalue) + +# +# Non-recursive lock +# + +class Lock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if 
threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + elif self._semlock._get_value() == 1: + name = 'None' + elif self._semlock._count() > 0: + name = 'SomeOtherThread' + else: + name = 'SomeOtherProcess' + except Exception: + name = 'unknown' + return '<%s(owner=%s)>' % (self.__class__.__name__, name) + +# +# Recursive lock +# + +class RLock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + count = self._semlock._count() + elif self._semlock._get_value() == 1: + name, count = 'None', 0 + elif self._semlock._count() > 0: + name, count = 'SomeOtherThread', 'nonzero' + else: + name, count = 'SomeOtherProcess', 'nonzero' + except Exception: + name, count = 'unknown', 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) + +# +# Condition variable +# + +class Condition(object): + + def __init__(self, lock=None, *, ctx): + self._lock = lock or ctx.RLock() + self._sleeping_count = ctx.Semaphore(0) + self._woken_count = ctx.Semaphore(0) + self._wait_semaphore = ctx.Semaphore(0) + self._make_methods() + + def __getstate__(self): + context.assert_spawning(self) + return (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) + + def __setstate__(self, state): + (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __repr__(self): + try: + num_waiters = (self._sleeping_count._semlock._get_value() - + self._woken_count._semlock._get_value()) + except Exception: + num_waiters = 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) + + def wait(self, timeout=None): + assert self._lock._semlock._is_mine(), \ + 'must acquire() condition before using wait()' + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for i in range(count): + self._lock.release() + + try: + # wait for notification or timeout + return self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for i in range(count): + self._lock.acquire() + + def notify(self, n=1): + assert self._lock._semlock._is_mine(), 'lock is not owned' + assert not self._wait_semaphore.acquire( + False), ('notify: Should not have been able to acquire ' + + '_wait_semaphore') + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res, ('notify: Bug in sleeping_count.acquire' + + '- res should not be False') + + sleepers = 0 + while sleepers < n and self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one sleeper + sleepers += 1 + + if sleepers: + for i in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some timeouts just happened + while self._wait_semaphore.acquire(False): + pass 
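+ # Reviewer annotation (not upstream CPython text): the notify() protocol
+ # above is built entirely from semaphores. It first reconciles
+ # _sleeping_count against _woken_count to discard waiters that timed out
+ # since the last notify*(), releases _wait_semaphore once per sleeper it
+ # intends to wake, blocks on _woken_count until each woken sleeper has
+ # checked in, and finally drains _wait_semaphore so that a wake-up aimed
+ # at a waiter that timed out at the last moment cannot leak into a
+ # future wait().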
+ + def notify_all(self): + self.notify(n=sys.maxsize) + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = time.monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - time.monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + +# +# Event +# + +class Event(object): + + def __init__(self, *, ctx): + self._cond = ctx.Condition(ctx.Lock()) + self._flag = ctx.Semaphore(0) + + def is_set(self): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def set(self): + with self._cond: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + + def clear(self): + with self._cond: + self._flag.acquire(False) + + def wait(self, timeout=None): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False + +# +# Barrier +# + +class Barrier(threading.Barrier): + + def __init__(self, parties, action=None, timeout=None, *, ctx): + import struct + from .heap import BufferWrapper + wrapper = BufferWrapper(struct.calcsize('i') * 2) + cond = ctx.Condition() + self.__setstate__((parties, action, timeout, cond, wrapper)) + self._state = 0 + self._count = 0 + + def __setstate__(self, state): + (self._parties, self._action, self._timeout, + self._cond, self._wrapper) = state + self._array = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._parties, self._action, self._timeout, + self._cond, self._wrapper) + + @property + def _state(self): + return self._array[0] + + @_state.setter + def _state(self, value): + self._array[0] = value + + @property + def _count(self): + return self._array[1] + + @_count.setter + def _count(self, value): + self._array[1] = value diff --git a/evalkit_cambrian/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4e738cb692541ea7aa3d6c8518dd74e2b74a842 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b158d2aa89e34fa95e53e17d29794fb111f70a3d9b32e894074df6a257c5dd5c +size 169622 diff --git a/evalkit_llava/lib/python3.10/lib-dynload/_decimal.cpython-310-x86_64-linux-gnu.so b/evalkit_llava/lib/python3.10/lib-dynload/_decimal.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..dc98ac98baeba7fd523681fb5ca4469664fab886 --- /dev/null +++ b/evalkit_llava/lib/python3.10/lib-dynload/_decimal.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc76e957d5709b4ea4778b19d26be671770cd04d9a6ce7cf94a5b43c94285a26 +size 1420752
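Reviewer note: the three multiprocessing modules above are vendored verbatim from CPython 3.10, so a short smoke test can confirm that the evalkit interpreter resolves them as its stdlib multiprocessing package. The following is a minimal sketch, assuming the vendored tree is the active stdlib for the evalkit Python; the helper name `square` is illustrative and not part of this diff.

    import multiprocessing as mp

    def square(x):
        return x * x

    if __name__ == '__main__':
        ctx = mp.get_context('spawn')

        # pool.py: ApplyResult via apply_async(), IMapIterator via imap()
        with ctx.Pool(2) as pool:
            res = pool.apply_async(square, (7,))
            assert res.get(timeout=30) == 49
            assert list(pool.imap(square, range(5))) == [0, 1, 4, 9, 16]

        # sharedctypes.py: Synchronized / SynchronizedArray wrappers
        counter = ctx.Value('i', 0)          # Synchronized around a c_int
        arr = ctx.Array('d', [0.0, 1.0])     # SynchronizedArray of c_double
        with counter.get_lock():
            counter.value += 1
        assert counter.value == 1 and arr[1] == 1.0

        # synchronize.py: Event, built on a Condition plus a Semaphore
        ev = ctx.Event()
        ev.set()
        assert ev.wait(timeout=5)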